atomicshop-2.9.0-py3-none-any.whl → atomicshop-2.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of atomicshop might be problematic.

atomicshop/__init__.py CHANGED
@@ -1,4 +1,4 @@
  """Atomic Basic functions and classes to make developer life easier"""
 
  __author__ = "Den Kras"
- __version__ = '2.9.0'
+ __version__ = '2.9.1'
atomicshop/system_resource_monitor.py CHANGED
@@ -6,7 +6,8 @@ from . import system_resources
 
 
  def run_check_system_resources(
- interval, get_cpu, get_memory, get_disk_io, get_disk_used_percent, shared_results, queue=None):
+ interval, get_cpu, get_memory, get_disk_io_bytes, get_disk_files_count, get_disk_used_percent,
+ calculate_maximum_changed_disk_io, maximum_disk_io, shared_results, queue=None):
  """
  Continuously update the system resources in the shared results dictionary.
  This function runs in a separate process.
@@ -16,8 +17,20 @@ def run_check_system_resources(
  # Get the results of the system resources check function and store them in temporary results dictionary.
  results = system_resources.check_system_resources(
  interval=interval, get_cpu=get_cpu, get_memory=get_memory,
- get_disk_io=get_disk_io,
+ get_disk_io_bytes=get_disk_io_bytes, get_disk_files_count=get_disk_files_count,
  get_disk_used_percent=get_disk_used_percent)
+
+ if calculate_maximum_changed_disk_io:
+ if results['disk_io_read'] > maximum_disk_io['read_bytes_per_sec']:
+ maximum_disk_io['read_bytes_per_sec'] = results['disk_io_read']
+ if results['disk_io_write'] > maximum_disk_io['write_bytes_per_sec']:
+ maximum_disk_io['write_bytes_per_sec'] = results['disk_io_write']
+ if results['disk_files_count_read'] > maximum_disk_io['read_files_count_per_sec']:
+ maximum_disk_io['read_files_count_per_sec'] = results['disk_files_count_read']
+ if results['disk_files_count_write'] > maximum_disk_io['write_files_count_per_sec']:
+ maximum_disk_io['write_files_count_per_sec'] = results['disk_files_count_write']
+ results['maximum_disk_io'] = maximum_disk_io
+
  # Update the shared results dictionary with the temporary results dictionary.
  # This is done in separate steps to avoid overwriting the special 'multiprocessing.Manager.dict' object.
  # So we update the shared results dictionary with the temporary results dictionary.
@@ -36,8 +49,10 @@ class SystemResourceMonitor:
  interval: float = 1,
  get_cpu: bool = True,
  get_memory: bool = True,
- get_disk_io: bool = True,
+ get_disk_io_bytes: bool = True,
+ get_disk_files_count: bool = True,
  get_disk_used_percent: bool = True,
+ calculate_maximum_changed_disk_io: bool = False,
  use_queue: bool = False
  ):
  """
@@ -46,8 +61,11 @@ class SystemResourceMonitor:
  Default is 1 second.
  :param get_cpu: bool, get the CPU usage.
  :param get_memory: bool, get the memory usage.
- :param get_disk_io: bool, get the disk I/O utilization.
+ :param get_disk_io_bytes: bool, get the disk I/O utilization of bytes.
+ :param get_disk_files_count: bool, get the disk files count in the interval.
  :param get_disk_used_percent: bool, get the disk used percentage.
+ :param calculate_maximum_changed_disk_io: bool, calculate the maximum changed disk I/O. This includes the
+ maximum changed disk I/O read and write in bytes/s and the maximum changed disk files count.
  :param use_queue: bool, use queue to store results.
  If you need to get the queue, you can access it through the 'queue' attribute:
  SystemResourceMonitor.queue
@@ -84,15 +102,23 @@ class SystemResourceMonitor:
  print(results)
  """
  # Store parameters as instance attributes
- self.interval = interval
- self.get_cpu = get_cpu
- self.get_memory = get_memory
- self.get_disk_io = get_disk_io
- self.get_disk_used_percent = get_disk_used_percent
+ self.interval: float = interval
+ self.get_cpu: bool = get_cpu
+ self.get_memory: bool = get_memory
+ self.get_disk_io_bytes: bool = get_disk_io_bytes
+ self.get_disk_files_count: bool = get_disk_files_count
+ self.get_disk_used_percent: bool = get_disk_used_percent
+ self.calculate_maximum_changed_disk_io: bool = calculate_maximum_changed_disk_io
 
  self.manager = multiprocessing.Manager()
  self.shared_results = self.manager.dict()
  self.process = None
+ self.maximum_disk_io: dict = {
+ 'read_bytes_per_sec': 0,
+ 'write_bytes_per_sec': 0,
+ 'read_files_count_per_sec': 0,
+ 'write_files_count_per_sec': 0
+ }
 
  if use_queue:
  self.queue = multiprocessing.Queue()
@@ -110,8 +136,9 @@ class SystemResourceMonitor:
 
  if self.process is None or not self.process.is_alive():
  self.process = multiprocessing.Process(target=run_check_system_resources, args=(
- self.interval, self.get_cpu, self.get_memory, self.get_disk_io,
- self.get_disk_used_percent, self.shared_results))
+ self.interval, self.get_cpu, self.get_memory, self.get_disk_io_bytes, self.get_disk_files_count,
+ self.get_disk_used_percent, self.calculate_maximum_changed_disk_io, self.maximum_disk_io,
+ self.shared_results, self.queue))
  self.process.start()
  else:
  print_api("Monitoring process is already running.", color='yellow', **print_kwargs)
@@ -137,12 +164,15 @@ class SystemResourceMonitor:
  SYSTEM_RESOURCES_MONITOR: Union[SystemResourceMonitor, None] = None
 
 
- def start_system_resources_monitoring(
+ def start_monitoring(
  interval: float = 1,
  get_cpu: bool = True,
  get_memory: bool = True,
- get_disk_io: bool = True,
+ get_disk_io_bytes: bool = True,
+ get_disk_files_count: bool = True,
  get_disk_used_percent: bool = True,
+ calculate_maximum_changed_disk_io: bool = False,
+ use_queue: bool = False,
  print_kwargs: dict = None
  ):
  """
@@ -150,10 +180,22 @@ def start_system_resources_monitoring(
  :param interval: float, interval in seconds.
  :param get_cpu: bool, get CPU usage.
  :param get_memory: bool, get memory usage.
- :param get_disk_io: bool, get TOTAL disk I/O utilization in bytes/s.
+ :param get_disk_io_bytes: bool, get TOTAL disk I/O utilization in bytes/s.
+ :param get_disk_files_count: bool, get TOTAL disk files count.
  :param get_disk_used_percent: bool, get TOTAL disk used percentage.
+ :param calculate_maximum_changed_disk_io: bool, calculate the maximum changed disk I/O. This includes the
+ maximum changed disk I/O read and write in bytes/s and the maximum changed disk files count.
+ :param use_queue: bool, use queue to store results.
+ Usage Example:
+ system_resources.start_monitoring(use_queue=True)
+ queue = system_resources.get_monitoring_queue()
+ while True:
+ if not queue.empty():
+ results = queue.get()
+ print(results)
+
  :param print_kwargs: dict, print kwargs.
- :return: SystemResourceMonitor
+ :return:
  """
 
  # if print_kwargs is None:
@@ -166,15 +208,18 @@ def start_system_resources_monitoring(
  interval=interval,
  get_cpu=get_cpu,
  get_memory=get_memory,
- get_disk_io=get_disk_io,
- get_disk_used_percent=get_disk_used_percent
+ get_disk_io_bytes=get_disk_io_bytes,
+ get_disk_files_count=get_disk_files_count,
+ get_disk_used_percent=get_disk_used_percent,
+ calculate_maximum_changed_disk_io=calculate_maximum_changed_disk_io,
+ use_queue=use_queue
  )
  SYSTEM_RESOURCES_MONITOR.start()
  else:
  print_api("System resources monitoring is already running.", color='yellow', **(print_kwargs or {}))
 
 
- def stop_system_resources_monitoring():
+ def stop_monitoring():
  """
  Stop monitoring system resources.
  :return: None
@@ -184,7 +229,7 @@ def stop_system_resources_monitoring():
  SYSTEM_RESOURCES_MONITOR.stop()
 
 
- def get_system_resources_monitoring_instance() -> SystemResourceMonitor:
+ def get_monitoring_instance() -> SystemResourceMonitor:
  """
  Get the system resources monitoring instance.
  :return: SystemResourceMonitor
@@ -193,16 +238,16 @@ def get_system_resources_monitoring_instance() -> SystemResourceMonitor:
  return SYSTEM_RESOURCES_MONITOR
 
 
- def get_system_resources_monitoring_result():
+ def get_result():
  """
  Get system resources monitoring result.
 
  Usage Example:
- system_resources.start_system_resources_monitoring()
+ system_resources.start_monitoring()
 
  while True:
  time.sleep(1)
- result = system_resources.get_system_resources_monitoring_result()
+ result = system_resources.get_result()
 
  if result:
  print(
@@ -217,4 +262,37 @@ def get_system_resources_monitoring_result():
  if SYSTEM_RESOURCES_MONITOR is not None:
  return SYSTEM_RESOURCES_MONITOR.get_latest_results()
  else:
- return {}
+ raise RuntimeError("System resources monitoring is not running.")
+
+
+ def get_result_by_queue():
+ """
+ Get system resources monitoring result by queue.
+
+ Usage Example:
+ system_resources.start_system_resources_monitoring()
+
+ while True:
+ result = system_resources.get_result_by_queue()
+ print(result)
+
+ :return: dict
+ """
+ global SYSTEM_RESOURCES_MONITOR
+ if SYSTEM_RESOURCES_MONITOR is not None:
+ if not SYSTEM_RESOURCES_MONITOR.queue.empty():
+ return SYSTEM_RESOURCES_MONITOR.queue.get()
+ else:
+ raise RuntimeError("System resources monitoring is not running.")
+
+
+ def get_monitoring_queue() -> Union[multiprocessing.Queue, None]:
+ """
+ Get the monitoring queue.
+ :return: multiprocessing.Queue
+ """
+ global SYSTEM_RESOURCES_MONITOR
+ if SYSTEM_RESOURCES_MONITOR is not None:
+ return SYSTEM_RESOURCES_MONITOR.queue
+ else:
+ raise RuntimeError("System resources monitoring is not running.")
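
Taken together, the renamed module-level helpers in system_resource_monitor can be exercised roughly as in the following sketch. This is an illustration based on the docstrings above, not code shipped in the package:

import time

from atomicshop import system_resource_monitor

# Start the background monitoring process and also track the maximum observed
# disk I/O rates (bytes/s and files/s) between polls.
system_resource_monitor.start_monitoring(
    interval=1,
    get_disk_io_bytes=True,
    get_disk_files_count=True,
    calculate_maximum_changed_disk_io=True)

try:
    for _ in range(5):
        time.sleep(1)
        result = system_resource_monitor.get_result()
        if result:
            print(result)
finally:
    # Unlike 2.9.0, get_result() now raises RuntimeError when monitoring was
    # never started instead of returning an empty dict.
    system_resource_monitor.stop_monitoring()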
atomicshop/system_resources.py CHANGED
@@ -1,18 +1,20 @@
  import os
- import shutil
- import tempfile
  import time
+ import tempfile
+ import shutil
  import threading
 
  from .print_api import print_api
  from .wrappers.psutilw import cpus, memories, disks
+ from . import system_resource_monitor
 
 
  def check_system_resources(
  interval: float = 1,
  get_cpu: bool = True,
  get_memory: bool = True,
- get_disk_io: bool = True,
+ get_disk_io_bytes: bool = True,
+ get_disk_files_count: bool = False,
  get_disk_used_percent: bool = True
  ):
  """
@@ -20,7 +22,8 @@ def check_system_resources(
  :param interval: float, interval in seconds.
  :param get_cpu: bool, get CPU usage.
  :param get_memory: bool, get memory usage.
- :param get_disk_io: bool, get TOTAL disk I/O utilization in bytes/s.
+ :param get_disk_io_bytes: bool, get TOTAL disk I/O utilization in bytes/s.
+ :param get_disk_files_count: bool, get TOTAL disk files count.
  :param get_disk_used_percent: bool, get TOTAL disk used percentage.
  :return:
  """
@@ -31,6 +34,8 @@ def check_system_resources(
  'memory_usage': None,
  'disk_io_write': None,
  'disk_io_read': None,
+ 'disk_files_count_read': None,
+ 'disk_files_count_write': None,
  'disk_used_percent': None
  }
 
@@ -40,21 +45,30 @@ def check_system_resources(
  def set_memory_usage():
  result['memory_usage'] = memories.get_memory_usage()
 
- def set_disk_io_utilization():
- aggregated_disk_io_utilization = disks.get_disk_io_utilization(interval=interval)['aggregated']
+ def set_disk_io_bytes_change():
+ aggregated_disk_io_utilization = (
+ disks.get_disk_io(interval=interval, aggregated=True, io_change_bytes=True))['aggregated']
  result['disk_io_read'] = aggregated_disk_io_utilization['read_change_per_sec']
  result['disk_io_write'] = aggregated_disk_io_utilization['write_change_per_sec']
 
+ def set_disk_files_count():
+ aggregated_disk_files_count = (
+ disks.get_disk_io(interval=interval, aggregated=True, io_file_count=True))['aggregated']
+ result['disk_files_count_read'] = aggregated_disk_files_count['read_file_count_per_sec']
+ result['disk_files_count_write'] = aggregated_disk_files_count['write_file_count_per_sec']
+
  def set_disk_used_percent():
- result['disk_used_percent'] = disks.get_disk_usage()['total'].percent
+ result['disk_used_percent'] = disks.get_disk_usage()['aggregated'].percent
 
  # Create threads for each system resource check.
  if get_cpu:
  threads.append(threading.Thread(target=set_cpu_usage))
  if get_memory:
  threads.append(threading.Thread(target=set_memory_usage))
- if get_disk_io:
- threads.append(threading.Thread(target=set_disk_io_utilization))
+ if get_disk_io_bytes:
+ threads.append(threading.Thread(target=set_disk_io_bytes_change))
+ if get_disk_files_count:
+ threads.append(threading.Thread(target=set_disk_files_count))
  if get_disk_used_percent:
  threads.append(threading.Thread(target=set_disk_used_percent))
 
@@ -85,17 +99,44 @@ def wait_for_resource_availability(cpu_percent_max: int = 80, memory_percent_max
  time.sleep(wait_time) # Wait for 'wait_time' seconds before checking again
 
 
- def test_disk_speed(file_size_bytes, file_count, remove_file_after_each_copy: bool = True, target_directory=None):
+ def test_disk_speed_with_monitoring(
+ file_settings: list[dict],
+ remove_file_after_each_copy: bool = False,
+ target_directory=None,
+ read_full_file: bool = False,
+ monitoring: bool = True,
+ print_kwargs: dict = None
+ ):
  """
- Tests disk write and read speeds by generating files in a 'source' directory, copying them to a 'dest' directory
- within the target directory, and optionally removing them after each copy. Now also returns the total execution time.
+ Generates files and performs write and read operations in the specified target directory,
+ while monitoring disk I/O speeds in a separate thread. Returns the maximum read and write rates,
+ and the total operation time.
+
+ :param file_settings: list of dicts, of file settings. Each dict will contain:
+ 'size': int, size of each file in bytes.
+ 'count': int, number of files to generate and copy.
 
- :param file_size_bytes: Size of each file in bytes.
- :param file_count: Number of files to generate and copy.
+ Example:
+ file_setting = [
+ {'size': 100000000, 'count': 100},
+ {'size': 500000000, 'count': 50}
+ ]
+
+ This will generate 100 files of 100 MB each, and 50 files of 500 MB each.
  :param remove_file_after_each_copy: Whether to remove the file after copying to target directory.
  :param target_directory: Directory where files will be copied. Uses a temporary directory if None.
- :return: Tuple of peak write speed, peak read speed, and total execution time in seconds.
+ :param read_full_file: Whether to read the full file after copying, or read in chunks.
+ :param monitoring: Whether to skip monitoring disk I/O speeds.
+ :return: A tuple containing the total operation time in seconds and maximum_io_changes.
  """
+
+ max_io_changes: dict = {}
+
+ if monitoring:
+ system_resource_monitor.start_monitoring(
+ interval=1, get_cpu=False, get_memory=False, get_disk_io_bytes=True, get_disk_used_percent=False,
+ get_disk_files_count=True, calculate_maximum_changed_disk_io=True, use_queue=True)
+
  if target_directory is None:
  target_directory = tempfile.mkdtemp()
  else:
@@ -106,48 +147,44 @@ def test_disk_speed(file_size_bytes, file_count, remove_file_after_each_copy: bo
  os.makedirs(source_directory, exist_ok=True)
  os.makedirs(dest_directory, exist_ok=True)
 
- write_speeds = []
- read_speeds = []
- created_files = [] # Keep track of all created files for cleanup
-
  overall_start_time = time.time() # Start timing the entire operation
 
- for i in range(file_count):
- # Generate file in source directory
- src_file_path = os.path.join(source_directory, f"tempfile_{i}")
- with open(src_file_path, "wb") as file:
- file.write(os.urandom(file_size_bytes))
- created_files.append(src_file_path) # Add the file for cleanup
-
- # Measure write speed
- start_time = time.time()
- shutil.copy(src_file_path, dest_directory)
- end_time = time.time()
- write_speeds.append(file_size_bytes / (end_time - start_time))
-
- target_file_path = os.path.join(dest_directory, os.path.basename(src_file_path))
-
- # Measure read speed
- with open(target_file_path, "rb") as file:
- start_time = time.time()
- while file.read(1024 * 1024): # Read in chunks of 1 MB
- pass
- end_time = time.time()
- read_speeds.append(file_size_bytes / (end_time - start_time))
-
- if remove_file_after_each_copy:
- os.remove(target_file_path)
- os.remove(src_file_path)
+ for file_setting in file_settings:
+ for i in range(file_setting['count']):
+ # Generate file in source directory
+ src_file_path = os.path.join(source_directory, f"tempfile_{i}")
+ with open(src_file_path, "wb") as file:
+ file.write(os.urandom(file_setting['size']))
 
- overall_end_time = time.time()
+ # Measure write speed.
+ shutil.copy(src_file_path, dest_directory)
+
+ target_file_path = os.path.join(dest_directory, os.path.basename(src_file_path))
+ print_api(f"Copied: {target_file_path}", **(print_kwargs or {}))
 
- # Calculate peak speeds in Bytes/s and total execution time
- peak_write_speed = max(write_speeds)
- peak_read_speed = max(read_speeds)
+ # Measure read speed.
+ with open(target_file_path, "rb") as file:
+ if read_full_file:
+ file.read()
+ else:
+ while file.read(1024 * 1024): # Read in chunks of 1 MB
+ pass
+
+ if remove_file_after_each_copy:
+ os.remove(target_file_path)
+ os.remove(src_file_path)
+
+ overall_end_time = time.time()
  total_execution_time = overall_end_time - overall_start_time
+ print_api(f"Total execution time: {total_execution_time}", **(print_kwargs or {}))
 
  # Cleanup. Remove all created files and directories.
  shutil.rmtree(source_directory)
  shutil.rmtree(dest_directory)
 
- return peak_write_speed, peak_read_speed, total_execution_time
+ if monitoring:
+ # Stop the I/O monitoring.
+ max_io_changes = system_resource_monitor.get_result()['maximum_disk_io']
+ system_resource_monitor.stop_monitoring()
+
+ return total_execution_time, max_io_changes
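
For orientation, a minimal sketch of calling the reworked disk-speed helper, with illustrative file sizes and counts (not code from the package):

from atomicshop import system_resources

# Two illustrative batches: 10 files of 10 MB and 5 files of 50 MB.
file_settings = [
    {'size': 10 * 1024 * 1024, 'count': 10},
    {'size': 50 * 1024 * 1024, 'count': 5},
]

# Returns the total run time plus the maximum disk I/O changes observed by the
# background monitor the function starts when monitoring=True.
total_time, max_io_changes = system_resources.test_disk_speed_with_monitoring(
    file_settings=file_settings,
    remove_file_after_each_copy=True,
    read_full_file=False,
    monitoring=True)
print(total_time, max_io_changes)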
atomicshop/wrappers/psutilw/disks.py CHANGED
@@ -1,19 +1,16 @@
- import os
  import time
- import tempfile
- import shutil
- import threading
 
  import psutil
 
- from ... import system_resources
 
-
- def get_disk_io_utilization(
+ def get_disk_io(
  interval: float = 1,
  disk_list: list = None,
  aggregated: bool = True,
- separated: bool = False
+ separated: bool = False,
+ io_change_bytes: bool = False,
+ io_file_count: bool = False,
+ io_busy_time: bool = False,
  ) -> dict:
  """
  Get disk utilization based on disk I/O changes, allowing for both aggregated and separated values.
@@ -25,122 +22,142 @@ def get_disk_io_utilization(
  :param disk_list: List of disks to measure. If None, measure all disks. Affects only when separated is True.
  :param aggregated: Boolean indicating whether to return aggregated utilization.
  :param separated: Boolean indicating whether to return separate utilizations for each disk.
- :return: Disk utilization data.
- """
-
- io_start_aggregated = None
- io_end_aggregated = None
-
- io_start_separated = psutil.disk_io_counters(perdisk=True)
- if aggregated:
- io_start_aggregated = psutil.disk_io_counters(perdisk=False)
- time.sleep(interval)
- io_end_separated = psutil.disk_io_counters(perdisk=True)
- if aggregated:
- io_end_aggregated = psutil.disk_io_counters(perdisk=False)
-
- io_change = {}
- if separated:
- for disk in io_start_separated.keys():
- if disk_list is None or disk in disk_list:
- read_change = io_end_separated[disk].read_bytes - io_start_separated[disk].read_bytes
- write_change = io_end_separated[disk].write_bytes - io_start_separated[disk].write_bytes
- read_change_per_sec = read_change / interval
- write_change_per_sec = write_change / interval
- io_change[disk] = {
- 'read_change_bytes': read_change,
- 'write_change_bytes': write_change,
- 'read_change_per_sec': read_change_per_sec,
- 'write_change_per_sec': write_change_per_sec,
- }
-
- if aggregated:
- if not io_start_aggregated or not io_end_aggregated:
- raise ValueError('Aggregated disk I/O counters are not available.')
- total_read_change = io_end_aggregated.read_bytes - io_start_aggregated.read_bytes
- total_write_change = io_end_aggregated.write_bytes - io_start_aggregated.write_bytes
- total_read_change_per_sec = total_read_change / interval
- total_write_change_per_sec = total_write_change / interval
- io_change['aggregated'] = {
- 'read_change_bytes': total_read_change,
- 'write_change_bytes': total_write_change,
- 'read_change_per_sec': total_read_change_per_sec,
- 'write_change_per_sec': total_write_change_per_sec,
+ :param io_change_bytes: Boolean indicating whether to return I/O change in bytes. Returned dictionary:
+ {
+ 'read_change_bytes': int,
+ 'write_change_bytes': int,
+ 'read_change_per_sec': float,
+ 'write_change_per_sec': float
  }
-
- return io_change
-
-
- def _get_disk_busy_time(
- interval: float = 1,
- disk_list: list = None,
- aggregated: bool = True,
- separated: bool = False
- ) -> dict:
+ :param io_file_count: Boolean indicating whether to return I/O file count. Returned dictionary:
+ {
+ 'read_file_count': int,
+ 'write_file_count': int,
+ 'read_file_count_per_sec': float,
+ 'write_file_count_per_sec': float
+ }
+ :param io_busy_time: Boolean indicating whether to return I/O busy time.
+ !!! For some reason on Windows it gets the count of files read or written and not the time in ms.
+ Returned dictionary:
+ {
+ 'read_time_ms': int,
+ 'write_time_ms': int,
+ 'read_time_per_sec': float,
+ 'write_time_per_sec': float,
+ 'busy_time': int,
+ 'busy_time_per_sec': float,
+ 'busy_time_percent': float
+ }
+ :return: Disk utilization data.
  """
- !!! For some reason on Windows it gets the count of files read or written and not the time in ms.
 
- Get disk busy time, allowing for both aggregated and separated values.
- Windows: because 'psutil.disk_io_counters' before using this function, you may need to execute the following
- command in the command prompt:
- diskperf -y
+ if not aggregated and not separated:
+ raise ValueError('At least one of aggregated or separated must be True.')
 
- :param interval: Sampling interval in seconds to measure I/O changes.
- :param disk_list: List of disks to measure. If None, measure all disks. Affects only when separated is True.
- :param aggregated: Boolean indicating whether to return aggregated utilization.
- :param separated: Boolean indicating whether to return separate utilizations for each disk.
- :return: Disk utilization data.
- """
+ if not io_change_bytes and not io_file_count and not io_busy_time:
+ raise ValueError('At least one of io_change_bytes, io_file_count, or io_busy_time must be True.')
 
  io_start_aggregated = None
  io_end_aggregated = None
 
- io_start_separated = psutil.disk_io_counters(perdisk=True)
+ if separated:
+ io_start_separated = psutil.disk_io_counters(perdisk=True)
  if aggregated:
  io_start_aggregated = psutil.disk_io_counters(perdisk=False)
+
  time.sleep(interval)
- io_end_separated = psutil.disk_io_counters(perdisk=True)
+
+ if separated:
+ io_end_separated = psutil.disk_io_counters(perdisk=True)
  if aggregated:
  io_end_aggregated = psutil.disk_io_counters(perdisk=False)
 
- busy_time = {}
+ io_change = {}
  if separated:
  for disk in io_start_separated.keys():
  if disk_list is None or disk in disk_list:
- read_time = io_end_separated[disk].read_time - io_start_separated[disk].read_time
- write_time = io_end_separated[disk].write_time - io_start_separated[disk].write_time
- read_time_per_sec = read_time / interval
- write_time_per_sec = write_time / interval
- busy_time[disk] = {
- 'read_time_ms': read_time,
- 'write_time_ms': write_time,
- 'read_time_per_sec': read_time_per_sec,
- 'write_time_per_sec': write_time_per_sec,
- 'busy_time': read_time + write_time,
- 'busy_time_per_sec': read_time_per_sec + write_time_per_sec,
- 'busy_time_percent': (read_time + write_time) / interval
- }
+ io_change[disk] = {}
+ if io_change_bytes:
+ read_change = io_end_separated[disk].read_bytes - io_start_separated[disk].read_bytes
+ write_change = io_end_separated[disk].write_bytes - io_start_separated[disk].write_bytes
+ read_change_per_sec = read_change / interval
+ write_change_per_sec = write_change / interval
+ io_change[disk].update({
+ 'read_change_bytes': read_change,
+ 'write_change_bytes': write_change,
+ 'read_change_per_sec': read_change_per_sec,
+ 'write_change_per_sec': write_change_per_sec,
+ })
+ if io_file_count:
+ read_count = io_end_separated[disk].read_count - io_start_separated[disk].read_count
+ write_count = io_end_separated[disk].write_count - io_start_separated[disk].write_count
+ read_count_per_sec = read_count / interval
+ write_count_per_sec = write_count / interval
+ io_change[disk].update({
+ 'read_file_count': read_count,
+ 'write_file_count': write_count,
+ 'read_file_count_per_sec': read_count_per_sec,
+ 'write_file_count_per_sec': write_count_per_sec,
+ })
+ if io_busy_time:
+ read_time = io_end_separated[disk].read_time - io_start_separated[disk].read_time
+ write_time = io_end_separated[disk].write_time - io_start_separated[disk].write_time
+ read_time_per_sec = read_time / interval
+ write_time_per_sec = write_time / interval
+ io_change[disk].update({
+ 'read_time_ms': read_time,
+ 'write_time_ms': write_time,
+ 'read_time_per_sec': read_time_per_sec,
+ 'write_time_per_sec': write_time_per_sec,
+ 'busy_time': read_time + write_time,
+ 'busy_time_per_sec': read_time_per_sec + write_time_per_sec,
+ 'busy_time_percent': (read_time + write_time) / interval
+ })
 
  if aggregated:
  if not io_start_aggregated or not io_end_aggregated:
  raise ValueError('Aggregated disk I/O counters are not available.')
- # total_read_time = io_end_aggregated.read_time - io_start_aggregated.read_time
- # total_write_time = io_end_aggregated.write_time - io_start_aggregated.write_time
- total_read_time = io_end_aggregated.read_time
- total_write_time = io_end_aggregated.write_time
- total_read_time_per_sec = total_read_time / interval
- total_write_time_per_sec = total_write_time / interval
- busy_time['aggregated'] = {
- 'read_time_ms': total_read_time,
- 'write_time_ms': total_write_time,
- 'read_time_per_sec': total_read_time_per_sec,
- 'write_time_per_sec': total_write_time_per_sec,
- 'busy_time': total_read_time + total_write_time,
- 'busy_time_per_sec': total_read_time_per_sec + total_write_time_per_sec,
- 'busy_time_percent': (total_read_time + total_write_time) / interval
- }
 
- return busy_time
+ io_change['aggregated'] = {}
+
+ if io_change_bytes:
+ total_read_change = io_end_aggregated.read_bytes - io_start_aggregated.read_bytes
+ total_write_change = io_end_aggregated.write_bytes - io_start_aggregated.write_bytes
+ total_read_change_per_sec = total_read_change / interval
+ total_write_change_per_sec = total_write_change / interval
+ io_change['aggregated'] = {
+ 'read_change_bytes': total_read_change,
+ 'write_change_bytes': total_write_change,
+ 'read_change_per_sec': total_read_change_per_sec,
+ 'write_change_per_sec': total_write_change_per_sec,
+ }
+ if io_file_count:
+ total_read_count = io_end_aggregated.read_count - io_start_aggregated.read_count
+ total_write_count = io_end_aggregated.write_count - io_start_aggregated.write_count
+ total_read_count_per_sec = total_read_count / interval
+ total_write_count_per_sec = total_write_count / interval
+ io_change['aggregated'].update({
+ 'read_file_count': total_read_count,
+ 'write_file_count': total_write_count,
+ 'read_file_count_per_sec': total_read_count_per_sec,
+ 'write_file_count_per_sec': total_write_count_per_sec,
+ })
+ if io_busy_time:
+ total_read_time = io_end_aggregated.read_time - io_start_aggregated.read_time
+ total_write_time = io_end_aggregated.write_time - io_start_aggregated.write_time
+ total_read_time_per_sec = total_read_time / interval
+ total_write_time_per_sec = total_write_time / interval
+ io_change['aggregated'].update({
+ 'read_time_ms': total_read_time,
+ 'write_time_ms': total_write_time,
+ 'read_time_per_sec': total_read_time_per_sec,
+ 'write_time_per_sec': total_write_time_per_sec,
+ 'busy_time': total_read_time + total_write_time,
+ 'busy_time_per_sec': total_read_time_per_sec + total_write_time_per_sec,
+ 'busy_time_percent': (total_read_time + total_write_time) / interval
+ })
+
+ return io_change
 
 
  def get_disk_usage(disk_list: list = None) -> dict:
@@ -160,89 +177,6 @@ def get_disk_usage(disk_list: list = None) -> dict:
  disk_usage[disk.device] = str(e)
 
  # Get total disk usage.
- disk_usage['total'] = psutil.disk_usage('/')
+ disk_usage['aggregated'] = psutil.disk_usage('/')
 
  return disk_usage
-
-
- def test_disk_speed_with_monitoring(
- file_size_bytes: int, file_count: int, remove_file_after_each_copy: bool = False, target_directory=None):
- """
- Generates files and performs write and read operations in the specified target directory,
- while monitoring disk I/O speeds in a separate thread. Returns the maximum read and write rates,
- and the total operation time.
-
- :param file_size_bytes: Size of each file in bytes.
- :param file_count: Number of files to generate and copy.
- :param remove_file_after_each_copy: Whether to remove the file after copying to target directory.
- :param target_directory: Directory where files will be copied. Uses a temporary directory if None.
- :return: A tuple containing the maximum write speed, maximum read speed, and total operation time in seconds.
- """
- io_speeds = {'write_speed': [], 'read_speed': []}
- stop_thread = False
-
- def io_monitor():
- """
- Monitors disk I/O speed by checking psutil disk_io_counters every second and calculates the speed.
- This function is intended to run in a separate thread.
- """
-
- while not stop_thread:
- result = system_resources.check_system_resources(
- interval=1, get_disk_io=True, get_cpu=False, get_memory=False, get_disk_used_percent=False)
-
- io_speeds['write_speed'].append(result['disk_io_write'])
- io_speeds['read_speed'].append(result['disk_io_read'])
-
- # Start the I/O monitoring thread
- monitor_thread = threading.Thread(target=io_monitor)
- monitor_thread.start()
-
- if target_directory is None:
- target_directory = tempfile.mkdtemp()
- else:
- os.makedirs(target_directory, exist_ok=True)
-
- source_directory = os.path.join(target_directory, 'source')
- dest_directory = os.path.join(target_directory, 'dest')
- os.makedirs(source_directory, exist_ok=True)
- os.makedirs(dest_directory, exist_ok=True)
-
- write_speeds = []
- read_speeds = []
- created_files = [] # Keep track of all created files for cleanup
-
- overall_start_time = time.time() # Start timing the entire operation
-
- for i in range(file_count):
- # Generate file in source directory
- src_file_path = os.path.join(source_directory, f"tempfile_{i}")
- with open(src_file_path, "wb") as file:
- file.write(os.urandom(file_size_bytes))
- created_files.append(src_file_path) # Add the file for cleanup
-
- shutil.copy(src_file_path, dest_directory)
- target_file_path = os.path.join(dest_directory, os.path.basename(src_file_path))
-
- # Measure read speed
- with open(target_file_path, "rb") as file:
- start_time = time.time()
- while file.read(1024 * 1024): # Read in chunks of 1 MB
- pass
- end_time = time.time()
- read_speeds.append(file_size_bytes / (end_time - start_time))
-
- if remove_file_after_each_copy:
- os.remove(target_file_path)
- os.remove(src_file_path)
-
- overall_end_time = time.time()
-
- stop_thread = True
- monitor_thread.join()
-
- # Determine maximum speeds
- max_write_speed = max(io_speeds['write_speed']) if io_speeds['write_speed'] else 0
- max_read_speed = max(io_speeds['read_speed']) if io_speeds['read_speed'] else 0
-
- return max_write_speed, max_read_speed, total_operation_time
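
For reference, a minimal call sketch for the new get_disk_io signature, based on the parameters and returned dictionaries documented above (illustrative only, not code from the package):

from atomicshop.wrappers.psutilw import disks

# Sample aggregated disk I/O over one second, requesting both byte changes and
# file counts. At least one of aggregated/separated and at least one io_* flag
# must be True, otherwise the function raises ValueError.
io = disks.get_disk_io(
    interval=1, aggregated=True, io_change_bytes=True, io_file_count=True)
print(io['aggregated']['read_change_per_sec'],
      io['aggregated']['write_file_count_per_sec'])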
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: atomicshop
- Version: 2.9.0
+ Version: 2.9.1
  Summary: Atomic functions and classes to make developer life easier
  Author: Denis Kras
  License: MIT License
@@ -1,4 +1,4 @@
- atomicshop/__init__.py,sha256=IbBAx01GvuncS_hxpSRIAExpJW75V-EhMbbVnwzyzVo,122
+ atomicshop/__init__.py,sha256=6KyCPoNOFCE-qMHSsE5D6KYM2ED2PeS6vsl4SuxrWGU,122
  atomicshop/_basics_temp.py,sha256=6cu2dd6r2dLrd1BRNcVDKTHlsHs_26Gpw8QS6v32lQ0,3699
  atomicshop/_patch_import.py,sha256=ENp55sKVJ0e6-4lBvZnpz9PQCt3Otbur7F6aXDlyje4,6334
  atomicshop/appointment_management.py,sha256=N3wVGJgrqJfsj_lqiRfaL3FxMEe57by5Stzanh189mk,7263
@@ -36,8 +36,8 @@ atomicshop/sound.py,sha256=KSzWRF8dkpEVXmFidIv-Eftc3kET-hQzQOxZRE7rMto,24297
  atomicshop/speech_recognize.py,sha256=55-dIjgkpF93mvJnJuxSFuft5H5eRvGNlUj9BeIOZxk,5903
  atomicshop/ssh_remote.py,sha256=Sas3nrQv8ardxR51t59xZZsYm8nvUcA7tMSqEDViRLk,17155
  atomicshop/sys_functions.py,sha256=MTBxRve5bh58SPvhX3gMiGqHlSBuI_rdNN1NnnBBWqI,906
- atomicshop/system_resource_monitor.py,sha256=SF376RE952sYbZJxVqHt32Bek0zMFxBVDocbUfon6-E,7538
- atomicshop/system_resources.py,sha256=y5SjM87k3h4mffvBHn6P4DqNiMmLjtdgUb6gkZU_p0U,5882
+ atomicshop/system_resource_monitor.py,sha256=1LgMKCawatUiLAXkbm31vdmTu2cEE8Y1UhsESJWUslE,11289
+ atomicshop/system_resources.py,sha256=w4F1si9mJNS26UeHiFm68UwrfEb618QRNekb6Git_C8,7523
  atomicshop/tempfiles.py,sha256=uq1ve2WlWehZ3NOTXJnpBBMt6HyCdBufqedF0HyzA6k,2517
  atomicshop/timer.py,sha256=KxBBgVM8po6pUJDW8TgY1UXj0iiDmRmL5XDCq0VHAfU,1670
  atomicshop/urls.py,sha256=CQl1j1kjEVDlAuYJqYD9XxPF1SUSgrmG8PjlcXNEKsQ,597
@@ -205,7 +205,7 @@ atomicshop/wrappers/playwrightw/mouse.py,sha256=-2FZbQtjgH7tdXWld6ZPGqlKFUdf5in0
  atomicshop/wrappers/playwrightw/scenarios.py,sha256=Wz7aVYfG7K4fuSe_TUAc1jhFXVq5jYvZKbDtvqUiONc,5236
  atomicshop/wrappers/playwrightw/waits.py,sha256=308fdOu6YDqQ7K7xywj7R27sSmFanPBQqpZyBC-NFmo,7015
  atomicshop/wrappers/psutilw/cpus.py,sha256=w6LPBMINqS-T_X8vzdYkLS2Wzuve28Ydp_GafTCngrc,236
- atomicshop/wrappers/psutilw/disks.py,sha256=AfeQIi-ugZIzAd7dmkMwFku1rb6Yl0Uzt8dFYvJMRnI,10490
+ atomicshop/wrappers/psutilw/disks.py,sha256=7Ly6QlKI36lYw2AeUn8MlQsYMRTvDz-O9Kp-PRq_ym4,8463
  atomicshop/wrappers/psutilw/memories.py,sha256=wpdKEkQ9Wty_r7ZJKkfli7wIHMXdQOMlmDlzmc_0FWo,161
  atomicshop/wrappers/psutilw/psutilw.py,sha256=W9PSEZmrm_Ct_-6oKqAcbgbyF21CwcIbbHOkVqgMiow,20866
  atomicshop/wrappers/psycopgw/psycopgw.py,sha256=XJvVf0oAUjCHkrYfKeFuGCpfn0Oxj3u4SbKMKA1508E,7118
@@ -226,8 +226,8 @@ atomicshop/wrappers/socketw/socket_server_tester.py,sha256=AhpurHJmP2kgzHaUbq5ey
  atomicshop/wrappers/socketw/socket_wrapper.py,sha256=aXBwlEIJhFT0-c4i8iNlFx2It9VpCEpsv--5Oqcpxao,11624
  atomicshop/wrappers/socketw/ssl_base.py,sha256=k4V3gwkbq10MvOH4btU4onLX2GNOsSfUAdcHmL1rpVE,2274
  atomicshop/wrappers/socketw/statistics_csv.py,sha256=t3dtDEfN47CfYVi0CW6Kc2QHTEeZVyYhc57IYYh5nmA,826
- atomicshop-2.9.0.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
- atomicshop-2.9.0.dist-info/METADATA,sha256=6vLhMQYgyIHS0NE4S8s-m8cpEaK4gm-8chZ9LnUpZgk,10369
- atomicshop-2.9.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
- atomicshop-2.9.0.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
- atomicshop-2.9.0.dist-info/RECORD,,
+ atomicshop-2.9.1.dist-info/LICENSE.txt,sha256=lLU7EYycfYcK2NR_1gfnhnRC8b8ccOTElACYplgZN88,1094
+ atomicshop-2.9.1.dist-info/METADATA,sha256=1u71oL74BRp4FmrfREViR7QCTlEwf4m1MC870Dv2RSM,10369
+ atomicshop-2.9.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ atomicshop-2.9.1.dist-info/top_level.txt,sha256=EgKJB-7xcrAPeqTRF2laD_Np2gNGYkJkd4OyXqpJphA,11
+ atomicshop-2.9.1.dist-info/RECORD,,