cloud-files 4.27.0__py3-none-any.whl → 6.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,724 @@
1
+ from typing import Optional
2
+
3
+ import enum
4
+ import queue
5
+ import uuid
6
+ import time
7
+ import threading
8
+
9
+ import intervaltree
10
+ import numpy as np
11
+ import numpy.typing as npt
12
+
13
+ class IOEnum(enum.Enum):
14
+ RX = 1
15
+ TX = 2
16
+
17
+ class TransmissionMonitor:
18
+ """Monitors the current transmissing rate of a file set."""
19
+ def __init__(self, direction:IOEnum):
20
+ self._intervaltree = intervaltree.IntervalTree()
21
+ self._lock = threading.Lock()
22
+ self._total_bytes_landed = 0
23
+ self._in_flight = {}
24
+ self._errors = set()
25
+
26
+ # NOTE: _in_flight_bytes doesn't work for downloads b/c we are not
27
+ # requesting the size of the file up front to avoid perf impact.
28
+ # _in_flight_bytes isn't necessary unless we are modeling the contribution
29
+ # of CloudFiles to machine network usage to implement throttling.
30
+ self._in_flight_bytes = 0
31
+ self._direction = direction
32
+
33
+ @classmethod
34
+ def merge(klass, tms:list["TransmissionMonitor"]) -> "TransmissionMonitor":
35
+ if len(tms) == 0:
36
+ return TransmissionMonitor(IOEnum.TX)
37
+
38
+ tm = TransmissionMonitor(tms[0]._direction)
39
+
40
+ with tm._lock:
41
+ for other in tms:
42
+ with other._lock:
43
+ tm._intervaltree = tm._intervaltree.union(other._intervaltree)
+ tm._total_bytes_landed += other._total_bytes_landed
+ tm._errors |= other._errors
44
+
45
+ return tm
46
+
47
+ def start_io(self, num_bytes:int, start_time:Optional[float] = None) -> uuid.UUID:
48
+ flight_id = uuid.uuid1()
49
+ with self._lock:
50
+ if start_time is None:
51
+ start_time = time.monotonic()
52
+ self._in_flight[flight_id] = start_time
53
+ self._in_flight_bytes += num_bytes
54
+ return flight_id
55
+
56
+ def end_error(self, flight_id:uuid.UUID) -> None:
57
+ with self._lock:
58
+ self._errors.add(flight_id)
59
+
60
+ def end_io(self, flight_id:uuid.UUID, num_bytes:int) -> None:
61
+ """Add a new value to the interval set."""
62
+ end_us = int(time.monotonic() * 1e6)
63
+
64
+ with self._lock:
65
+ start_us = int(self._in_flight.pop(flight_id) * 1e6)
66
+ self._in_flight_bytes -= num_bytes
67
+ self._intervaltree.addi(start_us, end_us, [flight_id, num_bytes])
68
+ self._total_bytes_landed += num_bytes
69
+
70
+ def total_bps(self) -> float:
71
+ """Average bits per second sent during the entire session."""
72
+ with self._lock:
73
+ begin = self._intervaltree.begin()
74
+ end = self._intervaltree.end()
75
+ return 0.0 if end <= begin else self._total_bytes_landed / ((end - begin) / 1e6) * 8
76
+
77
+ def total_bytes(self) -> int:
78
+ """Sum of all bytes sent."""
79
+ num_bytes = 0
80
+ with self._lock:
81
+ for interval in self._intervaltree:
82
+ num_bytes += interval.data[1]
83
+ return num_bytes
84
+
85
+ def current_bps(self, look_back_sec:float = 2.0) -> float:
86
+ """
87
+ Compute the current bits per second over a look-back
88
+ window given in seconds.
89
+ """
90
+ look_back_us = int(look_back_sec * 1e6)
91
+
92
+ with self._lock:
93
+ now_us = int(time.monotonic() * 1e6)
94
+ query_us = now_us - look_back_us
95
+ lookback_intervals = self._intervaltree[query_us:]
96
+ begin_us = self._intervaltree.begin()
97
+
98
+ num_bytes = 0
99
+ for interval in lookback_intervals:
100
+ if interval.begin > query_us:
101
+ num_bytes += interval.data[1]
102
+ else:
103
+ adjustment_factor = (interval.end - query_us) / (interval.end - interval.begin)
104
+ num_bytes += int(round(interval.data[1] * adjustment_factor))
105
+
106
+ window_us = min(look_back_us, now_us - begin_us)
107
+
108
+ return float(num_bytes) / (window_us / 1e6) * 8
109
+
110
+ def total_Mbps(self, *args, **kwargs) -> float:
111
+ """The total rate in megabits per a second over all files this run."""
112
+ return self.total_bps(*args, **kwargs) / 1e6
113
+
114
+ def total_Gbps(self, *args, **kwargs) -> float:
115
+ """The total rate in gigabits per a second over all files this run."""
116
+ return self.total_bps(*args, **kwargs) / 1e9
117
+
118
+ def total_MBps(self, *args, **kwargs) -> float:
119
+ """The total rate in megabytes per a second over all files this run."""
120
+ return self.total_Mbps(*args, **kwargs) / 8.0
121
+
122
+ def total_GBps(self, *args, **kwargs) -> float:
123
+ """The total rate in gigabytes per a second over all files this run."""
124
+ return self.total_Gbps(*args, **kwargs) / 8.0
125
+
126
+ def current_Mbps(self, *args, **kwargs) -> float:
127
+ """The current rate in megabits per a second."""
128
+ return self.current_bps(*args, **kwargs) / 1e6
129
+
130
+ def current_Gbps(self, *args, **kwargs) -> float:
131
+ """The current rate in gigabits per a second."""
132
+ return self.current_bps(*args, **kwargs) / 1e9
133
+
134
+ def current_MBps(self, *args, **kwargs) -> float:
135
+ """The current rate in megabytes per a second."""
136
+ return self.current_Mbps(*args, **kwargs) / 8.0
137
+
138
+ def current_GBps(self, *args, **kwargs) -> float:
139
+ """The current rate in gigabytes per a second."""
140
+ return self.current_Gbps(*args, **kwargs) / 8.0
141
+
142
+ def start_unix_time(self) -> float:
143
+ with self._lock:
144
+ return self._intervaltree.begin() / 1e6  # seconds on the monotonic clock, not the Unix epoch
145
+
146
+ def end_unix_time(self) -> float:
147
+ with self._lock:
148
+ return self._intervaltree.end() / 1e6  # seconds on the monotonic clock, not the Unix epoch
149
+
150
+ def peak_bps(self) -> float:
151
+ hist = self.histogram(resolution=1.0)
+ return float(np.max(hist)) * 8 if hist.size else 0.0
152
+
153
+ def histogram(self, resolution:float = 1.0) -> npt.NDArray[np.uint32]:
154
+
155
+ if resolution <= 0:
156
+ raise ValueError(f"Resolution must be positive. Got: {resolution}")
157
+
158
+ if not self._intervaltree:
159
+ return np.array([], dtype=np.uint32)
160
+
161
+ with self._lock:
162
+ all_begin = int(np.floor(self._intervaltree.begin() / 1e6))
163
+ all_end = int(np.ceil(self._intervaltree.end() / 1e6))
164
+
165
+ num_bins = int(np.ceil((all_end - all_begin) / resolution))
166
+ bins = np.zeros([ num_bins ], dtype=np.float64)
167
+
168
+ for interval in self._intervaltree:
169
+ begin = interval.begin / 1e6
170
+ end = interval.end / 1e6
171
+ duration = end - begin
172
+ total_bytes = interval.data[1]
173
+
174
+ first_bin = int((begin - all_begin) / resolution)
175
+ last_bin = min(int((end - all_begin) / resolution), num_bins - 1)  # clamp: end can fall exactly on the final bin edge
176
+
177
+ if first_bin == last_bin:
178
+ bins[first_bin] += total_bytes
179
+ else:
180
+ bin_start = all_begin + first_bin * resolution
181
+ bin_end = all_begin + last_bin * resolution
182
+
183
+ first_bin_coverage = (bin_start + resolution) - begin
184
+ bins[first_bin] += total_bytes * (first_bin_coverage / duration)
185
+
186
+ last_bin_coverage = end - bin_end
187
+ bins[last_bin] += total_bytes * (last_bin_coverage / duration)
188
+
189
+ full_bins_count = last_bin - first_bin - 1
190
+ if full_bins_count > 0:
191
+ per_bin_bytes = total_bytes * (resolution / duration)
192
+ bins[first_bin+1:last_bin] += per_bin_bytes
193
+
194
+ return bins.round().astype(np.uint32)
195
+
196
+ def plot_histogram(self, resolution:float = 1.0, filename:Optional[str] = None) -> None:
197
+ """
198
+ Plot a chart showing the amount of data transmitted
199
+ per unit of time. Resolution is specified in seconds.
200
+ """
201
+ import matplotlib.pyplot as plt
202
+
203
+ xfer = self.histogram(resolution) * 8
204
+ xfer = xfer.astype(np.float32)
205
+ peak = np.max(xfer) if xfer.size else 0.0
206
+
207
+ if peak < 1000:
208
+ ylabel = 'bps'
209
+ factor = 1.0
210
+ elif 1000 <= peak < int(1e6):
211
+ ylabel = 'Kbps'
212
+ factor = 1000.0
213
+ elif int(1e6) <= peak < int(1e9):
214
+ ylabel = 'Mbps'
215
+ factor = 1e6
216
+ else:
217
+ ylabel = "Gbps"
218
+ factor = 1e9
219
+
220
+ xfer /= factor
221
+
222
+ plt.figure(figsize=(10, 6))
223
+
224
+ x_values = np.arange(len(xfer)) * resolution
225
+ shade_alpha = 0.4
226
+
227
+ plt.plot(
228
+ x_values,
229
+ xfer,
230
+ color='dodgerblue',
231
+ linestyle='-',
232
+ linewidth=1.5,
233
+ alpha=0.8,
234
+ marker='', # Remove markers for cleaner look
235
+ )
236
+ plt.fill_between(
237
+ x_values, 0, xfer,
238
+ color='dodgerblue', alpha=shade_alpha
239
+ )
240
+
241
+ direction_text = "Download"
242
+ if self._direction == IOEnum.TX:
243
+ direction_text = "Upload"
244
+
245
+ plt.title(f'Estimated Data {direction_text} Rate')
246
+ plt.xlabel('Time (seconds)')
247
+ plt.ylabel(ylabel)
248
+ plt.grid(axis='y', linestyle='--', alpha=0.7)
249
+ plt.tight_layout()
250
+
251
+ if filename is not None:
252
+ plt.savefig(filename)
253
+ else:
254
+ plt.show()
255
+
256
+ plt.gca().clear()
257
+ plt.close('all')
258
+
259
+ def plot_gantt(
260
+ self,
261
+ filename:Optional[str] = None,
262
+ title:Optional[str] = None,
263
+ show_size_labels:Optional[bool] = None,
264
+ ):
265
+ import matplotlib.pyplot as plt
266
+ import matplotlib.colors as colors
267
+ from matplotlib.cm import ScalarMappable
268
+
269
+ start_time = self.start_unix_time()
270
+
271
+ file_sizes = []
272
+ with self._lock:
273
+ for interval in self._intervaltree:
274
+ file_sizes.append(interval.data[1])
275
+
276
+ if show_size_labels is None:
277
+ show_size_labels = len(file_sizes) < 40
278
+
279
+ if len(file_sizes):
280
+ min_file_size = min(file_sizes)
281
+ max_file_size = max(file_sizes)
282
+ else:
283
+ min_file_size = 0
284
+ max_file_size = 0
285
+
286
+ del file_sizes
287
+
288
+ fig, ax = plt.subplots(figsize=(10, 5))
289
+
290
+ if max_file_size == min_file_size:
291
+ norm = colors.Normalize(vmin=0, vmax=max_file_size*1.1)
292
+ else:
293
+ norm = colors.Normalize(vmin=min_file_size, vmax=max_file_size)
294
+ cmap = plt.cm.viridis
295
+
296
+ def human_readable_bytes(x:int) -> str:
297
+ factor = (1, 'B')
298
+ if x < 1000:
299
+ return f"{x} B"
300
+ elif 1000 <= x < int(1e6):
301
+ factor = (1000, 'kB')
302
+ elif int(1e6) <= x < int(1e9):
303
+ factor = (1e6, 'MB')
304
+ elif int(1e9) <= x < int(1e12):
305
+ factor = (1e9, 'GB')
306
+ elif int(1e12) <= x < int(1e15):
307
+ factor = (1e12, 'TB')
308
+ else:
309
+ factor = (1e15, 'PB')
310
+
311
+ return f"{x/factor[0]:.2f} {factor[1]}"
312
+
313
+ with self._lock:
314
+ for i, interval in enumerate(self._intervaltree):
315
+ duration = (interval.end - interval.begin) / 1e6
316
+ left = (interval.begin / 1e6) - start_time
317
+ flight_id = interval.data[0]
318
+
319
+ cval = norm(interval.data[1])
320
+ if flight_id in self._errors:
321
+ color = "red"
322
+ else:
323
+ color = cmap(cval)
324
+
325
+ ax.barh(
326
+ str(i),
327
+ width=duration,
328
+ left=left,
329
+ height=1,
330
+ color=color,
331
+ )
332
+ if show_size_labels:
333
+ ax.text(
334
+ x=left + (duration/2),
335
+ y=i,
336
+ s=human_readable_bytes(int(interval.data[1])),
337
+ ha='center',
338
+ va='center',
339
+ color='black' if cval > 0.5 else '0.8',
340
+ fontsize=8,
341
+ )
342
+
343
+ sm = ScalarMappable(cmap=cmap, norm=norm)
344
+ sm.set_array([]) # Required for ScalarMappable with empty data
345
+ cbar = plt.colorbar(sm, ax=ax, label='File Size (bytes)')
346
+
347
+ direction_text = "Download"
348
+ if self._direction == IOEnum.TX:
349
+ direction_text = "Upload"
350
+
351
+ if title is None:
352
+ title = f"File {direction_text} Recording"
353
+
354
+ plt.xlabel("Time (seconds)")
355
+ plt.ylabel("Files in Flight")
356
+ ax.set_yticks([])
357
+ plt.title(title)
358
+ plt.tight_layout()
359
+
360
+ if filename is not None:
361
+ plt.savefig(filename)
362
+ else:
363
+ plt.show()
364
+
365
+ plt.gca().clear()
366
+ plt.close('all')
367
+
368
+ def __getstate__(self):
369
+ # Copy the object's state from self.__dict__ which contains
370
+ # all our instance attributes. Always use the dict.copy()
371
+ # method to avoid modifying the original state.
372
+ state = self.__dict__.copy()
373
+ # Remove the unpicklable entries.
374
+ del state['_lock']
375
+ return state
376
+
377
+ def __setstate__(self, state):
378
+ # Restore instance attributes and recreate the unpicklable lock.
379
+ self.__dict__.update(state)
380
+ self._lock = threading.Lock()
381
+
382
+ class IOSampler:
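+ """Samples machine-wide network I/O counters (via psutil) on a background thread into a fixed-size ring buffer."""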
383
+ def __init__(
384
+ self,
385
+ buffer_sec:float = 600.0,
386
+ interval:float = 0.25
387
+ ):
388
+ if buffer_sec <= 0 or interval <= 0:
389
+ raise ValueError(
390
+ f"Buffer and interval must be positive. buffer sec: {buffer_sec}, interval: {interval}"
391
+ )
392
+
393
+ self._terminate = threading.Event()
394
+ self._thread = None
395
+ self._interval = interval
396
+ self._buffer_sec = buffer_sec
397
+
398
+ self._sample_lock = threading.Lock()
399
+ self._init_sample_buffers()
400
+
401
+ def peak_bps(self, window:float = 1.0) -> tuple[float,float]:
402
+ """Returns the peak rate over the look back window given in seconds.
403
+
404
+ Returns (download, upload) in bits per second (bps).
405
+ """
406
+ N = self.num_samples()
407
+ if N <= 1:
408
+ return (0.0, 0.0)
409
+
410
+ rx, tx, ts = self.samples()
411
+
412
+ def measure_peak(data):
413
+ peak_rate = 0.0
414
+ for i in range(len(ts) - 1):
415
+ for j in range(i + 1, len(ts)):
416
+ elapsed = (ts[j] - ts[i])
417
+ if elapsed >= window:
418
+ rate = (data[j] - data[i]) / elapsed * 8
419
+ peak_rate = max(peak_rate, rate)
420
+ break
421
+
422
+ if (ts[-1] - ts[i]) > 0:
423
+ rate = (data[-1] - data[i]) / (ts[-1] - ts[i]) * 8
424
+ peak_rate = max(peak_rate, rate)
425
+
426
+ return peak_rate
427
+
428
+ return (measure_peak(rx), measure_peak(tx))
429
+
430
+ def current_bps(self, look_back_sec:float = 2.0) -> tuple[float,float]:
431
+ N = self.num_samples()
432
+ if N <= 1:
433
+ return (0.0, 0.0)
434
+
435
+ rx, tx, ts = self.samples()
436
+ i = ts.size - 2
437
+ elapsed = 0
438
+ t = ts[-1]
439
+ while (i >= 0) and not (elapsed >= look_back_sec):
440
+ elapsed = t - ts[i]
441
+ i -= 1
442
+
443
+ i += 1
444
+
445
+ if elapsed < 1e-4:
446
+ return (0.0, 0.0)
447
+
448
+ compute_rate = lambda data: (data[-1] - data[i]) / elapsed * 8
449
+ return (compute_rate(rx), compute_rate(tx))
450
+
451
+ def _histogram(self, resolution, bs, ts) -> npt.NDArray[np.uint32]:
452
+ bs = bs[1:] - bs[:-1]
453
+ num_bins = int(np.ceil((ts[-1] - ts[0]) / resolution))
454
+ bins = np.zeros([ num_bins ], dtype=np.uint32)
455
+
456
+ for i in range(bs.size):
457
+ j = int((ts[i] - ts[0]) / resolution)
458
+ bins[j] += bs[i]
459
+
460
+ return bins
461
+
462
+ def histogram_rx(self, resolution:float = 1.0) -> npt.NDArray[np.uint32]:
463
+ N = self.num_samples()
464
+ if N <= 1:
465
+ return np.array([], dtype=np.uint32)
466
+
467
+ rx, tx, ts = self.samples()
468
+ return self._histogram(resolution, rx, ts)
469
+
470
+ def histogram_tx(self, resolution:float = 1.0) -> npt.NDArray[np.uint32]:
471
+ N = self.num_samples()
472
+ if N <= 1:
473
+ return np.array([], dtype=np.uint32)
474
+
475
+ rx, tx, ts = self.samples()
476
+ return self._histogram(resolution, tx, ts)
477
+
478
+ def plot_histogram(self, resolution:float = None, filename:Optional[str] = None) -> None:
479
+ """
480
+ Plot a chart showing the amount of data transmitted
481
+ per unit of time. Resolution is specified in seconds.
482
+ """
483
+ import matplotlib.pyplot as plt
484
+
485
+ if resolution is None:
486
+ resolution = 1.0
487
+ elif resolution < self._interval:
488
+ raise ValueError(
489
+ f"Can't create histogram bins at a higher resolution "
490
+ f"than the sample rate. Got: {resolution} Sample Rate: {self._interval}"
491
+ )
492
+
493
+ download_bps = self.histogram_rx(resolution) * 8
494
+ upload_bps = self.histogram_tx(resolution) * 8
495
+
496
+ download_bps = download_bps.astype(np.float32)
497
+ upload_bps = upload_bps.astype(np.float32)
498
+
499
+ peak_down = np.max(download_bps) if download_bps.size else 0.0
500
+ peak_up = np.max(upload_bps) if upload_bps.size else 0.0
501
+ peak = max(peak_up, peak_down)
502
+
503
+ if peak < 1000:
504
+ ylabel = 'bps'
505
+ factor = 1.0
506
+ elif 1000 <= peak < int(1e6):
507
+ ylabel = 'Kbps'
508
+ factor = 1000.0
509
+ elif int(1e6) <= peak < int(1e9):
510
+ ylabel = 'Mbps'
511
+ factor = 1e6
512
+ else:
513
+ ylabel = "Gbps"
514
+ factor = 1e9
515
+
516
+ download_bps /= factor
517
+ upload_bps /= factor
518
+
519
+ plt.figure(figsize=(10, 6))
520
+
521
+ min_length = min(len(download_bps), len(upload_bps))
522
+ x_values = np.arange(min_length) * resolution
523
+ shade_alpha = 0.4
524
+
525
+ plt.plot(
526
+ x_values,
527
+ download_bps[:min_length],
528
+ color='dodgerblue',
529
+ linestyle='-',
530
+ linewidth=1.5,
531
+ label='Download',
532
+ alpha=0.8,
533
+ marker='', # Remove markers for cleaner look
534
+ )
535
+ plt.fill_between(
536
+ x_values, 0, download_bps[:min_length],
537
+ color='dodgerblue', alpha=shade_alpha
538
+ )
539
+
540
+ plt.plot(
541
+ x_values,
542
+ upload_bps[:min_length],
543
+ color='salmon',
544
+ linestyle='-',
545
+ linewidth=1.5,
546
+ label='Upload',
547
+ alpha=0.8,
548
+ marker='',
549
+ )
550
+ plt.fill_between(
551
+ x_values, 0, upload_bps[:min_length],
552
+ color='salmon', alpha=shade_alpha
553
+ )
554
+
555
+ plt.legend()
556
+ plt.tight_layout()
557
+
558
+ plt.title('Measured Data Transfer Rate')
559
+ plt.xlabel('Time (seconds)')
560
+ plt.ylabel(ylabel)
561
+ plt.grid(axis='y', linestyle='--', alpha=0.7)
562
+ plt.tight_layout()
563
+
564
+ if filename is not None:
565
+ plt.savefig(filename)
566
+ else:
567
+ plt.show()
568
+
569
+ plt.gca().clear()
570
+ plt.close('all')
571
+
572
+ def _init_sample_buffers(self):
573
+ buffer_size = int(max(np.ceil(self._buffer_sec / self._interval) + 1, 1))
574
+ self._samples_bytes_rx = np.full(buffer_size, -1, dtype=np.int64)
575
+ self._samples_bytes_tx = np.full(buffer_size, -1, dtype=np.int64)
576
+ self._samples_time = np.full(buffer_size, -1, dtype=np.float64)
577
+ self._cursor = 0
578
+ self._num_samples = 0
579
+
580
+ def start_sampling(self, force=False):
581
+ if force == False and self.is_sampling():
582
+ return
583
+
584
+ self._terminate.set()
585
+ self._terminate = threading.Event()
586
+
587
+ if self._thread is not None:
588
+ self._thread.join()
589
+
590
+ self._init_sample_buffers()
591
+
592
+ self._thread = threading.Thread(
593
+ target=self.sample_loop,
594
+ args=(self._terminate, self._interval)
595
+ )
596
+ self._thread.daemon = True
597
+ self._thread.start()
598
+
599
+ def num_samples(self) -> int:
600
+ with self._sample_lock:
601
+ return self._num_samples
602
+
603
+ def start_time(self) -> float:
604
+ with self._sample_lock:
605
+ if self._num_samples == 0:
606
+ return 0.0
607
+
608
+ if self._num_samples < self._samples_time.size:  # use the attribute, not the method: the lock is already held
609
+ return self._samples_time[0]
610
+ else:
611
+ pos = self._cursor  # once the ring buffer has wrapped, the oldest sample sits at the write cursor
612
+ return self._samples_time[pos]
613
+
614
+ def end_time(self) -> float:
615
+ with self._sample_lock:
616
+ if self._num_samples == 0:
617
+ return 0.0
618
+ return self._samples_time[self._cursor - 1]  # latest sample; the cursor points at the next write slot
619
+
620
+ def samples(self) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64], npt.NDArray[np.float64]]:
621
+ with self._sample_lock:
622
+ byte_samples_rx = np.copy(self._samples_bytes_rx)
623
+ byte_samples_tx = np.copy(self._samples_bytes_tx)
624
+ time_samples = np.copy(self._samples_time)
625
+ cursor = self._cursor
626
+
627
+ if self._num_samples < byte_samples_rx.size:
628
+ byte_samples_rx = byte_samples_rx[:cursor]
629
+ byte_samples_tx = byte_samples_tx[:cursor]
630
+ time_samples = time_samples[:cursor]
631
+ else:
632
+ byte_samples_rx = np.concatenate([byte_samples_rx[cursor:], byte_samples_rx[:cursor]])
633
+ byte_samples_tx = np.concatenate([byte_samples_tx[cursor:], byte_samples_tx[:cursor]])
634
+ time_samples = np.concatenate([time_samples[cursor:], time_samples[:cursor]])
635
+
636
+ return (byte_samples_rx, byte_samples_tx, time_samples)
637
+
638
+ def _do_sample(self, time_correction:float) -> None:
639
+ import psutil
640
+ net = psutil.net_io_counters(nowrap=True)
641
+ t = time.monotonic() - time_correction
642
+
643
+ with self._sample_lock:
644
+ self._samples_bytes_rx[self._cursor] = net.bytes_recv
645
+ self._samples_bytes_tx[self._cursor] = net.bytes_sent
646
+ self._samples_time[self._cursor] = t
647
+
648
+ self._cursor += 1
649
+ if self._cursor >= self._samples_time.size:
650
+ self._cursor = 0
651
+ self._num_samples += 1
652
+
653
+ def sample_loop(self, terminate_evt:threading.Event, interval:float):
654
+ import psutil
655
+
656
+ # measure time to measure time
657
+ def measure_correction():
658
+ s = time.monotonic()
659
+ time.monotonic()
660
+ time.monotonic()
661
+ time.monotonic()
662
+ time.monotonic()
663
+ time.monotonic()
664
+ e = time.monotonic()
665
+ return (e - s) / 5
666
+
667
+ time_correction = measure_correction()
668
+ psutil.net_io_counters.cache_clear()
669
+
670
+ recorrection_start = time.monotonic()
671
+ e = time.monotonic()
672
+
673
+ while not terminate_evt.is_set():
674
+ s = time.monotonic()
675
+ self._do_sample(time_correction)
676
+
677
+ if (s - recorrection_start) > 60:  # re-measure the timer overhead about once a minute
678
+ time_correction = measure_correction()
679
+ recorrection_start = time.monotonic()
680
+
681
+ e = time.monotonic()
682
+
683
+ wait = interval - (e-s)
684
+ if wait > 0:
685
+ time.sleep(wait)
686
+
687
+ if (interval * 0.5) < (time.monotonic() - e):
688
+ self._do_sample(time_correction)
689
+
690
+ def is_sampling(self):
691
+ return self._thread is not None
692
+
693
+ def stop_sampling(self):
694
+ self._terminate.set()
695
+ if self._thread is not None:
696
+ self._thread.join()
697
+ self._thread = None
698
+
699
+ def __del__(self):
700
+ self.stop_sampling()
701
+
702
+ def __getstate__(self):
703
+ # Copy the object's state from self.__dict__ which contains
704
+ # all our instance attributes. Always use the dict.copy()
705
+ # method to avoid modifying the original state.
706
+ state = self.__dict__.copy()
707
+ # Remove the unpicklable entries.
708
+ del state['_sample_lock']
709
+ del state['_terminate']
710
+ del state['_thread']
711
+ return state
712
+
713
+ def __setstate__(self, state):
714
+ self.__dict__.update(state)
715
+ self._sample_lock = threading.Lock()
716
+ self._terminate = threading.Event()
717
+ self._thread = None
718
+
719
+ def __enter__(self):
720
+ self.start_sampling()
721
+ return self
722
+
723
+ def __exit__(self, exc_type, exc_val, exc_tb):
724
+ self.stop_sampling()
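
Usage sketch (not part of the package contents above): the diff defines the two monitoring classes but shows no example of driving them. The code below is a minimal sketch of how they appear intended to be used. The import path is a guess, since the diff does not show the file name, and the payload sizes and sleep calls are placeholders for real transfers. IOSampler additionally requires psutil, and the plotting helpers require matplotlib.

import time

# Hypothetical import path -- adjust to wherever cloud-files actually exposes these classes.
from cloudfiles.monitoring import IOEnum, TransmissionMonitor, IOSampler

# TransmissionMonitor: record each transfer by hand.
tm = TransmissionMonitor(IOEnum.TX)
for payload_size in (1_000_000, 5_000_000, 250_000):
    flight_id = tm.start_io(payload_size)  # register an in-flight upload
    time.sleep(0.05)                       # placeholder for the real network call
    tm.end_io(flight_id, payload_size)     # record when (and how much) landed

print("session average:", tm.total_Mbps(), "Mbps")
print("last 2 seconds:", tm.current_Mbps(look_back_sec=2.0), "Mbps")
print("bytes moved:", tm.total_bytes())
# tm.plot_gantt(filename="gantt.png")     # optional, needs matplotlib

# IOSampler: background thread sampling machine-wide counters via psutil.
with IOSampler(buffer_sec=60.0, interval=0.25) as sampler:
    time.sleep(2.0)                        # the real workload would run here
    rx_bps, tx_bps = sampler.current_bps(look_back_sec=1.0)
    print(f"download {rx_bps / 1e6:.2f} Mbps, upload {tx_bps / 1e6:.2f} Mbps")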