osbot-utils 2.11.0__py3-none-any.whl → 2.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. osbot_utils/context_managers/capture_duration.py +19 -12
  2. osbot_utils/helpers/CPrint.py +0 -1
  3. osbot_utils/helpers/trace/Trace_Call.py +1 -2
  4. osbot_utils/helpers/trace/Trace_Call__Handler.py +14 -14
  5. osbot_utils/helpers/xml/Xml__File.py +1 -1
  6. osbot_utils/helpers/xml/Xml__File__To_Dict.py +1 -1
  7. osbot_utils/helpers/xml/Xml__File__To_Xml.py +1 -1
  8. osbot_utils/testing/performance/Performance_Measure__Session.py +108 -0
  9. osbot_utils/testing/performance/__init__.py +0 -0
  10. osbot_utils/testing/performance/models/Model__Performance_Measure__Measurement.py +14 -0
  11. osbot_utils/testing/performance/models/Model__Performance_Measure__Result.py +10 -0
  12. osbot_utils/testing/performance/models/__init__.py +0 -0
  13. osbot_utils/type_safe/Type_Safe.py +35 -418
  14. osbot_utils/type_safe/Type_Safe__Base.py +8 -24
  15. osbot_utils/type_safe/Type_Safe__Dict.py +9 -8
  16. osbot_utils/type_safe/shared/Type_Safe__Annotations.py +29 -0
  17. osbot_utils/type_safe/shared/Type_Safe__Cache.py +143 -0
  18. osbot_utils/type_safe/shared/Type_Safe__Convert.py +46 -0
  19. osbot_utils/type_safe/shared/Type_Safe__Not_Cached.py +24 -0
  20. osbot_utils/type_safe/shared/Type_Safe__Raise_Exception.py +14 -0
  21. osbot_utils/type_safe/shared/Type_Safe__Shared__Variables.py +4 -0
  22. osbot_utils/type_safe/shared/Type_Safe__Validation.py +246 -0
  23. osbot_utils/type_safe/shared/__init__.py +0 -0
  24. osbot_utils/type_safe/steps/Type_Safe__Step__Class_Kwargs.py +110 -0
  25. osbot_utils/type_safe/steps/Type_Safe__Step__Default_Kwargs.py +42 -0
  26. osbot_utils/type_safe/steps/Type_Safe__Step__Default_Value.py +74 -0
  27. osbot_utils/type_safe/steps/Type_Safe__Step__From_Json.py +138 -0
  28. osbot_utils/type_safe/steps/Type_Safe__Step__Init.py +24 -0
  29. osbot_utils/type_safe/steps/Type_Safe__Step__Set_Attr.py +92 -0
  30. osbot_utils/type_safe/steps/__init__.py +0 -0
  31. osbot_utils/utils/Objects.py +27 -232
  32. osbot_utils/utils/Status.py +0 -2
  33. osbot_utils/version +1 -1
  34. {osbot_utils-2.11.0.dist-info → osbot_utils-2.12.0.dist-info}/METADATA +2 -2
  35. {osbot_utils-2.11.0.dist-info → osbot_utils-2.12.0.dist-info}/RECORD +37 -17
  36. {osbot_utils-2.11.0.dist-info → osbot_utils-2.12.0.dist-info}/LICENSE +0 -0
  37. {osbot_utils-2.11.0.dist-info → osbot_utils-2.12.0.dist-info}/WHEEL +0 -0
@@ -1,26 +1,33 @@
+import time
+
 from osbot_utils.type_safe.Type_Safe import Type_Safe
-from osbot_utils.utils.Misc import timestamp_utc_now


 class capture_duration(Type_Safe):
-    action_name     : str
-    duration        : float
-    start_timestamp : int
-    end_timestamp   : int
-    seconds         : float
+    action_name : str
+    duration    : float
+    start_time  : float
+    end_time    : float
+    seconds     : float
+    precision   : int = 3                                        # Default rounding to 3 decimal places
+

     def __enter__(self):
-        self.start_timestamp = timestamp_utc_now()
+        self.start_time = time.perf_counter()                    # Start the performance counter
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.end_timestamp = timestamp_utc_now()
-        self.duration      = self.end_timestamp - self.start_timestamp
-        self.seconds       = round(self.duration / 1000, 3)      # Duration in seconds (rounded to the 3 digits)
-        return False                                             # ensures that any exceptions that happened are rethrown
+        self.end_time = time.perf_counter()                      # End the performance counter
+        self.duration = self.end_time - self.start_time
+        self.seconds  = round(self.duration, self.precision)     # Use configurable precision
+        return False                                             # Ensures that exceptions are rethrown

     def data(self):
-        return dict(start = self.start_timestamp, end = self.end_timestamp, seconds = self.seconds)
+        return {
+            "start": self.start_time,
+            "end": self.end_time,
+            "seconds": self.seconds,
+        }

     def print(self):
         print()
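
Editor's note: a minimal usage sketch of the reworked capture_duration (illustration only, not part of the diff; the action name and the precision override are hypothetical):

    from osbot_utils.context_managers.capture_duration import capture_duration

    with capture_duration(action_name='load-config', precision=4) as duration:   # precision is the new field added above
        sum(range(1_000_000))                                                     # any code to be timed

    print(duration.seconds)    # elapsed wall-clock seconds from time.perf_counter(), rounded to `precision`
    print(duration.data())     # {'start': ..., 'end': ..., 'seconds': ...}

Note the unit change: seconds is now the perf_counter delta in seconds, whereas the old version divided a millisecond timestamp delta by 1000.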
@@ -1,4 +1,3 @@
-from osbot_utils.base_classes.Kwargs_To_Self import Kwargs_To_Self
 from osbot_utils.helpers.CFormat import CFormat, CFormat_Colors


@@ -1,8 +1,7 @@
 import linecache
 import sys
 import threading
-from functools import wraps
-
+from functools import wraps
 from osbot_utils.base_classes.Kwargs_To_Self import Kwargs_To_Self
 from osbot_utils.helpers.trace.Trace_Call__Config import Trace_Call__Config, PRINT_MAX_STRING_LENGTH
 from osbot_utils.helpers.trace.Trace_Call__Handler import Trace_Call__Handler
@@ -8,29 +8,29 @@ from osbot_utils.helpers.trace.Trace_Call__Stack_Node import Trace_Call__Stack
 from osbot_utils.helpers.trace.Trace_Call__Stats import Trace_Call__Stats

 DEFAULT_ROOT_NODE_NODE_TITLE = 'Trace Session'
-GLOBAL_FUNCTIONS_TO_IGNORE = ['value_type_matches_obj_annotation_for_attr'                ,  # these are type safety functions which introduce quite a lot of noise in the traces (and unless one is debugging type safety, they will not be needed)
-                              'value_type_matches_obj_annotation_for_union_and_annotated' ,  # todo: map out and document why exactly these methods are ignore (and what is the side effect)
-                              'are_types_compatible_for_assigment'                        ,
-                              'obj_attribute_annotation'                                  ,
-                              'all_annotations'                                           ,
-                              'get_origin'                                                ,
-                              'getmro'                                                    ,
-                              'default_value'                                             ,
-                              'raise_exception_on_obj_type_annotation_mismatch'           ,
-                              '__cls_kwargs__'                                            ,
-                              '__default__value__'                                        ,
-                              '__setattr__'                                               ,
-                              '<module>']
+# GLOBAL_FUNCTIONS_TO_IGNORE = ['value_type_matches_obj_annotation_for_attr'                ,  # these are type safety functions which introduce quite a lot of noise in the traces (and unless one is debugging type safety, they will not be needed)
+#                               'value_type_matches_obj_annotation_for_union_and_annotated' ,  # todo: map out and document why exactly these methods are ignore (and what is the side effect)
+#                               'are_types_compatible_for_assigment'                        ,
+#                               'obj_attribute_annotation'                                  ,
+#                               'all_annotations'                                           ,
+#                               'get_origin'                                                ,
+#                               'getmro'                                                    ,
+#                               'default_value'                                             ,
+#                               '__cls_kwargs__'                                            ,
+#                               '__default__value__'                                        ,
+#                               '__setattr__'                                               ,
+#                               '<module>']
 GLOBAL_MODULES_TO_IGNORE   = ['osbot_utils.helpers.trace.Trace_Call'                      ,  # todo: map out and document why exactly these modules are ignore (and what is the side effect)
                               'osbot_utils.helpers.trace.Trace_Call__Config'              ,
                               'osbot_utils.helpers.trace.Trace_Call__View_Model'          ,
                               'osbot_utils.helpers.trace.Trace_Call__Print_Traces'        ,
                               'osbot_utils.helpers.trace.Trace_Call__Stack'               ,
-                              'osbot_utils.base_classes.Type_Safe'                        ,
+                              # 'osbot_utils.base_classes.Type_Safe'                       ,
                               'osbot_utils.helpers.CPrint'                                ,  # also see if this should be done here or at the print/view stage
                               'osbot_utils.helpers.Print_Table'                           ,
                               'osbot_utils.decorators.methods.cache_on_self'              ,
                               'codecs'                                                    ]
+GLOBAL_FUNCTIONS_TO_IGNORE = []

 #GLOBAL_MODULES_TO_IGNORE    = []
 #GLOBAL_FUNCTIONS_TO_IGNORE  = []
@@ -1,5 +1,5 @@
 from typing import Dict
-from osbot_utils.type_safe.Type_Safe import Type_Safe
+from osbot_utils.type_safe.Type_Safe      import Type_Safe
 from osbot_utils.helpers.xml.Xml__Element import XML__Element

 class Xml__File(Type_Safe):
@@ -1,5 +1,5 @@
 from typing import Dict, Any
-from osbot_utils.type_safe.Type_Safe import Type_Safe
+from osbot_utils.type_safe.Type_Safe      import Type_Safe
 from osbot_utils.helpers.xml.Xml__Element import XML__Element
 from osbot_utils.helpers.xml.Xml__File import Xml__File

@@ -1,7 +1,7 @@
 from typing import Optional
 from xml.etree.ElementTree import Element, SubElement, tostring
 from xml.dom import minidom
-from osbot_utils.type_safe.Type_Safe import Type_Safe
+from osbot_utils.type_safe.Type_Safe      import Type_Safe
 from osbot_utils.helpers.xml.Xml__Element import XML__Element
 from osbot_utils.helpers.xml.Xml__File import Xml__File

@@ -0,0 +1,108 @@
+import time
+from typing import Callable, List
+from statistics import mean, median, stdev
+from osbot_utils.utils.Env import in_github_action
+from osbot_utils.testing.performance.models.Model__Performance_Measure__Measurement import Model__Performance_Measure__Measurement
+from osbot_utils.testing.performance.models.Model__Performance_Measure__Result import Model__Performance_Measure__Result
+from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+MEASURE__INVOCATION__LOOPS = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]       # Fibonacci sequence for measurement loops
+
+class Performance_Measure__Session(Type_Safe):
+    result        : Model__Performance_Measure__Result = None                              # Current measurement result
+    assert_enabled: bool = True
+
+    def calculate_raw_score(self, times: List[int]) -> int:                                # Calculate raw performance score
+        if len(times) < 3:                                                                  # Need at least 3 values for stability
+            return mean(times)
+
+        sorted_times = sorted(times)                                                        # Sort times for analysis
+        trim_size    = max(1, len(times) // 10)                                             # Remove ~10% from each end
+
+        trimmed      = sorted_times[trim_size:-trim_size]                                   # Remove outliers
+        med          = median(trimmed)                                                      # Get median of trimmed data
+        trimmed_mean = mean  (trimmed)                                                      # Get mean of trimmed data
+
+        raw_score = int(med * 0.6 + trimmed_mean * 0.4)                                     # Weighted combination favoring median
+        return raw_score
+
+    def calculate_stable_score(self, raw_score: float) -> int:                             # Calculate stable performance score
+        if raw_score < 1_000:                                                               # Dynamic normalization based on score magnitude
+            return int(round(raw_score / 100) * 100)                                        # Under 1µs: nearest 100ns
+        elif raw_score < 10_000:
+            return int(round(raw_score / 1000) * 1000)                                      # Under 10µs: nearest 500ns
+        elif raw_score < 100_000:
+            return int(round(raw_score / 10000) * 10000)                                    # Under 100µs: nearest 1000ns
+        else:
+            return int(round(raw_score / 100000) * 100000)                                  # Above 100µs: nearest 5000ns
+
+    def calculate_metrics(self, times: List[int]) -> Model__Performance_Measure__Measurement:   # Calculate statistical metrics
+        if not times:
+            raise ValueError("Cannot calculate metrics from empty time list")
+        raw_score = self.calculate_raw_score   (times)
+        score     = self.calculate_stable_score(raw_score)
+        return Model__Performance_Measure__Measurement(
+                    avg_time    = int(mean(times))                      ,
+                    min_time    = min(times)                            ,
+                    max_time    = max(times)                            ,
+                    median_time = int(median(times))                    ,
+                    stddev_time = stdev(times) if len(times) > 1 else 0 ,
+                    raw_times   = times                                 ,
+                    sample_size = len(times)                            ,
+                    raw_score   = raw_score                             ,
+                    score       = score                                 )
+
+    def measure(self, target: Callable) -> 'Performance_Measure__Session':                 # Perform measurements
+        name         = target.__name__
+        measurements = {}
+        all_times    = []                                                                   # Collect all times for final score
+
+        for loop_size in MEASURE__INVOCATION__LOOPS:                                        # Measure each loop size
+            loop_times = []
+            for i in range(loop_size):
+                start      = time.perf_counter_ns()
+                target()
+                end        = time.perf_counter_ns()
+                time_taken = end - start
+                loop_times.append(time_taken)
+                all_times.append(time_taken)                                                # Add to overall collection
+
+            measurements[loop_size] = self.calculate_metrics(loop_times)                    # Store metrics for this loop size
+
+        raw_score   = self.calculate_raw_score   (all_times)
+        final_score = self.calculate_stable_score(raw_score)                                # Calculate final stable score
+
+        self.result = Model__Performance_Measure__Result(
+                          measurements = measurements ,
+                          name         = name         ,
+                          raw_score    = raw_score    ,
+                          final_score  = final_score  )
+
+        return self
+
+    def print_measurement(self, measurement: Model__Performance_Measure__Measurement):     # Format measurement details
+        print(f"Samples : {measurement.sample_size}")
+        print(f"Score   : {measurement.score:,.0f}ns")
+        print(f"Avg     : {measurement.avg_time:,}ns")
+        print(f"Min     : {measurement.min_time:,}ns")
+        print(f"Max     : {measurement.max_time:,}ns")
+        print(f"Median  : {measurement.median_time:,}ns")
+        print(f"StdDev  : {measurement.stddev_time:,.2f}ns")
+
+    def print(self, padding=12):                                                            # Print measurement results
+        if not self.result:
+            print("No measurements taken yet")
+            return
+        print(f"{self.result.name:{padding}} | score: {self.result.final_score:7,d} ns | raw: {self.result.raw_score:7,d} ns")   # Print name and normalized score
+
+        return self
+
+    def assert_time(self, *expected_time: int):                                             # Assert that the final score matches the expected normalized time
+        if self.assert_enabled is False:
+            return
+        if in_github_action():
+            last_expected_time = expected_time[-1] + 100                                    # +100 in case it is 0
+            new_expected_time  = last_expected_time * 5                                     # using last_expected_time * 5 as the upper limit (since these tests are significantly slowed in GitHub Actions)
+            assert last_expected_time <= self.result.final_score <= new_expected_time, f"Performance changed for {self.result.name}: expected {last_expected_time} < {self.result.final_score:,d}ns, expected {new_expected_time}"
+        else:
+            assert self.result.final_score in expected_time, f"Performance changed for {self.result.name}: got {self.result.final_score:,d}ns, expected {expected_time}"
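
Editor's note: a minimal sketch of how the new measurement session appears intended to be used, based only on the methods in the hunk above (the target function and the expected scores are made-up placeholders):

    from osbot_utils.testing.performance.Performance_Measure__Session import Performance_Measure__Session

    def an_operation():                                                # placeholder zero-arg callable to measure
        return sum(range(100))

    session = Performance_Measure__Session().measure(an_operation)    # runs the Fibonacci-sized loops and stores self.result
    session.print()                                                    # prints "<name> | score: ... ns | raw: ... ns"
    session.assert_time(2_000, 3_000, 4_000)                           # passes if the normalized score equals one of these values;
                                                                       # on GitHub Actions a looser upper bound is applied instead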
File without changes
@@ -0,0 +1,14 @@
+from typing import List
+from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+
+class Model__Performance_Measure__Measurement(Type_Safe):        # Pure data container for measurement metrics
+    avg_time    : int            # Average time in nanoseconds
+    min_time    : int            # Minimum time observed
+    max_time    : int            # Maximum time observed
+    median_time : int            # Median time
+    stddev_time : float          # Standard deviation
+    raw_times   : List[int]      # Raw measurements for analysis
+    sample_size : int            # Number of measurements taken
+    score       : float
+    raw_score   : float
@@ -0,0 +1,10 @@
+from typing import Dict
+from osbot_utils.testing.performance.models.Model__Performance_Measure__Measurement import Model__Performance_Measure__Measurement
+from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+class Model__Performance_Measure__Result(Type_Safe):                        # Pure data container for measurement results
+    measurements : Dict[int, Model__Performance_Measure__Measurement]       # Results per loop size
+    name         : str                                                      # Name of measured target
+    raw_score    : float
+    final_score  : float
+
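
Editor's note: the result model nests the measurement model per loop size; a hypothetical sketch of reading those metrics back (the trivial `str` target is just a placeholder):

    from osbot_utils.testing.performance.Performance_Measure__Session import Performance_Measure__Session

    result = Performance_Measure__Session(assert_enabled=False).measure(str).result     # Model__Performance_Measure__Result
    for loop_size, measurement in result.measurements.items():                          # Dict[int, Model__Performance_Measure__Measurement]
        print(loop_size, measurement.sample_size, measurement.median_time, measurement.score)   # per-loop-size stats in nanoseconds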
File without changes