osbot-utils 2.11.0__py3-none-any.whl → 2.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. osbot_utils/context_managers/capture_duration.py +19 -12
  2. osbot_utils/helpers/CPrint.py +0 -1
  3. osbot_utils/helpers/Obj_Id.py +29 -0
  4. osbot_utils/helpers/trace/Trace_Call.py +1 -2
  5. osbot_utils/helpers/trace/Trace_Call__Handler.py +14 -14
  6. osbot_utils/helpers/xml/Xml__File.py +1 -1
  7. osbot_utils/helpers/xml/Xml__File__To_Dict.py +1 -1
  8. osbot_utils/helpers/xml/Xml__File__To_Xml.py +1 -1
  9. osbot_utils/testing/performance/Performance_Measure__Session.py +128 -0
  10. osbot_utils/testing/performance/__init__.py +0 -0
  11. osbot_utils/testing/performance/models/Model__Performance_Measure__Measurement.py +14 -0
  12. osbot_utils/testing/performance/models/Model__Performance_Measure__Result.py +10 -0
  13. osbot_utils/testing/performance/models/__init__.py +0 -0
  14. osbot_utils/type_safe/Type_Safe.py +35 -418
  15. osbot_utils/type_safe/Type_Safe__Base.py +8 -24
  16. osbot_utils/type_safe/Type_Safe__Dict.py +9 -8
  17. osbot_utils/type_safe/shared/Type_Safe__Annotations.py +29 -0
  18. osbot_utils/type_safe/shared/Type_Safe__Cache.py +143 -0
  19. osbot_utils/type_safe/shared/Type_Safe__Convert.py +47 -0
  20. osbot_utils/type_safe/shared/Type_Safe__Not_Cached.py +24 -0
  21. osbot_utils/type_safe/shared/Type_Safe__Raise_Exception.py +14 -0
  22. osbot_utils/type_safe/shared/Type_Safe__Shared__Variables.py +4 -0
  23. osbot_utils/type_safe/shared/Type_Safe__Validation.py +246 -0
  24. osbot_utils/type_safe/shared/__init__.py +0 -0
  25. osbot_utils/type_safe/steps/Type_Safe__Step__Class_Kwargs.py +114 -0
  26. osbot_utils/type_safe/steps/Type_Safe__Step__Default_Kwargs.py +42 -0
  27. osbot_utils/type_safe/steps/Type_Safe__Step__Default_Value.py +74 -0
  28. osbot_utils/type_safe/steps/Type_Safe__Step__From_Json.py +138 -0
  29. osbot_utils/type_safe/steps/Type_Safe__Step__Init.py +24 -0
  30. osbot_utils/type_safe/steps/Type_Safe__Step__Set_Attr.py +92 -0
  31. osbot_utils/type_safe/steps/__init__.py +0 -0
  32. osbot_utils/utils/Objects.py +27 -232
  33. osbot_utils/utils/Status.py +0 -2
  34. osbot_utils/version +1 -1
  35. {osbot_utils-2.11.0.dist-info → osbot_utils-2.13.0.dist-info}/METADATA +2 -2
  36. {osbot_utils-2.11.0.dist-info → osbot_utils-2.13.0.dist-info}/RECORD +38 -17
  37. {osbot_utils-2.11.0.dist-info → osbot_utils-2.13.0.dist-info}/LICENSE +0 -0
  38. {osbot_utils-2.11.0.dist-info → osbot_utils-2.13.0.dist-info}/WHEEL +0 -0
osbot_utils/context_managers/capture_duration.py
@@ -1,26 +1,33 @@
+ import time
+
  from osbot_utils.type_safe.Type_Safe import Type_Safe
- from osbot_utils.utils.Misc import timestamp_utc_now


  class capture_duration(Type_Safe):
-     action_name     : str
-     duration        : float
-     start_timestamp : int
-     end_timestamp   : int
-     seconds         : float
+     action_name : str
+     duration    : float
+     start_time  : float
+     end_time    : float
+     seconds     : float
+     precision   : int = 3                                       # Default rounding to 3 decimal places
+

      def __enter__(self):
-         self.start_timestamp = timestamp_utc_now()
+         self.start_time = time.perf_counter()                   # Start the performance counter
          return self

      def __exit__(self, exc_type, exc_val, exc_tb):
-         self.end_timestamp = timestamp_utc_now()
-         self.duration      = self.end_timestamp - self.start_timestamp
-         self.seconds       = round(self.duration / 1000, 3)     # Duration in seconds (rounded to the 3 digits)
-         return False                                            # ensures that any exceptions that happened are rethrown
+         self.end_time = time.perf_counter()                     # End the performance counter
+         self.duration = self.end_time - self.start_time
+         self.seconds  = round(self.duration, self.precision)    # Use configurable precision
+         return False                                            # Ensures that exceptions are rethrown

      def data(self):
-         return dict(start = self.start_timestamp, end = self.end_timestamp, seconds = self.seconds)
+         return {
+             "start": self.start_time,
+             "end": self.end_time,
+             "seconds": self.seconds,
+         }

      def print(self):
          print()
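
For orientation, a minimal usage sketch of the updated context manager (parameter and attribute names are taken from the diff above; the workload and printed values are illustrative only):

    from osbot_utils.context_managers.capture_duration import capture_duration

    with capture_duration(action_name='load-config', precision=5) as duration:    # precision is the new rounding option
        sum(range(100_000))                                                       # any workload being timed

    print(duration.seconds)     # elapsed seconds from time.perf_counter(), rounded to `precision` decimals
    print(duration.data())      # {'start': ..., 'end': ..., 'seconds': ...} -- perf_counter values, no longer UTC timestamps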
osbot_utils/helpers/CPrint.py
@@ -1,4 +1,3 @@
- from osbot_utils.base_classes.Kwargs_To_Self import Kwargs_To_Self
  from osbot_utils.helpers.CFormat import CFormat, CFormat_Colors


osbot_utils/helpers/Obj_Id.py
@@ -0,0 +1,29 @@
+ import random
+
+ _hex_table = [f"{i:02x}" for i in range(256)]
+
+ def is_obj_id(value: str):
+     var_type = type(value)
+     if var_type is Obj_Id:
+         return True
+     if var_type is str:
+         if len(value) == 8:                             # todo: add efficient check if we only have hex values
+             return True
+     return False
+
+ def new_obj_id():
+     return hex(random.getrandbits(32))[2:].zfill(8)     # slice off '0x' and pad
+
+ class Obj_Id(str):
+     def __new__(cls, value: str=None):
+         if value:
+             if is_obj_id(value):
+                 obj_id = value
+             else:
+                 raise ValueError(f'in Obj_Id: value provided was not a valid Obj_Id: {value}')
+         else:
+             obj_id = new_obj_id()
+         return super().__new__(cls, obj_id)             # Return a new instance of Guid initialized with the string version of the UUID
+
+     def __str__(self):
+         return self
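
A short, hedged usage sketch of the new Obj_Id helper (behaviour as implied by the code above; note that is_obj_id currently only checks the length of a string, not that it is valid hex, as flagged by the todo):

    from osbot_utils.helpers.Obj_Id import Obj_Id, is_obj_id, new_obj_id

    obj_id = Obj_Id()                   # new random id, e.g. 'a3f09c1e' (8 hex chars from 32 random bits)
    same   = Obj_Id(obj_id)             # wrapping an existing id is accepted as-is
    print(is_obj_id(obj_id))            # True
    print(is_obj_id('not-an-id'))       # False (length != 8)
    Obj_Id('too-long-value')            # raises ValueError: value provided was not a valid Obj_Id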
osbot_utils/helpers/trace/Trace_Call.py
@@ -1,8 +1,7 @@
  import linecache
  import sys
  import threading
- from functools import wraps
-
+ from functools import wraps
  from osbot_utils.base_classes.Kwargs_To_Self import Kwargs_To_Self
  from osbot_utils.helpers.trace.Trace_Call__Config import Trace_Call__Config, PRINT_MAX_STRING_LENGTH
  from osbot_utils.helpers.trace.Trace_Call__Handler import Trace_Call__Handler
osbot_utils/helpers/trace/Trace_Call__Handler.py
@@ -8,29 +8,29 @@ from osbot_utils.helpers.trace.Trace_Call__Stack_Node import Trace_Call__Stack
  from osbot_utils.helpers.trace.Trace_Call__Stats import Trace_Call__Stats

  DEFAULT_ROOT_NODE_NODE_TITLE = 'Trace Session'
- GLOBAL_FUNCTIONS_TO_IGNORE   = ['value_type_matches_obj_annotation_for_attr'               ,  # these are type safety functions which introduce quite a lot of noise in the traces (and unless one is debugging type safety, they will not be needed)
-                                 'value_type_matches_obj_annotation_for_union_and_annotated',  # todo: map out and document why exactly these methods are ignore (and what is the side effect)
-                                 'are_types_compatible_for_assigment'                        ,
-                                 'obj_attribute_annotation'                                  ,
-                                 'all_annotations'                                           ,
-                                 'get_origin'                                                ,
-                                 'getmro'                                                    ,
-                                 'default_value'                                             ,
-                                 'raise_exception_on_obj_type_annotation_mismatch'           ,
-                                 '__cls_kwargs__'                                            ,
-                                 '__default__value__'                                        ,
-                                 '__setattr__'                                               ,
-                                 '<module>']
+ # GLOBAL_FUNCTIONS_TO_IGNORE = ['value_type_matches_obj_annotation_for_attr'               ,  # these are type safety functions which introduce quite a lot of noise in the traces (and unless one is debugging type safety, they will not be needed)
+ #                               'value_type_matches_obj_annotation_for_union_and_annotated',  # todo: map out and document why exactly these methods are ignore (and what is the side effect)
+ #                               'are_types_compatible_for_assigment'                        ,
+ #                               'obj_attribute_annotation'                                  ,
+ #                               'all_annotations'                                           ,
+ #                               'get_origin'                                                ,
+ #                               'getmro'                                                    ,
+ #                               'default_value'                                             ,
+ #                               '__cls_kwargs__'                                            ,
+ #                               '__default__value__'                                        ,
+ #                               '__setattr__'                                               ,
+ #                               '<module>']
  GLOBAL_MODULES_TO_IGNORE     = ['osbot_utils.helpers.trace.Trace_Call'                     ,  # todo: map out and document why exactly these modules are ignore (and what is the side effect)
                                  'osbot_utils.helpers.trace.Trace_Call__Config'             ,
                                  'osbot_utils.helpers.trace.Trace_Call__View_Model'         ,
                                  'osbot_utils.helpers.trace.Trace_Call__Print_Traces'       ,
                                  'osbot_utils.helpers.trace.Trace_Call__Stack'              ,
-                                 'osbot_utils.base_classes.Type_Safe'                       ,
+                                 # 'osbot_utils.base_classes.Type_Safe'                     ,
                                  'osbot_utils.helpers.CPrint'                               ,  # also see if this should be done here or at the print/view stage
                                  'osbot_utils.helpers.Print_Table'                          ,
                                  'osbot_utils.decorators.methods.cache_on_self'             ,
                                  'codecs'                                                   ]
+ GLOBAL_FUNCTIONS_TO_IGNORE   = []

  #GLOBAL_MODULES_TO_IGNORE   = []
  #GLOBAL_FUNCTIONS_TO_IGNORE = []
osbot_utils/helpers/xml/Xml__File.py
@@ -1,5 +1,5 @@
  from typing import Dict
- from osbot_utils.type_safe.Type_Safe import Type_Safe
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
  from osbot_utils.helpers.xml.Xml__Element import XML__Element

  class Xml__File(Type_Safe):
osbot_utils/helpers/xml/Xml__File__To_Dict.py
@@ -1,5 +1,5 @@
  from typing import Dict, Any
- from osbot_utils.type_safe.Type_Safe import Type_Safe
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
  from osbot_utils.helpers.xml.Xml__Element import XML__Element
  from osbot_utils.helpers.xml.Xml__File import Xml__File

osbot_utils/helpers/xml/Xml__File__To_Xml.py
@@ -1,7 +1,7 @@
  from typing import Optional
  from xml.etree.ElementTree import Element, SubElement, tostring
  from xml.dom import minidom
- from osbot_utils.type_safe.Type_Safe import Type_Safe
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
  from osbot_utils.helpers.xml.Xml__Element import XML__Element
  from osbot_utils.helpers.xml.Xml__File import Xml__File

osbot_utils/testing/performance/Performance_Measure__Session.py
@@ -0,0 +1,128 @@
+ import time
+ from typing import Callable, List
+ from statistics import mean, median, stdev
+ from osbot_utils.utils.Env import in_github_action
+ from osbot_utils.testing.performance.models.Model__Performance_Measure__Measurement import Model__Performance_Measure__Measurement
+ from osbot_utils.testing.performance.models.Model__Performance_Measure__Result import Model__Performance_Measure__Result
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+ MEASURE__INVOCATION__LOOPS = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]      # Fibonacci sequence for measurement loops
+
+ class Performance_Measure__Session(Type_Safe):
+     result        : Model__Performance_Measure__Result = None       # Current measurement result
+     assert_enabled: bool = True
+     padding       : int = 30
+
+     def calculate_raw_score(self, times: List[int]) -> int:         # Calculate raw performance score
+         if len(times) < 3:                                          # Need at least 3 values for stability
+             return mean(times)
+
+         sorted_times = sorted(times)                                # Sort times for analysis
+         trim_size    = max(1, len(times) // 10)                     # Remove ~10% from each end
+
+         trimmed      = sorted_times[trim_size:-trim_size]           # Remove outliers
+         med          = median(trimmed)                              # Get median of trimmed data
+         trimmed_mean = mean  (trimmed)                              # Get mean of trimmed data
+
+         raw_score = int(med * 0.6 + trimmed_mean * 0.4)             # Weighted combination favoring median
+         return raw_score
+
+     def calculate_stable_score(self, raw_score: float) -> int:      # Calculate stable performance score
+         if raw_score < 1_000:                                       # Dynamic normalization based on score magnitude
+             return int(round(raw_score / 100) * 100)                # Under 1µs: nearest 100ns
+         elif raw_score < 10_000:
+             return int(round(raw_score / 1000) * 1000)              # Under 10µs: nearest 500ns
+         elif raw_score < 100_000:
+             return int(round(raw_score / 10000) * 10000)            # Under 100µs: nearest 1000ns
+         else:
+             return int(round(raw_score / 100000) * 100000)          # Above 100µs: nearest 5000ns
+
+     def calculate_metrics(self, times: List[int]) -> Model__Performance_Measure__Measurement:     # Calculate statistical metrics
+         if not times:
+             raise ValueError("Cannot calculate metrics from empty time list")
+         raw_score = self.calculate_raw_score   (times)
+         score     = self.calculate_stable_score(raw_score)
+         return Model__Performance_Measure__Measurement(
+             avg_time    = int(mean(times))                      ,
+             min_time    = min(times)                            ,
+             max_time    = max(times)                            ,
+             median_time = int(median(times))                    ,
+             stddev_time = stdev(times) if len(times) > 1 else 0 ,
+             raw_times   = times                                 ,
+             sample_size = len(times)                            ,
+             raw_score   = raw_score                             ,
+             score       = score                                 )
+
+     def measure(self, target: Callable) -> 'Performance_Measure__Session':     # Perform measurements
+         name         = target.__name__
+         measurements = {}
+         all_times    = []                                           # Collect all times for final score
+
+         for loop_size in MEASURE__INVOCATION__LOOPS:                # Measure each loop size
+             loop_times = []
+             for i in range(loop_size):
+                 start      = time.perf_counter_ns()
+                 target()
+                 end        = time.perf_counter_ns()
+                 time_taken = end - start
+                 loop_times.append(time_taken)
+                 all_times.append(time_taken)                        # Add to overall collection
+
+             measurements[loop_size] = self.calculate_metrics(loop_times)       # Store metrics for this loop size
+
+         raw_score   = self.calculate_raw_score   (all_times)
+         final_score = self.calculate_stable_score(raw_score)        # Calculate final stable score
+
+         self.result = Model__Performance_Measure__Result(
+             measurements = measurements ,
+             name         = name         ,
+             raw_score    = raw_score    ,
+             final_score  = final_score  )
+
+         return self
+
+     def print_measurement(self, measurement: Model__Performance_Measure__Measurement):            # Format measurement details
+         print(f"Samples : {measurement.sample_size}")
+         print(f"Score   : {measurement.score:,.0f}ns")
+         print(f"Avg     : {measurement.avg_time:,}ns")
+         print(f"Min     : {measurement.min_time:,}ns")
+         print(f"Max     : {measurement.max_time:,}ns")
+         print(f"Median  : {measurement.median_time:,}ns")
+         print(f"StdDev  : {measurement.stddev_time:,.2f}ns")
+
+     def print(self):                                                # Print measurement results
+         if not self.result:
+             print("No measurements taken yet")
+             return
+         print(f"{self.result.name:{self.padding}} | score: {self.result.final_score:7,d} ns | raw: {self.result.raw_score:7,d} ns")     # Print name and normalized score
+
+         return self
+
+     def assert_time(self, *expected_time: int):                     # Assert that the final score matches the expected normalized time"""
+         if self.assert_enabled is False:
+             return
+         if in_github_action():
+             last_expected_time = expected_time[-1] + 100            # +100 in case it is 0
+             new_expected_time  = last_expected_time * 5             # using last_expected_time * 5 as the upper limit (since these tests are significantly slowed in GitHUb Actions)
+             assert last_expected_time <= self.result.final_score <= new_expected_time, f"Performance changed for {self.result.name}: expected {last_expected_time} < {self.result.final_score:,d}ns, expected {new_expected_time}"
+         else:
+             assert self.result.final_score in expected_time, f"Performance changed for {self.result.name}: got {self.result.final_score:,d}ns, expected {expected_time}"
+
+     def assert_time(self, *expected_time: int):                     # Assert that the final score matches the expected normalized time"""
+         if self.assert_enabled is False:
+             return
+         if in_github_action():
+             last_expected_time = expected_time[-1] + 100            # +100 in case it is 0
+             new_expected_time  = last_expected_time * 5             # using last_expected_time * 5 as the upper limit (since these tests are significantly slowed in GitHUb Actions)
+             assert last_expected_time <= self.result.final_score <= new_expected_time, f"Performance changed for {self.result.name}: expected {last_expected_time} < {self.result.final_score:,d}ns, expected {new_expected_time}"
+         else:
+             assert self.result.final_score in expected_time, f"Performance changed for {self.result.name}: got {self.result.final_score:,d}ns, expected {expected_time}"
+
+     def assert_time__less_than(self, max_time: int):                # Assert that the final score matches the expected normalized time"""
+         if self.assert_enabled is False:
+             return
+         if in_github_action():
+             max_time = max_time * 5                                 # adjust for GitHub's slowness
+
+         assert self.result.final_score <= max_time, f"Performance changed for {self.result.name}: got {self.result.final_score:,d}ns, expected less than {max_time}ns"
+
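
To show how the new performance-testing pieces fit together, here is a hedged usage sketch (class, method and field names come from the code above; the nanosecond figures in the comments are illustrative, not measured):

    from osbot_utils.testing.performance.Performance_Measure__Session import Performance_Measure__Session

    def an_operation():                                              # callable whose cost we want to profile
        return [i * i for i in range(100)]

    session = Performance_Measure__Session(assert_enabled=False)     # disable assertions while finding a baseline
    session.measure(an_operation).print()                            # e.g. "an_operation | score:   3,000 ns | raw:   3,142 ns"

    result = session.result                                          # Model__Performance_Measure__Result
    for loop_size, measurement in result.measurements.items():       # one Measurement per Fibonacci loop size
        session.print_measurement(measurement)

    # once a baseline is known, assertions can be enabled in tests:
    # session.assert_enabled = True
    # session.assert_time(3_000, 4_000)          # passes when final_score equals one of the expected normalized values
    # session.assert_time__less_than(10_000)     # or simply cap the score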
osbot_utils/testing/performance/__init__.py — File without changes
osbot_utils/testing/performance/models/Model__Performance_Measure__Measurement.py
@@ -0,0 +1,14 @@
+ from typing import List
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+
+ class Model__Performance_Measure__Measurement(Type_Safe):           # Pure data container for measurement metrics
+     avg_time    : int                                               # Average time in nanoseconds
+     min_time    : int                                               # Minimum time observed
+     max_time    : int                                               # Maximum time observed
+     median_time : int                                               # Median time
+     stddev_time : float                                             # Standard deviation
+     raw_times   : List[int]                                         # Raw measurements for analysis
+     sample_size : int                                               # Number of measurements taken
+     score       : float
+     raw_score   : float
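
Since the model classes are Type_Safe data containers, they accept keyword construction and (assuming the usual Type_Safe json()/from_json() round-trip, which this diff does not show directly) can be persisted alongside test baselines; a small illustrative sketch with made-up values:

    from osbot_utils.testing.performance.models.Model__Performance_Measure__Measurement import Model__Performance_Measure__Measurement

    measurement = Model__Performance_Measure__Measurement(avg_time    = 1200              ,    # illustrative values only
                                                          min_time    = 900               ,
                                                          max_time    = 2100              ,
                                                          median_time = 1150              ,
                                                          stddev_time = 85.0              ,
                                                          raw_times   = [900, 1150, 2100] ,
                                                          sample_size = 3                 ,
                                                          score       = 1200.0            ,
                                                          raw_score   = 1180.0            )
    data       = measurement.json()                                             # plain dict (assumed Type_Safe API)
    round_trip = Model__Performance_Measure__Measurement.from_json(data)        # type-checked reconstruction (assumed Type_Safe API)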
osbot_utils/testing/performance/models/Model__Performance_Measure__Result.py
@@ -0,0 +1,10 @@
+ from typing import Dict
+ from osbot_utils.testing.performance.models.Model__Performance_Measure__Measurement import Model__Performance_Measure__Measurement
+ from osbot_utils.type_safe.Type_Safe import Type_Safe
+
+ class Model__Performance_Measure__Result(Type_Safe):                # Pure data container for measurement results
+     measurements : Dict[int, Model__Performance_Measure__Measurement]     # Results per loop size
+     name         : str                                              # Name of measured target
+     raw_score    : float
+     final_score  : float
+
osbot_utils/testing/performance/models/__init__.py — File without changes