stouputils 1.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140) hide show
  1. stouputils/__init__.py +40 -0
  2. stouputils/__main__.py +86 -0
  3. stouputils/_deprecated.py +37 -0
  4. stouputils/all_doctests.py +160 -0
  5. stouputils/applications/__init__.py +22 -0
  6. stouputils/applications/automatic_docs.py +634 -0
  7. stouputils/applications/upscaler/__init__.py +39 -0
  8. stouputils/applications/upscaler/config.py +128 -0
  9. stouputils/applications/upscaler/image.py +247 -0
  10. stouputils/applications/upscaler/video.py +287 -0
  11. stouputils/archive.py +344 -0
  12. stouputils/backup.py +488 -0
  13. stouputils/collections.py +244 -0
  14. stouputils/continuous_delivery/__init__.py +27 -0
  15. stouputils/continuous_delivery/cd_utils.py +243 -0
  16. stouputils/continuous_delivery/github.py +522 -0
  17. stouputils/continuous_delivery/pypi.py +130 -0
  18. stouputils/continuous_delivery/pyproject.py +147 -0
  19. stouputils/continuous_delivery/stubs.py +86 -0
  20. stouputils/ctx.py +408 -0
  21. stouputils/data_science/config/get.py +51 -0
  22. stouputils/data_science/config/set.py +125 -0
  23. stouputils/data_science/data_processing/image/__init__.py +66 -0
  24. stouputils/data_science/data_processing/image/auto_contrast.py +79 -0
  25. stouputils/data_science/data_processing/image/axis_flip.py +58 -0
  26. stouputils/data_science/data_processing/image/bias_field_correction.py +74 -0
  27. stouputils/data_science/data_processing/image/binary_threshold.py +73 -0
  28. stouputils/data_science/data_processing/image/blur.py +59 -0
  29. stouputils/data_science/data_processing/image/brightness.py +54 -0
  30. stouputils/data_science/data_processing/image/canny.py +110 -0
  31. stouputils/data_science/data_processing/image/clahe.py +92 -0
  32. stouputils/data_science/data_processing/image/common.py +30 -0
  33. stouputils/data_science/data_processing/image/contrast.py +53 -0
  34. stouputils/data_science/data_processing/image/curvature_flow_filter.py +74 -0
  35. stouputils/data_science/data_processing/image/denoise.py +378 -0
  36. stouputils/data_science/data_processing/image/histogram_equalization.py +123 -0
  37. stouputils/data_science/data_processing/image/invert.py +64 -0
  38. stouputils/data_science/data_processing/image/laplacian.py +60 -0
  39. stouputils/data_science/data_processing/image/median_blur.py +52 -0
  40. stouputils/data_science/data_processing/image/noise.py +59 -0
  41. stouputils/data_science/data_processing/image/normalize.py +65 -0
  42. stouputils/data_science/data_processing/image/random_erase.py +66 -0
  43. stouputils/data_science/data_processing/image/resize.py +69 -0
  44. stouputils/data_science/data_processing/image/rotation.py +80 -0
  45. stouputils/data_science/data_processing/image/salt_pepper.py +68 -0
  46. stouputils/data_science/data_processing/image/sharpening.py +55 -0
  47. stouputils/data_science/data_processing/image/shearing.py +64 -0
  48. stouputils/data_science/data_processing/image/threshold.py +64 -0
  49. stouputils/data_science/data_processing/image/translation.py +71 -0
  50. stouputils/data_science/data_processing/image/zoom.py +83 -0
  51. stouputils/data_science/data_processing/image_augmentation.py +118 -0
  52. stouputils/data_science/data_processing/image_preprocess.py +183 -0
  53. stouputils/data_science/data_processing/prosthesis_detection.py +359 -0
  54. stouputils/data_science/data_processing/technique.py +481 -0
  55. stouputils/data_science/dataset/__init__.py +45 -0
  56. stouputils/data_science/dataset/dataset.py +292 -0
  57. stouputils/data_science/dataset/dataset_loader.py +135 -0
  58. stouputils/data_science/dataset/grouping_strategy.py +296 -0
  59. stouputils/data_science/dataset/image_loader.py +100 -0
  60. stouputils/data_science/dataset/xy_tuple.py +696 -0
  61. stouputils/data_science/metric_dictionnary.py +106 -0
  62. stouputils/data_science/metric_utils.py +847 -0
  63. stouputils/data_science/mlflow_utils.py +206 -0
  64. stouputils/data_science/models/abstract_model.py +149 -0
  65. stouputils/data_science/models/all.py +85 -0
  66. stouputils/data_science/models/base_keras.py +765 -0
  67. stouputils/data_science/models/keras/all.py +38 -0
  68. stouputils/data_science/models/keras/convnext.py +62 -0
  69. stouputils/data_science/models/keras/densenet.py +50 -0
  70. stouputils/data_science/models/keras/efficientnet.py +60 -0
  71. stouputils/data_science/models/keras/mobilenet.py +56 -0
  72. stouputils/data_science/models/keras/resnet.py +52 -0
  73. stouputils/data_science/models/keras/squeezenet.py +233 -0
  74. stouputils/data_science/models/keras/vgg.py +42 -0
  75. stouputils/data_science/models/keras/xception.py +38 -0
  76. stouputils/data_science/models/keras_utils/callbacks/__init__.py +20 -0
  77. stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +219 -0
  78. stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +148 -0
  79. stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py +31 -0
  80. stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +249 -0
  81. stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +66 -0
  82. stouputils/data_science/models/keras_utils/losses/__init__.py +12 -0
  83. stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +56 -0
  84. stouputils/data_science/models/keras_utils/visualizations.py +416 -0
  85. stouputils/data_science/models/model_interface.py +939 -0
  86. stouputils/data_science/models/sandbox.py +116 -0
  87. stouputils/data_science/range_tuple.py +234 -0
  88. stouputils/data_science/scripts/augment_dataset.py +77 -0
  89. stouputils/data_science/scripts/exhaustive_process.py +133 -0
  90. stouputils/data_science/scripts/preprocess_dataset.py +70 -0
  91. stouputils/data_science/scripts/routine.py +168 -0
  92. stouputils/data_science/utils.py +285 -0
  93. stouputils/decorators.py +605 -0
  94. stouputils/image.py +441 -0
  95. stouputils/installer/__init__.py +18 -0
  96. stouputils/installer/common.py +67 -0
  97. stouputils/installer/downloader.py +101 -0
  98. stouputils/installer/linux.py +144 -0
  99. stouputils/installer/main.py +223 -0
  100. stouputils/installer/windows.py +136 -0
  101. stouputils/io.py +486 -0
  102. stouputils/parallel.py +483 -0
  103. stouputils/print.py +482 -0
  104. stouputils/py.typed +1 -0
  105. stouputils/stouputils/__init__.pyi +15 -0
  106. stouputils/stouputils/_deprecated.pyi +12 -0
  107. stouputils/stouputils/all_doctests.pyi +46 -0
  108. stouputils/stouputils/applications/__init__.pyi +2 -0
  109. stouputils/stouputils/applications/automatic_docs.pyi +106 -0
  110. stouputils/stouputils/applications/upscaler/__init__.pyi +3 -0
  111. stouputils/stouputils/applications/upscaler/config.pyi +18 -0
  112. stouputils/stouputils/applications/upscaler/image.pyi +109 -0
  113. stouputils/stouputils/applications/upscaler/video.pyi +60 -0
  114. stouputils/stouputils/archive.pyi +67 -0
  115. stouputils/stouputils/backup.pyi +109 -0
  116. stouputils/stouputils/collections.pyi +86 -0
  117. stouputils/stouputils/continuous_delivery/__init__.pyi +5 -0
  118. stouputils/stouputils/continuous_delivery/cd_utils.pyi +129 -0
  119. stouputils/stouputils/continuous_delivery/github.pyi +162 -0
  120. stouputils/stouputils/continuous_delivery/pypi.pyi +53 -0
  121. stouputils/stouputils/continuous_delivery/pyproject.pyi +67 -0
  122. stouputils/stouputils/continuous_delivery/stubs.pyi +39 -0
  123. stouputils/stouputils/ctx.pyi +211 -0
  124. stouputils/stouputils/decorators.pyi +252 -0
  125. stouputils/stouputils/image.pyi +172 -0
  126. stouputils/stouputils/installer/__init__.pyi +5 -0
  127. stouputils/stouputils/installer/common.pyi +39 -0
  128. stouputils/stouputils/installer/downloader.pyi +24 -0
  129. stouputils/stouputils/installer/linux.pyi +39 -0
  130. stouputils/stouputils/installer/main.pyi +57 -0
  131. stouputils/stouputils/installer/windows.pyi +31 -0
  132. stouputils/stouputils/io.pyi +213 -0
  133. stouputils/stouputils/parallel.pyi +216 -0
  134. stouputils/stouputils/print.pyi +136 -0
  135. stouputils/stouputils/version_pkg.pyi +15 -0
  136. stouputils/version_pkg.py +189 -0
  137. stouputils-1.14.0.dist-info/METADATA +178 -0
  138. stouputils-1.14.0.dist-info/RECORD +140 -0
  139. stouputils-1.14.0.dist-info/WHEEL +4 -0
  140. stouputils-1.14.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,244 @@
1
+ """
2
+ This module provides utilities for collection manipulation:
3
+
4
+ - unique_list: Remove duplicates from a list while preserving order using object id, hash or str
5
+ - sort_dict_keys: Sort dictionary keys using a given order list (ascending or descending)
6
+ - upsert_in_dataframe: Insert or update a row in a Polars DataFrame based on primary keys
7
+ - array_to_disk: Easily handle large numpy arrays on disk using zarr for efficient storage and access.
8
+
9
+ .. image:: https://raw.githubusercontent.com/Stoupy51/stouputils/refs/heads/main/assets/collections_module.gif
10
+ :alt: stouputils collections examples
11
+ """
12
+
13
+ # Imports
14
+ import atexit
15
+ import os
16
+ import shutil
17
+ import tempfile
18
+ from collections.abc import Iterable
19
+ from typing import TYPE_CHECKING, Any, Literal, TypeVar
20
+
21
+ # Lazy imports for typing
22
+ if TYPE_CHECKING:
23
+ import numpy as np
24
+ import polars as pl
25
+ import zarr # pyright: ignore[reportMissingTypeStubs]
26
+ from numpy.typing import NDArray
27
+
28
+ # Typing
29
+ T = TypeVar("T")
30
+
31
+ # Functions
32
+ def unique_list[T](list_to_clean: Iterable[T], method: Literal["id", "hash", "str"] = "str") -> list[T]:
33
+ """ Remove duplicates from the list while keeping the order using ids (default) or hash or str
34
+
35
+ Args:
36
+ list_to_clean (Iterable[T]): The list to clean
37
+ method (Literal["id", "hash", "str"]): The method to use to identify duplicates
38
+ Returns:
39
+ list[T]: The cleaned list
40
+
41
+ Examples:
42
+ >>> unique_list([1, 2, 3, 2, 1], method="id")
43
+ [1, 2, 3]
44
+
45
+ >>> s1 = {1, 2, 3}
46
+ >>> s2 = {2, 3, 4}
47
+ >>> s3 = {1, 2, 3}
48
+ >>> unique_list([s1, s2, s1, s1, s3, s2, s3], method="id")
49
+ [{1, 2, 3}, {2, 3, 4}, {1, 2, 3}]
50
+
51
+ >>> s1 = {1, 2, 3}
52
+ >>> s2 = {2, 3, 4}
53
+ >>> s3 = {1, 2, 3}
54
+ >>> unique_list([s1, s2, s1, s1, s3, s2, s3], method="str")
55
+ [{1, 2, 3}, {2, 3, 4}]
56
+ """
57
+ # Initialize the seen ids set and the result list
58
+ seen: set[int | str] = set()
59
+ result: list[T] = []
60
+
61
+ # Iterate over each item in the list
62
+ for item in list_to_clean:
63
+ if method == "id":
64
+ item_identifier = id(item)
65
+ elif method == "hash":
66
+ item_identifier = hash(item)
67
+ elif method == "str":
68
+ item_identifier = str(item)
69
+ else:
70
+ raise ValueError(f"Invalid method: {method}")
71
+
72
+ # If the item id is not in the seen ids set, add it to the seen ids set and append the item to the result list
73
+ if item_identifier not in seen:
74
+ seen.add(item_identifier)
75
+ result.append(item)
76
+
77
+ # Return the cleaned list
78
+ return result
79
+
80
+ def sort_dict_keys[T](dictionary: dict[T, Any], order: list[T], reverse: bool = False) -> dict[T, Any]:
81
+ """ Sort dictionary keys using a given order list (reverse optional)
82
+
83
+ Args:
84
+ dictionary (dict[T, Any]): The dictionary to sort
85
+ order (list[T]): The order list
86
+ reverse (bool): Whether to sort in reverse order (given to sorted function which behaves differently than order.reverse())
87
+ Returns:
88
+ dict[T, Any]: The sorted dictionary
89
+
90
+ Examples:
91
+ >>> sort_dict_keys({'b': 2, 'a': 1, 'c': 3}, order=["a", "b", "c"])
92
+ {'a': 1, 'b': 2, 'c': 3}
93
+
94
+ >>> sort_dict_keys({'b': 2, 'a': 1, 'c': 3}, order=["a", "b", "c"], reverse=True)
95
+ {'c': 3, 'b': 2, 'a': 1}
96
+
97
+ >>> sort_dict_keys({'b': 2, 'a': 1, 'c': 3, 'd': 4}, order=["c", "b"])
98
+ {'c': 3, 'b': 2, 'a': 1, 'd': 4}
99
+ """
100
+ return dict(sorted(dictionary.items(), key=lambda x: order.index(x[0]) if x[0] in order else len(order), reverse=reverse))
101
+
102
def upsert_in_dataframe(
    df: "pl.DataFrame",
    new_entry: dict[str, Any],
    primary_keys: dict[str, Any] | None = None
) -> "pl.DataFrame":
    """ Insert or update a row in the Polars DataFrame based on primary keys.

    Args:
        df (pl.DataFrame): The Polars DataFrame to update.
        new_entry (dict[str, Any]): The new entry to insert or update.
        primary_keys (dict[str, Any] | None): Column/value pairs identifying the row to update (default: None).
    Returns:
        pl.DataFrame: The updated Polars DataFrame.
    """
    # Lazy import so polars is only required when this function is actually used
    import polars as pl

    # An empty DataFrame simply becomes a one-row DataFrame
    if df.is_empty():
        return pl.DataFrame([new_entry])

    def append_row(frame: "pl.DataFrame") -> "pl.DataFrame":
        # diagonal_relaxed tolerates column mismatches between the two frames
        return pl.concat([frame, pl.DataFrame([new_entry])], how="diagonal_relaxed")

    # Without primary keys there is nothing to match against: always append
    if not primary_keys:
        return append_row(df)

    # Build the row-matching mask; a missing key column means no row can match
    mask: pl.Expr = pl.lit(True)
    for column, expected in primary_keys.items():
        if column not in df.columns:
            mask = pl.lit(False)
            break
        mask = mask & (df[column] == expected)

    # No matching row: plain insert
    if not df.select(mask).to_series().any():
        return append_row(df)

    # Matching row found: overwrite its values, creating columns as needed
    for column, value in new_entry.items():
        if column in df.columns:
            df = df.with_columns(pl.when(mask).then(pl.lit(value)).otherwise(pl.col(column)).alias(column))
        else:
            df = df.with_columns(pl.when(mask).then(pl.lit(value)).otherwise(None).alias(column))
    return df
152
+
153
def array_to_disk(
    data: "NDArray[Any] | zarr.Array",
    delete_input: bool = True,
    more_data: "NDArray[Any] | zarr.Array | None" = None
) -> tuple["zarr.Array", str, int]:
    """ Easily handle large numpy arrays on disk using zarr for efficient storage and access.

    Zarr provides a simpler and more efficient alternative to np.memmap with better compression
    and chunking capabilities.

    Args:
        data (NDArray | zarr.Array): The data to save/load as a zarr array
        delete_input (bool): Whether to delete the input data after creating the zarr array
        more_data (NDArray | zarr.Array | None): Additional data to append to the zarr array
    Returns:
        tuple[zarr.Array, str, int]: The zarr array, the directory path, and the total size in bytes

    Examples:
        >>> import numpy as np
        >>> data = np.random.rand(1000, 1000)
        >>> zarr_array = array_to_disk(data)[0]
        >>> zarr_array.shape
        (1000, 1000)

        >>> more_data = np.random.rand(500, 1000)
        >>> longer_array, dir_path, total_size = array_to_disk(zarr_array, more_data=more_data)
    """
    # Helper: total on-disk size (bytes) of every file under the given directory
    def dir_size(directory: str) -> int:
        return sum(
            os.path.getsize(os.path.join(dirpath, filename))
            for dirpath, _, filenames in os.walk(directory)
            for filename in filenames
        )

    # Lazy import: zarr is an optional dependency, only required here
    try:
        import zarr  # pyright: ignore[reportMissingTypeStubs]
    except ImportError as e:
        raise ImportError("zarr is required for array_to_disk function. Please install it via 'pip install zarr'.") from e

    # Fast path: data is already a zarr.Array on disk, so grow it in place
    # (resize along axis 0, then copy more_data into the newly added rows)
    if isinstance(data, zarr.Array) and more_data is not None:
        original_size: int = data.shape[0]
        new_shape: tuple[int, ...] = (original_size + more_data.shape[0], *data.shape[1:])
        data.resize(new_shape)
        data[original_size:] = more_data[:]

        # NOTE(review): `del more_data` only drops this function's local reference;
        # the caller's binding (if any) is unaffected, so memory may not be freed here
        if delete_input:
            del more_data
        # Recover the store's on-disk location; newer zarr stores may not expose .path,
        # hence the hasattr fallback to str(store) — TODO confirm across zarr versions
        store_path: str = str(data.store.path if hasattr(data.store, 'path') else data.store)  # type: ignore
        return data, store_path, dir_size(store_path)

    # Create a temporary directory to store the zarr array
    # (chunks=True enables auto-chunking for optimal performance)
    temp_dir: str = tempfile.mkdtemp()
    zarr_array: zarr.Array = zarr.open_array(temp_dir, mode="w", shape=data.shape, dtype=data.dtype, chunks=True) # pyright: ignore[reportUnknownMemberType]
    zarr_array[:] = data[:]

    # If additional data is provided, resize along axis 0 and append it
    if more_data is not None:
        original_size = data.shape[0]
        new_shape = (original_size + more_data.shape[0], *data.shape[1:])
        zarr_array.resize(new_shape)
        zarr_array[original_size:] = more_data[:]

    # NOTE(review): these `del` statements only drop the local references;
    # the caller's arrays stay alive as long as the caller holds them
    if delete_input:
        del data
        if more_data is not None:
            del more_data

    # Register a cleanup function to delete the zarr directory at interpreter exit
    atexit.register(lambda: shutil.rmtree(temp_dir, ignore_errors=True))

    # Return the on-disk array, its directory, and its total size in bytes
    return zarr_array, temp_dir, dir_size(temp_dir)
229
+
230
if __name__ == "__main__":

    # Bug fix: numpy is only imported under TYPE_CHECKING at module level,
    # so it must be imported here for this example to run at all
    import numpy as np

    # Example usage of array_to_disk (now using zarr)
    print("\nZarr Example:")
    data = np.random.rand(1000, 1000)
    zarr_array, dir_path, total_size = array_to_disk(data, delete_input=True)
    print(f"Zarr array shape: {zarr_array.shape}, directory: {dir_path}, size: {total_size:,} bytes")
    print(f"Compression ratio: {(data.nbytes / total_size):.2f}x")

    # Make it longer (1000x1000 -> 1500x1000)
    data2 = np.random.rand(500, 1000)
    longer_array, dir_path, total_size = array_to_disk(zarr_array, more_data=data2)
    print(f"\nLonger zarr array shape: {longer_array.shape}, directory: {dir_path}, size: {total_size:,} bytes")
    print(f"Compression ratio: {(1500 * 1000 * 8 / total_size):.2f}x")
244
+
@@ -0,0 +1,27 @@
1
+ """ Continuous delivery and deployment utilities.
2
+
3
+ This module provides tools for automating software delivery and deployment:
4
+
5
+ Key Features:
6
+ - GitHub release management and uploads
7
+ - PyPI package publishing utilities
8
+ - pyproject.toml file management
9
+ - Common CD/CI utilities
10
+
11
+ Components:
12
+ - cd_utils: Common utilities for continuous delivery
13
+ - github: GitHub-specific utilities (upload_to_github)
14
+ - pypi: PyPI publishing tools (pypi_full_routine)
15
+ - pyproject: pyproject.toml file management
16
+ - stubs: Stub file generation using pyright (stubs_full_routine)
17
+
18
+ """
19
+ # ruff: noqa: F403
20
+
21
+ # Imports
22
+ from .cd_utils import *
23
+ from .github import *
24
+ from .pypi import *
25
+ from .pyproject import *
26
+ from .stubs import *
27
+
@@ -0,0 +1,243 @@
1
+ """ This module contains utilities for continuous delivery, such as loading credentials from a file.
2
+ It is mainly used by the `stouputils.continuous_delivery.github` module.
3
+ """
4
+
5
+ # Imports
6
+ import os
7
+ from typing import TYPE_CHECKING, Any
8
+
9
+ from ..decorators import handle_error
10
+ from ..io import clean_path, json_load
11
+ from ..print import warning
12
+
13
+ if TYPE_CHECKING:
14
+ import requests
15
+
16
+
17
+ # Load credentials from file
18
@handle_error()
def load_credentials(credentials_path: str) -> dict[str, Any]:
    """ Load credentials from a JSON or YAML file into a dictionary.

    The file is located by its (cleaned) path and parsed according to its
    extension; the resulting mapping is returned as-is.

    Args:
        credentials_path (str): Path to the credentials file (.json or .yml)
    Returns:
        dict[str, Any]: Dictionary containing the credentials

    Example JSON format:

    .. code-block:: json

        {
            "github": {
                "username": "Stoupy51",
                "api_key": "ghp_XXXXXXXXXXXXXXXXXXXXXXXXXX"
            }
        }

    Example YAML format:

    .. code-block:: yaml

        github:
            username: "Stoupy51"
            api_key: "ghp_XXXXXXXXXXXXXXXXXXXXXXXXXX"
    """
    # Warn up front: credential files may come from untrusted sources
    warning(
        "Be cautious when loading credentials from external sources like this, "
        "as they might contain malicious code that could compromise your credentials without your knowledge"
    )

    # Normalize the path and make sure the file actually exists
    credentials_path = clean_path(credentials_path)
    if not os.path.exists(credentials_path):
        raise FileNotFoundError(f"Credentials file not found at '{credentials_path}'")

    # Dispatch on the file extension
    if credentials_path.endswith(".json"):
        return json_load(credentials_path)
    if credentials_path.endswith((".yml", ".yaml")):
        # msgspec is imported lazily so YAML support is optional
        from msgspec import yaml
        with open(credentials_path) as f:
            return yaml.decode(f.read())

    # Unsupported extension
    raise ValueError("Credentials file must be .json or .yml format")
73
+
74
# Handle a response
def handle_response(response: "requests.Response", error_message: str) -> None:
    """ Raise a ValueError if the API response is not successful (status code not in 200-299).

    Args:
        response (requests.Response): The response from the API
        error_message (str): The error message to raise if the response is not successful
    """
    # Successful 2xx responses pass through silently
    if 200 <= response.status_code < 300:
        return

    # Prefer the decoded JSON body in the error message;
    # fall back to the raw text when the body is not valid JSON
    import requests
    try:
        raise ValueError(f"{error_message}, response code {response.status_code} with response {response.json()}")
    except requests.exceptions.JSONDecodeError as e:
        raise ValueError(f"{error_message}, response code {response.status_code} with response {response.text}") from e
88
+
89
# Clean a version string
def clean_version(version: str, keep: str = "") -> str:
    """ Strip a version string down to digits, dots, and any explicitly kept characters.

    Args:
        version (str): The version string to clean
        keep (str): Extra characters to keep in the version string
    Returns:
        str: The cleaned version string

    >>> clean_version("v1.e0.zfezf0.1.2.3zefz")
    '1.0.0.1.2.3'
    >>> clean_version("v1.e0.zfezf0.1.2.3zefz", keep="v")
    'v1.0.0.1.2.3'
    >>> clean_version("v1.2.3b", keep="ab")
    '1.2.3b'
    """
    # Build the allowed alphabet once, then filter the string through it
    allowed: str = "0123456789." + keep
    return "".join(char for char in version if char in allowed)
107
+
108
# Convert a version string to a float
def version_to_float(version: str, error: bool = True) -> Any:
    """ Converts a version string into a float for comparison purposes.
    The version string is expected to follow the format of major.minor.patch.something_else....,
    where each part is separated by a dot and can be extended indefinitely.
    Supports pre-release suffixes with numbers: devN/dN (dev), aN (alpha), bN (beta), rcN/cN (release candidate).
    Ordering: 1.0.0 > 1.0.0rc2 > 1.0.0rc1 > 1.0.0b2 > 1.0.0b1 > 1.0.0a2 > 1.0.0a1 > 1.0.0dev1

    Args:
        version (str): The version string to convert. (e.g. "v1.0.0.1.2.3", "v2.0.0b2", "v1.0.0rc1")
        error (bool): Return None on error instead of raising an exception
    Returns:
        float: The float representation of the version. (e.g. 0)

    >>> version_to_float("v1.0.0")
    1.0
    >>> version_to_float("v1.0.0.1")
    1.000000001
    >>> version_to_float("v2.3.7")
    2.003007
    >>> version_to_float("v1.0.0.1.2.3")
    1.0000000010020031
    >>> version_to_float("v2.0") > version_to_float("v1.0.0.1")
    True
    >>> version_to_float("v2.0.0") > version_to_float("v2.0.0rc") > version_to_float("v2.0.0b") > version_to_float("v2.0.0a") > version_to_float("v2.0.0dev")
    True
    >>> version_to_float("v1.0.0b") > version_to_float("v1.0.0a")
    True
    >>> version_to_float("v1.0.0") > version_to_float("v1.0.0b")
    True
    >>> version_to_float("v3.0.0a") > version_to_float("v2.9.9")
    True
    >>> version_to_float("v1.2.3b") < version_to_float("v1.2.3")
    True
    >>> version_to_float("1.0.0") == version_to_float("v1.0.0")
    True
    >>> version_to_float("2.0.0.0.0.0.1b") > version_to_float("2.0.0.0.0.0.1a")
    True
    >>> version_to_float("2.0.0.0.0.0.1") > version_to_float("2.0.0.0.0.0.1b")
    True
    >>> version_to_float("v1.0.0rc") == version_to_float("v1.0.0c")
    True
    >>> version_to_float("v1.0.0c") > version_to_float("v1.0.0b")
    True
    >>> version_to_float("v1.0.0d") < version_to_float("v1.0.0a")
    True
    >>> version_to_float("v1.0.0dev") < version_to_float("v1.0.0a")
    True
    >>> version_to_float("v1.0.0dev") == version_to_float("v1.0.0d")
    True
    >>> version_to_float("v1.0.0rc2") > version_to_float("v1.0.0rc1")
    True
    >>> version_to_float("v1.0.0b2") > version_to_float("v1.0.0b1")
    True
    >>> version_to_float("v1.0.0a2") > version_to_float("v1.0.0a1")
    True
    >>> version_to_float("v1.0.0dev2") > version_to_float("v1.0.0dev1")
    True
    >>> version_to_float("v1.0.0") > version_to_float("v1.0.0rc2") > version_to_float("v1.0.0rc1")
    True
    >>> version_to_float("v1.0.0rc1") > version_to_float("v1.0.0b2")
    True
    >>> version_to_float("v1.0.0b1") > version_to_float("v1.0.0a2")
    True
    >>> version_to_float("v1.0.0a1") > version_to_float("v1.0.0dev2")
    True
    >>> versions = ["v1.0.0", "v1.0.0rc2", "v1.0.0rc1", "v1.0.0b2", "v1.0.0b1", "v1.0.0a2", "v1.0.0a1", "v1.0.0dev2", "v1.0.0dev1"]
    >>> sorted_versions = sorted(versions, key=version_to_float, reverse=True)
    >>> sorted_versions == versions
    True
    """
    try:
        # Map each pre-release suffix to its "distance from release" rank.
        # IMPORTANT: dict insertion order is the SEARCH order — "dev" must be
        # checked before "d" and "rc" before "c", otherwise the shorter alias
        # would match inside the longer one.
        suffix_modifiers: dict[str, int] = {
            "dev": 4, # dev is lowest
            "d": 4, # d (dev) is lowest
            "a": 3, # alpha
            "b": 2, # beta
            "rc": 1, # rc is highest pre-release
            "c": 1, # c (release candidate)
        }
        suffix_type: int = 0 # 0 = no suffix, 1-4 = rc/c, b, a, dev/d
        suffix_number: int = 0

        # Detect at most one suffix (with optional trailing number) and strip
        # it from the version string before numeric parsing
        for suffix, modifier in suffix_modifiers.items():
            if suffix in version:
                # rfind: the suffix is expected at the tail of the string
                suffix_pos: int = version.rfind(suffix)
                after_suffix: str = version[suffix_pos + len(suffix):]

                # Valid matches are "suffix + digits" or "suffix" at the very end
                if after_suffix.isdigit():
                    suffix_number = int(after_suffix)
                    version = version[:suffix_pos]
                elif after_suffix == "":
                    # Suffix at the end without number
                    version = version[:suffix_pos]
                else:
                    # Not a valid suffix match (e.g. "b" inside "beta"), keep searching
                    continue

                # Found a valid suffix, set the type and stop searching
                suffix_type = modifier
                break

        # Clean the version string by keeping only the numbers and dots
        version = clean_version(version)

        # Split the version string into parts
        version_parts: list[str] = version.split(".")
        total: float = 0.0
        multiplier: float = 1.0

        # Each successive part weighs 1000x less than the previous one
        # (so e.g. "2.3.7" -> 2 + 0.003 + 0.000007)
        for part in version_parts:
            total += int(part) * multiplier
            multiplier /= 1_000

        # Apply pre-release modifier
        # Pre-releases are represented as negative offsets from the base version
        # Lower suffix_type = closer to release (rc=1 is closest, dev=4 is furthest)
        # Higher suffix_number = closer to release within the same suffix type
        # Formula: base_version - (suffix_type * 1000 - suffix_number) * 1e-9
        # This ensures: 1.0.0 > 1.0.0rc2 > 1.0.0rc1 > 1.0.0b2 > 1.0.0a2 > 1.0.0dev2
        if suffix_type > 0:
            total -= (suffix_type * 1000 - suffix_number) * 1e-9

        return total
    except Exception as e:
        # On malformed input: raise with context, or return None when error=False
        if error:
            raise ValueError(f"Invalid version string: '{version}'") from e
        else:
            return None # type: ignore
243
+