stouputils 1.3.10.tar.gz → 1.3.12.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. {stouputils-1.3.10 → stouputils-1.3.12}/PKG-INFO +1 -1
  2. {stouputils-1.3.10 → stouputils-1.3.12}/pyproject.toml +1 -1
  3. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/continuous_delivery/github.py +43 -10
  4. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/model_interface.py +105 -40
  5. {stouputils-1.3.10 → stouputils-1.3.12}/.gitignore +0 -0
  6. {stouputils-1.3.10 → stouputils-1.3.12}/LICENSE +0 -0
  7. {stouputils-1.3.10 → stouputils-1.3.12}/README.md +0 -0
  8. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/__init__.py +0 -0
  9. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/all_doctests.py +0 -0
  10. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/__init__.py +0 -0
  11. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/automatic_docs.py +0 -0
  12. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/upscaler/__init__.py +0 -0
  13. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/upscaler/config.py +0 -0
  14. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/upscaler/image.py +0 -0
  15. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/applications/upscaler/video.py +0 -0
  16. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/archive.py +0 -0
  17. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/backup.py +0 -0
  18. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/collections.py +0 -0
  19. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/continuous_delivery/__init__.py +0 -0
  20. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/continuous_delivery/cd_utils.py +0 -0
  21. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/continuous_delivery/pypi.py +0 -0
  22. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/continuous_delivery/pyproject.py +0 -0
  23. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/ctx.py +0 -0
  24. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/config/get.py +0 -0
  25. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/config/set.py +0 -0
  26. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/__init__.py +0 -0
  27. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/auto_contrast.py +0 -0
  28. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/axis_flip.py +0 -0
  29. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/bias_field_correction.py +0 -0
  30. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/binary_threshold.py +0 -0
  31. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/blur.py +0 -0
  32. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/brightness.py +0 -0
  33. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/canny.py +0 -0
  34. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/clahe.py +0 -0
  35. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/common.py +0 -0
  36. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/contrast.py +0 -0
  37. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/curvature_flow_filter.py +0 -0
  38. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/denoise.py +0 -0
  39. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/histogram_equalization.py +0 -0
  40. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/invert.py +0 -0
  41. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/laplacian.py +0 -0
  42. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/median_blur.py +0 -0
  43. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/noise.py +0 -0
  44. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/normalize.py +0 -0
  45. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/random_erase.py +0 -0
  46. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/resize.py +0 -0
  47. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/rotation.py +0 -0
  48. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/salt_pepper.py +0 -0
  49. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/sharpening.py +0 -0
  50. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/shearing.py +0 -0
  51. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/threshold.py +0 -0
  52. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/translation.py +0 -0
  53. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image/zoom.py +0 -0
  54. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image_augmentation.py +0 -0
  55. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/image_preprocess.py +0 -0
  56. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/prosthesis_detection.py +0 -0
  57. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/data_processing/technique.py +0 -0
  58. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/__init__.py +0 -0
  59. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/dataset.py +0 -0
  60. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/dataset_loader.py +0 -0
  61. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/grouping_strategy.py +0 -0
  62. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/image_loader.py +0 -0
  63. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/dataset/xy_tuple.py +0 -0
  64. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/metric_dictionnary.py +0 -0
  65. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/metric_utils.py +0 -0
  66. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/mlflow_utils.py +0 -0
  67. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/abstract_model.py +0 -0
  68. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/all.py +0 -0
  69. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/base_keras.py +0 -0
  70. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/all.py +0 -0
  71. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/convnext.py +0 -0
  72. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/densenet.py +0 -0
  73. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/efficientnet.py +0 -0
  74. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/mobilenet.py +0 -0
  75. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/resnet.py +0 -0
  76. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/squeezenet.py +0 -0
  77. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/vgg.py +0 -0
  78. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras/xception.py +0 -0
  79. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/__init__.py +0 -0
  80. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/colored_progress_bar.py +0 -0
  81. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/learning_rate_finder.py +0 -0
  82. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/model_checkpoint_v2.py +0 -0
  83. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/progressive_unfreezing.py +0 -0
  84. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/callbacks/warmup_scheduler.py +0 -0
  85. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/losses/__init__.py +0 -0
  86. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/losses/next_generation_loss.py +0 -0
  87. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/keras_utils/visualizations.py +0 -0
  88. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/models/sandbox.py +0 -0
  89. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/range_tuple.py +0 -0
  90. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/scripts/augment_dataset.py +0 -0
  91. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/scripts/exhaustive_process.py +0 -0
  92. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/scripts/preprocess_dataset.py +0 -0
  93. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/scripts/routine.py +0 -0
  94. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/data_science/utils.py +0 -0
  95. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/decorators.py +0 -0
  96. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/dont_look/zip_file_override.py +0 -0
  97. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/image.py +0 -0
  98. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/__init__.py +0 -0
  99. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/common.py +0 -0
  100. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/downloader.py +0 -0
  101. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/linux.py +0 -0
  102. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/main.py +0 -0
  103. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/installer/windows.py +0 -0
  104. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/io.py +0 -0
  105. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/parallel.py +0 -0
  106. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/print.py +0 -0
  107. {stouputils-1.3.10 → stouputils-1.3.12}/stouputils/py.typed +0 -0
--- stouputils-1.3.10/PKG-INFO
+++ stouputils-1.3.12/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: stouputils
-Version: 1.3.10
+Version: 1.3.12
 Summary: Stouputils is a collection of utility modules designed to simplify and enhance the development process. It includes a range of tools for tasks such as execution of doctests, display utilities, decorators, as well as context managers, and many more.
 Project-URL: Homepage, https://github.com/Stoupy51/stouputils
 Project-URL: Issues, https://github.com/Stoupy51/stouputils/issues
--- stouputils-1.3.10/pyproject.toml
+++ stouputils-1.3.12/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "stouputils"
-version = "1.3.10"
+version = "1.3.12"
 description = "Stouputils is a collection of utility modules designed to simplify and enhance the development process. It includes a range of tools for tasks such as execution of doctests, display utilities, decorators, as well as context managers, and many more."
 readme = "README.md"
 requires-python = ">=3.10"
--- stouputils-1.3.10/stouputils/continuous_delivery/github.py
+++ stouputils-1.3.12/stouputils/continuous_delivery/github.py
@@ -247,7 +247,7 @@ def generate_changelog(
 ) -> str:
 	""" Generate changelog from commits. They must follow the conventional commits convention.
 
-	Convention format: <type>: <description>
+	Convention format: <type>: <description> or <type>(<sub-category>): <description>
 
 	Args:
 		commits (list[dict]): List of commits to generate changelog from
@@ -261,7 +261,7 @@ def generate_changelog(
 	https://www.conventionalcommits.org/en/v1.0.0/
 	"""
 	# Initialize the commit groups
-	commit_groups: dict[str, list[tuple[str, str]]] = {}
+	commit_groups: dict[str, list[tuple[str, str, str | None]]] = {}
 
 	# Iterate over the commits
 	for commit in commits:
@@ -270,17 +270,27 @@ def generate_changelog(
 
 		# If the message contains a colon, split the message into a type and a description
 		if ":" in message:
-			commit_type, desc = message.split(":", 1)
-
-			# Clean the type, ex: 'feat(hand)/refactor(feet)' -> 'feat'
-			commit_type = commit_type.split('(')[0].split('/')[0]
+			commit_type_part, desc = message.split(":", 1)
+
+			# Extract sub-category if present (e.g., 'feat(Project)' -> 'feat', 'Project')
+			sub_category: str|None = None
+			if "(" in commit_type_part and ")" in commit_type_part:
+				# Extract the base type (before parentheses)
+				commit_type: str = commit_type_part.split('(')[0].split('/')[0]
+				# Extract the sub-category (between parentheses)
+				sub_category = commit_type_part.split('(')[1].split(')')[0]
+			else:
+				# No sub-category, just clean the type
+				commit_type: str = commit_type_part.split('/')[0]
+
+			# Clean the type to only keep letters
 			commit_type = "".join(c for c in commit_type.lower().strip() if c in "abcdefghijklmnopqrstuvwxyz")
 			commit_type = COMMIT_TYPES.get(commit_type, commit_type.title())
 
 			# Add the commit to the commit groups
 			if commit_type not in commit_groups:
 				commit_groups[commit_type] = []
-			commit_groups[commit_type].append((desc.strip(), sha))
+			commit_groups[commit_type].append((desc.strip(), sha, sub_category))
 
 	# Initialize the changelog
 	changelog: str = "## Changelog\n\n"
@@ -289,9 +299,32 @@ def generate_changelog(
 	for commit_type in sorted(commit_groups.keys()):
 		changelog += f"### {commit_type}\n"
 
-		# Reverse the list to display the most recent commits in last
-		for desc, sha in commit_groups[commit_type][::-1]:
-			changelog += f"- {desc} ([{sha[:7]}](https://github.com/{owner}/{project_name}/commit/{sha}))\n"
+		# Group commits by sub-category
+		sub_category_groups: dict[str|None, list[tuple[str, str, str|None]]] = {}
+		for desc, sha, sub_category in commit_groups[commit_type]:
+			if sub_category not in sub_category_groups:
+				sub_category_groups[sub_category] = []
+			sub_category_groups[sub_category].append((desc, sha, sub_category))
+
+		# Sort sub-categories (named ones alphabetically first, None last)
+		sorted_sub_categories = sorted(
+			sub_category_groups.keys(),
+			key=lambda x: (x is None, x or "")
+		)
+
+		# Iterate over sub-categories
+		for sub_category in sorted_sub_categories:
+
+			# Add commits for this sub-category
+			for desc, sha, _ in reversed(sub_category_groups[sub_category]):
+
+				# Prepend sub-category to description if present
+				if sub_category:
+					formatted_desc = f"[{sub_category.replace('_', ' ').title()}] {desc}"
+				else:
+					formatted_desc = desc
+				changelog += f"- {formatted_desc} ([{sha[:7]}](https://github.com/{owner}/{project_name}/commit/{sha}))\n"
+
 		changelog += "\n"
 
 	# Add the full changelog link if there is a latest tag and return the changelog
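For illustration, here is the new parsing and formatting logic condensed into a standalone sketch. This is not code from the package: the COMMIT_TYPES values below are assumed for the example (the real mapping is defined elsewhere in github.py and does not appear in this diff).

	COMMIT_TYPES: dict[str, str] = {"feat": "Features", "fix": "Bug Fixes"}  # assumed example values

	def parse_commit(message: str) -> tuple[str, str, str | None]:
		""" Split a conventional-commit message into (display type, description, sub-category). """
		commit_type_part, desc = message.split(":", 1)
		sub_category: str | None = None
		if "(" in commit_type_part and ")" in commit_type_part:
			# 'feat(data_science)' -> base type 'feat', sub-category 'data_science'
			sub_category = commit_type_part.split('(')[1].split(')')[0]
			commit_type = commit_type_part.split('(')[0].split('/')[0]
		else:
			commit_type = commit_type_part.split('/')[0]
		# Keep only letters, then map the type to its display name
		commit_type = "".join(c for c in commit_type.lower().strip() if c in "abcdefghijklmnopqrstuvwxyz")
		return COMMIT_TYPES.get(commit_type, commit_type.title()), desc.strip(), sub_category

	print(parse_commit("feat(data_science): add retry to training"))
	# -> ('Features', 'add retry to training', 'data_science')
	# which 1.3.12 renders as: - [Data Science] add retry to training ([abc1234](...))

Commits without a sub-category keep their old rendering; within each type section, sub-categorized entries come first in alphabetical order and uncategorized ones are listed last, which is what the (x is None, x or "") sort key encodes.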
--- stouputils-1.3.10/stouputils/data_science/models/model_interface.py
+++ stouputils-1.3.12/stouputils/data_science/models/model_interface.py
@@ -1,11 +1,53 @@
 """ Base implementation for machine learning models with common functionality.
 Provides shared infrastructure for model training, evaluation, and MLflow integration.
 
-Implements core workflow methods:
+Implements comprehensive workflow methods and features:
 
+Core Training & Evaluation:
 - Full training/evaluation pipeline (routine_full)
-- Transfer learning weight management
-- MLflow experiment tracking integration
+- K-fold cross-validation with stratified splitting
+- Transfer learning weight management (ImageNet, custom datasets)
+- Model prediction and evaluation with comprehensive metrics
+
+Hyperparameter Optimization:
+- Learning Rate Finder with automatic best LR detection
+- Unfreeze Percentage Finder for fine-tuning optimization
+- Class weight balancing for imbalanced datasets
+- Learning rate warmup and scheduling (ReduceLROnPlateau)
+
+Advanced Training Features:
+- Early stopping with configurable patience
+- Model checkpointing with delay options
+- Additional training data integration (bypasses CV splitting)
+- Multi-processing support for memory management
+- Automatic retry mechanisms with error handling
+
+MLflow Integration:
+- Complete experiment tracking and logging
+- Parameter logging (training, optimizer, callback parameters)
+- Metric logging with averages and standard deviations
+- Model artifact saving and versioning
+- Training history visualization and plotting
+
+Model Architecture Support:
+- Keras/TensorFlow and PyTorch compatibility
+- Automatic layer counting and fine-tuning
+- Configurable unfreeze percentages for transfer learning
+- Memory leak prevention with subprocess training
+
+Evaluation & Visualization:
+- ROC and PR curve generation
+- Comprehensive metric calculation (Sensitivity, Specificity, AUC, etc.)
+- Training history plotting and analysis
+- Saliency maps and GradCAM visualization (single sample)
+- Cross-validation results aggregation
+
+Configuration & Utilities:
+- Extensive parameter override system
+- Verbosity control throughout pipeline
+- Temporary directory management for artifacts
+- Garbage collection and memory optimization
+- Error logging and handling with retry mechanisms
 """
 # pyright: reportUnknownMemberType=false
 # pyright: reportUnknownArgumentType=false
@@ -16,6 +58,7 @@ from __future__ import annotations
 import gc
 import multiprocessing
 import multiprocessing.queues
+import time
 from collections.abc import Generator, Iterable
 from tempfile import TemporaryDirectory
 from typing import Any
@@ -27,7 +70,7 @@ from numpy.typing import NDArray
 from sklearn.utils import class_weight
 
 from ...decorators import handle_error, measure_time
-from ...print import progress, debug, info
+from ...print import progress, debug, info, warning
 from ...ctx import Muffle, MeasureTime
 from ...io import clean_path
 
@@ -741,17 +784,25 @@ class ModelInterface(AbstractModel):
 		Returns:
 			float: The best learning rate found.
 		"""
-		if DataScienceConfig.DO_FIT_IN_SUBPROCESS:
-			queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
-			process: multiprocessing.Process = multiprocessing.Process(
-				target=self._find_best_learning_rate_subprocess,
-				kwargs={"dataset": dataset, "queue": queue, "verbose": verbose}
-			)
-			process.start()
-			process.join()
-			results: dict[str, Any] = queue.get(timeout=60)
-		else:
-			results: dict[str, Any] = self._find_best_learning_rate_subprocess(dataset, verbose=verbose)
+		results: dict[str, Any] = {}
+		for try_count in range(10):
+			try:
+				if DataScienceConfig.DO_FIT_IN_SUBPROCESS:
+					queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
+					process: multiprocessing.Process = multiprocessing.Process(
+						target=self._find_best_learning_rate_subprocess,
+						kwargs={"dataset": dataset, "queue": queue, "verbose": verbose}
+					)
+					process.start()
+					process.join()
+					results = queue.get(timeout=60)
+				else:
+					results = self._find_best_learning_rate_subprocess(dataset, verbose=verbose)
+				if results:
+					break
+			except Exception as e:
+				warning(f"Error finding best learning rate: {e}\nRetrying in 60 seconds ({try_count + 1}/10)...")
+				time.sleep(60)
 
 		# Plot the learning rate vs loss and find the best learning rate
 		return MetricUtils.find_best_x_and_plot(
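The same ten-attempt retry wrapper is applied twice more below, to the unfreeze-percentage finder and to _train_fold. Reduced to its essentials, the pattern looks like the following sketch; run_with_retries and worker are hypothetical names used only for this illustration, not part of stouputils:

	import multiprocessing
	import time
	from collections.abc import Callable
	from typing import Any

	def run_with_retries(worker: Callable[..., None], max_tries: int = 10, delay: float = 60.0, **kwargs: Any) -> dict[str, Any]:
		""" Run `worker` in a subprocess up to `max_tries` times; the worker is expected
		to put a result dict on the queue it receives. Sleep `delay` seconds between attempts. """
		results: dict[str, Any] = {}
		for try_count in range(max_tries):
			try:
				queue = multiprocessing.Queue()
				process = multiprocessing.Process(target=worker, kwargs={"queue": queue, **kwargs})
				process.start()
				process.join()
				# Raises queue.Empty if the child exited without putting a result
				results = queue.get(timeout=60)
				if results:
					break
			except Exception as e:
				print(f"Attempt {try_count + 1}/{max_tries} failed: {e}, retrying in {delay:.0f}s...")
				time.sleep(delay)
		return results

The queue timeout is what turns a silent child crash (for example, a fit killed by the OOM killer) into an exception, and therefore into a retry, instead of an indefinite hang.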
@@ -777,17 +828,25 @@ class ModelInterface(AbstractModel):
 		Returns:
 			float: The best unfreeze percentage found.
 		"""
-		if DataScienceConfig.DO_FIT_IN_SUBPROCESS:
-			queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
-			process: multiprocessing.Process = multiprocessing.Process(
-				target=self._find_best_unfreeze_percentage_subprocess,
-				kwargs={"dataset": dataset, "queue": queue, "verbose": verbose}
-			)
-			process.start()
-			process.join()
-			results: dict[str, Any] = queue.get(timeout=60)
-		else:
-			results: dict[str, Any] = self._find_best_unfreeze_percentage_subprocess(dataset, verbose=verbose)
+		results: dict[str, Any] = {}
+		for try_count in range(10):
+			try:
+				if DataScienceConfig.DO_FIT_IN_SUBPROCESS:
+					queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
+					process: multiprocessing.Process = multiprocessing.Process(
+						target=self._find_best_unfreeze_percentage_subprocess,
+						kwargs={"dataset": dataset, "queue": queue, "verbose": verbose}
+					)
+					process.start()
+					process.join()
+					results = queue.get(timeout=60)
+				else:
+					results = self._find_best_unfreeze_percentage_subprocess(dataset, verbose=verbose)
+				if results:
+					break
+			except Exception as e:
+				warning(f"Error finding best unfreeze percentage: {e}\nRetrying in 60 seconds ({try_count + 1}/10)...")
+				time.sleep(60)
 
 		# Plot the unfreeze percentage vs loss and find the best unfreeze percentage
 		return MetricUtils.find_best_x_and_plot(
@@ -823,20 +882,26 @@
 			temp_dir = TemporaryDirectory()
 
 		# Create and run the process
-		if DataScienceConfig.DO_FIT_IN_SUBPROCESS and fold_number > 0:
-			queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
-			process: multiprocessing.Process = multiprocessing.Process(
-				target=self._train_subprocess,
-				args=(dataset, checkpoint_path, temp_dir),
-				kwargs={"queue": queue, "verbose": verbose}
-			)
-			process.start()
-			process.join()
-			return_values: dict[str, Any] = queue.get(timeout=60)
-		else:
-			return_values: dict[str, Any] = self._train_subprocess(
-				dataset, checkpoint_path, temp_dir, verbose=verbose
-			)
+		return_values: dict[str, Any] = {}
+		for try_count in range(10):
+			try:
+				if DataScienceConfig.DO_FIT_IN_SUBPROCESS and fold_number > 0:
+					queue: multiprocessing.queues.Queue[dict[str, Any]] = multiprocessing.Queue()
+					process: multiprocessing.Process = multiprocessing.Process(
+						target=self._train_subprocess,
+						args=(dataset, checkpoint_path, temp_dir),
+						kwargs={"queue": queue, "verbose": verbose}
+					)
+					process.start()
+					process.join()
+					return_values = queue.get(timeout=60)
+				else:
+					return_values = self._train_subprocess(dataset, checkpoint_path, temp_dir, verbose=verbose)
+				if return_values:
+					break
+			except Exception as e:
+				warning(f"Error during _train_fold: {e}\nRetrying in 60 seconds ({try_count + 1}/10)...")
+				time.sleep(60)
 		history: dict[str, Any] = return_values["history"]
 		eval_results: dict[str, Any] = return_values["eval_results"]
 		predictions: NDArray[Any] = return_values["predictions"]