balancr 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
balancr/cli/commands.py CHANGED
@@ -1535,7 +1535,7 @@ def run_comparison(args):
     )
 
     # Process each classifier and save its results in a separate directory
-    standard_start_time = time.time()
+    orig_start_time = time.time()
     for classifier_name in current_config.get("classifiers", {}):
         logging.info(f"Processing results for classifier: {classifier_name}")
 
@@ -1543,18 +1543,18 @@ def run_comparison(args):
         classifier_dir = output_dir / classifier_name
         classifier_dir.mkdir(exist_ok=True)
 
-        # Create standard metrics directory
-        std_metrics_dir = classifier_dir / "standard_metrics"
-        std_metrics_dir.mkdir(exist_ok=True)
+        # Create original dataset metrics directory
+        orig_metrics_dir = classifier_dir / "metrics_on_original_test"
+        orig_metrics_dir.mkdir(exist_ok=True)
 
-        # Save standard metrics in requested formats
+        # Save original dataset metrics in requested formats
         for format_type in save_metrics_formats:
             if format_type == "none":
                 continue
 
-            results_file = std_metrics_dir / f"comparison_results.{format_type}"
+            results_file = orig_metrics_dir / f"comparison_results.{format_type}"
             logging.info(
-                f"Saving standard metrics for {classifier_name} to {results_file}"
+                f"Saving metrics from testing against original test data for {classifier_name} to {results_file}"
             )
 
             # We need a modified save_results method that can extract a specific classifier's results
@@ -1565,15 +1565,16 @@ def run_comparison(args):
                 file_type=format_type,
             )
 
-        # Generate and save standard metrics visualisations
+        # Generate and save original test data metrics visualisations
         for format_type in save_vis_formats:
             if format_type == "none":
                 continue
 
             if "metrics" in vis_types_to_generate or "all" in visualisations:
-                metrics_path = std_metrics_dir / f"metrics_comparison.{format_type}"
+                metrics_path = orig_metrics_dir / f"metrics_comparison.{format_type}"
                 logging.info(
-                    f"Generating metrics comparison for {classifier_name} in {format_type} format..."
+                    f"Generating metrics comparison for {classifier_name}, against original test data,"
+                    f"in {format_type} format..."
                 )
 
                 metrics_to_plot = current_config.get("output", {}).get(
@@ -1591,7 +1592,7 @@ def run_comparison(args):
 
             if "radar" in vis_types_to_generate or "all" in visualisations:
                 std_radar_path = (
-                    classifier_dir / f"standard_metrics_radar.{format_type}"
+                    classifier_dir / f"metrics_on_original_test_radar.{format_type}"
                 )
                 plot_radar_chart(
                     results,
@@ -1603,7 +1604,7 @@ def run_comparison(args):
                 )
 
             if "3d" in vis_types_to_generate or "all" in visualisations:
-                std_3d_path = output_dir / "standard_metrics_3d.html"
+                std_3d_path = output_dir / "metrics_on_original_test_3d.html"
                 plot_3d_scatter(
                     results,
                     metric_type="standard_metrics",
@@ -1612,9 +1613,47 @@ def run_comparison(args):
                     display=display_visualisations,
                 )
 
-    standard_total_time = time.time() - standard_start_time
+            if (
+                "learning_curves" in vis_types_to_generate
+                or "all" in visualisations
+            ):
+                orig_learning_curve_path = (
+                    orig_metrics_dir / f"learning_curves.{format_type}"
+                )
+
+                start_time = time.time()
+                logging.info(
+                    f"Generating learning curves for {classifier_name}, against original test data "
+                    f"in {format_type} format..."
+                )
+
+                # Get learning curve parameters from config
+                learning_curve_points = eval_config.get(
+                    "learning_curve_points", 10
+                )
+                learning_curve_folds = eval_config.get(
+                    "learning_curve_folds", 5
+                )
+                train_sizes = np.linspace(0.1, 1.0, learning_curve_points)
+
+                learning_curve_type = "Original Dataset"
+                framework.generate_learning_curves(
+                    classifier_name=classifier_name,
+                    learning_curve_type=learning_curve_type,
+                    train_sizes=train_sizes,
+                    n_folds=learning_curve_folds,
+                    save_path=str(orig_learning_curve_path),
+                    display=display_visualisations,
+                )
+                cv_learning_curves_time = time.time() - start_time
+                logging.info(
+                    f"Successfully generated cv learning curves for {classifier_name}, against original test data "
+                    f"(Time Taken: {format_time(cv_learning_curves_time)})"
+                )
+
+    orig_total_time = time.time() - orig_start_time
     logging.info(
-        f"Standard metrics evaluation total time: {format_time(standard_total_time)}"
+        f"Metrics evaluation against original test data total time: {format_time(orig_total_time)}"
     )
 
     # If cross-validation is enabled, create CV metrics directory and save results
@@ -1625,7 +1664,7 @@ def run_comparison(args):
             classifier_dir = output_dir / classifier_name
             classifier_dir.mkdir(exist_ok=True)
 
-            cv_metrics_dir = classifier_dir / "cv_metrics"
+            cv_metrics_dir = classifier_dir / "metrics_on_balanced_cv"
            cv_metrics_dir.mkdir(exist_ok=True)
 
             # Save CV metrics in requested formats
@@ -1717,8 +1756,10 @@ def run_comparison(args):
                     )
                     train_sizes = np.linspace(0.1, 1.0, learning_curve_points)
 
+                    learning_curve_type = "Balanced Datasets"
                     framework.generate_learning_curves(
                         classifier_name=classifier_name,
+                        learning_curve_type=learning_curve_type,
                         train_sizes=train_sizes,
                         n_folds=learning_curve_folds,
                         save_path=str(cv_learning_curve_path),
@@ -1739,13 +1780,13 @@ def run_comparison(args):
 
     # Print summary of timing results
     print("\nExecution Time Summary:\n")
-    print(f" Data Loading: {format_time(load_time)}")
-    print(f" Balancing: {format_time(balancing_time)}")
-    print(f" Training Classifiers: {format_time(training_time)}")
-    print(f" Standard Metrics Evaluation: {format_time(standard_total_time)}")
+    print(f" Data Loading: {format_time(load_time)}")
+    print(f" Balancing: {format_time(balancing_time)}")
+    print(f" Training Classifiers: {format_time(training_time)}")
+    print(f" Metrics From Testing Against Original Test Dataset: {format_time(orig_total_time)}")
     if cv_enabled:
-        print(f" CV Metrics Evaluation: {format_time(cv_total_time)}")
-    print(f" Total Time: {format_time(total_time)}")
+        print(f" CV Metrics From Testing Against Balanced Test Datasets: {format_time(cv_total_time)}")
+    print(f" Total Time: {format_time(total_time)}")
 
     print("\nResults Summary:")
 
@@ -1758,7 +1799,7 @@ def run_comparison(args):
     )
 
     if has_standard_metrics:
-        print("\nStandard Metrics:")
+        print("\nMetrics From Testing Against Original Test Dataset:")
         for classifier_name, classifier_results in results.items():
             print(f"\n{classifier_name}:")
             for technique_name, technique_metrics in classifier_results.items():
@@ -1783,7 +1824,7 @@ def run_comparison(args):
     )
 
     if has_cv_metrics:
-        print("\nCross Validation Metrics:")
+        print("\nCross Validation Metrics From Testing Against Balanced Test Datasets:")
         for classifier_name, classifier_results in results.items():
             print(f"\n{classifier_name}:")
             for technique_name, technique_metrics in classifier_results.items():
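The net effect of these commands.py changes on the saved output layout is sketched below for one classifier. The sketch is illustrative only, assuming csv metrics and png visualisations were requested; actual file extensions follow the configured save_metrics_formats and save_vis_formats.

output_dir/
├── metrics_on_original_test_3d.html
└── <classifier_name>/
    ├── metrics_on_original_test_radar.png
    ├── metrics_on_original_test/        (was standard_metrics/ in 0.1.0)
    │   ├── comparison_results.csv
    │   ├── metrics_comparison.png
    │   └── learning_curves.png          (new in 0.1.1)
    └── metrics_on_balanced_cv/          (was cv_metrics/ in 0.1.0)
        └── comparison_results.csv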
balancr/evaluation/__init__.py CHANGED
@@ -7,6 +7,7 @@ from .metrics import (
     get_cv_scores,
     get_learning_curve_data,
     get_learning_curve_data_multiple_techniques,
+    get_learning_curve_data_against_imbalanced_multiple_techniques
 )
 
 from .visualisation import (
balancr/evaluation/metrics.py CHANGED
@@ -4,6 +4,8 @@ import logging
 import time
 from typing import Dict
 import numpy as np
+import pandas as pd
+from sklearn.base import clone
 from sklearn.metrics import (
     accuracy_score,
     precision_score,
@@ -13,7 +15,7 @@ from sklearn.metrics import (
     average_precision_score,
     confusion_matrix,
 )
-from sklearn.model_selection import learning_curve
+from sklearn.model_selection import StratifiedKFold, learning_curve
 
 
 def format_time(seconds):
@@ -415,8 +417,8 @@ def get_learning_curve_data_multiple_techniques(
 
         start_time = time.time()
         logging.info(
-            f"Generating learning curve for {classifier_name} trained on data"
-            f"balanced by {technique_name}..."
+            f"Generating learning curve for {classifier_name} trained on data "
+            f"balanced by {technique_name}, against balanced data..."
         )
         train_sizes_abs, train_scores, val_scores = learning_curve(
             estimator=classifier,
@@ -429,8 +431,9 @@ def get_learning_curve_data_multiple_techniques(
         )
         curve_generating_time = time.time() - start_time
         logging.info(
-            f"Generated learning curve for {classifier_name} trained on data"
-            f"balanced by {technique_name} (Time Taken: {format_time(curve_generating_time)})"
+            f"Generated learning curve for {classifier_name} trained on data "
+            f"balanced by {technique_name}, against balanced data"
+            f"(Time Taken: {format_time(curve_generating_time)})"
         )
 
         learning_curve_data[technique_name] = {
@@ -440,3 +443,82 @@ def get_learning_curve_data_multiple_techniques(
         }
 
     return learning_curve_data
+
+
+def get_learning_curve_data_against_imbalanced_multiple_techniques(
+    classifier_name: str,
+    classifier,
+    techniques_data: Dict[str, Dict[str, np.ndarray]],
+    X_test,
+    y_test,
+    train_sizes: np.ndarray = np.linspace(0.1, 1.0, 10),
+    n_folds: int = 5,
+) -> Dict[str, Dict[str, np.ndarray]]:
+    """
+    Custom learning curve function that trains on balanced data
+    and evaluates on original imbalanced test set.
+
+    Returns training and validation scores at each learning curve point.
+    Validation scores are computed on X_test / y_test.
+    """
+
+    def safe_index(X, indices):
+        return X.iloc[indices] if isinstance(X, (pd.DataFrame, pd.Series)) else X[indices]
+
+    learning_curve_data = {}
+
+    for technique_name, data in techniques_data.items():
+        X_balanced = data["X_balanced"]
+        y_balanced = data["y_balanced"]
+
+        n_samples = X_balanced.shape[0]
+        train_sizes_abs = (train_sizes * n_samples).astype(int)
+
+        train_scores = []
+        val_scores = []
+
+        start_time = time.time()
+        logging.info(
+            f"Generating learning curve for {classifier_name} trained on data "
+            f"balanced by {technique_name}, against original test data..."
+        )
+
+        for train_size in train_sizes_abs:
+            X_subset = safe_index(X_balanced, np.arange(train_size))
+            y_subset = safe_index(y_balanced, np.arange(train_size))
+
+            fold_train_scores = []
+            fold_val_scores = []
+
+            kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=42)
+
+            for train_idx, _ in kf.split(X_subset, y_subset):
+                X_fold_train = safe_index(X_subset, train_idx)
+                y_fold_train = safe_index(y_subset, train_idx)
+
+                clf = clone(classifier)
+                clf.fit(X_fold_train, y_fold_train)
+
+                train_acc = clf.score(X_fold_train, y_fold_train)
+                val_acc = clf.score(X_test, y_test)
+
+                fold_train_scores.append(train_acc)
+                fold_val_scores.append(val_acc)
+
+            train_scores.append(fold_train_scores)
+            val_scores.append(fold_val_scores)
+
+        curve_generating_time = time.time() - start_time
+        logging.info(
+            f"Generated learning curve for {classifier_name} trained on data "
+            f"balanced by {technique_name}, against original test data "
+            f"(Time Taken: {format_time(curve_generating_time)})"
+        )
+
+        learning_curve_data[technique_name] = {
+            "train_sizes": train_sizes_abs,
+            "train_scores": np.array(train_scores),
+            "val_scores": np.array(val_scores),
+        }
+
+    return learning_curve_data
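A minimal usage sketch of the helper added above. Nothing here beyond the function itself comes from the package: the imbalanced-learn SMOTE step, the classifier choice, and all variable names are illustrative assumptions. The fixed contract is the techniques_data mapping of technique name to "X_balanced"/"y_balanced" arrays, plus an untouched imbalanced X_test/y_test hold-out.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE  # assumption: imbalanced-learn is installed
from balancr.evaluation import get_learning_curve_data_against_imbalanced_multiple_techniques

# Build a small imbalanced problem and hold out an untouched test split
X, y = make_classification(n_samples=1000, weights=[0.9, 0.1], random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)

# Balance only the training data; keep the test split imbalanced
X_bal, y_bal = SMOTE(random_state=0).fit_resample(X_train, y_train)
techniques_data = {"SMOTE": {"X_balanced": X_bal, "y_balanced": y_bal}}

curves = get_learning_curve_data_against_imbalanced_multiple_techniques(
    classifier_name="LogisticRegression",
    classifier=LogisticRegression(max_iter=1000),
    techniques_data=techniques_data,
    X_test=X_test,
    y_test=y_test,
    train_sizes=np.linspace(0.1, 1.0, 5),
    n_folds=3,
)
print(curves["SMOTE"]["val_scores"].shape)  # (5 curve points, 3 folds)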
balancr/evaluation/visualisation.py CHANGED
@@ -290,6 +290,11 @@ def plot_learning_curves(
     if num_techniques == 1:
         axes = [axes]
 
+    fig.suptitle(
+        title,
+        size=16,
+    )
+
     for idx, (technique_name, data) in enumerate(learning_curve_data.items()):
         # Extract the train_sizes, train_scores, and val_scores from the dictionary
         train_sizes = data["train_sizes"]
@@ -317,7 +322,7 @@ def plot_learning_curves(
            train_sizes, val_mean - val_std, val_mean + val_std, alpha=0.1, color="red"
        )
 
-        ax.set_title(f"{technique_name} - Learning Curves")
+        ax.set_title(f"{technique_name}")
         ax.set_xlabel("Training Examples")
         ax.set_ylabel("Score")
         ax.legend(loc="best")
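The visualisation change above moves the shared title to a figure-level suptitle and shortens each panel title to the bare technique name. A standalone matplotlib sketch of the resulting layout (the technique names, classifier name, and figure size are made up for illustration):

import matplotlib
matplotlib.use("Agg")  # assumption: render headless to a file
import matplotlib.pyplot as plt

techniques = ["SMOTE", "RandomUnderSampler"]
fig, axes = plt.subplots(1, len(techniques), figsize=(12, 4))
fig.suptitle("Learning Curves for LogisticRegression Evaluated Against Balanced Datasets", size=16)
for ax, name in zip(axes, techniques):
    ax.set_title(name)  # 0.1.0 used f"{name} - Learning Curves" per panel
    ax.set_xlabel("Training Examples")
    ax.set_ylabel("Score")
fig.savefig("learning_curves.png")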
balancr/imbalance_analyser.py CHANGED
@@ -15,6 +15,7 @@ from .evaluation import (
     get_metrics,
     get_cv_scores,
     get_learning_curve_data_multiple_techniques,
+    get_learning_curve_data_against_imbalanced_multiple_techniques,
 )
 from .evaluation import (
     plot_class_distribution,
@@ -558,6 +559,7 @@ class BalancingFramework:
     def generate_learning_curves(
         self,
         classifier_name: str,
+        learning_curve_type: str,
         train_sizes: np.ndarray = np.linspace(0.1, 1.0, 10),
         n_folds: int = 5,
         save_path: Optional[str] = None,
@@ -598,19 +600,39 @@ class BalancingFramework:
             # Create classifier instance with the same parameters used in training
             classifier = clf_class(**clf_params)
 
-            learning_curve_data = get_learning_curve_data_multiple_techniques(
-                classifier_name=classifier_name,
-                classifier=classifier,
-                techniques_data=self.current_balanced_datasets,
-                train_sizes=train_sizes,
-                n_folds=n_folds,
-            )
+            if learning_curve_type == "Balanced Datasets":
+                learning_curve_data = get_learning_curve_data_multiple_techniques(
+                    classifier_name=classifier_name,
+                    classifier=classifier,
+                    techniques_data=self.current_balanced_datasets,
+                    train_sizes=train_sizes,
+                    n_folds=n_folds,
+                )
 
-            title = f"{classifier_name} - Learning Curves"
+                title = f"Learning Curves for {classifier_name} Evaluated Against {learning_curve_type}"
 
-            plot_learning_curves(
-                learning_curve_data, title=title, save_path=save_path, display=display
-            )
+                plot_learning_curves(
+                    learning_curve_data,
+                    title=title,
+                    save_path=save_path,
+                    display=display
+                )
+            elif learning_curve_type == "Original Dataset":
+                learning_curve_data = get_learning_curve_data_against_imbalanced_multiple_techniques(
+                    classifier_name=classifier_name,
+                    classifier=classifier,
+                    techniques_data=self.current_balanced_datasets,
+                    X_test=self.X_test,
+                    y_test=self.y_test,
+                    train_sizes=train_sizes,
+                    n_folds=n_folds,
+                )
+
+                title = f"Learning Curves for {classifier_name} Evaluated Against {learning_curve_type}"
+
+                plot_learning_curves(
+                    learning_curve_data, title=title, save_path=save_path, display=display
+                )
 
         except Exception as e:
             logging.warning(
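With learning_curve_type now a required argument, callers of BalancingFramework.generate_learning_curves select the evaluation target explicitly; the dispatch above matches exactly the strings "Balanced Datasets" and "Original Dataset". A hedged sketch of the updated call, assuming a framework that has already been set up (construction arguments, data loading, and balancing are not shown in this diff and are placeholders here):

import numpy as np
from balancr.imbalance_analyser import BalancingFramework

framework = BalancingFramework()  # assumption: zero-argument construction
# ... assumed setup: load data, apply balancing techniques, train classifiers ...

for curve_type in ("Balanced Datasets", "Original Dataset"):
    framework.generate_learning_curves(
        classifier_name="RandomForestClassifier",  # hypothetical registered classifier
        learning_curve_type=curve_type,
        train_sizes=np.linspace(0.1, 1.0, 10),
        n_folds=5,
        save_path=f"learning_curves_{curve_type.replace(' ', '_').lower()}.png",
        display=False,
    )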
balancr-0.1.0.dist-info/METADATA → balancr-0.1.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: balancr
-Version: 0.1.0
+Version: 0.1.1
 Summary: A unified framework for analysing and comparing techniques for handling imbalanced datasets
 Home-page: https://github.com/Ruaskill/balancr
 Author: Conor Doherty
@@ -38,6 +38,20 @@ Provides-Extra: dev
 Requires-Dist: pytest>=6.0.0; extra == "dev"
 Requires-Dist: black>=21.0.0; extra == "dev"
 Requires-Dist: flake8>=3.9.0; extra == "dev"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: project-url
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
 
 # Balancr: A Unified Framework for Analysing Data Balancing Techniques
 
balancr-0.1.0.dist-info/RECORD → balancr-0.1.1.dist-info/RECORD CHANGED
@@ -1,25 +1,25 @@
 balancr/__init__.py,sha256=L1Ko0WvrjehAhjuQJxtEJLQkva8zzW1BS6SalbZJneY,254
 balancr/base.py,sha256=3bALV9IdBm2cng_pKZ91zR3W9OktnD0OYpaZnCDHb0o,355
 balancr/classifier_registry.py,sha256=FljlaB4XqZeXqFme1zTEFgGQDzfFkZxUOBFdtrO7Xow,12074
-balancr/imbalance_analyser.py,sha256=sPVA0NuOXRffsAwHX9jVWAskBmosYQftZjdLahjqHu8,25671
+balancr/imbalance_analyser.py,sha256=MNwXbIkyqdA1QEDZYr7ccFa-wTO25qZbC6cqsB6GvDs,26738
 balancr/technique_registry.py,sha256=mr-mAeZ4mJi8IaetpyYCKAn0A8tTeq3G_dX0RxMYx7U,11501
 balancr/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-balancr/cli/commands.py,sha256=DXlFIDiMmv9yFS77ivUHTWoawLdIlqfVMQWoEBbh7w8,69419
+balancr/cli/commands.py,sha256=Ez8EJW4dYAyTdnOF9tZULNQH-aFQCWtTu4Dqy8E9QTk,71789
 balancr/cli/config.py,sha256=YNpIXg90HttoMXXtSByndsXMyeyYhE1E-PDb1iT4Ge0,4292
 balancr/cli/main.py,sha256=Iiwi6EQ0Dr0DmPtcwb7jDKkn1-kr0wowr82k2Ami03s,26388
 balancr/cli/utils.py,sha256=kFxSWzjtrqebCiVrzulZVA8-ci0f8lxUXurANVftIZg,3202
 balancr/data/__init__.py,sha256=QuQ5saA7miRQRhLYoyQMDIac_noMjbqRXw5DcP7xnps,121
 balancr/data/loader.py,sha256=QlWKB60hQfNEoLPIxS_rEKI5gEPh5Id4TZT3Y42HnDQ,2027
 balancr/data/preprocessor.py,sha256=UDBU3S3EGqXPeZnMZzCbDfNE8xLrtQ5M_AnP_bk6gvI,22648
-balancr/evaluation/__init__.py,sha256=x0_cs1bmzDgxX9AQrAAk6jBSU9lJbpgCnjQImAYEOaA,408
-balancr/evaluation/metrics.py,sha256=91pbq_my-QHQ6O061hse3NkQewLrDogGUe2MmPSS5gM,15706
-balancr/evaluation/visualisation.py,sha256=hK_e7zto_LDwugUk4XrzPXOhrZo1iadSeeQHrysul-M,22616
+balancr/evaluation/__init__.py,sha256=FbA5Q2HaDCtAndNf5BM3BVQaE3jEmA2LU2MwdxI7OGY,475
+balancr/evaluation/metrics.py,sha256=OgJh1JYlMlcuz_hhz3z43QEXXp1WeMldEwsrmjh8hUg,18553
+balancr/evaluation/visualisation.py,sha256=oND6VvjOqVnhTmgVHGfymju0CP4ixNreLdC2ZT7zrA4,22655
 balancr/techniques/__init__.py,sha256=w1rHhvc4ZkHyg-ZS-g9ZTk_vlhSv9ANuF4aNMb4Ok_Y,119
 balancr/techniques/custom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 balancr/techniques/custom/example_custom_technique.py,sha256=TDo6Js5grmy-7ZozxTO4N4Hee3dQGXL2jPY187v0vz4,732
-balancr-0.1.0.dist-info/LICENSE,sha256=ImeeOOsQ56Rv9q6dkuScrpyBn6e01e6xgO8qFMJ1fGs,1069
-balancr-0.1.0.dist-info/METADATA,sha256=GknbYve4lCOAWeGoXMEJuwB-PZIQR0LJqylH7VhUI2M,16626
-balancr-0.1.0.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
-balancr-0.1.0.dist-info/entry_points.txt,sha256=OVchXsA0rcBF58V3hhC_zbrBm-RWUzRYSjjg2xahLV4,50
-balancr-0.1.0.dist-info/top_level.txt,sha256=RcWjXBQWsz9XBKIMgSvC_hU1yjhR-quGQ5jxqJOiUq0,8
-balancr-0.1.0.dist-info/RECORD,,
+balancr-0.1.1.dist-info/licenses/LICENSE,sha256=ImeeOOsQ56Rv9q6dkuScrpyBn6e01e6xgO8qFMJ1fGs,1069
+balancr-0.1.1.dist-info/METADATA,sha256=W1oML5Cby0U_5Q-uVEs1VaLx7oqwmljSiKzo-PfrrJY,16925
+balancr-0.1.1.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+balancr-0.1.1.dist-info/entry_points.txt,sha256=OVchXsA0rcBF58V3hhC_zbrBm-RWUzRYSjjg2xahLV4,50
+balancr-0.1.1.dist-info/top_level.txt,sha256=RcWjXBQWsz9XBKIMgSvC_hU1yjhR-quGQ5jxqJOiUq0,8
+balancr-0.1.1.dist-info/RECORD,,
balancr-0.1.0.dist-info/WHEEL → balancr-0.1.1.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.7.0)
+Generator: setuptools (79.0.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 