validmind 2.8.29__py3-none-any.whl → 2.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. validmind/__version__.py +1 -1
  2. validmind/ai/test_descriptions.py +1 -9
  3. validmind/ai/utils.py +4 -24
  4. validmind/api_client.py +6 -17
  5. validmind/logging.py +48 -0
  6. validmind/tests/__init__.py +2 -0
  7. validmind/tests/__types__.py +18 -0
  8. validmind/tests/output.py +9 -2
  9. validmind/tests/plots/BoxPlot.py +260 -0
  10. validmind/tests/plots/CorrelationHeatmap.py +235 -0
  11. validmind/tests/plots/HistogramPlot.py +233 -0
  12. validmind/tests/plots/ViolinPlot.py +125 -0
  13. validmind/tests/plots/__init__.py +0 -0
  14. validmind/tests/stats/CorrelationAnalysis.py +251 -0
  15. validmind/tests/stats/DescriptiveStats.py +197 -0
  16. validmind/tests/stats/NormalityTests.py +147 -0
  17. validmind/tests/stats/OutlierDetection.py +173 -0
  18. validmind/tests/stats/__init__.py +0 -0
  19. validmind/unit_metrics/classification/individual/AbsoluteError.py +42 -0
  20. validmind/unit_metrics/classification/individual/BrierScore.py +56 -0
  21. validmind/unit_metrics/classification/individual/CalibrationError.py +77 -0
  22. validmind/unit_metrics/classification/individual/ClassBalance.py +65 -0
  23. validmind/unit_metrics/classification/individual/Confidence.py +52 -0
  24. validmind/unit_metrics/classification/individual/Correctness.py +41 -0
  25. validmind/unit_metrics/classification/individual/LogLoss.py +61 -0
  26. validmind/unit_metrics/classification/individual/OutlierScore.py +86 -0
  27. validmind/unit_metrics/classification/individual/ProbabilityError.py +54 -0
  28. validmind/unit_metrics/classification/individual/Uncertainty.py +60 -0
  29. validmind/unit_metrics/classification/individual/__init__.py +0 -0
  30. validmind/vm_models/dataset/dataset.py +147 -1
  31. validmind/vm_models/result/result.py +26 -4
  32. {validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/METADATA +2 -2
  33. {validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/RECORD +36 -15
  34. {validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/LICENSE +0 -0
  35. {validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/WHEEL +0 -0
  36. {validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/entry_points.txt +0 -0
validmind/unit_metrics/classification/individual/ProbabilityError.py +54 -0
@@ -0,0 +1,54 @@
+ # Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+ # See the LICENSE file in the root of this repository for details.
+ # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+ from typing import List
+
+ import numpy as np
+
+ from validmind import tags, tasks
+ from validmind.vm_models import VMDataset, VMModel
+
+
+ @tasks("classification")
+ @tags("classification")
+ def ProbabilityError(model: VMModel, dataset: VMDataset, **kwargs) -> List[float]:
+     """Calculates the probability error per row for a classification model.
+
+     For binary classification tasks, this computes the absolute difference between
+     the true class labels (0 or 1) and the predicted probabilities for each row.
+     This provides insight into how confident the model's predictions are and
+     how far off they are from the actual labels.
+
+     Args:
+         model: The classification model to evaluate
+         dataset: The dataset containing true labels and predicted probabilities
+         **kwargs: Additional parameters (unused for compatibility)
+
+     Returns:
+         List[float]: Per-row probability errors as a list of float values
+
+     Raises:
+         ValueError: If probability column is not found for the model
+     """
+     y_true = dataset.y
+
+     # Try to get probabilities, fall back to predictions if not available
+     try:
+         y_prob = dataset.y_prob(model)
+         # For binary classification, use the positive class probability
+         if y_prob.ndim > 1 and y_prob.shape[1] > 1:
+             y_prob = y_prob[:, 1]  # Use probability of positive class
+     except ValueError:
+         # Fall back to predictions if probabilities not available
+         y_prob = dataset.y_pred(model)
+
+     # Convert to numpy arrays and ensure same data type
+     y_true = np.asarray(y_true, dtype=float)
+     y_prob = np.asarray(y_prob, dtype=float)
+
+     # Compute absolute difference between true labels and predicted probabilities
+     probability_errors = np.abs(y_true - y_prob)
+
+     # Return as a list of floats
+     return probability_errors.tolist()
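For orientation, a minimal sketch of invoking this per-row metric through the unit-metrics runner. The run_metric call, its inputs/show arguments, and result.metric all appear elsewhere in this diff; the full metric ID string is an assumption derived from the new file's path, and model/dataset stand in for previously initialized VMModel/VMDataset inputs.

    from validmind.unit_metrics import run_metric

    # Assumed metric ID, derived from the new file's location in the wheel
    result = run_metric(
        "validmind.unit_metrics.classification.individual.ProbabilityError",
        inputs={"model": model, "dataset": dataset},
        show=False,
    )
    per_row_errors = result.metric  # one |y_true - y_prob| value per row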
validmind/unit_metrics/classification/individual/Uncertainty.py +60 -0
@@ -0,0 +1,60 @@
+ # Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+ # See the LICENSE file in the root of this repository for details.
+ # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+ from typing import List
+
+ import numpy as np
+
+ from validmind import tags, tasks
+ from validmind.vm_models import VMDataset, VMModel
+
+
+ @tasks("classification")
+ @tags("classification")
+ def Uncertainty(model: VMModel, dataset: VMDataset, **kwargs) -> List[float]:
+     """Calculates the prediction uncertainty per row for a classification model.
+
+     Uncertainty is measured using the entropy of the predicted probability distribution.
+     Higher entropy indicates higher uncertainty in the prediction. For binary
+     classification, maximum uncertainty occurs at probability 0.5.
+
+     Args:
+         model: The classification model to evaluate
+         dataset: The dataset containing true labels and predicted probabilities
+         **kwargs: Additional parameters (unused for compatibility)
+
+     Returns:
+         List[float]: Per-row uncertainty scores as a list of float values
+
+     Raises:
+         ValueError: If probability column is not found for the model
+     """
+     # Try to get probabilities
+     try:
+         y_prob = dataset.y_prob(model)
+
+         if y_prob.ndim > 1 and y_prob.shape[1] > 1:
+             # Multi-class: calculate entropy across all classes
+             # Clip to avoid log(0)
+             y_prob_clipped = np.clip(y_prob, 1e-15, 1 - 1e-15)
+             # Entropy: -sum(p * log(p))
+             uncertainty = -np.sum(y_prob_clipped * np.log(y_prob_clipped), axis=1)
+         else:
+             # Binary classification: calculate binary entropy
+             y_prob = np.asarray(y_prob, dtype=float)
+             # Clip to avoid log(0)
+             y_prob_clipped = np.clip(y_prob, 1e-15, 1 - 1e-15)
+             # Binary entropy: -[p*log(p) + (1-p)*log(1-p)]
+             uncertainty = -(
+                 y_prob_clipped * np.log(y_prob_clipped)
+                 + (1 - y_prob_clipped) * np.log(1 - y_prob_clipped)
+             )
+
+     except ValueError:
+         # If no probabilities available, assume zero uncertainty for hard predictions
+         n_samples = len(dataset.y)
+         uncertainty = np.zeros(n_samples)
+
+     # Return as a list of floats
+     return uncertainty.tolist()
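A quick sanity check on the entropy convention used above: np.log is the natural logarithm, so the binary-entropy maximum at p = 0.5 is ln 2 ≈ 0.693 rather than 1.0. The probabilities below are made-up values for illustration only.

    import numpy as np

    p = np.array([0.5, 0.9, 0.01])       # hypothetical positive-class probabilities
    p = np.clip(p, 1e-15, 1 - 1e-15)     # same clipping as the metric, avoids log(0)
    entropy = -(p * np.log(p) + (1 - p) * np.log(1 - p))
    # entropy ≈ [0.693, 0.325, 0.056]: highest at 0.5, lowest near certainty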
validmind/vm_models/dataset/dataset.py +147 -1
@@ -8,7 +8,7 @@ Dataset class wrapper
 
  import warnings
  from copy import deepcopy
- from typing import Any, Dict, Optional
+ from typing import Any, Dict, List, Optional, Union
 
  import numpy as np
  import pandas as pd
@@ -458,6 +458,152 @@ class VMDataset(VMInput):
 
          return self.extra_columns.probability_column(model, column_name)
 
+     def assign_scores(
+         self,
+         model: VMModel,
+         metrics: Union[str, List[str]],
+         **kwargs: Dict[str, Any],
+     ) -> None:
+         """Assign computed unit metric scores to the dataset as new columns.
+
+         This method computes unit metrics for the given model and dataset, then adds
+         the computed scores as new columns to the dataset using the naming convention:
+         {model.input_id}_{metric_name}
+
+         Args:
+             model (VMModel): The model used to compute the scores.
+             metrics (Union[str, List[str]]): Single metric ID or list of metric IDs.
+                 Can be either:
+                 - Short name (e.g., "F1", "Precision")
+                 - Full metric ID (e.g., "validmind.unit_metrics.classification.F1")
+             **kwargs: Additional parameters passed to the unit metrics.
+
+         Examples:
+             # Single metric
+             dataset.assign_scores(model, "F1")
+
+             # Multiple metrics
+             dataset.assign_scores(model, ["F1", "Precision", "Recall"])
+
+             # With parameters
+             dataset.assign_scores(model, "ROC_AUC", average="weighted")
+
+         Raises:
+             ValueError: If the model input_id is None or if metric computation fails.
+             ImportError: If unit_metrics module cannot be imported.
+         """
+         if model.input_id is None:
+             raise ValueError("Model input_id must be set to use assign_scores")
+
+         # Import unit_metrics module
+         try:
+             from validmind.unit_metrics import run_metric
+         except ImportError as e:
+             raise ImportError(
+                 f"Failed to import unit_metrics module: {e}. "
+                 "Make sure validmind.unit_metrics is available."
+             ) from e
+
+         # Normalize metrics to a list
+         if isinstance(metrics, str):
+             metrics = [metrics]
+
+         # Process each metric
+         for metric in metrics:
+             # Normalize metric ID
+             metric_id = self._normalize_metric_id(metric)
+
+             # Extract metric name for column naming
+             metric_name = self._extract_metric_name(metric_id)
+
+             # Generate column name
+             column_name = f"{model.input_id}_{metric_name}"
+
+             try:
+                 # Run the unit metric
+                 result = run_metric(
+                     metric_id,
+                     inputs={
+                         "model": model,
+                         "dataset": self,
+                     },
+                     params=kwargs,
+                     show=False,  # Don't show widget output
+                 )
+
+                 # Extract the metric value
+                 metric_value = result.metric
+
+                 # Create column values (repeat the scalar value for all rows)
+                 if np.isscalar(metric_value):
+                     column_values = np.full(len(self._df), metric_value)
+                 else:
+                     if len(metric_value) != len(self._df):
+                         raise ValueError(
+                             f"Metric value length {len(metric_value)} does not match dataset length {len(self._df)}"
+                         )
+                     column_values = metric_value
+
+                 # Add the column to the dataset
+                 self.add_extra_column(column_name, column_values)
+
+                 logger.info(f"Added metric column '{column_name}'")
+             except Exception as e:
+                 logger.error(f"Failed to compute metric {metric_id}: {e}")
+                 raise ValueError(f"Failed to compute metric {metric_id}: {e}") from e
+
+     def _normalize_metric_id(self, metric: str) -> str:
+         """Normalize metric identifier to full validmind unit metric ID.
+
+         Args:
+             metric (str): Metric identifier (short name or full ID)
+
+         Returns:
+             str: Full metric ID
+         """
+         # If already a full ID, return as-is
+         if metric.startswith("validmind.unit_metrics."):
+             return metric
+
+         # Try to find the metric by short name
+         try:
+             from validmind.unit_metrics import list_metrics
+
+             available_metrics = list_metrics()
+
+             # Look for exact match with short name
+             for metric_id in available_metrics:
+                 if metric_id.endswith(f".{metric}"):
+                     return metric_id
+
+             # If no exact match found, raise error with suggestions
+             suggestions = [m for m in available_metrics if metric.lower() in m.lower()]
+             if suggestions:
+                 raise ValueError(
+                     f"Metric '{metric}' not found. Did you mean one of: {suggestions[:5]}"
+                 )
+             else:
+                 raise ValueError(
+                     f"Metric '{metric}' not found. Available metrics: {available_metrics[:10]}..."
+                 )
+
+         except ImportError as e:
+             raise ImportError(
+                 f"Failed to import unit_metrics for metric lookup: {e}"
+             ) from e
+
+     def _extract_metric_name(self, metric_id: str) -> str:
+         """Extract the metric name from a full metric ID.
+
+         Args:
+             metric_id (str): Full metric ID
+
+         Returns:
+             str: Metric name
+         """
+         # Extract the last part after the final dot
+         return metric_id.split(".")[-1]
+
      def add_extra_column(self, column_name, column_values=None):
          """Adds an extra column to the dataset without modifying the dataset `features` and `target` columns.
 
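A usage sketch for the new method, based on the docstring examples above; the model and dataset variable names and the "champion_model" input_id are illustrative, not taken from this release.

    # model and dataset are previously initialized VMModel / VMDataset inputs,
    # with model.input_id set (e.g. to "champion_model")
    dataset.assign_scores(model, "F1")                            # adds column "champion_model_F1"
    dataset.assign_scores(model, ["Precision", "Recall"])         # one new column per metric
    dataset.assign_scores(model, "ROC_AUC", average="weighted")   # extra kwargs are forwarded as params

Per the scalar-versus-list branch in the code, scalar metric values are broadcast to every row via np.full, while list-valued metrics (such as the per-row unit metrics added in this release) must match the dataset length and are stored as one value per row.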
validmind/vm_models/result/result.py +26 -4
@@ -7,6 +7,7 @@ Result objects for test results
  """
  import asyncio
  import json
+ import os
  from abc import abstractmethod
  from dataclasses import dataclass
  from typing import Any, Dict, List, Optional, Union
@@ -20,7 +21,7 @@ from ipywidgets import HTML, VBox
  from ... import api_client
  from ...ai.utils import DescriptionFuture
  from ...errors import InvalidParameterError
- from ...logging import get_logger
+ from ...logging import get_logger, log_api_operation
  from ...utils import (
      HumanReadableEncoder,
      NumpyEncoder,
@@ -177,7 +178,7 @@ class TestResult(Result):
      title: Optional[str] = None
      doc: Optional[str] = None
      description: Optional[Union[str, DescriptionFuture]] = None
-     metric: Optional[Union[int, float]] = None
+     metric: Optional[Union[int, float, List[Union[int, float]]]] = None
      tables: Optional[List[ResultTable]] = None
      raw_data: Optional[RawData] = None
      figures: Optional[List[Figure]] = None
@@ -476,9 +477,30 @@
              )
 
          if self.figures:
-             tasks.extend(
-                 [api_client.alog_figure(figure) for figure in (self.figures or [])]
+             batch_size = min(
+                 len(self.figures), int(os.getenv("VM_FIGURE_MAX_BATCH_SIZE", 20))
              )
+             figure_batches = [
+                 self.figures[i : i + batch_size]
+                 for i in range(0, len(self.figures), batch_size)
+             ]
+
+             async def upload_figures_in_batches():
+                 for batch in figure_batches:
+
+                     @log_api_operation(
+                         operation_name=f"Uploading batch of {len(batch)} figures"
+                     )
+                     async def process_batch():
+                         batch_tasks = [
+                             api_client.alog_figure(figure) for figure in batch
+                         ]
+                         return await asyncio.gather(*batch_tasks)
+
+                     await process_batch()
+
+             tasks.append(upload_figures_in_batches())
+
 
          if self.description:
              revision_name = (
{validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/METADATA +2 -2
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: validmind
- Version: 2.8.29
+ Version: 2.9.2
  Summary: ValidMind Library
  License: Commercial License
  Author: Andres Rodriguez
@@ -24,7 +24,7 @@ Requires-Dist: datasets (>=2.10.0,<3.0.0)
  Requires-Dist: evaluate
  Requires-Dist: h11 (>=0.16.0)
  Requires-Dist: ipywidgets
- Requires-Dist: kaleido (>=0.2.1,!=0.2.1.post1)
+ Requires-Dist: kaleido (>=0.2.1,!=0.2.1.post1,<1.0.0)
  Requires-Dist: langchain-openai (>=0.1.8) ; extra == "all" or extra == "llm"
  Requires-Dist: langdetect
  Requires-Dist: llvmlite ; python_version >= "3.8" and python_full_version <= "3.11.0"
{validmind-2.8.29.dist-info → validmind-2.9.2.dist-info}/RECORD +36 -15
@@ -1,8 +1,8 @@
  validmind/__init__.py,sha256=7nOHbSRUKtpIuHvf6oQd4D9_R8oh1PQ2CkeU62S14A0,4329
- validmind/__version__.py,sha256=6FQh-CGg741faEnufOSp2cRuF5ODdGQ7-BxZ8EkOrrU,23
- validmind/ai/test_descriptions.py,sha256=eBF09MAyqAAD-Ah7vxXVRbHxOmGx5_10ZkoJmMvEaEA,7123
- validmind/ai/utils.py,sha256=IkqDCBiZiDQ95ElCZpi5sVgmNDiSushmF7wAizAO-74,8497
- validmind/api_client.py,sha256=WNAdiYc2NctAFc2itLdz-0mf3_4JPghW4pPF_VAn5jw,16970
+ validmind/__version__.py,sha256=2wNk1TdqAeMl8njYHKnxyR5LsTqRh1L9eYia7D6rWHg,22
+ validmind/ai/test_descriptions.py,sha256=CzeXPfdJaZyPcXWnxchPBIAx1Om-MxShLdCWDDd-gyI,6981
+ validmind/ai/utils.py,sha256=2VEOS2W37FtTvWqcHmG1Mys5BTuTSxrUhvpA2G2fvP0,7600
+ validmind/api_client.py,sha256=AcnyaZF1t4HoeHyGmUPVPI2YhYP5iKoNFrMdxc4iHQ0,16711
  validmind/client.py,sha256=XKb4uc7yXVV_3NH9-zTrS9jCbLPX2zZZU12vKKlSpIc,19049
  validmind/client_config.py,sha256=O1gopTaNADM4ZVPj383AJTjcpjdxyEvUQY5cFt7nbIs,1366
  validmind/datasets/__init__.py,sha256=c0hQZN_6GrUEJxdFHdQaEsQrSYNABG84ZCY0H-PzOZk,260
@@ -65,7 +65,7 @@ validmind/experimental/agents.py,sha256=UAn62qflCYnzS1m2XL_y3xUNaw0PJr9dRvNb-c-r
  validmind/html_templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  validmind/html_templates/content_blocks.py,sha256=vFMRS4Ogq4RZq88WzG3teNEOq3U4OLgLDzD3lBx4h-g,4050
  validmind/input_registry.py,sha256=bgZqJhrBCDoTV54Eq6YhNcU9yn5GjH0aidDwrnKm_pI,1043
- validmind/logging.py,sha256=NKI4g4MFO1tZRE5G_g6gZU0knRRtsYoD3uPQHT7or58,5457
+ validmind/logging.py,sha256=FuLfiGGJp2PbQpdU4Z8F-6lQkAh_v5PXIUgvhBqWjxA,7013
  validmind/models/__init__.py,sha256=lraTbNwoKckXNP3Dbyj-euI78UTkZ_w5wpUOb8l5nWs,729
  validmind/models/foundation.py,sha256=wqVyOps3_tZtkOH9sVXPbv2Sh-EYtPuFfat7_hiKSyk,1854
  validmind/models/function.py,sha256=pwP_4pIQ1Jbh3JVKnsyEPc5wP23AbSPHDeWvqE6zbWA,2373
@@ -89,8 +89,8 @@ validmind/test_suites/summarization.py,sha256=VKwcbf9PD-hnPkDlYJWZKW4nHyOnI4q6Y3
  validmind/test_suites/tabular_datasets.py,sha256=WE4eLzRCfiqAxRqXnZFRR3Lo_u-TI6KM6hmTbR8rg5o,1798
  validmind/test_suites/text_data.py,sha256=YGVGBB05356jN9Gzcy5CHShRzo1fm5mKsZY7YBq0cYU,739
  validmind/test_suites/time_series.py,sha256=3hzWc9gXHBf8aMecD-1YYGFS5RI1o8A-eor9akeBzSU,4403
- validmind/tests/__init__.py,sha256=U6wUS7R8lYFjwUZmAkG7gC8Av9Z4TTDZS7uWZqbzxVM,1665
- validmind/tests/__types__.py,sha256=3if3CVI-YHWQpX0t_8WYh1a6dd01us0oP7r2ZsX2NX0,12589
+ validmind/tests/__init__.py,sha256=kEutKQ414BnIj_9l5MnnVxFs5_41UsNFG95dW93ooQY,1691
+ validmind/tests/__types__.py,sha256=EYq1XzwnzUCoYvFEA7LXBAS9Egs_3_oUPOxADZ7O3gA,13654
  validmind/tests/_store.py,sha256=W867LRVOuvuGKxWZuGwKaQr7jXc81MA0oogNebfSwx4,2685
  validmind/tests/comparison.py,sha256=3s0kiCtgzU5x6HBo9eGtBR8pVKD92O5OAg9Dl-GTHts,14723
  validmind/tests/data_validation/ACFandPACFPlot.py,sha256=KEpr-WjdfQah3fEuBWvoHDKAxrb8dYjFFJpwZhqAMmY,4380
@@ -278,7 +278,12 @@ validmind/tests/ongoing_monitoring/ROCCurveDrift.py,sha256=dN0R2Grr4ZgYJjBRSw5-q
  validmind/tests/ongoing_monitoring/ScoreBandsDrift.py,sha256=If2x6i56uVfqXkAfJSiBsSMsBHU9sMIYfuccpIGlC6s,7826
  validmind/tests/ongoing_monitoring/ScorecardHistogramDrift.py,sha256=tZc3oluyuTDqXVBsXpDnya_416kWCDCmClLMtFkYDDY,7324
  validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py,sha256=Mk7psHw8ymg8rGkxHhufK7WGIM_PJGQ2j9r21j4o5bA,5340
- validmind/tests/output.py,sha256=UXSZDiW_GD411QP2F9r2Vh7uXtb5a2y990bNRZhnZVQ,6153
+ validmind/tests/output.py,sha256=0YCVRx51MnsAOrZLAvwHKQTEjgw223JiWs45CKJ_jKk,6473
+ validmind/tests/plots/BoxPlot.py,sha256=5N9aEkiN7vo9UNU03GOzcQVfpPVQ4iyMYG96JqnN0-M,8071
+ validmind/tests/plots/CorrelationHeatmap.py,sha256=OcluzdbO1FQo6jjGAsc2ek3yBg4qLx5AKLIGWYswkPI,7773
+ validmind/tests/plots/HistogramPlot.py,sha256=Ktjidzr_H31yeLkkNKFyyaFn9rKoiaGX3qetLKzsPAc,7315
+ validmind/tests/plots/ViolinPlot.py,sha256=AucJUMMttwPfyV7Pz_a9C0DSDanBN1cPHClTL6P_qis,4044
+ validmind/tests/plots/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  validmind/tests/prompt_validation/Bias.py,sha256=5W1XNPPJnB-TU-x02xWM3GcUXSMSUgLIqs04sEnOyzQ,5940
  validmind/tests/prompt_validation/Clarity.py,sha256=_3RTNA5tEo4OhY5aW466fikT6Kc8WtXK2sftFjl7Bt0,5085
  validmind/tests/prompt_validation/Conciseness.py,sha256=YH3sdiOLkzuFyMMv3TU1EZF8y9tAIN6ucnQlSAUk6M4,4829
@@ -289,6 +294,11 @@ validmind/tests/prompt_validation/Specificity.py,sha256=hEjnSZ5PiKzdRR9lzkreQoW6
  validmind/tests/prompt_validation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  validmind/tests/prompt_validation/ai_powered_test.py,sha256=sWMf9fRXAkOpI5JYdhLHmJXlwnthjUDZGoLgCZlQZxo,2240
  validmind/tests/run.py,sha256=ftUCywJbHQ0vbZ7096iz8yq0htLbQbOWhvURjCqiqog,14211
+ validmind/tests/stats/CorrelationAnalysis.py,sha256=v-o5TTd5529SGlUUiQUvNQ09brd45K-pgAZ21LGHaKc,8923
+ validmind/tests/stats/DescriptiveStats.py,sha256=q8Ovv1x9y-xU4w03DVL5336dHGJ5HnqPWSKKN-4DaZ8,7066
+ validmind/tests/stats/NormalityTests.py,sha256=o0bL_niZAHPt0ySjPw_URilkV9lFK6MEMuavg7FYbb4,5062
+ validmind/tests/stats/OutlierDetection.py,sha256=hrbixL76jkDzUkKbAa2RN4_Kv09gJxh7t7hTyJmQdkw,5963
+ validmind/tests/stats/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  validmind/tests/test_providers.py,sha256=S0_yNYAor_MX5joRJntrVjV8J3ypvUcaaSqtkBqhOsI,6021
  validmind/tests/utils.py,sha256=sPnk9HWIb0IoySqL88h7uP3LixfrfKFgFebnyTUP5EE,3950
  validmind/unit_metrics/__init__.py,sha256=ZKXMtMk7qN9QlapZFyiCcjC3HhjXJXlbQ7E4x0m8wCM,957
@@ -297,6 +307,17 @@ validmind/unit_metrics/classification/F1.py,sha256=INZUraU4T72k8ON6Scmgh0JzRAUMQ
  validmind/unit_metrics/classification/Precision.py,sha256=CGBJy9dpfYnYOho2nB-1YtF_8N0s6NC1PBb1ecqidT8,575
  validmind/unit_metrics/classification/ROC_AUC.py,sha256=fXl9cEhGy4SfLcP02XEY21bwTAV-CHvcIeZSYQIL9jw,1162
  validmind/unit_metrics/classification/Recall.py,sha256=HYna-tAKStoPo51JnNpawDKBEioTg3DFq5YyEaS2b3A,563
+ validmind/unit_metrics/classification/individual/AbsoluteError.py,sha256=1tmNXGKGT6PnVHgC4Qs7vb-7ISxEcuiUYLEdtmTYzrI,1515
+ validmind/unit_metrics/classification/individual/BrierScore.py,sha256=Ftl9QoE5R5A_l6xW57uDlxVulNL6LZysghdmiTSR798,2062
+ validmind/unit_metrics/classification/individual/CalibrationError.py,sha256=Nkrr351Sh14DE6t_xeLe2TdNbH8tVghxLQ_Id9I7a-8,2804
+ validmind/unit_metrics/classification/individual/ClassBalance.py,sha256=ccQDEJ3SukCGrgg3kG8IkuLwYcAWBXkhwjKwhBkA8Bo,2338
+ validmind/unit_metrics/classification/individual/Confidence.py,sha256=_ZFK4il8HJ2OJPsiyYrBv2vLssDw8KRXhZz1kVRiA4w,2049
+ validmind/unit_metrics/classification/individual/Correctness.py,sha256=Gu4-PwqxWJFCj93JUvFmys8YoWypzXSO7KNJiEEf0XI,1344
+ validmind/unit_metrics/classification/individual/LogLoss.py,sha256=bVGQdJQBm1SsVxFuiuv8u6bBOt9LUBaJWrpHes5Hkns,2245
+ validmind/unit_metrics/classification/individual/OutlierScore.py,sha256=j9wNViBFwbP2vbchZoFfSFEa2Ntx3qY_TcW4HnuVhnI,3107
+ validmind/unit_metrics/classification/individual/ProbabilityError.py,sha256=5P-4nvxhXw174CamFsMy41_0vN0kFzWvIoCwT-3jNb0,2030
+ validmind/unit_metrics/classification/individual/Uncertainty.py,sha256=BYy6K8tRMFuNQkDUZvsrI1-nVp3X-owYo73QHmyEnBs,2276
+ validmind/unit_metrics/classification/individual/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  validmind/unit_metrics/regression/AdjustedRSquaredScore.py,sha256=37aVMrkZ9JbR21_rPcBkgt4JKqU0wFAqyy9zy2juOf4,757
  validmind/unit_metrics/regression/GiniCoefficient.py,sha256=lDJYQPwpRE7dEXQt5a0d7gVaqdUUdVrn2AJ3QzI0Qxc,1115
  validmind/unit_metrics/regression/HuberLoss.py,sha256=po_cA2oEeon-93ekxHIoa4oocwnfRVTSwLwPA_jF8vQ,805
@@ -311,22 +332,22 @@ validmind/unit_metrics/regression/RootMeanSquaredError.py,sha256=QJYK_wVBjzJTa6P
  validmind/utils.py,sha256=59WWVV_JhvxzPr8Y625qw_wsyu_ZVRoLJyi0Pw3MTMU,28613
  validmind/vm_models/__init__.py,sha256=lcqf9q2aRzrVrNN6R--81IkrnSa6BXPbhJ8SnkT_hcI,702
  validmind/vm_models/dataset/__init__.py,sha256=U4CxZjdoc0dd9u2AqBl5PJh1UVbzXWNrmundmjLF-qE,346
- validmind/vm_models/dataset/dataset.py,sha256=stmZ_DnvLQQT_4WRlOXXw9KQNFkCMpz8tID9eE5iqgc,32254
+ validmind/vm_models/dataset/dataset.py,sha256=vgpPFh9Ep4tkmGzbzUbQPRf_C6v7_VNWQ2lTSDQukaE,37608
  validmind/vm_models/dataset/utils.py,sha256=g6mBPrBmVYf8wJAlTxeg9DTiNvts4ZaaT5mbnQAPWaU,5638
  validmind/vm_models/figure.py,sha256=ZMO_nIIleNhkBV1vJeF_UUsVDCzrXNOYwV1Lbg9E0XY,6303
  validmind/vm_models/input.py,sha256=nTBQB6aqirhF-0Gmg5mYc4_vNyypvbYUfahMovcK02M,1095
  validmind/vm_models/model.py,sha256=s9pPIprHrju-qmGbzOZBcSHjZ_xgSv5ACXk92U1hEFY,6489
  validmind/vm_models/result/__init__.py,sha256=c0vMWMSY0O6ZeSCf0HfrWAI5t--4FKLEW5cZ2EZ70Ms,443
  validmind/vm_models/result/result.jinja,sha256=Yvovwm5gInCBukFRlvJXNlDIUpl2eFz4dz1lS3Sn_Gc,311
- validmind/vm_models/result/result.py,sha256=2Y1BwJ38YCp6rmPwENXp6q5xQ6utqxKX9C-e-il21gI,21283
+ validmind/vm_models/result/result.py,sha256=UcIHi67gnIZ1X6Q6W9HbZA8UIMupPTB2-aqfT5wo1HI,22097
  validmind/vm_models/result/utils.py,sha256=kjU8yaDBX874gebdKLA2KcCyW6ojk_nSTBZxHG7Gszc,4155
  validmind/vm_models/test_suite/__init__.py,sha256=tfTYd8yvzsDXzk5WDKMwCzPAbvkVUyEvtY5z5BPy-zk,215
  validmind/vm_models/test_suite/runner.py,sha256=JqW8LW4X1Ri2C6wSsAGSki-JxGUGV8zmruOnxybmZ1s,5432
  validmind/vm_models/test_suite/summary.py,sha256=7P4zhfeU7a3I1MMBn8f7s-2lzdAz7U4y6LblpR89_vE,5401
  validmind/vm_models/test_suite/test.py,sha256=C8xPGKSyYF9oMJ3VegwFJDF7cwYlIgtQoQ7nzXIS1uc,3914
  validmind/vm_models/test_suite/test_suite.py,sha256=CciC6IhrLEeWwcpY3Np8EmQCB8XEF2ljwEXcvmNYgZc,5090
- validmind-2.8.29.dist-info/LICENSE,sha256=XonPUfwjvrC5Ombl3y-ko0Wubb1xdG_7nzvIbkZRKHw,35772
- validmind-2.8.29.dist-info/METADATA,sha256=ZeBJF4CRuXpd8aiaxQQGBDbJxMkOz5sKThmWxkpGONg,6061
- validmind-2.8.29.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- validmind-2.8.29.dist-info/entry_points.txt,sha256=HuW7YyOv9u_OEWpViQXtv0nfoI67uieJHawKWA4Hv9A,76
- validmind-2.8.29.dist-info/RECORD,,
+ validmind-2.9.2.dist-info/LICENSE,sha256=XonPUfwjvrC5Ombl3y-ko0Wubb1xdG_7nzvIbkZRKHw,35772
+ validmind-2.9.2.dist-info/METADATA,sha256=0WjmrpvJ0GiX69Y8AJw4CAnMcimIswh7WGFumNGgvp8,6067
+ validmind-2.9.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ validmind-2.9.2.dist-info/entry_points.txt,sha256=HuW7YyOv9u_OEWpViQXtv0nfoI67uieJHawKWA4Hv9A,76
+ validmind-2.9.2.dist-info/RECORD,,