validmind 2.1.1__py3-none-any.whl → 2.2.4__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
Files changed (113)
  1. validmind/__version__.py +1 -1
  2. validmind/ai.py +72 -49
  3. validmind/api_client.py +42 -16
  4. validmind/client.py +68 -25
  5. validmind/datasets/llm/rag/__init__.py +11 -0
  6. validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_1.csv +30 -0
  7. validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_2.csv +30 -0
  8. validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_3.csv +53 -0
  9. validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_4.csv +53 -0
  10. validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_5.csv +53 -0
  11. validmind/datasets/llm/rag/rfp.py +41 -0
  12. validmind/errors.py +1 -1
  13. validmind/html_templates/__init__.py +0 -0
  14. validmind/html_templates/content_blocks.py +89 -14
  15. validmind/models/__init__.py +7 -4
  16. validmind/models/foundation.py +8 -34
  17. validmind/models/function.py +51 -0
  18. validmind/models/huggingface.py +16 -46
  19. validmind/models/metadata.py +42 -0
  20. validmind/models/pipeline.py +66 -0
  21. validmind/models/pytorch.py +8 -42
  22. validmind/models/r_model.py +33 -82
  23. validmind/models/sklearn.py +39 -38
  24. validmind/template.py +8 -26
  25. validmind/tests/__init__.py +43 -20
  26. validmind/tests/data_validation/ANOVAOneWayTable.py +1 -1
  27. validmind/tests/data_validation/ChiSquaredFeaturesTable.py +1 -1
  28. validmind/tests/data_validation/DescriptiveStatistics.py +2 -4
  29. validmind/tests/data_validation/Duplicates.py +1 -1
  30. validmind/tests/data_validation/IsolationForestOutliers.py +2 -2
  31. validmind/tests/data_validation/LaggedCorrelationHeatmap.py +1 -1
  32. validmind/tests/data_validation/TargetRateBarPlots.py +1 -1
  33. validmind/tests/data_validation/nlp/LanguageDetection.py +59 -0
  34. validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +48 -0
  35. validmind/tests/data_validation/nlp/Punctuations.py +11 -12
  36. validmind/tests/data_validation/nlp/Sentiment.py +57 -0
  37. validmind/tests/data_validation/nlp/Toxicity.py +45 -0
  38. validmind/tests/decorator.py +12 -7
  39. validmind/tests/model_validation/BertScore.py +100 -98
  40. validmind/tests/model_validation/BleuScore.py +93 -64
  41. validmind/tests/model_validation/ContextualRecall.py +74 -91
  42. validmind/tests/model_validation/MeteorScore.py +86 -74
  43. validmind/tests/model_validation/RegardScore.py +103 -121
  44. validmind/tests/model_validation/RougeScore.py +118 -0
  45. validmind/tests/model_validation/TokenDisparity.py +84 -121
  46. validmind/tests/model_validation/ToxicityScore.py +109 -123
  47. validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +96 -0
  48. validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +71 -0
  49. validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +92 -0
  50. validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +69 -0
  51. validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +78 -0
  52. validmind/tests/model_validation/embeddings/StabilityAnalysis.py +35 -23
  53. validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py +3 -0
  54. validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py +7 -1
  55. validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py +3 -0
  56. validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py +3 -0
  57. validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +99 -0
  58. validmind/tests/model_validation/ragas/AnswerCorrectness.py +131 -0
  59. validmind/tests/model_validation/ragas/AnswerRelevance.py +134 -0
  60. validmind/tests/model_validation/ragas/AnswerSimilarity.py +119 -0
  61. validmind/tests/model_validation/ragas/AspectCritique.py +167 -0
  62. validmind/tests/model_validation/ragas/ContextEntityRecall.py +133 -0
  63. validmind/tests/model_validation/ragas/ContextPrecision.py +123 -0
  64. validmind/tests/model_validation/ragas/ContextRecall.py +123 -0
  65. validmind/tests/model_validation/ragas/ContextRelevancy.py +114 -0
  66. validmind/tests/model_validation/ragas/Faithfulness.py +119 -0
  67. validmind/tests/model_validation/ragas/utils.py +66 -0
  68. validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +3 -7
  69. validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py +8 -9
  70. validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py +5 -10
  71. validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py +3 -2
  72. validmind/tests/model_validation/sklearn/ROCCurve.py +2 -1
  73. validmind/tests/model_validation/sklearn/RegressionR2Square.py +1 -1
  74. validmind/tests/model_validation/sklearn/RobustnessDiagnosis.py +2 -3
  75. validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +7 -11
  76. validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +3 -4
  77. validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +1 -1
  78. validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +1 -1
  79. validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py +1 -1
  80. validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py +1 -1
  81. validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +1 -1
  82. validmind/tests/model_validation/statsmodels/RegressionModelsCoeffs.py +1 -1
  83. validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py +1 -1
  84. validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +5 -6
  85. validmind/unit_metrics/__init__.py +26 -49
  86. validmind/unit_metrics/composite.py +13 -7
  87. validmind/unit_metrics/regression/sklearn/AdjustedRSquaredScore.py +1 -1
  88. validmind/utils.py +99 -6
  89. validmind/vm_models/__init__.py +1 -1
  90. validmind/vm_models/dataset/__init__.py +7 -0
  91. validmind/vm_models/dataset/dataset.py +560 -0
  92. validmind/vm_models/dataset/utils.py +146 -0
  93. validmind/vm_models/model.py +97 -72
  94. validmind/vm_models/test/metric.py +9 -24
  95. validmind/vm_models/test/result_wrapper.py +124 -28
  96. validmind/vm_models/test/threshold_test.py +10 -28
  97. validmind/vm_models/test_context.py +1 -1
  98. validmind/vm_models/test_suite/summary.py +3 -4
  99. {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/METADATA +5 -3
  100. {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/RECORD +103 -78
  101. validmind/models/catboost.py +0 -33
  102. validmind/models/statsmodels.py +0 -50
  103. validmind/models/xgboost.py +0 -30
  104. validmind/tests/model_validation/BertScoreAggregate.py +0 -90
  105. validmind/tests/model_validation/RegardHistogram.py +0 -148
  106. validmind/tests/model_validation/RougeMetrics.py +0 -147
  107. validmind/tests/model_validation/RougeMetricsAggregate.py +0 -133
  108. validmind/tests/model_validation/SelfCheckNLIScore.py +0 -112
  109. validmind/tests/model_validation/ToxicityHistogram.py +0 -136
  110. validmind/vm_models/dataset.py +0 -1303
  111. {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/LICENSE +0 -0
  112. {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/WHEEL +0 -0
  113. {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/entry_points.txt +0 -0

validmind/tests/model_validation/BertScore.py
@@ -2,116 +2,118 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial

-import itertools
-from dataclasses import dataclass
-
 import evaluate
 import pandas as pd
 import plotly.graph_objects as go

-from validmind.vm_models import Figure, Metric
+from validmind import tags, tasks


-@dataclass
-class BertScore(Metric):
+@tags("nlp", "text_data", "visualization")
+@tasks("text_classification", "text_summarization")
+def BertScore(dataset, model):
     """
-    Evaluates text generation models' performance by calculating precision, recall, and F1 score based on BERT
+    Evaluates the quality of machine-generated text using BERTScore metrics and visualizes the results through histograms
+    and bar charts, alongside compiling a comprehensive table of descriptive statistics for each BERTScore metric.
+
+    **Purpose:**
+    This function is designed to assess the quality of text generated by machine learning models using BERTScore metrics.
+    BERTScore evaluates text generation models' performance by calculating precision, recall, and F1 score based on BERT
     contextual embeddings.

-    **Purpose**: The BERTScore metric is deployed to evaluate the competence of text generation models by focusing on
-    the similarity between the reference and the generated text. It employs the contextual embeddings from BERT models
-    to assess the similarity of the contents. This measures the extent to which a model has learned and can generate
-    contextually relevant results.
-
-    **Test Mechanism**: The true values derived from the model's test dataset and the model's predictions are employed
-    in this metric. BERTScore calculates the precision, recall, and F1 score of the model considering the contextual
-    similarity between the reference and the produced text. These scores are computed for each token in the predicted
-    sentences as compared to the reference sentences, while considering the cosine similarity with BERT embeddings. A
-    line plot depicting the score changes across row indexes is generated for each metric i.e., Precision, Recall, and
-    F1 Score.
-
-    **Signs of High Risk**:
-    - Observable downward trend in Precision, Recall, or F1 Score.
-    - Noticeable instability or fluctuation in these metrics. Lower Precision implies that predictions often
-    incorporate irrelevant contexts.
-    - Declining Recall suggests that the model frequently omits relevant contexts during predictions.
-    - Lower F1 score signals poor overall performance in both precision and recall.
-
-    **Strengths**:
-    - BERTScore efficiently detects the quality of text that requires to comprehend the context, a common requirement
-    in natural language processing tasks.
-    - This metric advances beyond the simple n-gram matching and considers the semantic similarity in the context,
-    thereby providing more meaningful evaluation results.
-    - The integrated visualization function allows tracking of the performance trends across different prediction sets.
-
-    **Limitations**:
-    - Dependence on BERT model embeddings for BERTScore implies that if the base BERT model is not suitable for a
-    specific task, it might impair the accuracy of BERTScore.
-    - Despite being good at understanding semantics, it might be incapable of capturing certain nuances in text
-    similarity that other metrics like BLEU or ROUGE could detect.
-    - Can be computationally expensive due to the utilization of BERT embeddings.
+    **Test Mechanism:**
+    The function starts by extracting the true and predicted values from the provided dataset and model. It then initializes
+    the BERTScore evaluator. For each pair of true and predicted texts, the function calculates the BERTScore metrics and
+    compiles them into a dataframe. Histograms and bar charts are generated for each BERTScore metric (Precision, Recall,
+    and F1 Score) to visualize their distribution. Additionally, a table of descriptive statistics (mean, median, standard
+    deviation, minimum, and maximum) is compiled for each metric, providing a comprehensive summary of the model's performance.
+
+    **Signs of High Risk:**
+    - Consistently low scores across BERTScore metrics could indicate poor quality in the generated text, suggesting that the model
+    fails to capture the essential content of the reference texts.
+    - Low precision scores might suggest that the generated text contains a lot of redundant or irrelevant information.
+    - Low recall scores may indicate that important information from the reference text is being omitted.
+    - An imbalanced performance between precision and recall, reflected by a low F1 Score, could signal issues in the model's ability
+    to balance informativeness and conciseness.
+
+    **Strengths:**
+    - Provides a multifaceted evaluation of text quality through different BERTScore metrics, offering a detailed view of model performance.
+    - Visual representations (histograms and bar charts) make it easier to interpret the distribution and trends of the scores.
+    - Descriptive statistics offer a concise summary of the model's strengths and weaknesses in generating text.
+
+    **Limitations:**
+    - BERTScore relies on the contextual embeddings from BERT models, which may not fully capture all nuances of text similarity.
+    - The evaluation relies on the availability of high-quality reference texts, which may not always be obtainable.
+    - While useful for comparison, BERTScore metrics alone do not provide a complete assessment of a model's performance and should be
+    supplemented with other metrics and qualitative analysis.
     """

-    name = "bert_score"
-    required_inputs = ["model", "dataset"]
-
-    def run(self):
-        y_true = list(itertools.chain.from_iterable(self.inputs.dataset.y))
-        y_pred = self.inputs.dataset.y_pred(self.inputs.model)
-
-        # Load the bert evaluation metric
-        bert = evaluate.load("bertscore")
-
-        # Compute the BLEU score
-        bert_s = bert.compute(
-            predictions=y_pred,
-            references=y_true,
-            lang="en",
+    # Extract true and predicted values
+    y_true = dataset.y
+    y_pred = dataset.y_pred(model)
+
+    # Ensure y_true and y_pred have the same length
+    if len(y_true) != len(y_pred):
+        min_length = min(len(y_true), len(y_pred))
+        y_true = y_true[:min_length]
+        y_pred = y_pred[:min_length]
+
+    # Load the BERT evaluation metric
+    bert = evaluate.load("bertscore")
+
+    # Compute the BERT score
+    bert_s = bert.compute(
+        predictions=y_pred,
+        references=y_true,
+        lang="en",
+    )
+
+    # Convert scores to a dataframe
+    metrics_df = pd.DataFrame(bert_s)
+    figures = []
+
+    # Generate histograms and bar charts for each score type
+    score_types = ["precision", "recall", "f1"]
+    score_names = ["Precision", "Recall", "F1 Score"]
+
+    for score_type, score_name in zip(score_types, score_names):
+        # Histogram
+        hist_fig = go.Figure(data=[go.Histogram(x=metrics_df[score_type])])
+        hist_fig.update_layout(
+            title=f"{score_name} Histogram",
+            xaxis_title=score_name,
+            yaxis_title="Count",
         )
+        figures.append(hist_fig)

-        metrics_df = pd.DataFrame(bert_s)
-        figures = []
-
-        # Visualization part
-        fig = go.Figure()
-
-        # Adding the line plots
-        fig.add_trace(
-            go.Scatter(
-                x=metrics_df.index,
-                y=metrics_df["precision"],
-                mode="lines+markers",
-                name="Precision",
-            )
-        )
-        fig.add_trace(
-            go.Scatter(
-                x=metrics_df.index,
-                y=metrics_df["recall"],
-                mode="lines+markers",
-                name="Recall",
-            )
-        )
-        fig.add_trace(
-            go.Scatter(
-                x=metrics_df.index,
-                y=metrics_df["f1"],
-                mode="lines+markers",
-                name="F1 Score",
-            )
-        )
-
-        fig.update_layout(
-            title="Bert Scores for Each Row",
+        # Bar Chart
+        bar_fig = go.Figure(data=[go.Bar(x=metrics_df.index, y=metrics_df[score_type])])
+        bar_fig.update_layout(
+            title=f"{score_name} Bar Chart",
             xaxis_title="Row Index",
-            yaxis_title="Score",
-        )
-        figures.append(
-            Figure(
-                for_object=self,
-                key=self.key,
-                figure=fig,
-            )
+            yaxis_title=score_name,
         )
-
-        return self.cache_results(figures=figures)
+        figures.append(bar_fig)
+
+    # Calculate statistics for each score type
+    stats_df = metrics_df.describe().loc[["mean", "50%", "max", "min", "std"]]
+    stats_df = stats_df.rename(
+        index={
+            "mean": "Mean Score",
+            "50%": "Median Score",
+            "max": "Max Score",
+            "min": "Min Score",
+            "std": "Standard Deviation",
+        }
+    ).T
+    stats_df["Count"] = len(metrics_df)
+
+    # Rename metrics for clarity
+    stats_df.index = stats_df.index.map(
+        {"precision": "Precision", "recall": "Recall", "f1": "F1 Score"}
+    )
+
+    # Create a DataFrame from all collected statistics
+    result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"})
+
+    return (result_df, *tuple(figures))
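
The refactor above replaces the Metric subclass with a plain function registered through the new @tags/@tasks decorators, so the test is now invoked through ValidMind's test-running entry point rather than instantiated directly. A minimal usage sketch follows; it assumes vm_ds and vm_model are ValidMind dataset and model objects that already have predictions assigned, and it relies on validmind.tests.run_test accepting a test ID plus an inputs mapping as described in the 2.x documentation (not verified against 2.2.4 specifically).

# Usage sketch (assumption: vm_ds and vm_model were already created with
# vm.init_dataset(...) / vm.init_model(...) and predictions have been assigned,
# so dataset.y_pred(model) is available inside the test).
import validmind as vm

result = vm.tests.run_test(
    "validmind.model_validation.BertScore",        # test ID of the function shown in the diff
    inputs={"dataset": vm_ds, "model": vm_model},  # mapped onto the (dataset, model) parameters
)
result.log()  # optionally send the summary table and figures to the ValidMind platform

The same calling pattern applies to the refactored BleuScore and ContextualRecall tests below, with only the test ID changing.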

validmind/tests/model_validation/BleuScore.py
@@ -2,77 +2,106 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial

-from dataclasses import dataclass
-
 import evaluate
+import pandas as pd
+import plotly.graph_objects as go

-from validmind.vm_models import Metric, ResultSummary, ResultTable, ResultTableMetadata
+from validmind import tags, tasks


-@dataclass
-class BleuScore(Metric):
+@tags("nlp", "text_data", "visualization")
+@tasks("text_classification", "text_summarization")
+def BleuScore(dataset, model):
     """
-    Assesses translation quality by comparing machine-translated sentences with human-translated ones using BLEU score.
-
-    **Purpose**: The Bilingual Evaluation Understudy (BLEU) metric measures the quality of machine-translated text by
-    comparing it to human-translated text. This comparison is done at the sentence level and is designed to bring
-    machine translations closer to the quality of a professional human translation. It is commonly used in the field of
-    translation evaluation, and its purpose is to assess the accuracy of a model's output against that of a benchmark.
-
-    **Test Mechanism**: The BLEU score is implemented using the NLTK's word_tokenize function to split the text into
-    individual words. After tokenization, the evaluate library's BLEU metric calculates the BLEU score for each
-    translated sentence by comparing the model's translations (predictions) against the actual, correct translations
-    (references). The test algorithm then combines these individual scores into a single score that represents the
-    average 'distance' between the generated translations and the human translations across the entire test set.
-
-    **Signs of High Risk**:
-    - Low BLEU scores suggest high model risk. This could indicate significant discrepancies between the machine
-    translation and its human equivalent.
-    - This could be due to ineffective model learning, overfitting of training data, or inadequate handling of the
-    language's nuances.
-    - Machine biases toward a certain language style or translation mode can result in lower scores.
-
-    **Strengths**:
-    - The BLEU score's primary strength lies in its simplicity and interpretability. It offers a straightforward way to
-    assess translated text quality, and its calculations often align with human judgments.
-    - The BLEU score breaks down its evaluations at the sentence level, offering granular insights into any errors.
-    - The score consolidates the model’s performance into a single, comprehensive score, making it easy to compare and
-    monitor.
-
-    **Limitations**:
-    - The BLEU score heavily favours exact matches, which can create a bias towards literal translations. Thus, it may
-    fail to fully evaluate more complex or flexible translations that shy away from a word-for-word structure.
-    - The score does not directly measure the intelligibility or grammatical correctness of the translations.
-    - It may miss errors originating from subtle nuances in language, cultural contexts, or ambiguities.
+    Evaluates the quality of machine-generated text using BLEU metrics and visualizes the results through histograms
+    and bar charts, alongside compiling a comprehensive table of descriptive statistics for BLEU scores.
+
+    **Purpose:**
+    This function is designed to assess the quality of text generated by machine learning models using the BLEU metric.
+    BLEU, which stands for Bilingual Evaluation Understudy, is a metric used to evaluate the overlap of n-grams between
+    the machine-generated text and reference texts. This evaluation is crucial for tasks such as text summarization,
+    machine translation, and text generation, where the goal is to produce text that accurately reflects the content
+    and meaning of human-crafted references.
+
+    **Test Mechanism:**
+    The function starts by extracting the true and predicted values from the provided dataset and model. It then initializes
+    the BLEU evaluator. For each pair of true and predicted texts, the function calculates the BLEU scores and compiles them
+    into a dataframe. Histograms and bar charts are generated for the BLEU scores to visualize their distribution. Additionally,
+    a table of descriptive statistics (mean, median, standard deviation, minimum, and maximum) is compiled for the BLEU scores,
+    providing a comprehensive summary of the model's performance.
+
+    **Signs of High Risk:**
+    - Consistently low BLEU scores could indicate poor quality in the generated text, suggesting that the model fails to capture
+    the essential content of the reference texts.
+    - Low precision scores might suggest that the generated text contains a lot of redundant or irrelevant information.
+    - Low recall scores may indicate that important information from the reference text is being omitted.
+    - An imbalanced performance between precision and recall, reflected by a low BLEU score, could signal issues in the model's
+    ability to balance informativeness and conciseness.
+
+    **Strengths:**
+    - Provides a straightforward and widely-used evaluation of text quality through BLEU scores.
+    - Visual representations (histograms and bar charts) make it easier to interpret the distribution and trends of the scores.
+    - Descriptive statistics offer a concise summary of the model's strengths and weaknesses in generating text.
+
+    **Limitations:**
+    - BLEU metrics primarily focus on n-gram overlap and may not fully capture semantic coherence, fluency, or grammatical quality
+    of the text.
+    - The evaluation relies on the availability of high-quality reference texts, which may not always be obtainable.
+    - While useful for comparison, BLEU scores alone do not provide a complete assessment of a model's performance and should be
+    supplemented with other metrics and qualitative analysis.
     """

-    name = "bleu_score"
-    required_inputs = ["model", "dataset"]
+    # Extract true and predicted values
+    y_true = dataset.y
+    y_pred = dataset.y_pred(model)

-    def run(self):
-        # Load the BLEU evaluation metric
-        bleu = evaluate.load("bleu")
+    # Load the BLEU evaluation metric
+    bleu = evaluate.load("bleu")

+    # Calculate BLEU scores
+    score_list = []
+    for y_t, y_p in zip(y_true, y_pred):
         # Compute the BLEU score
-        bleu = bleu.compute(
-            predictions=self.inputs.dataset.y_pred(self.inputs.model),
-            references=self.inputs.dataset.y,
-        )
-        return self.cache_results(metric_value={"blue_score_metric": bleu})
-
-    def summary(self, metric_value):
-        """
-        Build one table for summarizing the bleu score results
-        """
-        summary_bleu_score = metric_value["blue_score_metric"]
-
-        table = []
-        table.append(summary_bleu_score)
-        return ResultSummary(
-            results=[
-                ResultTable(
-                    data=table,
-                    metadata=ResultTableMetadata(title="Bleu score Results"),
-                ),
-            ]
-        )
+        score = bleu.compute(predictions=[y_p], references=[[y_t]])
+        score_list.append(score["bleu"])
+
+    # Convert scores to a dataframe
+    metrics_df = pd.DataFrame(score_list, columns=["BLEU Score"])
+
+    figures = []
+
+    # Histogram for BLEU Score
+    hist_fig = go.Figure(data=[go.Histogram(x=metrics_df["BLEU Score"])])
+    hist_fig.update_layout(
+        title="BLEU Score Histogram",
+        xaxis_title="BLEU Score",
+        yaxis_title="Count",
+    )
+    figures.append(hist_fig)
+
+    # Bar Chart for BLEU Score
+    bar_fig = go.Figure(data=[go.Bar(x=metrics_df.index, y=metrics_df["BLEU Score"])])
+    bar_fig.update_layout(
+        title="BLEU Score Bar Chart",
+        xaxis_title="Row Index",
+        yaxis_title="BLEU Score",
+    )
+    figures.append(bar_fig)
+
+    # Calculate statistics for BLEU Score
+    stats_df = metrics_df.describe().loc[["mean", "50%", "max", "min", "std"]]
+    stats_df = stats_df.rename(
+        index={
+            "mean": "Mean Score",
+            "50%": "Median Score",
+            "max": "Max Score",
+            "min": "Min Score",
+            "std": "Standard Deviation",
+        }
+    ).T
+    stats_df["Count"] = len(metrics_df)
+
+    # Create a DataFrame from all collected statistics
+    result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"})
+
+    return (result_df, *tuple(figures))
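
The core of the new implementation is the per-row scoring loop: one bleu.compute call per prediction/reference pair, with the scalar "bleu" value collected into a column that is then plotted and summarized. The standalone snippet below reproduces that loop outside the ValidMind harness with made-up example sentences (the evaluate package downloads the metric module on first use).

# Standalone sketch of the per-row BLEU scoring used by the new BleuScore test.
import evaluate
import pandas as pd

references = ["the cat sat on the mat", "there is a dog in the garden"]
predictions = ["the cat sat on a mat", "a dog is in the garden"]

bleu = evaluate.load("bleu")

# One BLEU value per pair; sentence-level BLEU is often 0.0 when no 4-gram matches exist.
scores = [
    bleu.compute(predictions=[pred], references=[[ref]])["bleu"]
    for ref, pred in zip(references, predictions)
]

metrics_df = pd.DataFrame(scores, columns=["BLEU Score"])
print(metrics_df.describe().loc[["mean", "50%", "std", "min", "max"]])

Note that averaging per-row scores like this is sentence-level BLEU, which is stricter than a single corpus-level bleu.compute call over all pairs.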

validmind/tests/model_validation/ContextualRecall.py
@@ -2,109 +2,92 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial

-import itertools
-from dataclasses import dataclass
-
 import nltk
 import pandas as pd
 import plotly.graph_objects as go

-from validmind.vm_models import Figure, Metric
+from validmind import tags, tasks


-@dataclass
-class ContextualRecall(Metric):
+@tags("nlp", "text_data", "visualization")
+@tasks("text_classification", "text_summarization")
+def ContextualRecall(dataset, model):
     """
-    Evaluates a Natural Language Generation model's ability to generate contextually relevant and factually correct
-    text.
-
-    **Purpose**:
-    The Contextual Recall metric is used to evaluate the ability of a natural language generation (NLG) model to
-    generate text that appropriately reflects the given context or prompt. It measures the model's capability to
-    remember and reproduce the main context in its resulting output. This metric is critical in natural language
-    processing tasks, as the coherency and contextuality of the generated text are essential.
-
-    **Test Mechanism**:
-
-    1. **Preparation of Reference and Candidate Texts**:
-    - **Reference Texts**: Gather the reference text(s) which exemplify the expected or ideal output for a specific
-    context or prompt.
-    - **Candidate Texts**: Generate candidate text(s) from the NLG model under evaluation using the same context.
-    2. **Tokenization and Preprocessing**:
-    - Tokenize the reference and candidate texts into discernible words or tokens using libraries such as NLTK.
-    3. **Computation of Contextual Recall**:
-    - Identify the token overlap between the reference and candidate texts.
-    - The Contextual Recall score is computed by dividing the number of overlapping tokens by the total number of
-    tokens in the reference text. Scores are calculated for each test dataset instance, resulting in an array of
-    scores. These scores are then visualized using a line plot to show score variations across different rows.
-
-    **Signs of High Risk**:
-
-    - Low contextual recall scores could indicate that the model is not effectively reflecting the original context in
-    its output, leading to incoherent or contextually misaligned text.
-    - A consistent trend of low recall scores could suggest underperformance of the model.
+    Evaluates a Natural Language Generation model's ability to generate contextually relevant and factually correct text, visualizing the results through histograms and bar charts, alongside compiling a comprehensive table of descriptive statistics for contextual recall scores.

-    **Strengths**:
+    **Purpose:**
+    The Contextual Recall metric is used to evaluate the ability of a natural language generation (NLG) model to generate text that appropriately reflects the given context or prompt. It measures the model's capability to remember and reproduce the main context in its resulting output. This metric is critical in natural language processing tasks, as the coherency and contextuality of the generated text are essential.

-    - The Contextual Recall metric provides a quantifiable measure of a model's adherence to the context and factual
-    elements of the generated narrative.
-    - This metric finds particular value in applications requiring deep comprehension of context, such as text
-    continuation or interactive dialogue systems.
-    - The line plot visualization provides a clear and intuitive representation of score fluctuations.
+    **Test Mechanism:**
+    The function starts by extracting the true and predicted values from the provided dataset and model. It then tokenizes the reference and candidate texts into discernible words or tokens using NLTK. The token overlap between the reference and candidate texts is identified, and the Contextual Recall score is computed by dividing the number of overlapping tokens by the total number of tokens in the reference text. Scores are calculated for each test dataset instance, resulting in an array of scores. These scores are visualized using a histogram and a bar chart to show score variations across different rows. Additionally, a table of descriptive statistics (mean, median, standard deviation, minimum, and maximum) is compiled for the contextual recall scores, providing a comprehensive summary of the model's performance.
+
+    **Signs of High Risk:**
+    - Low contextual recall scores could indicate that the model is not effectively reflecting the original context in its output, leading to incoherent or contextually misaligned text.
+    - A consistent trend of low recall scores could suggest underperformance of the model.

-    **Limitations**:
+    **Strengths:**
+    - Provides a quantifiable measure of a model's adherence to the context and factual elements of the generated narrative.
+    - Visual representations (histograms and bar charts) make it easier to interpret the distribution and trends of contextual recall scores.
+    - Descriptive statistics offer a concise summary of the model's performance in generating contextually relevant texts.

-    - Despite its effectiveness, the Contextual Recall could fail to comprehensively assess the performance of NLG
-    models. Its focus on word overlap could result in high scores for texts that use many common words, even when these
-    texts lack coherence or meaningful context.
+    **Limitations:**
+    - The focus on word overlap could result in high scores for texts that use many common words, even when these texts lack coherence or meaningful context.
     - This metric does not consider the order of words, which could lead to overestimated scores for scrambled outputs.
     - Models that effectively use infrequent words might be undervalued, as these words might not overlap as often.
     """

-    name = "contextual_recall"
-    required_inputs = ["model", "dataset"]
-
-    def run(self):
-        y_true = list(itertools.chain.from_iterable(self.inputs.dataset.y))
-        y_pred = self.inputs.dataset.y_pred(self.inputs.model)
-
-        score_list = []
-        for y_t, y_p in zip(y_true, y_pred):
-            # Tokenize the reference and candidate texts
-            reference_tokens = nltk.word_tokenize(y_t.lower())
-            candidate_tokens = nltk.word_tokenize(y_p.lower())
-
-            # Calculate overlapping tokens
-            overlapping_tokens = set(reference_tokens) & set(candidate_tokens)
-
-            # Compute contextual recall
-            score_list.append(len(overlapping_tokens) / len(reference_tokens))
-
-        metrics_df = pd.DataFrame(score_list, columns=["Contextual Recall"])
-        figures = []
-        # Visualization part
-        fig = go.Figure()
-
-        # Adding the line plots
-        fig.add_trace(
-            go.Scatter(
-                x=metrics_df.index,
-                y=metrics_df["Contextual Recall"],
-                mode="lines+markers",
-                name="Contextual Recall",
-            )
-        )
-        fig.update_layout(
-            title="Contextual Recall scores for each row",
-            xaxis_title="Row Index",
-            yaxis_title="Score",
-        )
-        figures.append(
-            Figure(
-                for_object=self,
-                key=self.key,
-                figure=fig,
-            )
-        )
-
-        return self.cache_results(figures=figures)
+    y_true = dataset.y
+    y_pred = dataset.y_pred(model)
+
+    score_list = []
+    for y_t, y_p in zip(y_true, y_pred):
+        # Tokenize the reference and candidate texts
+        reference_tokens = nltk.word_tokenize(y_t.lower())
+        candidate_tokens = nltk.word_tokenize(y_p.lower())
+
+        # Calculate overlapping tokens
+        overlapping_tokens = set(reference_tokens) & set(candidate_tokens)
+
+        # Compute contextual recall
+        score_list.append(len(overlapping_tokens) / len(reference_tokens))
+
+    metrics_df = pd.DataFrame(score_list, columns=["Contextual Recall"])
+    figures = []
+
+    # Histogram for Contextual Recall
+    hist_fig = go.Figure(data=[go.Histogram(x=metrics_df["Contextual Recall"])])
+    hist_fig.update_layout(
+        title="Contextual Recall Histogram",
+        xaxis_title="Contextual Recall",
+        yaxis_title="Count",
+    )
+    figures.append(hist_fig)
+
+    # Bar Chart for Contextual Recall
+    bar_fig = go.Figure(
+        data=[go.Bar(x=metrics_df.index, y=metrics_df["Contextual Recall"])]
+    )
+    bar_fig.update_layout(
+        title="Contextual Recall Bar Chart",
+        xaxis_title="Row Index",
+        yaxis_title="Contextual Recall",
+    )
+    figures.append(bar_fig)
+
+    # Calculate statistics for Contextual Recall
+    stats_df = metrics_df.describe().loc[["mean", "50%", "max", "min", "std"]]
+    stats_df = stats_df.rename(
+        index={
+            "mean": "Mean Score",
+            "50%": "Median Score",
+            "max": "Max Score",
+            "min": "Min Score",
+            "std": "Standard Deviation",
+        }
+    ).T
+    stats_df["Count"] = len(metrics_df)
+
+    # Create a DataFrame from all collected statistics
+    result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"})
+
+    return (result_df, *tuple(figures))
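
The contextual recall computation itself is a simple token-overlap ratio: the share of reference tokens that also appear in the candidate text. The snippet below reproduces that calculation on a pair of made-up strings, outside the ValidMind harness; it assumes the NLTK tokenizer data can be downloaded (older releases use punkt, newer ones also look for punkt_tab).

# Standalone sketch of the token-overlap recall used by the ContextualRecall test.
import nltk

nltk.download("punkt", quiet=True)      # tokenizer data for older NLTK releases
nltk.download("punkt_tab", quiet=True)  # newer NLTK releases look for this resource instead

reference = "The quarterly revenue grew by ten percent year over year."
candidate = "Revenue grew ten percent compared to the previous year."

reference_tokens = nltk.word_tokenize(reference.lower())
candidate_tokens = nltk.word_tokenize(candidate.lower())

# Same formula as the test: overlapping unique tokens over the reference token count.
overlapping_tokens = set(reference_tokens) & set(candidate_tokens)
recall = len(overlapping_tokens) / len(reference_tokens)
print(f"Contextual recall: {recall:.2f}")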