ai_critic-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_critic/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .critic import AICritic
+
+ __all__ = ["AICritic"]
ai_critic/critic.py ADDED
@@ -0,0 +1,30 @@
+ from ai_critic.evaluators import (
+     robustness,
+     config,
+     data,
+     performance
+ )
+
+ class AICritic:
+     """
+     Main orchestrator for evaluating sklearn models.
+     """
+
+     def __init__(self, model, X, y):
+         self.model = model
+         self.X = X
+         self.y = y
+
+     def evaluate(self):
+         report = {}
+
+         report["config"] = config(self.model)
+         report["data"] = data(self.X, self.y)
+         report["performance"] = performance(
+             self.model, self.X, self.y
+         )
+         report["robustness"] = robustness(
+             self.model, self.X, self.y
+         )
+
+         return report
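For orientation, `evaluate()` returns a plain dict keyed by evaluator name. The sketch below shows its rough shape for a small classifier; the keys mirror `critic.py` and the value shapes mirror the evaluator modules that follow, but the concrete numbers are illustrative only and depend on the model, the data, and the scikit-learn version.

```python
# Illustrative sketch -- key layout taken from critic.py and the evaluators;
# numeric values are placeholders, not real results.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from ai_critic import AICritic

X, y = load_iris(return_X_y=True)
report = AICritic(DecisionTreeClassifier(), X, y).evaluate()

# report has the shape:
# {
#   "config":      {"model_type": "DecisionTreeClassifier", "n_params": ..., "uses_random_state": True},
#   "data":        {"n_samples": 150, "n_features": 4, "has_nan": False, "class_balance": {...}},
#   "performance": {"cv_mean_score": ..., "cv_std": ...},
#   "robustness":  {"score_original": ..., "score_perturbed": ..., "delta": ...},
# }
```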
ai_critic/evaluators/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .robustness import evaluate as robustness
+ from .config import evaluate as config
+ from .data import evaluate as data
+ from .performance import evaluate as performance
ai_critic/evaluators/config.py ADDED
@@ -0,0 +1,6 @@
+ def evaluate(model):
+     return {
+         "model_type": type(model).__name__,
+         "n_params": len(model.get_params()),
+         "uses_random_state": "random_state" in model.get_params()
+     }
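A quick way to see what this evaluator reports (a sketch; since `n_params` is just `len(model.get_params())`, the exact count varies by estimator and scikit-learn version):

```python
from sklearn.linear_model import LogisticRegression
from ai_critic.evaluators import config

print(config(LogisticRegression()))
# e.g. {'model_type': 'LogisticRegression', 'n_params': 15, 'uses_random_state': True}
# n_params varies across scikit-learn versions as constructor arguments change
```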
ai_critic/evaluators/data.py ADDED
@@ -0,0 +1,14 @@
+ import numpy as np
+
+ def evaluate(X, y):
+     return {
+         "n_samples": X.shape[0],
+         "n_features": X.shape[1],
+         "has_nan": bool(
+             np.isnan(X).any() or np.isnan(y).any()
+         ),
+         "class_balance": (
+             dict(zip(*np.unique(y, return_counts=True)))
+             if len(set(y)) < 20 else "many_classes"
+         )
+     }
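One caveat worth knowing: `np.isnan` only accepts numeric arrays, so this evaluator assumes `X` and `y` are numeric NumPy arrays (object or string dtypes would raise a `TypeError`). A minimal usage sketch under that assumption:

```python
import numpy as np
from ai_critic.evaluators import data

X = np.random.rand(100, 4)   # numeric dtype, as np.isnan requires
y = np.repeat([0, 1], 50)

print(data(X, y))
# {'n_samples': 100, 'n_features': 4, 'has_nan': False,
#  'class_balance': {0: 50, 1: 50}}   # keys are numpy scalars in practice
```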
ai_critic/evaluators/performance.py ADDED
@@ -0,0 +1,11 @@
+ from sklearn.model_selection import cross_val_score
+
+ def evaluate(model, X, y):
+     scores = cross_val_score(
+         model, X, y, cv=3, n_jobs=1
+     )
+
+     return {
+         "cv_mean_score": float(scores.mean()),
+         "cv_std": float(scores.std())
+     }
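Since no `scoring` argument is passed, `cross_val_score` falls back to the estimator's default scorer: accuracy for classifiers, R² for regressors. A sketch of what that means in practice:

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from ai_critic.evaluators import performance

X, y = load_iris(return_X_y=True)

# cv=3 runs 3-fold cross-validation; the score reported here is accuracy,
# because DecisionTreeClassifier's default scorer is accuracy.
print(performance(DecisionTreeClassifier(random_state=0), X, y))
# e.g. {'cv_mean_score': 0.95..., 'cv_std': 0.02...}
```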
ai_critic/evaluators/robustness.py ADDED
@@ -0,0 +1,18 @@
+ import numpy as np
+ from sklearn.base import clone
+
+ def evaluate(model, X, y):
+     model_1 = clone(model)
+     model_2 = clone(model)
+
+     model_1.fit(X, y)
+     model_2.fit(X + np.random.normal(0, 1e-6, X.shape), y)
+
+     score_1 = model_1.score(X, y)
+     score_2 = model_2.score(X, y)
+
+     return {
+         "score_original": float(score_1),
+         "score_perturbed": float(score_2),
+         "delta": float(abs(score_1 - score_2))
+     }
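The check trains one clone on `X` and another on `X` plus tiny Gaussian noise (σ = 1e-6), then scores both on the original training inputs, so `delta` measures fit stability under perturbed training data rather than generalization. A usage sketch:

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from ai_critic.evaluators import robustness

X, y = load_iris(return_X_y=True)

# Both clones are scored on the *original* X, so `delta` isolates the effect
# of the noisy training data. np.random.normal is unseeded, so the exact
# delta can vary between runs.
result = robustness(DecisionTreeClassifier(random_state=0), X, y)
print(result["delta"])  # near 0.0 for a model that is stable under tiny noise
```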
ai_critic-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,64 @@
+ Metadata-Version: 2.4
+ Name: ai-critic
+ Version: 0.1.0
+ Summary: Fast AI evaluator for scikit-learn models
+ Author-email: Luiz Seabra <seu-email@exemplo.com>
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: scikit-learn
+
+ # AI Critic 🧠⚖️
+
+ **AI Critic** is a fast evaluator for scikit-learn models.
+ It analyzes configuration, robustness, data quality, and performance in minutes.
+
+ ## Install
+
+ ```bash
+ pip install ai-critic
+ ```
+
+ ## Quick Example
+
+ ```python
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.ensemble import RandomForestClassifier
+ from ai_critic import AICritic
+
+ X, y = load_breast_cancer(return_X_y=True)
+
+ model = RandomForestClassifier(n_estimators=50, max_depth=3)
+
+ critic = AICritic(model, X, y)
+ report = critic.evaluate()
+
+ print(report)
+ ```
+
+ ## What it evaluates
+
+ * Model configuration sanity
+ * Data consistency
+ * Robustness to noise
+ * Basic performance metrics
+
+ ## Philosophy
+
+ Fast, modular, and brutally honest AI evaluation.
+
+ 📌 A simple README = more trust
+
+ ---
+
+ ## Development & Testing
+
+ To test the package locally as an end user (required during development):
+
+ From the project root:
+
+ ```bash
+ pip install -e .
+ python -c "from ai_critic import AICritic; print('OK')"
+ python -m pytest
+ ```
ai_critic-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ ai_critic/__init__.py,sha256=H6DlPMmbcFUamhsNULPLk9vHx81XCiXuKKf63EJ8eM0,53
+ ai_critic/critic.py,sha256=ulkj6A6flREQy4HMdFj8ktp-iH_-5bsGkxsYE6zCMBE,635
+ ai_critic/evaluators/__init__.py,sha256=Jmmz9899YD__4Uj3bA6R7vYOwlH2giPc1wuCSLv7FVw,170
+ ai_critic/evaluators/config.py,sha256=3q4_Wg-lbrM_I_tcZl4InL0_OijlHNtnDBnewaGDtco,195
+ ai_critic/evaluators/data.py,sha256=gbDz1NdJ3vZuXRGV3mQtpfLgmVRnowfCTCJ0RH5SlxM,359
+ ai_critic/evaluators/performance.py,sha256=25Ja6jOaSc-maSVJNABrhGrxElYQH7IDGGbYx_Rr0J8,257
+ ai_critic/evaluators/robustness.py,sha256=InBx3bmTTGBU8yQ4I-E81xOC2gEAOpwpKRSqkxn2Cw4,435
+ ai_critic-0.1.0.dist-info/METADATA,sha256=iO4Oz0mSHsm0YzpeEzmwzAwJaNvY2hzb2dfn4RbBSaE,1286
+ ai_critic-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ai_critic-0.1.0.dist-info/top_level.txt,sha256=TRyZkm1vyLLcFDg_80yeg5cHvPis_oW1Ti170417jkw,10
+ ai_critic-0.1.0.dist-info/RECORD,,
ai_critic-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
ai_critic-0.1.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ ai_critic