ai-critic 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,64 @@
+ Metadata-Version: 2.4
+ Name: ai-critic
+ Version: 0.1.0
+ Summary: Fast AI evaluator for scikit-learn models
+ Author-email: Luiz Seabra <seu-email@exemplo.com>
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: scikit-learn
+
+ # AI Critic 🧠⚖️
+
+ **AI Critic** is a fast evaluator for scikit-learn models.
+ It analyzes model configuration, robustness, data quality, and performance in minutes.
+
+ ## Install
+
+ ```bash
+ pip install ai-critic
+ ```
+
+ ## Quick Example
+
+ ```python
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.ensemble import RandomForestClassifier
+ from ai_critic import AICritic
+
+ X, y = load_breast_cancer(return_X_y=True)
+
+ model = RandomForestClassifier(n_estimators=50, max_depth=3)
+
+ critic = AICritic(model, X, y)
+ report = critic.evaluate()
+
+ print(report)
+ ```
+
+ ## What it evaluates
+
+ * Model configuration sanity
+ * Data consistency
+ * Robustness to noise
+ * Basic performance metrics
+
+ ## Philosophy
+
+ Fast, modular, and brutally honest AI evaluation.
+
+ 📌 A simple README means more trust.
+
+ ---
+
+ ## Development & Testing
+
+ To test the package locally as an end user (required during development):
+
+ From the project root:
+
+ ```bash
+ pip install -e .
+ python -c "from ai_critic import AICritic; print('OK')"
+ python -m pytest
+ ```
@@ -0,0 +1,54 @@
+ # AI Critic 🧠⚖️
+
+ **AI Critic** is a fast evaluator for scikit-learn models.
+ It analyzes model configuration, robustness, data quality, and performance in minutes.
+
+ ## Install
+
+ ```bash
+ pip install ai-critic
+ ```
+
+ ## Quick Example
+
+ ```python
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.ensemble import RandomForestClassifier
+ from ai_critic import AICritic
+
+ X, y = load_breast_cancer(return_X_y=True)
+
+ model = RandomForestClassifier(n_estimators=50, max_depth=3)
+
+ critic = AICritic(model, X, y)
+ report = critic.evaluate()
+
+ print(report)
+ ```
+
+ ## What it evaluates
+
+ * Model configuration sanity
+ * Data consistency
+ * Robustness to noise
+ * Basic performance metrics
+
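+ The report returned by `evaluate()` is a plain dictionary with one entry per check. A rough sketch of its shape for the RandomForest example above (the keys match the evaluators in this release; the numbers are only illustrative):
+
+ ```python
+ # Illustrative structure of the report dict: only the keys are taken from
+ # the shipped evaluators; the values below are made-up examples.
+ {
+     "config": {"model_type": "RandomForestClassifier", "n_params": 19, "uses_random_state": True},
+     "data": {"n_samples": 569, "n_features": 30, "has_nan": False, "class_balance": {0: 212, 1: 357}},
+     "performance": {"cv_mean_score": 0.94, "cv_std": 0.01},
+     "robustness": {"score_original": 0.973, "score_perturbed": 0.970, "delta": 0.003},
+ }
+ ```
+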
+ ## Philosophy
+
+ Fast, modular, and brutally honest AI evaluation.
+
+ 📌 A simple README means more trust.
+
+ ---
+
+ ## Development & Testing
+
+ To test the package locally as an end user (required during development):
+
+ From the project root:
+
+ ```bash
+ pip install -e .
+ python -c "from ai_critic import AICritic; print('OK')"
+ python -m pytest
+ ```
@@ -0,0 +1,3 @@
+ from .critic import AICritic
+
+ __all__ = ["AICritic"]
@@ -0,0 +1,30 @@
+ from ai_critic.evaluators import (
+     robustness,
+     config,
+     data,
+     performance
+ )
+
+ class AICritic:
+     """
+     Main orchestrator for evaluating scikit-learn models.
+     """
+
+     def __init__(self, model, X, y):
+         self.model = model
+         self.X = X
+         self.y = y
+
+     def evaluate(self):
+         # Run each evaluator and collect the results in a single report dict.
+         report = {}
+
+         report["config"] = config(self.model)
+         report["data"] = data(self.X, self.y)
+         report["performance"] = performance(
+             self.model, self.X, self.y
+         )
+         report["robustness"] = robustness(
+             self.model, self.X, self.y
+         )
+
+         return report
@@ -0,0 +1,4 @@
+ from .robustness import evaluate as robustness
+ from .config import evaluate as config
+ from .data import evaluate as data
+ from .performance import evaluate as performance
@@ -0,0 +1,6 @@
+ def evaluate(model):
+     """Basic sanity checks on the model's configuration."""
+     return {
+         "model_type": type(model).__name__,
+         "n_params": len(model.get_params()),
+         "uses_random_state": "random_state" in model.get_params()
+     }
@@ -0,0 +1,14 @@
+ import numpy as np
+
+ def evaluate(X, y):
+     """Basic data consistency checks: size, NaN presence, and class balance."""
+     return {
+         "n_samples": X.shape[0],
+         "n_features": X.shape[1],
+         "has_nan": bool(
+             np.isnan(X).any() or np.isnan(y).any()
+         ),
+         "class_balance": (
+             dict(zip(*np.unique(y, return_counts=True)))
+             if len(set(y)) < 20 else "many_classes"
+         )
+     }
@@ -0,0 +1,11 @@
+ from sklearn.model_selection import cross_val_score
+
+ def evaluate(model, X, y):
+     """Basic performance check: 3-fold cross-validated score."""
+     scores = cross_val_score(
+         model, X, y, cv=3, n_jobs=1
+     )
+
+     return {
+         "cv_mean_score": float(scores.mean()),
+         "cv_std": float(scores.std())
+     }
@@ -0,0 +1,18 @@
+ import numpy as np
+ from sklearn.base import clone
+
+ def evaluate(model, X, y):
+     """Robustness check: refit on slightly perturbed inputs and compare scores."""
+     model_1 = clone(model)
+     model_2 = clone(model)
+
+     # Fit one clone on the original data and one on data with tiny Gaussian
+     # noise added, then score both on the original data.
+     model_1.fit(X, y)
+     model_2.fit(X + np.random.normal(0, 1e-6, X.shape), y)
+
+     score_1 = model_1.score(X, y)
+     score_2 = model_2.score(X, y)
+
+     return {
+         "score_original": float(score_1),
+         "score_perturbed": float(score_2),
+         "delta": float(abs(score_1 - score_2))
+     }
@@ -0,0 +1,64 @@
+ Metadata-Version: 2.4
+ Name: ai-critic
+ Version: 0.1.0
+ Summary: Fast AI evaluator for scikit-learn models
+ Author-email: Luiz Seabra <seu-email@exemplo.com>
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: scikit-learn
+
+ # AI Critic 🧠⚖️
+
+ **AI Critic** is a fast evaluator for scikit-learn models.
+ It analyzes model configuration, robustness, data quality, and performance in minutes.
+
+ ## Install
+
+ ```bash
+ pip install ai-critic
+ ```
+
+ ## Quick Example
+
+ ```python
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.ensemble import RandomForestClassifier
+ from ai_critic import AICritic
+
+ X, y = load_breast_cancer(return_X_y=True)
+
+ model = RandomForestClassifier(n_estimators=50, max_depth=3)
+
+ critic = AICritic(model, X, y)
+ report = critic.evaluate()
+
+ print(report)
+ ```
+
+ ## What it evaluates
+
+ * Model configuration sanity
+ * Data consistency
+ * Robustness to noise
+ * Basic performance metrics
+
+ ## Philosophy
+
+ Fast, modular, and brutally honest AI evaluation.
+
+ 📌 A simple README means more trust.
+
+ ---
+
+ ## Development & Testing
+
+ To test the package locally as an end user (required during development):
+
+ From the project root:
+
+ ```bash
+ pip install -e .
+ python -c "from ai_critic import AICritic; print('OK')"
+ python -m pytest
+ ```
@@ -0,0 +1,16 @@
+ README.md
+ pyproject.toml
+ ai_critic/__init__.py
+ ai_critic/critic.py
+ ai_critic.egg-info/PKG-INFO
+ ai_critic.egg-info/SOURCES.txt
+ ai_critic.egg-info/dependency_links.txt
+ ai_critic.egg-info/requires.txt
+ ai_critic.egg-info/top_level.txt
+ ai_critic/evaluators/__init__.py
+ ai_critic/evaluators/config.py
+ ai_critic/evaluators/data.py
+ ai_critic/evaluators/performance.py
+ ai_critic/evaluators/robustness.py
+ test/test_in_ia.py
+ test/test_model.py
@@ -0,0 +1,2 @@
+ numpy
+ scikit-learn
@@ -0,0 +1 @@
+ ai_critic
@@ -0,0 +1,19 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "ai-critic"
+ version = "0.1.0"
+ description = "Fast AI evaluator for scikit-learn models"
+ readme = "README.md"
+ authors = [
+     { name="Luiz Seabra", email="seu-email@exemplo.com" }
+ ]
+ license = { file = "LICENSE" }
+ requires-python = ">=3.9"
+
+ dependencies = [
+     "numpy",
+     "scikit-learn"
+ ]
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
@@ -0,0 +1,16 @@
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.ensemble import RandomForestClassifier
+ from ai_critic import AICritic
+
+ X, y = load_breast_cancer(return_X_y=True)
+
+ model = RandomForestClassifier(
+     n_estimators=50,
+     max_depth=3,
+     random_state=42
+ )
+
+ critic = AICritic(model, X, y)
+ report = critic.evaluate()
+
+ print(report)
@@ -0,0 +1,17 @@
+ from sklearn.datasets import load_iris
+ from sklearn.linear_model import LogisticRegression
+
+ from ai_critic import AICritic
+
+
+ def test_ai_critic_runs():
+     X, y = load_iris(return_X_y=True)
+
+     model = LogisticRegression(max_iter=200)
+
+     critic = AICritic(model, X, y)
+     report = critic.evaluate()
+
+     assert "performance" in report
+     assert "robustness" in report
+     assert report["performance"]["cv_mean_score"] > 0.5