exact-ai 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- exact_ai-0.1.0/LICENSE +21 -0
- exact_ai-0.1.0/MANIFEST.in +3 -0
- exact_ai-0.1.0/PKG-INFO +225 -0
- exact_ai-0.1.0/README.md +178 -0
- exact_ai-0.1.0/pyproject.toml +49 -0
- exact_ai-0.1.0/setup.cfg +4 -0
- exact_ai-0.1.0/src/EXACT/__init__.py +0 -0
- exact_ai-0.1.0/src/EXACT/comparators/__init__.py +1 -0
- exact_ai-0.1.0/src/EXACT/comparators/heatmap_comp.py +820 -0
- exact_ai-0.1.0/src/EXACT/comparators/text_comp.py +862 -0
- exact_ai-0.1.0/src/EXACT/evaluators/__init__.py +5 -0
- exact_ai-0.1.0/src/EXACT/evaluators/base_evaluator.py +170 -0
- exact_ai-0.1.0/src/EXACT/evaluators/faithfulness_eval.py +311 -0
- exact_ai-0.1.0/src/EXACT/evaluators/localization_eval.py +294 -0
- exact_ai-0.1.0/src/EXACT/evaluators/sharpness_eval.py +251 -0
- exact_ai-0.1.0/src/EXACT/evaluators/stability_eval.py +304 -0
- exact_ai-0.1.0/src/EXACT/explainers/__init__.py +5 -0
- exact_ai-0.1.0/src/EXACT/explainers/dff.py +214 -0
- exact_ai-0.1.0/src/EXACT/explainers/gradcam.py +243 -0
- exact_ai-0.1.0/src/EXACT/explainers/ig_image_explainer.py +465 -0
- exact_ai-0.1.0/src/EXACT/explainers/ig_tabular_explainer.py +1492 -0
- exact_ai-0.1.0/src/EXACT/explainers/lime_image_explainer.py +347 -0
- exact_ai-0.1.0/src/EXACT/explainers/lime_tabular_explainer.py +185 -0
- exact_ai-0.1.0/src/EXACT/explainers/lime_text_explainer.py +160 -0
- exact_ai-0.1.0/src/EXACT/explainers/loo_text_explainer.py +897 -0
- exact_ai-0.1.0/src/EXACT/explainers/lrp_explainer.py +1164 -0
- exact_ai-0.1.0/src/EXACT/explainers/lrp_tabular.py +509 -0
- exact_ai-0.1.0/src/EXACT/explainers/saliency_map_explainer.py +383 -0
- exact_ai-0.1.0/src/EXACT/explainers/shap_image_explainer.py +551 -0
- exact_ai-0.1.0/src/EXACT/explainers/shap_tabular_explainer.py +695 -0
- exact_ai-0.1.0/src/EXACT/explainers/shap_text_explainer.py +724 -0
- exact_ai-0.1.0/src/EXACT/explainers/vit_gradcam.py +202 -0
- exact_ai-0.1.0/src/EXACT/utils/CNNutils.py +41 -0
- exact_ai-0.1.0/src/EXACT/utils/__init__.py +3 -0
- exact_ai-0.1.0/src/EXACT/utils/predict_proba_fn.py +77 -0
- exact_ai-0.1.0/src/exact_ai.egg-info/PKG-INFO +225 -0
- exact_ai-0.1.0/src/exact_ai.egg-info/SOURCES.txt +45 -0
- exact_ai-0.1.0/src/exact_ai.egg-info/dependency_links.txt +1 -0
- exact_ai-0.1.0/src/exact_ai.egg-info/requires.txt +8 -0
- exact_ai-0.1.0/src/exact_ai.egg-info/top_level.txt +1 -0
- exact_ai-0.1.0/tests/test_dff.py +55 -0
- exact_ai-0.1.0/tests/test_gradcam.py +56 -0
- exact_ai-0.1.0/tests/test_ig_tabular.py +668 -0
- exact_ai-0.1.0/tests/test_shap_image.py +158 -0
- exact_ai-0.1.0/tests/test_shap_image2.py +182 -0
- exact_ai-0.1.0/tests/test_shap_tabular.py +210 -0
- exact_ai-0.1.0/tests/test_shap_text.py +291 -0
exact_ai-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 GurenMashu
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
exact_ai-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: exact-ai
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Comprehensive Library for explainable AI (XAI) methods.
|
|
5
|
+
Author-email: Glenn Mathews <glennmathews18@gmail.com>, Harishankar S M <harishankarsm2004@gmail.com>, Afzina Sadiq <afzinasadiq180@gmail.com>, Nandana Murali <nandanalmurali2003@gmail.com>
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2025 GurenMashu
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
|
|
28
|
+
Project-URL: Homepage, https://github.com/GurenMashu/E.X.A.C.T-by-GHAN
|
|
29
|
+
Project-URL: Source, https://github.com/GurenMashu/E.X.A.C.T-by-GHAN
|
|
30
|
+
Keywords: xai,explainability,gradcam,lime,shap,ml,ai
|
|
31
|
+
Classifier: Programming Language :: Python :: 3
|
|
32
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
33
|
+
Classifier: Operating System :: OS Independent
|
|
34
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
35
|
+
Requires-Python: >=3.8
|
|
36
|
+
Description-Content-Type: text/markdown
|
|
37
|
+
License-File: LICENSE
|
|
38
|
+
Requires-Dist: torch
|
|
39
|
+
Requires-Dist: torchvision
|
|
40
|
+
Requires-Dist: numpy
|
|
41
|
+
Requires-Dist: matplotlib
|
|
42
|
+
Requires-Dist: pillow
|
|
43
|
+
Requires-Dist: grad-cam
|
|
44
|
+
Requires-Dist: lime
|
|
45
|
+
Requires-Dist: shap
|
|
46
|
+
Dynamic: license-file
|
|
47
|
+
|
|
48
|
+
# EXACT
|
|
49
|
+
A plug-and-play XAI library for pytorch models.
|
|
50
|
+
Meant for beginners and anyone interested in venturing into the XAI space, this package enables users to use any of the supported explainability methods with very few lines of code.
|
|
51
|
+
|
|
52
|
+
## Key Functionalities
|
|
53
|
+
- Plug-and-play with the user's trained models and input data.
|
|
54
|
+
- Specialized evaluators to evaluate quality of generated explainability results.
|
|
55
|
+
- Specialized comparators to compare between multiple XAI methods to find the best one for your needs.
|
|
56
|
+
- All results visualized cleanly and saved locally.
|
|
57
|
+
|
|
58
|
+
## Setup
|
|
59
|
+
As of now you may clone the repo to use EXACT as PyPI deployment will be done later.
|
|
60
|
+
```bash
|
|
61
|
+
git clone "https...."
|
|
62
|
+
cd E.X.A.C.T-BY-GHAN
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
Install the dependencies
|
|
66
|
+
```bash
|
|
67
|
+
python -m venv your_env
|
|
68
|
+
source path_to_your_env/bin/activate #for linux
|
|
69
|
+
pip install -r requirements.txt
|
|
70
|
+
pip install -e .  # to build the EXACT package.
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Usage
|
|
74
|
+
All implemented explainers have detailed instruction docstrings and are structured similarly for ease of use.
|
|
75
|
+
```python
|
|
76
|
+
from EXACT.explainers import SaliencyMap
|
|
77
|
+
explainer = SaliencyMap(model = your_trained_model)
|
|
78
|
+
result = explainer.explain(input_tensor, method = "guided", save_png = True)
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
Evaluator usage example
|
|
82
|
+
```python
|
|
83
|
+
from EXACT.explainers import GradCAM
|
|
84
|
+
from EXACT.evaluators import SharpnessEvaluator
|
|
85
|
+
|
|
86
|
+
explainer = GradCAM(model = your_trained_model)
|
|
87
|
+
result = explainer.explain(input_tensor, input_image, method = "xgradcam")
|
|
88
|
+
|
|
89
|
+
sharp_ev = SharpnessEvaluator()
|
|
90
|
+
sharp_result = sharp_ev.evaluate(explainer_result = result)
|
|
91
|
+
sharp_ev.report(sharp_result)
|
|
92
|
+
sharp_ev.plot(sharp_result, save_png=True)
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
Comparator usage example
|
|
96
|
+
```python
|
|
97
|
+
from EXACT.explainers import GradCAM, IGImageExplainer
|
|
98
|
+
from EXACT.comparators import HeatmapComparator
|
|
99
|
+
|
|
100
|
+
explainer = GradCAM(your_trained_model)
|
|
101
|
+
ig_explainer = IGImageExplainer(model = your_trained_model)
|
|
102
|
+
|
|
103
|
+
gradcam_result = explainer.explain(input_tensor, input_image, method="gradcam", save_png=True)
|
|
104
|
+
gradcampp_result = explainer.explain(input_tensor, input_image, method="gradcam++", save_png=True)
|
|
105
|
+
ig_result = ig_explainer.explain(input_tensor, input_image, save_png = True)
|
|
106
|
+
|
|
107
|
+
cmp = HeatmapComparator(model = your_trained_model, device = device, stability_enabled = False)
|
|
108
|
+
results = cmp.compare(
|
|
109
|
+
entries = {
|
|
110
|
+
"GradCAM": (gradcam_result, explainer, {"method": "gradcam"}),
|
|
111
|
+
"GradCAM++": (gradcampp_result, explainer, {"method": "gradcam++"}),
|
|
112
|
+
"IG": (ig_result, ig_explainer, {})
|
|
113
|
+
},
|
|
114
|
+
input_tensor,
|
|
115
|
+
input_image
|
|
116
|
+
)
|
|
117
|
+
cmp.report(results)
|
|
118
|
+
cmp.plot(results, save_png = True)
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
# System overview
|
|
122
|
+
## Architecture
|
|
123
|
+
```mermaid
|
|
124
|
+
%%{init: {'theme': 'base', 'themeVariables': {
|
|
125
|
+
'primaryColor': '#1e3a5f',
|
|
126
|
+
'primaryTextColor': '#e0e0e0',
|
|
127
|
+
'primaryBorderColor': '#4a9eff',
|
|
128
|
+
'lineColor': '#4a9eff',
|
|
129
|
+
'secondaryColor': '#2d4a2d',
|
|
130
|
+
'tertiaryColor': '#3d3d3d',
|
|
131
|
+
'fontFamily': 'Segoe UI, Arial, sans-serif',
|
|
132
|
+
'nodeBorder': '#4a9eff',
|
|
133
|
+
'clusterBkg': '#1a1a2e',
|
|
134
|
+
'clusterBorder': '#4a9eff',
|
|
135
|
+
'titleColor': '#ffffff',
|
|
136
|
+
'edgeLabelBackground': '#16213e'
|
|
137
|
+
}}}%%
|
|
138
|
+
|
|
139
|
+
flowchart TB
|
|
140
|
+
%% Input
|
|
141
|
+
A["User Model + Input Data"]:::inputClass
|
|
142
|
+
|
|
143
|
+
%% Decision Block
|
|
144
|
+
B{"Preprocessing<br/>Required?"}:::decisionClass
|
|
145
|
+
|
|
146
|
+
%% Preprocessing
|
|
147
|
+
C["Data Preprocessing<br/>"]:::processClass
|
|
148
|
+
|
|
149
|
+
%% Explainers
|
|
150
|
+
subgraph EXPLAINERS["EXPLAINERS MODULE"]
|
|
151
|
+
direction TB
|
|
152
|
+
E_SEL["Select XAI Explainer"]:::selectClass
|
|
153
|
+
E_PARAMS["Execute XAI method"]:::subClass
|
|
154
|
+
E_RES["Explanation<br/>Results"]:::resultClass
|
|
155
|
+
end
|
|
156
|
+
|
|
157
|
+
%% Comparator
|
|
158
|
+
subgraph COMPARATOR["COMPARATORS MODULE"]
|
|
159
|
+
direction TB
|
|
160
|
+
C_SEL["Compare Multiple<br/>Explainers"]:::selectClass
|
|
161
|
+
C_COMPAT["Compatibility<br/>Ensurer"]:::subClass
|
|
162
|
+
C_METHODS["Comparison<br/>Methods"]:::subClass
|
|
163
|
+
C_RES["Comparison<br/>Results"]:::resultClass
|
|
164
|
+
end
|
|
165
|
+
|
|
166
|
+
%% Evaluators
|
|
167
|
+
subgraph EVALUATORS["EVALUATORS MODULE"]
|
|
168
|
+
direction TB
|
|
169
|
+
V_SEL["Select Compatible<br/>Evaluator"]:::selectClass
|
|
170
|
+
V_COMPAT["Compatibility<br/>Ensurer"]:::subClass
|
|
171
|
+
V_RES["Evaluation<br/>Results"]:::resultClass
|
|
172
|
+
end
|
|
173
|
+
|
|
174
|
+
%% Output
|
|
175
|
+
subgraph OUTPUT["OUTPUT LAYER"]
|
|
176
|
+
direction LR
|
|
177
|
+
OUT1["Visualize<br/>& Save"]:::outputClass
|
|
178
|
+
OUT2["Dashboard/Report<br/>Visualize & Save"]:::outputClass
|
|
179
|
+
OUT3["Report<br/>Visualize & Save"]:::outputClass
|
|
180
|
+
end
|
|
181
|
+
|
|
182
|
+
%% Flows
|
|
183
|
+
A --> B
|
|
184
|
+
B -->|Yes| C
|
|
185
|
+
B -->|No| E_SEL
|
|
186
|
+
C --> E_SEL
|
|
187
|
+
|
|
188
|
+
%% Explainers Module Flow
|
|
189
|
+
E_SEL --> E_PARAMS
|
|
190
|
+
E_PARAMS --> E_RES
|
|
191
|
+
|
|
192
|
+
%% Workflow A: Single Explainer
|
|
193
|
+
E_RES --> OUT1
|
|
194
|
+
|
|
195
|
+
%% Workflow B: Multiple Explainers + Comparator
|
|
196
|
+
E_RES --> C_SEL
|
|
197
|
+
C_SEL --> C_COMPAT
|
|
198
|
+
C_COMPAT --> C_METHODS
|
|
199
|
+
C_METHODS --> C_RES
|
|
200
|
+
C_RES --> OUT2
|
|
201
|
+
|
|
202
|
+
%% Workflow C: Explainer + Evaluator
|
|
203
|
+
E_RES --> V_SEL
|
|
204
|
+
V_SEL --> V_COMPAT
|
|
205
|
+
V_COMPAT --> V_RES
|
|
206
|
+
V_RES --> OUT3
|
|
207
|
+
|
|
208
|
+
%% Styling
|
|
209
|
+
classDef inputClass fill:#1e3a5f,stroke:#4a9eff,stroke-width:2px,color:#ffffff
|
|
210
|
+
classDef decisionClass fill:#5c3a1e,stroke:#f59e0b,stroke-width:2px,color:#ffffff
|
|
211
|
+
classDef processClass fill:#2d5016,stroke:#7ec850,stroke-width:2px,color:#ffffff
|
|
212
|
+
classDef selectClass fill:#2d4a5f,stroke:#60a5fa,stroke-width:2px,color:#ffffff
|
|
213
|
+
classDef subClass fill:#1a2f3a,stroke:#4a9eff,stroke-width:1px,color:#e0e0e0
|
|
214
|
+
classDef resultClass fill:#1a4a4a,stroke:#2dd4bf,stroke-width:2px,color:#ffffff
|
|
215
|
+
classDef outputClass fill:#4a1a4a,stroke:#c084fc,stroke-width:2px,color:#ffffff
|
|
216
|
+
|
|
217
|
+
style EXPLAINERS fill:#0f172a,stroke:#4ade80,stroke-width:2px
|
|
218
|
+
style COMPARATOR fill:#0f172a,stroke:#fbbf24,stroke-width:2px
|
|
219
|
+
style EVALUATORS fill:#0f172a,stroke:#c084fc,stroke-width:2px
|
|
220
|
+
style OUTPUT fill:#0f172a,stroke:#c084fc,stroke-width:2px
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
## Contribution
|
|
225
|
+
Accepting contributions
|
exact_ai-0.1.0/README.md
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
# EXACT
|
|
2
|
+
A plug-and-play XAI library for pytorch models.
|
|
3
|
+
Meant for beginners and anyone interested in venturing into the XAI space, this package enables users to use any of the supported explainability methods with very few lines of code.
|
|
4
|
+
|
|
5
|
+
## Key Functionalities
|
|
6
|
+
- Plug-and-play with the user's trained models and input data.
|
|
7
|
+
- Specialized evaluators to evaluate quality of generated explainability results.
|
|
8
|
+
- Specialized comparators to compare between multiple XAI methods to find the best one for your needs.
|
|
9
|
+
- All results visualized cleanly and saved locally.
|
|
10
|
+
|
|
11
|
+
## Setup
|
|
12
|
+
As of now you may clone the repo to use EXACT as PyPI deployment will be done later.
|
|
13
|
+
```bash
|
|
14
|
+
git clone "https...."
|
|
15
|
+
cd E.X.A.C.T-BY-GHAN
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
Install the dependencies
|
|
19
|
+
```bash
|
|
20
|
+
python -m venv your_env
|
|
21
|
+
source path_to_your_env/bin/activate #for linux
|
|
22
|
+
pip install -r requirements.txt
|
|
23
|
+
pip install -e .  # to build the EXACT package.
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Usage
|
|
27
|
+
All implemented explainers have detailed instruction docstrings and are structured similarly for ease of use.
|
|
28
|
+
```python
|
|
29
|
+
from EXACT.explainers import SaliencyMap
|
|
30
|
+
explainer = SaliencyMap(model = your_trained_model)
|
|
31
|
+
result = explainer.explain(input_tensor, method = "guided", save_png = True)
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
Evaluator usage example
|
|
35
|
+
```python
|
|
36
|
+
from EXACT.explainers import GradCAM
|
|
37
|
+
from EXACT.evaluators import SharpnessEvaluator
|
|
38
|
+
|
|
39
|
+
explainer = GradCAM(model = your_trained_model)
|
|
40
|
+
result = explainer.explain(input_tensor, input_image, method = "xgradcam")
|
|
41
|
+
|
|
42
|
+
sharp_ev = SharpnessEvaluator()
|
|
43
|
+
sharp_result = sharp_ev.evaluate(explainer_result = result)
|
|
44
|
+
sharp_ev.report(sharp_result)
|
|
45
|
+
sharp_ev.plot(sharp_result, save_png=True)
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
Comparator usage example
|
|
49
|
+
```python
|
|
50
|
+
from EXACT.explainers import GradCAM, IGImageExplainer
|
|
51
|
+
from EXACT.comparators import HeatmapComparator
|
|
52
|
+
|
|
53
|
+
explainer = GradCAM(your_trained_model)
|
|
54
|
+
ig_explainer = IGImageExplainer(model = your_trained_model)
|
|
55
|
+
|
|
56
|
+
gradcam_result = explainer.explain(input_tensor, input_image, method="gradcam", save_png=True)
|
|
57
|
+
gradcampp_result = explainer.explain(input_tensor, input_image, method="gradcam++", save_png=True)
|
|
58
|
+
ig_result = ig_explainer.explain(input_tensor, input_image, save_png = True)
|
|
59
|
+
|
|
60
|
+
cmp = HeatmapComparator(model = your_trained_model, device = device, stability_enabled = False)
|
|
61
|
+
results = cmp.compare(
|
|
62
|
+
entries = {
|
|
63
|
+
"GradCAM": (gradcam_result, explainer, {"method": "gradcam"}),
|
|
64
|
+
"GradCAM++": (gradcampp_result, explainer, {"method": "gradcam++"}),
|
|
65
|
+
"IG": (ig_result, ig_explainer, {})
|
|
66
|
+
},
|
|
67
|
+
input_tensor,
|
|
68
|
+
input_image
|
|
69
|
+
)
|
|
70
|
+
cmp.report(results)
|
|
71
|
+
cmp.plot(results, save_png = True)
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
# System overview
|
|
75
|
+
## Architecture
|
|
76
|
+
```mermaid
|
|
77
|
+
%%{init: {'theme': 'base', 'themeVariables': {
|
|
78
|
+
'primaryColor': '#1e3a5f',
|
|
79
|
+
'primaryTextColor': '#e0e0e0',
|
|
80
|
+
'primaryBorderColor': '#4a9eff',
|
|
81
|
+
'lineColor': '#4a9eff',
|
|
82
|
+
'secondaryColor': '#2d4a2d',
|
|
83
|
+
'tertiaryColor': '#3d3d3d',
|
|
84
|
+
'fontFamily': 'Segoe UI, Arial, sans-serif',
|
|
85
|
+
'nodeBorder': '#4a9eff',
|
|
86
|
+
'clusterBkg': '#1a1a2e',
|
|
87
|
+
'clusterBorder': '#4a9eff',
|
|
88
|
+
'titleColor': '#ffffff',
|
|
89
|
+
'edgeLabelBackground': '#16213e'
|
|
90
|
+
}}}%%
|
|
91
|
+
|
|
92
|
+
flowchart TB
|
|
93
|
+
%% Input
|
|
94
|
+
A["User Model + Input Data"]:::inputClass
|
|
95
|
+
|
|
96
|
+
%% Decision Block
|
|
97
|
+
B{"Preprocessing<br/>Required?"}:::decisionClass
|
|
98
|
+
|
|
99
|
+
%% Preprocessing
|
|
100
|
+
C["Data Preprocessing<br/>"]:::processClass
|
|
101
|
+
|
|
102
|
+
%% Explainers
|
|
103
|
+
subgraph EXPLAINERS["EXPLAINERS MODULE"]
|
|
104
|
+
direction TB
|
|
105
|
+
E_SEL["Select XAI Explainer"]:::selectClass
|
|
106
|
+
E_PARAMS["Execute XAI method"]:::subClass
|
|
107
|
+
E_RES["Explanation<br/>Results"]:::resultClass
|
|
108
|
+
end
|
|
109
|
+
|
|
110
|
+
%% Comparator
|
|
111
|
+
subgraph COMPARATOR["COMPARATORS MODULE"]
|
|
112
|
+
direction TB
|
|
113
|
+
C_SEL["Compare Multiple<br/>Explainers"]:::selectClass
|
|
114
|
+
C_COMPAT["Compatibility<br/>Ensurer"]:::subClass
|
|
115
|
+
C_METHODS["Comparison<br/>Methods"]:::subClass
|
|
116
|
+
C_RES["Comparison<br/>Results"]:::resultClass
|
|
117
|
+
end
|
|
118
|
+
|
|
119
|
+
%% Evaluators
|
|
120
|
+
subgraph EVALUATORS["EVALUATORS MODULE"]
|
|
121
|
+
direction TB
|
|
122
|
+
V_SEL["Select Compatible<br/>Evaluator"]:::selectClass
|
|
123
|
+
V_COMPAT["Compatibility<br/>Ensurer"]:::subClass
|
|
124
|
+
V_RES["Evaluation<br/>Results"]:::resultClass
|
|
125
|
+
end
|
|
126
|
+
|
|
127
|
+
%% Output
|
|
128
|
+
subgraph OUTPUT["OUTPUT LAYER"]
|
|
129
|
+
direction LR
|
|
130
|
+
OUT1["Visualize<br/>& Save"]:::outputClass
|
|
131
|
+
OUT2["Dashboard/Report<br/>Visualize & Save"]:::outputClass
|
|
132
|
+
OUT3["Report<br/>Visualize & Save"]:::outputClass
|
|
133
|
+
end
|
|
134
|
+
|
|
135
|
+
%% Flows
|
|
136
|
+
A --> B
|
|
137
|
+
B -->|Yes| C
|
|
138
|
+
B -->|No| E_SEL
|
|
139
|
+
C --> E_SEL
|
|
140
|
+
|
|
141
|
+
%% Explainers Module Flow
|
|
142
|
+
E_SEL --> E_PARAMS
|
|
143
|
+
E_PARAMS --> E_RES
|
|
144
|
+
|
|
145
|
+
%% Workflow A: Single Explainer
|
|
146
|
+
E_RES --> OUT1
|
|
147
|
+
|
|
148
|
+
%% Workflow B: Multiple Explainers + Comparator
|
|
149
|
+
E_RES --> C_SEL
|
|
150
|
+
C_SEL --> C_COMPAT
|
|
151
|
+
C_COMPAT --> C_METHODS
|
|
152
|
+
C_METHODS --> C_RES
|
|
153
|
+
C_RES --> OUT2
|
|
154
|
+
|
|
155
|
+
%% Workflow C: Explainer + Evaluator
|
|
156
|
+
E_RES --> V_SEL
|
|
157
|
+
V_SEL --> V_COMPAT
|
|
158
|
+
V_COMPAT --> V_RES
|
|
159
|
+
V_RES --> OUT3
|
|
160
|
+
|
|
161
|
+
%% Styling
|
|
162
|
+
classDef inputClass fill:#1e3a5f,stroke:#4a9eff,stroke-width:2px,color:#ffffff
|
|
163
|
+
classDef decisionClass fill:#5c3a1e,stroke:#f59e0b,stroke-width:2px,color:#ffffff
|
|
164
|
+
classDef processClass fill:#2d5016,stroke:#7ec850,stroke-width:2px,color:#ffffff
|
|
165
|
+
classDef selectClass fill:#2d4a5f,stroke:#60a5fa,stroke-width:2px,color:#ffffff
|
|
166
|
+
classDef subClass fill:#1a2f3a,stroke:#4a9eff,stroke-width:1px,color:#e0e0e0
|
|
167
|
+
classDef resultClass fill:#1a4a4a,stroke:#2dd4bf,stroke-width:2px,color:#ffffff
|
|
168
|
+
classDef outputClass fill:#4a1a4a,stroke:#c084fc,stroke-width:2px,color:#ffffff
|
|
169
|
+
|
|
170
|
+
style EXPLAINERS fill:#0f172a,stroke:#4ade80,stroke-width:2px
|
|
171
|
+
style COMPARATOR fill:#0f172a,stroke:#fbbf24,stroke-width:2px
|
|
172
|
+
style EVALUATORS fill:#0f172a,stroke:#c084fc,stroke-width:2px
|
|
173
|
+
style OUTPUT fill:#0f172a,stroke:#c084fc,stroke-width:2px
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
## Contribution
|
|
178
|
+
Accepting contributions
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "exact-ai" # Your package name on PyPI
|
|
7
|
+
version = "0.1.0" # Semantic version
|
|
8
|
+
description = "Comprehensive Library for explainable AI (XAI) methods."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = { file = "LICENSE" }
|
|
11
|
+
authors = [
|
|
12
|
+
{ name = "Glenn Mathews", email = "glennmathews18@gmail.com" },
|
|
13
|
+
{ name = "Harishankar S M", email = "harishankarsm2004@gmail.com" },
|
|
14
|
+
{ name = "Afzina Sadiq", email = "afzinasadiq180@gmail.com" },
|
|
15
|
+
{ name = "Nandana Murali", email = "nandanalmurali2003@gmail.com" }
|
|
16
|
+
]
|
|
17
|
+
keywords = ["xai", "explainability", "gradcam", "lime", "shap", "ml", "ai"]
|
|
18
|
+
classifiers = [
|
|
19
|
+
"Programming Language :: Python :: 3",
|
|
20
|
+
"License :: OSI Approved :: MIT License",
|
|
21
|
+
"Operating System :: OS Independent",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence"
|
|
23
|
+
]
|
|
24
|
+
requires-python = ">=3.8"
|
|
25
|
+
|
|
26
|
+
# Runtime dependencies (all required)
|
|
27
|
+
dependencies = [
|
|
28
|
+
"torch",
|
|
29
|
+
"torchvision",
|
|
30
|
+
"numpy",
|
|
31
|
+
"matplotlib",
|
|
32
|
+
"pillow",
|
|
33
|
+
"grad-cam",
|
|
34
|
+
"lime",
|
|
35
|
+
"shap"
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
[project.urls]
|
|
39
|
+
Homepage = "https://github.com/GurenMashu/E.X.A.C.T-by-GHAN"
|
|
40
|
+
#Documentation = "https://github.com/yourusername/mlmodels/wiki"
|
|
41
|
+
Source = "https://github.com/GurenMashu/E.X.A.C.T-by-GHAN"
|
|
42
|
+
#Tracker = "https://github.com/yourusername/mlmodels/issues"
|
|
43
|
+
|
|
44
|
+
[tool.setuptools]
|
|
45
|
+
package-dir = {"" = "src"} # tells setuptools where your code lives
|
|
46
|
+
|
|
47
|
+
[tool.setuptools.packages.find]
|
|
48
|
+
where = ["src"]
|
|
49
|
+
exclude = ["tests*"] # don’t ship tests to PyPI
|
exact_ai-0.1.0/setup.cfg
ADDED
|
File without changes
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .heatmap_comp import HeatmapComparator
|