nous 0.1.0.tar.gz → 0.2.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. nous-0.2.0/PKG-INFO +150 -0
  2. nous-0.2.0/README.md +89 -0
  3. nous-0.2.0/nous/__init__.py +103 -0
  4. nous-0.2.0/nous/data/__init__.py +4 -0
  5. nous-0.2.0/nous/data/california.py +32 -0
  6. nous-0.2.0/nous/data/wine.py +29 -0
  7. nous-0.2.0/nous/explain/__init__.py +26 -0
  8. nous-0.2.0/nous/explain/aggregator.py +34 -0
  9. nous-0.2.0/nous/explain/cf.py +137 -0
  10. nous-0.2.0/nous/explain/facts_desc.py +23 -0
  11. nous-0.2.0/nous/explain/fidelity.py +56 -0
  12. nous-0.2.0/nous/explain/generate.py +86 -0
  13. nous-0.2.0/nous/explain/global_book.py +52 -0
  14. nous-0.2.0/nous/explain/loo.py +130 -0
  15. nous-0.2.0/nous/explain/mse.py +93 -0
  16. nous-0.2.0/nous/explain/pruning.py +117 -0
  17. nous-0.2.0/nous/explain/stability.py +42 -0
  18. nous-0.2.0/nous/explain/traces.py +285 -0
  19. nous-0.2.0/nous/explain/utils.py +15 -0
  20. nous-0.2.0/nous/export/__init__.py +13 -0
  21. nous-0.2.0/nous/export/numpy_infer.py +412 -0
  22. nous-0.2.0/nous/facts.py +112 -0
  23. nous-0.2.0/nous/model.py +226 -0
  24. nous-0.2.0/nous/prototypes.py +43 -0
  25. nous-0.2.0/nous/rules/__init__.py +11 -0
  26. nous-0.2.0/nous/rules/blocks.py +63 -0
  27. nous-0.2.0/nous/rules/fixed.py +26 -0
  28. nous-0.2.0/nous/rules/softmax.py +93 -0
  29. nous-0.2.0/nous/rules/sparse.py +142 -0
  30. nous-0.2.0/nous/training/__init__.py +5 -0
  31. nous-0.2.0/nous/training/evaluation.py +57 -0
  32. nous-0.2.0/nous/training/schedulers.py +34 -0
  33. nous-0.2.0/nous/training/train.py +177 -0
  34. nous-0.2.0/nous/types.py +4 -0
  35. nous-0.2.0/nous/utils/__init__.py +3 -0
  36. nous-0.2.0/nous/utils/metrics.py +2 -0
  37. nous-0.2.0/nous/utils/seed.py +13 -0
  38. nous-0.2.0/nous/version.py +1 -0
  39. nous-0.2.0/nous.egg-info/PKG-INFO +150 -0
  40. nous-0.2.0/nous.egg-info/SOURCES.txt +51 -0
  41. nous-0.2.0/nous.egg-info/requires.txt +21 -0
  42. {nous-0.1.0 → nous-0.2.0}/pyproject.toml +27 -11
  43. nous-0.2.0/tests/test_explain_loo.py +16 -0
  44. nous-0.2.0/tests/test_export_numpy.py +24 -0
  45. nous-0.2.0/tests/test_facts.py +16 -0
  46. nous-0.2.0/tests/test_forward_explain.py +22 -0
  47. nous-0.2.0/tests/test_model_forward.py +32 -0
  48. nous-0.2.0/tests/test_prototypes.py +17 -0
  49. nous-0.2.0/tests/test_rules.py +33 -0
  50. nous-0.1.0/PKG-INFO +0 -138
  51. nous-0.1.0/README.md +0 -90
  52. nous-0.1.0/nous/__init__.py +0 -26
  53. nous-0.1.0/nous/causal.py +0 -63
  54. nous-0.1.0/nous/interpret.py +0 -111
  55. nous-0.1.0/nous/layers.py +0 -117
  56. nous-0.1.0/nous/models.py +0 -65
  57. nous-0.1.0/nous.egg-info/PKG-INFO +0 -138
  58. nous-0.1.0/nous.egg-info/SOURCES.txt +0 -16
  59. nous-0.1.0/nous.egg-info/requires.txt +0 -6
  60. nous-0.1.0/tests/test_interpret_causal.py +0 -77
  61. nous-0.1.0/tests/test_layers.py +0 -97
  62. nous-0.1.0/tests/test_models.py +0 -93
  63. {nous-0.1.0 → nous-0.2.0}/LICENSE +0 -0
  64. {nous-0.1.0 → nous-0.2.0}/nous.egg-info/dependency_links.txt +0 -0
  65. {nous-0.1.0 → nous-0.2.0}/nous.egg-info/top_level.txt +0 -0
  66. {nous-0.1.0 → nous-0.2.0}/setup.cfg +0 -0
nous-0.2.0/PKG-INFO ADDED
@@ -0,0 +1,150 @@
+ Metadata-Version: 2.4
+ Name: nous
+ Version: 0.2.0
+ Summary: Nous: A Neuro-Symbolic Library for Interpretable AI
+ Author-email: Islam Tlupov <tlupovislam@gmail.com>
+ License: MIT License
+
+ Copyright (c) 2025 Islam Tlupov
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: Repository, https://github.com/EmotionEngineer/nous
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Typing :: Typed
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: torch>=2.1
+ Requires-Dist: numpy>=1.22
+ Requires-Dist: pandas>=1.5
+ Requires-Dist: scikit-learn>=1.2
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
+ Requires-Dist: mypy>=1.5; extra == "dev"
+ Requires-Dist: ruff>=0.5; extra == "dev"
+ Requires-Dist: black>=23.0; extra == "dev"
+ Requires-Dist: matplotlib>=3.6; extra == "dev"
+ Requires-Dist: seaborn>=0.12; extra == "dev"
+ Requires-Dist: tqdm>=4.65; extra == "dev"
+ Requires-Dist: ucimlrepo>=0.0.5; extra == "dev"
+ Provides-Extra: examples
+ Requires-Dist: matplotlib>=3.6; extra == "examples"
+ Requires-Dist: seaborn>=0.12; extra == "examples"
+ Requires-Dist: tqdm>=4.65; extra == "examples"
+ Requires-Dist: ucimlrepo>=0.0.5; extra == "examples"
+ Dynamic: license-file
+
+ # Nous — A Neuro-Symbolic Library for Interpretable AI
+
+ [![PyPI](https://img.shields.io/pypi/v/nous.svg)](https://pypi.org/project/nous/)
+ [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
+
+ Make tabular models you can read.
+ Nous learns compact logical rules and optional case‑based prototypes inside one differentiable model — so prediction and explanation come from the same place.
+
+ - 🧩 One white‑box → two styles: rules and/or prototypes
+ - 🔀 Learned AND / OR / k‑of‑n mixtures capture interactions without bloat
+ - ✂️ Minimal, faithful stories: pruning + sufficiency/comprehensiveness checks
+ - 🚀 Practical: competitive accuracy, NumPy export, unit‑tested toolkit
+
+ ## Key Features
+
+ - Intrinsic interpretability (not post‑hoc): explanations are part of the forward pass
+ - Switchable style: enable/disable prototypes; choose rule selection (fixed / softmax / sparse); add calibrators
+ - Fidelity diagnostics: pruned‑forward inference, minimal‑sufficient explanations, stability tools
+ - Ready to ship: pure‑NumPy export for inference without PyTorch
+
+ ## Installation
+
+ ```bash
+ # Stable from PyPI
+ pip install nous
+
+ # With example extras (plots, progress, UCI fetchers)
+ pip install "nous[examples]"
+
+ # Dev setup (tests, linters, type checks)
+ pip install "nous[dev]"
+ ```
+
+ Requirements (core):
+ - Python 3.9+
+ - torch>=2.1
+ - numpy>=1.22
+ - pandas>=1.5
+ - scikit-learn>=1.2
+
+ Extras:
+ - examples: matplotlib>=3.6, seaborn>=0.12, tqdm>=4.65, ucimlrepo>=0.0.5
+ - dev: pytest>=7.0, pytest-cov>=4.0, mypy>=1.5, ruff>=0.5, black>=23.0, matplotlib>=3.6, seaborn>=0.12, tqdm>=4.65, ucimlrepo>=0.0.5
+
+ ## Recommended Configurations
+
+ | Profile | Rule selection | Calibrators | Prototypes | Use when | Speed |
+ |---------|----------------|-------------|------------|----------|-------|
+ | Fast baseline | fixed | off | off | quick sweeps, ablations | ⚡⚡⚡ |
+ | Default rules | softmax | on | off | general use, strong accuracy | ⚡⚡ |
+ | Explain‑everything | softmax | on | on | rich case‑based narratives | ⚡ |
+
+ Tips:
+ - Train with prototypes off for speed; enable them only on the final model if you need case‑based stories.
+ - 300 epochs with patience≈50 works well on common tabular datasets.
+
+ ## Bench Snapshot (5‑fold CV, typical)
+
+ | Dataset | Metric | Nous (rules) | Nous (+proto) | EBM | XGBoost |
+ |---------|--------|--------------|---------------|-----|---------|
+ | HELOC (cls) | AUC | ~0.791 | ~0.792 | ~0.799 | ~0.796 |
+ | Adult (cls) | AUC | ~0.913 | ~0.914 | ~0.926 | ~0.929 |
+ | Breast Cancer (cls) | Acc | ~0.975 | ~0.983 | ~0.970 | ~0.965 |
+ | California (reg) | RMSE | ~0.514 | ~0.505 | ~0.562 | ~0.439 |
+
+ Numbers vary with seed/HPO. See examples/benchmark.ipynb for reproducible runs.
+
+ ## What makes Nous different?
+
+ - The explanation is the model: rules and prototypes live in the forward pass
+ - Interactions without clutter: AND/OR/k‑of‑n mixtures keep explanations short
+ - Verified stories: minimal‑sufficient explanations + pruned‑forward confidence checks
+ - Lightweight deployment: NumPy export (no torch at inference)
+
+ ## Repository Layout
+
+ - examples/
+   - benchmark.ipynb — end‑to‑end comparison on classic tabular data
+   - wine_classification.py, california_regression.py — minimal scripts
+   - export_numpy_demo.py — deploy without torch
+ - nous/
+   - model.py (NousNet), facts.py (calibrated L−R facts)
+   - rules/* (fixed/softmax/sparse), explain/* (pruning, fidelity, traces, prototypes)
+   - training/* (loop, schedulers), export/* (NumPy), utils/*
+ - tests/ — unit tests for forward, rules, facts, prototypes, explanations, export
+
+ ## License
+
+ MIT — see LICENSE.
nous-0.2.0/README.md ADDED
@@ -0,0 +1,89 @@
+ # Nous — A Neuro-Symbolic Library for Interpretable AI
+
+ [![PyPI](https://img.shields.io/pypi/v/nous.svg)](https://pypi.org/project/nous/)
+ [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
+
+ Make tabular models you can read.
+ Nous learns compact logical rules and optional case‑based prototypes inside one differentiable model — so prediction and explanation come from the same place.
+
+ - 🧩 One white‑box → two styles: rules and/or prototypes
+ - 🔀 Learned AND / OR / k‑of‑n mixtures capture interactions without bloat
+ - ✂️ Minimal, faithful stories: pruning + sufficiency/comprehensiveness checks
+ - 🚀 Practical: competitive accuracy, NumPy export, unit‑tested toolkit
+
+ ## Key Features
+
+ - Intrinsic interpretability (not post‑hoc): explanations are part of the forward pass
+ - Switchable style: enable/disable prototypes; choose rule selection (fixed / softmax / sparse); add calibrators
+ - Fidelity diagnostics: pruned‑forward inference, minimal‑sufficient explanations, stability tools
+ - Ready to ship: pure‑NumPy export for inference without PyTorch
+
+ ## Installation
+
+ ```bash
+ # Stable from PyPI
+ pip install nous
+
+ # With example extras (plots, progress, UCI fetchers)
+ pip install "nous[examples]"
+
+ # Dev setup (tests, linters, type checks)
+ pip install "nous[dev]"
+ ```
+
+ Requirements (core):
+ - Python 3.9+
+ - torch>=2.1
+ - numpy>=1.22
+ - pandas>=1.5
+ - scikit-learn>=1.2
+
+ Extras:
+ - examples: matplotlib>=3.6, seaborn>=0.12, tqdm>=4.65, ucimlrepo>=0.0.5
+ - dev: pytest>=7.0, pytest-cov>=4.0, mypy>=1.5, ruff>=0.5, black>=23.0, matplotlib>=3.6, seaborn>=0.12, tqdm>=4.65, ucimlrepo>=0.0.5
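Editor's note: a minimal end-to-end sketch may help orient readers. Only the imported names below are taken from the package's public API (`nous/__init__.py` in this diff); the constructor and training arguments are illustrative assumptions, not the documented signatures.

```python
# Hypothetical quick-start. NousNet's and train_model's keyword arguments are
# assumptions for illustration, not the library's documented signatures.
from nous import NousNet, train_model, evaluate_classification
from nous.data import get_wine_data

# The loader's 10-tuple return is documented in nous/data/wine.py below.
(X_train, X_val, X_test, y_train, y_val, y_test,
 feature_names, class_names, task_type, _) = get_wine_data()

model = NousNet(                      # argument names are assumptions
    input_dim=len(feature_names),
    num_classes=len(class_names),
    task_type=task_type,
)
train_model(model, X_train, y_train, X_val, y_val)      # assumed call pattern
print(evaluate_classification(model, X_test, y_test))   # assumed call pattern
```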
+
+ ## Recommended Configurations
+
+ | Profile | Rule selection | Calibrators | Prototypes | Use when | Speed |
+ |---------|----------------|-------------|------------|----------|-------|
+ | Fast baseline | fixed | off | off | quick sweeps, ablations | ⚡⚡⚡ |
+ | Default rules | softmax | on | off | general use, strong accuracy | ⚡⚡ |
+ | Explain‑everything | softmax | on | on | rich case‑based narratives | ⚡ |
+
+ Tips:
+ - Train with prototypes off for speed; enable them only on the final model if you need case‑based stories.
+ - 300 epochs with patience≈50 works well on common tabular datasets.
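Editor's note: once a model is trained, a local explanation can be pulled the same way the counterfactual module does internally. The call pattern below mirrors the `rule_impact_df` invocation inside `nous/explain/cf.py` (shown later in this diff); `model`, `X_test`, `feature_names`, and `class_names` are assumed to come from a setup like the quick-start sketch above.

```python
# Sketch: per-rule leave-one-out impacts for one sample. Keyword arguments
# mirror the call in nous/explain/cf.py; see nous/explain/loo.py for modes.
from nous import rule_impact_df

impacts = rule_impact_df(
    model, X_test[0], feature_names,
    class_names=class_names,
    loo_mode="frozen",
    top_m_rules=10,
)
print(impacts.head())  # a pandas DataFrame; empty if no rules were scored
```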
+
+ ## Bench Snapshot (5‑fold CV, typical)
+
+ | Dataset | Metric | Nous (rules) | Nous (+proto) | EBM | XGBoost |
+ |---------|--------|--------------|---------------|-----|---------|
+ | HELOC (cls) | AUC | ~0.791 | ~0.792 | ~0.799 | ~0.796 |
+ | Adult (cls) | AUC | ~0.913 | ~0.914 | ~0.926 | ~0.929 |
+ | Breast Cancer (cls) | Acc | ~0.975 | ~0.983 | ~0.970 | ~0.965 |
+ | California (reg) | RMSE | ~0.514 | ~0.505 | ~0.562 | ~0.439 |
+
+ Numbers vary with seed/HPO. See examples/benchmark.ipynb for reproducible runs.
+
+ ## What makes Nous different?
+
+ - The explanation is the model: rules and prototypes live in the forward pass
+ - Interactions without clutter: AND/OR/k‑of‑n mixtures keep explanations short
+ - Verified stories: minimal‑sufficient explanations + pruned‑forward confidence checks
+ - Lightweight deployment: NumPy export (no torch at inference)
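Editor's note: a deployment sketch for the last point. Only the function names are real (exported from `nous.export` per `nous/__init__.py`); the argument names and the exact workflow are assumptions.

```python
# Hypothetical export flow; argument names are assumptions.
from nous.export import export_and_validate, load_numpy_module

# Assumed behavior: write a pure-NumPy inference module to disk and check
# its outputs against the PyTorch model on reference data.
export_and_validate(model, X_test, "nous_numpy.py")
numpy_model = load_numpy_module("nous_numpy.py")
```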
+
+ ## Repository Layout
+
+ - examples/
+   - benchmark.ipynb — end‑to‑end comparison on classic tabular data
+   - wine_classification.py, california_regression.py — minimal scripts
+   - export_numpy_demo.py — deploy without torch
+ - nous/
+   - model.py (NousNet), facts.py (calibrated L−R facts)
+   - rules/* (fixed/softmax/sparse), explain/* (pruning, fidelity, traces, prototypes)
+   - training/* (loop, schedulers), export/* (NumPy), utils/*
+ - tests/ — unit tests for forward, rules, facts, prototypes, explanations, export
+
+ ## License
+
+ MIT — see LICENSE.
nous-0.2.0/nous/__init__.py ADDED
@@ -0,0 +1,103 @@
+ from .version import __version__
+ from .model import NousNet
+ from .facts import BetaFactLayer, PiecewiseLinearCalibrator
+ from .prototypes import ScaledPrototypeLayer
+ from .rules import FixedPairRuleLayer, SoftmaxRuleLayer, SparseRuleLayer, SimpleNousBlock
+
+ # Explainability (core API)
+ from .explain import (
+     rule_impact_df,
+     minimal_sufficient_explanation,
+     select_pruning_threshold_global,
+     select_pruning_threshold_global_bs,
+     global_rulebook,
+     generate_enhanced_explanation,
+     explanation_fidelity_metrics,
+     explanation_stability,
+     aggregator_mixture_report,
+     suggest_rule_counterfactuals,
+     render_fact_descriptions,
+     AGG_NAMES,
+ )
+ from .explain.aggregator import format_agg_mixture
+
+ # Prototype tracing utilities
+ from .explain.traces import (
+     describe_prototype,
+     prototype_report_global,
+     prototype_contribution_df,
+     prototype_top_rules,
+     trace_rule_to_base_facts,
+     get_last_block_static_metadata,
+ )
+
+ # Export utilities
+ from .export import (
+     export_numpy_inference,
+     validate_numpy_vs_torch,
+     export_and_validate,
+     load_numpy_module,
+ )
+
+ # Training and evaluation
+ from .training import (
+     train_model,
+     evaluate_classification,
+     evaluate_regression,
+     make_sparse_regression_hook,
+ )
+
+ # Dataset helpers (used in examples)
+ from .data import get_wine_data, get_california_housing_data
+
+ # Utilities
+ from .utils import set_global_seed
+
+ __all__ = [
+     "__version__",
+     # Core model and components
+     "NousNet",
+     "BetaFactLayer",
+     "PiecewiseLinearCalibrator",
+     "ScaledPrototypeLayer",
+     "FixedPairRuleLayer",
+     "SoftmaxRuleLayer",
+     "SparseRuleLayer",
+     "SimpleNousBlock",
+     # Explainability (core)
+     "rule_impact_df",
+     "minimal_sufficient_explanation",
+     "select_pruning_threshold_global",
+     "select_pruning_threshold_global_bs",
+     "global_rulebook",
+     "generate_enhanced_explanation",
+     "explanation_fidelity_metrics",
+     "explanation_stability",
+     "aggregator_mixture_report",
+     "suggest_rule_counterfactuals",
+     "render_fact_descriptions",
+     "AGG_NAMES",
+     "format_agg_mixture",
+     # Prototype tracing utilities
+     "describe_prototype",
+     "prototype_report_global",
+     "prototype_contribution_df",
+     "prototype_top_rules",
+     "trace_rule_to_base_facts",
+     "get_last_block_static_metadata",
+     # Export utilities
+     "export_numpy_inference",
+     "validate_numpy_vs_torch",
+     "export_and_validate",
+     "load_numpy_module",
+     # Training and evaluation
+     "train_model",
+     "evaluate_classification",
+     "evaluate_regression",
+     "make_sparse_regression_hook",
+     # Dataset helpers
+     "get_wine_data",
+     "get_california_housing_data",
+     # Utilities
+     "set_global_seed",
+ ]
nous-0.2.0/nous/data/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .wine import get_wine_data
+ from .california import get_california_housing_data
+
+ __all__ = ["get_wine_data", "get_california_housing_data"]
nous-0.2.0/nous/data/california.py ADDED
@@ -0,0 +1,32 @@
+ from __future__ import annotations
+ import numpy as np
+ from typing import Tuple, List
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.datasets import fetch_california_housing
+
+ def get_california_housing_data(scale_y: bool = True):
+     """
+     Load California Housing and return standardized X and (optionally) standardized y.
+     Returns
+     -------
+     X_train, X_val, X_test, y_train, y_val, y_test, feature_names, class_names, task_type, y_scaler
+     """
+     data = fetch_california_housing()
+     X, y = data.data, data.target
+     feature_names = data.feature_names
+
+     X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+     x_scaler = StandardScaler()
+     X_train_full = x_scaler.fit_transform(X_train_full)
+     X_test = x_scaler.transform(X_test)
+
+     X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size=0.2, random_state=42)
+
+     y_scaler = None
+     if scale_y:
+         y_scaler = StandardScaler()
+         y_train = y_scaler.fit_transform(y_train.reshape(-1,1)).ravel()
+         y_val = y_scaler.transform(y_val.reshape(-1,1)).ravel()
+     return X_train, X_val, X_test, y_train, y_val, y_test, feature_names, None, "regression", y_scaler
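Editor's note: a usage sketch for the loader above; the 10-tuple unpacking follows its docstring.

```python
# Unpack exactly as documented in the docstring above.
from nous.data import get_california_housing_data

(X_train, X_val, X_test, y_train, y_val, y_test,
 feature_names, class_names, task_type, y_scaler) = get_california_housing_data()

assert task_type == "regression" and class_names is None
# With scale_y=True (the default), map predictions back to original units
# via y_scaler.inverse_transform(preds.reshape(-1, 1)).
```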
nous-0.2.0/nous/data/wine.py ADDED
@@ -0,0 +1,29 @@
+ from __future__ import annotations
+ import numpy as np
+ from typing import Tuple, List
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import StandardScaler, LabelEncoder
+
+ def get_wine_data():
+     """
+     Load Wine dataset via ucimlrepo and return standardized splits.
+     Returns
+     -------
+     X_train, X_val, X_test, y_train, y_val, y_test, feature_names, class_names, task_type, y_scaler
+     """
+     from ucimlrepo import fetch_ucirepo
+     wine = fetch_ucirepo(id=109)
+     X, y_df = wine.data.features, wine.data.targets
+     feature_names = X.columns.tolist()
+     y = LabelEncoder().fit_transform(y_df.values.ravel())
+     class_names = [f"Class_{i+1}" for i in range(len(np.unique(y)))]
+     X_train_full, X_test, y_train_full, y_test = train_test_split(
+         X, y, test_size=0.2, random_state=42, stratify=y
+     )
+     preprocessor = StandardScaler()
+     X_train_full = preprocessor.fit_transform(X_train_full)
+     X_test = preprocessor.transform(X_test)
+     X_train, X_val, y_train, y_val = train_test_split(
+         X_train_full, y_train_full, test_size=0.2, random_state=42, stratify=y_train_full
+     )
+     return X_train, X_val, X_test, y_train, y_val, y_test, feature_names, class_names, "classification", None
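Editor's note: the classification counterpart follows the same 10-tuple contract. Note the `ucimlrepo` import is deferred into the function body, so the `examples` extra is only required at call time.

```python
# Same 10-tuple contract; class_names is populated and y_scaler is None.
from nous.data import get_wine_data

(X_train, X_val, X_test, y_train, y_val, y_test,
 feature_names, class_names, task_type, y_scaler) = get_wine_data()

assert task_type == "classification" and y_scaler is None
print(class_names)  # ['Class_1', 'Class_2', 'Class_3'] for the 3-class Wine data
```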
nous-0.2.0/nous/explain/__init__.py ADDED
@@ -0,0 +1,26 @@
+ from .aggregator import AGG_NAMES, aggregator_mixture_report, format_agg_mixture
+ from .facts_desc import render_fact_descriptions
+ from .loo import rule_impact_df
+ from .mse import minimal_sufficient_explanation
+ from .pruning import select_pruning_threshold_global, select_pruning_threshold_global_bs
+ from .global_book import global_rulebook
+ from .generate import generate_enhanced_explanation
+ from .fidelity import explanation_fidelity_metrics
+ from .stability import explanation_stability
+ from .cf import suggest_rule_counterfactuals
+
+ __all__ = [
+     "AGG_NAMES",
+     "aggregator_mixture_report",
+     "format_agg_mixture",
+     "render_fact_descriptions",
+     "rule_impact_df",
+     "minimal_sufficient_explanation",
+     "select_pruning_threshold_global",
+     "select_pruning_threshold_global_bs",
+     "global_rulebook",
+     "generate_enhanced_explanation",
+     "explanation_fidelity_metrics",
+     "explanation_stability",
+     "suggest_rule_counterfactuals",
+ ]
nous-0.2.0/nous/explain/aggregator.py ADDED
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+ import numpy as np
+ import pandas as pd
+ from typing import List
+ from ..model import NousNet
+
+ AGG_NAMES = ['AND', 'OR', 'k-of-n', 'NOT']
+
+ def format_agg_mixture(weights) -> str:
+     parts = []
+     for i in range(weights.shape[0]):
+         w = float(weights[i])
+         if w > 1e-6:
+             parts.append(f"{w:.2f} {AGG_NAMES[i]}")
+     return " + ".join(parts) if parts else "∅"
+
+ def aggregator_mixture_report(model: NousNet, X, max_samples: int = 1000, device=None) -> pd.DataFrame:
+     device = device or next(model.parameters()).device
+     n = min(len(X), max_samples)
+     acc = []
+     for i in range(n):
+         _, _, internals = model.forward_explain(X[i], device=device)
+         for key in [k for k in internals.keys() if k.startswith("block_")]:
+             aw = internals[key]['aggregator_weights']
+             if aw is None:
+                 continue
+             acc.append(aw.cpu().numpy())
+     if not acc:
+         return pd.DataFrame(columns=["AND", "OR", "k-of-n", "NOT", "entropy"])
+     A = np.concatenate(acc, axis=0)
+     mean = A.mean(axis=0)
+     ent = (-A * np.clip(np.log(A + 1e-12), -50, 50)).sum(axis=1).mean()
+     cols = AGG_NAMES[:A.shape[1]]
+     return pd.DataFrame([dict(**{c: float(v) for c, v in zip(cols, mean)}, entropy=float(ent))])
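Editor's note: `format_agg_mixture` is self-contained and easy to check directly; the weights below are made-up values for illustration.

```python
import numpy as np
from nous.explain.aggregator import format_agg_mixture

# One rule's mixture weights over the AND / OR / k-of-n / NOT aggregators.
print(format_agg_mixture(np.array([0.62, 0.30, 0.08, 0.00])))
# -> '0.62 AND + 0.30 OR + 0.08 k-of-n'   (near-zero NOT weight is dropped)
print(format_agg_mixture(np.zeros(4)))    # -> '∅'
```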
nous-0.2.0/nous/explain/cf.py ADDED
@@ -0,0 +1,137 @@
+ from __future__ import annotations
+ import numpy as np
+ import torch
+ from typing import Optional, Sequence, List, Dict, Any
+ from ..model import NousNet
+ from .loo import rule_impact_df
+
+ def suggest_rule_counterfactuals(
+     model: NousNet, x_sample, feature_names: Sequence[str], class_names: Optional[Sequence[str]] = None,
+     target: str = "flip",  # 'flip' (classification), 'margin_drop', 'reg_delta'
+     target_value: Optional[float] = None,  # for margin_drop/reg_delta
+     y_scaler=None,
+     k_rules: int = 3, fact_target_level: float = 0.1, max_features: int = 2,
+     loo_mode: str = 'frozen', top_m_rules: int = 10, use_pre_norm: bool = False,
+     alphas: Sequence[float] = (0.5, 1.0, 1.5, 2.0),
+     device=None
+ ) -> List[Dict[str, Any]]:
+     """
+     Suggest counterfactual input deltas guided by influential rules using β-fact geometry.
+     Verifies suggested deltas by forward_explain.
+     """
+     device = device or next(model.parameters()).device
+     task = model.config['task_type']
+
+     base_probas, base_logits, base_internals = model.forward_explain(x_sample, device=device)
+     if task == "classification":
+         pred_idx = int(np.argmax(base_probas))
+         runner_up = int(np.argsort(base_logits)[-2]) if base_logits.size > 1 else pred_idx
+         base_margin = float(base_logits[pred_idx] - base_logits[runner_up])
+     else:
+         base_pred = float(base_logits[0])
+
+     imp = rule_impact_df(
+         model, x_sample, feature_names, class_names=class_names,
+         loo_mode=loo_mode, top_m_rules=top_m_rules, use_pre_norm=use_pre_norm
+     )
+     if imp.empty:
+         return []
+
+     if task == "classification":
+         margin_col = [c for c in imp.columns if c.startswith("Δmargin(")][0]
+         imp = imp.sort_values(by=margin_col, ascending=False)
+     else:
+         imp = imp.sort_values(by="Δprediction", ascending=False)
+     imp = imp.head(k_rules)
+
+     x = torch.tensor(x_sample, dtype=torch.float32, device=device).unsqueeze(0)
+     if model.calibrators is not None:
+         x_cal = torch.stack([calib(x[:, i]) for i, calib in enumerate(model.calibrators)], dim=1)
+     else:
+         x_cal = x
+     diff, k_vec, nu_vec, net_w = model.fact.compute_diff_and_params(x_cal)  # [1,F], [F], [F], [F,D]
+     facts_act = model.fact(x_cal).squeeze(0)
+
+     suggestions = []
+     for _, row in imp.iterrows():
+         b = int(row["block"]) - 1
+         r = int(row["rule"]) - 1
+         details = base_internals[f'block_{b}']
+         facts_used = details.get("facts_used", None)
+         if isinstance(facts_used, torch.Tensor):
+             facts_used = facts_used.cpu().numpy()
+         if facts_used is None or facts_used.shape[0] <= r:
+             continue
+         used = facts_used[r]
+         used = [int(used)] if np.ndim(used) == 0 else [int(u) for u in used.tolist()]
+
+         used_sorted = sorted(used, key=lambda fid: float(facts_act[fid].item()), reverse=True)[:max(1, min(len(used), 2))]
+
+         deltas: Dict[int, float] = {}
+         for fid in used_sorted:
+             y_now = float(facts_act[fid].item()) + 1e-12
+             kf = float(k_vec[fid].item())
+             nuf = float(nu_vec[fid].item())
+             diff_now = float(diff[0, fid].item())
+             w = net_w[fid].detach().clone()  # [D]
+
+             y_target = float(fact_target_level)
+             # Invert β: diff_target = (logit(y_target^(1/nu))) / k
+             diff_target = float(torch.logit(torch.tensor(y_target, device=device).pow(1.0/max(nuf,1e-6))))
+             diff_target = diff_target / max(kf, 1e-6)
+             delta_diff = diff_target - diff_now
+
+             w_np = w.cpu().numpy()
+             idxs = np.argsort(-np.abs(w_np))[:max_features]
+             w_sel = torch.zeros_like(w)
+             w_sel[idxs] = w[idxs]
+             denom = float(w_sel.pow(2).sum().item())
+             if denom < 1e-12:
+                 continue
+             delta_x_cal = (delta_diff / denom) * w_sel  # minimal L2 shift in x̃
+
+             delta_x = delta_x_cal.clone()
+             if model.calibrators is not None:
+                 for i in idxs:
+                     xi = x[0, i]
+                     slope_i = model.calibrators[i].local_slope(xi)
+                     delta_x[i] = delta_x_cal[i] / slope_i
+
+             for i in idxs:
+                 deltas[i] = deltas.get(i, 0.0) + float(delta_x[i].item())
+
+         if not deltas:
+             continue
+
+         feat_deltas = sorted([(feature_names[i], d) for i, d in deltas.items()], key=lambda t: -abs(t[1]))
+         success = False
+         new_out = None
+         for a in alphas:
+             x_try = x.clone()
+             for i, d in deltas.items():
+                 x_try[0, i] = x_try[0, i] + a * d
+             prob2, logit2, _ = model.forward_explain(x_try.squeeze(0).cpu().numpy(), device=device)
+             if task == "classification":
+                 new_pred = int(np.argmax(prob2))
+                 new_margin = float(logit2[pred_idx] - logit2[runner_up])
+                 if target == "flip" and new_pred != pred_idx:
+                     success, new_out = True, {"pred": new_pred, "margin": new_margin}
+                     break
+                 if target == "margin_drop" and target_value is not None and new_margin <= base_margin - float(target_value):
+                     success, new_out = True, {"pred": new_pred, "margin": new_margin}
+                     break
+             else:
+                 new_pred = float(logit2[0])
+                 if target == "reg_delta" and target_value is not None:
+                     if (new_pred - base_pred) <= float(target_value):
+                         success, new_out = True, {"pred": new_pred}
+                         break
+
+         suggestions.append({
+             "rule": (b+1, r+1),
+             "facts": [f"F{fid+1}" for fid in used_sorted],
+             "deltas": feat_deltas,
+             "verified": success,
+             "new_out": new_out
+         })
+     return suggestions
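Editor's note: a usage sketch. The keyword arguments match the signature above; `model`, `X_test`, `feature_names`, and `class_names` are assumed to come from a trained classification setup like the earlier quick-start.

```python
# Ask for counterfactuals that flip the predicted class of one sample.
from nous import suggest_rule_counterfactuals

cfs = suggest_rule_counterfactuals(
    model, X_test[0], feature_names,
    class_names=class_names,
    target="flip",          # or 'margin_drop' / 'reg_delta' with target_value
    k_rules=3, max_features=2,
)
for cf in cfs:
    # Each entry carries the rule id, the facts it used, per-feature deltas,
    # and whether the suggestion was verified by forward_explain.
    print(cf["rule"], cf["deltas"], "verified:", cf["verified"])
```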
nous-0.2.0/nous/explain/facts_desc.py ADDED
@@ -0,0 +1,23 @@
+ from __future__ import annotations
+ from typing import Dict, Sequence
+ from ..model import NousNet
+
+ def render_fact_descriptions(model: NousNet, feature_names: Sequence[str], top_k_feats: int = 4, eps: float = 0.03) -> Dict[int, str]:
+     """
+     Create human-readable descriptions of base β-facts using (L-R) weights.
+     """
+     L, R, th, k, nu = model.fact.get_rule_parameters()
+     desc = {}
+     for fid in range(L.shape[0]):
+         net = L[fid] - R[fid]
+         pos = [(feature_names[i], net[i]) for i in range(len(net)) if net[i] > eps]
+         neg = [(feature_names[i], -net[i]) for i in range(len(net)) if net[i] < -eps]
+         pos = sorted(pos, key=lambda t: -abs(t[1]))[:top_k_feats]
+         neg = sorted(neg, key=lambda t: -abs(t[1]))[:top_k_feats]
+         pos_str = " + ".join([f"{w:.2f}·{n}" for n, w in pos]) if pos else "0"
+         neg_str = " + ".join([f"{w:.2f}·{n}" for n, w in neg]) if neg else "0"
+         base = f"β( [L−R](x̃) = ({pos_str}) − ({neg_str}) > {th[fid]:.2f}; k={k[fid]:.2f}, ν={nu[fid]:.2f} )"
+         if model.calibrators is not None:
+             base += " where x̃ are calibrated features"
+         desc[fid] = base
+     return desc
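Editor's note: a usage sketch for the function above, assuming a trained `NousNet` and a `feature_names` list; the example output line is illustrative, not real model output.

```python
# Print one β-fact description per line, e.g. (illustrative):
# F1: β( [L−R](x̃) = (0.84·alcohol) − (0.31·hue) > 0.12; k=3.10, ν=1.02 )
from nous import render_fact_descriptions

for fid, text in render_fact_descriptions(model, feature_names).items():
    print(f"F{fid + 1}: {text}")
```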