nous 0.1.0-py3-none-any.whl → 0.2.0-py3-none-any.whl
- nous/__init__.py +96 -19
- nous/data/__init__.py +4 -0
- nous/data/california.py +32 -0
- nous/data/wine.py +29 -0
- nous/explain/__init__.py +26 -0
- nous/explain/aggregator.py +34 -0
- nous/explain/cf.py +137 -0
- nous/explain/facts_desc.py +23 -0
- nous/explain/fidelity.py +56 -0
- nous/explain/generate.py +86 -0
- nous/explain/global_book.py +52 -0
- nous/explain/loo.py +130 -0
- nous/explain/mse.py +93 -0
- nous/explain/pruning.py +117 -0
- nous/explain/stability.py +42 -0
- nous/explain/traces.py +285 -0
- nous/explain/utils.py +15 -0
- nous/export/__init__.py +13 -0
- nous/export/numpy_infer.py +412 -0
- nous/facts.py +112 -0
- nous/model.py +226 -0
- nous/prototypes.py +43 -0
- nous/rules/__init__.py +11 -0
- nous/rules/blocks.py +63 -0
- nous/rules/fixed.py +26 -0
- nous/rules/softmax.py +93 -0
- nous/rules/sparse.py +142 -0
- nous/training/__init__.py +5 -0
- nous/training/evaluation.py +57 -0
- nous/training/schedulers.py +34 -0
- nous/training/train.py +177 -0
- nous/types.py +4 -0
- nous/utils/__init__.py +3 -0
- nous/utils/metrics.py +2 -0
- nous/utils/seed.py +13 -0
- nous/version.py +1 -0
- nous-0.2.0.dist-info/METADATA +150 -0
- nous-0.2.0.dist-info/RECORD +41 -0
- nous/causal.py +0 -63
- nous/interpret.py +0 -111
- nous/layers.py +0 -117
- nous/models.py +0 -65
- nous-0.1.0.dist-info/METADATA +0 -138
- nous-0.1.0.dist-info/RECORD +0 -10
- {nous-0.1.0.dist-info → nous-0.2.0.dist-info}/WHEEL +0 -0
- {nous-0.1.0.dist-info → nous-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {nous-0.1.0.dist-info → nous-0.2.0.dist-info}/top_level.txt +0 -0
nous/layers.py
DELETED
@@ -1,117 +0,0 @@

```python
# nous/layers.py
import torch
import torch.nn as nn
from typing import List, Tuple

# --- Fact Layers ---

class ExhaustiveAtomicFactLayer(nn.Module):
    """Generates atomic facts by exhaustively comparing all pairs of features."""
    def __init__(self, input_dim: int, feature_names: List[str]):
        super().__init__()
        if input_dim > 20:
            num_facts = input_dim * (input_dim - 1) // 2
            print(f"Warning: ExhaustiveAtomicFactLayer with {input_dim} features will create {num_facts} facts. This may be slow and memory-intensive.")
        self.indices = torch.combinations(torch.arange(input_dim), r=2)
        self.thresholds = nn.Parameter(torch.randn(self.indices.shape[0]) * 0.1)
        self.steepness = nn.Parameter(torch.ones(self.indices.shape[0]) * 5.0)
        self.fact_names = [f"({feature_names[i]} > {feature_names[j]})" for i, j in self.indices.numpy()]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        steepness = torch.nn.functional.softplus(self.steepness) + 1e-4
        diffs = x[:, self.indices[:, 0]] - x[:, self.indices[:, 1]]
        return torch.sigmoid(steepness * (diffs - self.thresholds))

    @property
    def output_dim(self) -> int: return len(self.fact_names)

class LearnedAtomicFactLayer(nn.Module):
    """Base class for learnable fact layers (Sigmoid and Beta)."""
    def __init__(self, input_dim: int, num_facts: int, feature_names: List[str]):
        super().__init__()
        self.input_dim = input_dim
        self.num_facts = num_facts
        self._feature_names = feature_names
        self.projection_left = nn.Linear(input_dim, num_facts, bias=False)
        self.projection_right = nn.Linear(input_dim, num_facts, bias=False)
        self.thresholds = nn.Parameter(torch.randn(num_facts) * 0.1)

    @property
    def output_dim(self) -> int: return self.num_facts

    def get_base_diffs(self, x: torch.Tensor) -> torch.Tensor:
        """Calculates the core difference term for all activation functions."""
        return (self.projection_left(x) - self.projection_right(x)) - self.thresholds

    def fact_names(self, prefix: str) -> List[str]:
        """Generates human-readable and unique names for facts."""
        names = []
        with torch.no_grad():
            w_left, w_right = self.projection_left.weight, self.projection_right.weight
            for i in range(self.output_dim):
                left_name = self._feature_names[w_left[i].abs().argmax().item()]
                right_name = self._feature_names[w_right[i].abs().argmax().item()]
                base_name = f"({left_name} vs {right_name})" if left_name != right_name else f"Thresh({left_name})"
                names.append(f"{prefix}-{i}{base_name}")
        return names

# --- Specialized Learnable Fact Layers ---

class SigmoidFactLayer(LearnedAtomicFactLayer):
    """A learnable fact layer using the standard sigmoid activation."""
    def __init__(self, input_dim: int, num_facts: int, feature_names: List[str]):
        super().__init__(input_dim, num_facts, feature_names)
        self.steepness = nn.Parameter(torch.ones(num_facts) * 5.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        diffs = self.get_base_diffs(x)
        steepness = torch.nn.functional.softplus(self.steepness) + 1e-4
        return torch.sigmoid(steepness * diffs)

    @property
    def fact_names(self) -> List[str]: return super().fact_names(prefix="Sigmoid")

class BetaFactLayer(LearnedAtomicFactLayer):
    """A learnable fact layer using a flexible, generalized logistic function."""
    def __init__(self, input_dim: int, num_facts: int, feature_names: List[str]):
        super().__init__(input_dim, num_facts, feature_names)
        self.k_raw = nn.Parameter(torch.ones(num_facts) * 0.5)
        self.nu_raw = nn.Parameter(torch.zeros(num_facts))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        diffs = self.get_base_diffs(x)
        k = torch.nn.functional.softplus(self.k_raw) + 1e-4
        nu = torch.nn.functional.softplus(self.nu_raw) + 1e-4
        return (1 + torch.exp(-k * diffs)) ** (-nu)

    @property
    def fact_names(self) -> List[str]: return super().fact_names(prefix="Beta")

# --- Rule/Concept Layers ---

class LogicalRuleLayer(nn.Module):
    """Forms logical rules (AND) from facts and outputs higher-level concepts."""
    def __init__(self, input_dim: int, num_rules: int, input_fact_names: List[str]):
        super().__init__()
        torch.manual_seed(input_dim + num_rules)

        if input_dim > 0 and num_rules > 0:
            self.register_buffer('rule_indices', torch.randint(0, input_dim, size=(num_rules, 2)))
            self.rule_names = [f"({input_fact_names[i]} AND {input_fact_names[j]})" for i, j in self.rule_indices]
        else:
            self.register_buffer('rule_indices', torch.empty(0, 2, dtype=torch.long))
            self.rule_names = []

        self.concept_generator = nn.Linear(num_rules, num_rules)
        self.concept_names = [f"Concept-{i}" for i in range(num_rules)]

    def forward(self, facts: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        if self.rule_indices.shape[0] == 0:
            return torch.zeros(facts.shape[0], 0).to(facts.device), torch.zeros(facts.shape[0], 0).to(facts.device)
        fact1, fact2 = facts[:, self.rule_indices[:, 0]], facts[:, self.rule_indices[:, 1]]
        rule_activations = fact1 * fact2
        concepts = torch.sigmoid(self.concept_generator(rule_activations))
        return concepts, rule_activations

    @property
    def output_dim(self) -> int: return len(self.concept_names)
```
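For orientation: the `BetaFactLayer` deleted above computes a generalized logistic activation, (1 + exp(-k·d))^(-ν), over learned left-minus-right projections of the input (k and ν kept positive via softplus), and `LogicalRuleLayer` realizes soft ANDs as element-wise products of randomly paired facts. Below is a minimal sketch of how these layers composed — an editor's illustration against the 0.1.0 code above, not code shipped in either wheel:

```python
# Sketch: wiring a fact layer into a rule layer (assumes the 0.1.0 classes
# above are in scope, e.g. via `from nous.layers import ...`).
import torch

torch.manual_seed(0)
facts_layer = BetaFactLayer(input_dim=5, num_facts=8,
                            feature_names=["f0", "f1", "f2", "f3", "f4"])
rule_layer = LogicalRuleLayer(input_dim=facts_layer.output_dim, num_rules=4,
                              input_fact_names=facts_layer.fact_names)

x = torch.randn(32, 5)               # batch of 32 samples, 5 features
facts = facts_layer(x)               # (32, 8), each fact activation in (0, 1)
concepts, rules = rule_layer(facts)  # concepts (32, 4), rule activations (32, 4)
print(facts.shape, concepts.shape, rules.shape)
print(rule_layer.rule_names[0])      # e.g. "(Beta-2(f0 vs f3) AND Beta-5(f1 vs f4))"
```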
nous/models.py
DELETED
@@ -1,65 +0,0 @@

```python
# nous/models.py
import torch
import torch.nn as nn
from typing import List, Literal, Tuple
from .layers import ExhaustiveAtomicFactLayer, SigmoidFactLayer, BetaFactLayer, LogicalRuleLayer

class NousBlock(nn.Module):
    """A single reasoning block in the Nous network with a residual connection."""
    def __init__(self, input_dim: int, num_rules: int, input_fact_names: List[str]):
        super().__init__()
        self.rule_layer = LogicalRuleLayer(input_dim, num_rules, input_fact_names)
        self.projection = nn.Linear(input_dim, num_rules) if input_dim != num_rules else nn.Identity()
        self.norm = nn.LayerNorm(num_rules)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        concepts, rule_activations = self.rule_layer(x)
        output = self.norm(self.projection(x) + concepts)
        return output, concepts, rule_activations

    @property
    def concept_names(self) -> List[str]:
        return self.rule_layer.concept_names

class NousNet(nn.Module):
    """
    The complete Nous neuro-symbolic network for regression and classification.
    """
    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 feature_names: List[str],
                 fact_layer_type: Literal['beta', 'sigmoid', 'exhaustive'] = 'beta',
                 num_facts: int = 30,
                 num_rules_per_layer: List[int] = [10, 5]):
        super().__init__()
        self.feature_names = feature_names

        if fact_layer_type == 'beta':
            self.atomic_fact_layer = BetaFactLayer(input_dim, num_facts, feature_names)
        elif fact_layer_type == 'sigmoid':
            self.atomic_fact_layer = SigmoidFactLayer(input_dim, num_facts, feature_names)
        elif fact_layer_type == 'exhaustive':
            self.atomic_fact_layer = ExhaustiveAtomicFactLayer(input_dim, feature_names)
        else:
            raise ValueError("fact_layer_type must be 'beta', 'sigmoid', or 'exhaustive'")

        self.nous_blocks = nn.ModuleList()
        current_dim = self.atomic_fact_layer.output_dim

        for i, num_rules in enumerate(num_rules_per_layer):
            input_names = self.atomic_fact_layer.fact_names if i == 0 else self.nous_blocks[i-1].concept_names
            block = NousBlock(current_dim, num_rules, input_names)
            self.nous_blocks.append(block)
            current_dim = num_rules

        self.output_head = nn.Linear(current_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass. Returns logits for classification or direct values for regression.
        """
        h = self.atomic_fact_layer(x)
        for block in self.nous_blocks:
            h, _, _ = block(h)
        return self.output_head(h)
```
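The 0.2.0 file list above shows this monolithic `NousNet` being split into `nous/model.py`, `nous/facts.py`, and the `nous/rules/` package. In the 0.1.0 architecture deleted here, the data flow is facts → stacked residual `NousBlock`s → linear head; a quick shape check (editor's sketch, assuming the classes above are importable):

```python
# Sketch: one forward pass through the 0.1.0 NousNet.
import torch

net = NousNet(input_dim=4, output_dim=1,
              feature_names=["a", "b", "c", "d"],
              fact_layer_type="sigmoid",
              num_facts=12,
              num_rules_per_layer=[6, 3])

x = torch.randn(16, 4)
out = net(x)      # facts (16, 12) -> block (16, 6) -> block (16, 3) -> head (16, 1)
print(out.shape)  # torch.Size([16, 1])
```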
nous-0.1.0.dist-info/METADATA
DELETED
@@ -1,138 +0,0 @@

```
Metadata-Version: 2.4
Name: nous
Version: 0.1.0
Summary: Nous: A Neuro-Symbolic Library for Interpretable and Causal Reasoning AI
Author-email: Islam Tlupov <tlupovislam@gmail.com>
License: MIT License

Copyright (c) 2025 Islam Tlupov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Project-URL: Homepage, https://github.com/EmotionEngineer/nous
Project-URL: Repository, https://github.com/EmotionEngineer/nous
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Typing :: Typed
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=1.8.0
Requires-Dist: pandas>=1.3.0
Requires-Dist: scikit-learn>=1.0
Requires-Dist: matplotlib>=3.3.0
Requires-Dist: networkx>=2.6
Requires-Dist: seaborn>=0.11
Dynamic: license-file
```

# Nous: A Neuro-Symbolic Library for Interpretable AI

[](https://badge.fury.io/py/nous)
[](https://opensource.org/licenses/MIT)

**Nous** is a PyTorch-based library for building "white-box" machine learning models for both **regression** and **classification**. It enables models that don't just predict, but also **reason** and **explain** their decisions in human-understandable terms.

## Key Features

- **Deeply Interpretable**: Generate a complete, step-by-step logical trace (`fact -> rule -> concept -> prediction`) for any decision.
- **Supports Regression & Classification**: A unified API for predicting both continuous values and class probabilities.
- **Causal by Design**: Natively supports counterfactual analysis ("What if?") to provide actionable recommendations for both regression and classification tasks.
- **High Performance**: Achieves accuracy competitive with traditional black-box models.
- **Scalable & Flexible**: Choose between a high-performance `beta` activation, a robust `sigmoid`, or a maximally transparent `exhaustive` fact layer.

## Installation

```bash
pip install nous
```

## Quickstart: A 5-Minute Example (Regression)

Let's predict a house price and understand the model's reasoning.

```python
import torch
import pandas as pd
from sklearn.datasets import make_regression
from nous.models import NousNet
from nous.interpret import trace_decision_graph, explain_fact
from nous.causal import find_counterfactual

# 1. Prepare Data
X_raw, y = make_regression(n_samples=1000, n_features=5, n_informative=3, noise=20, random_state=42)
feature_names = ['area_sqft', 'num_bedrooms', 'dist_to_center', 'age_years', 'renovation_quality']
X = torch.tensor(X_raw, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32).unsqueeze(1)

# 2. Define and Train a NousNet for Regression
model = NousNet(
    input_dim=5,
    output_dim=1,  # Single output for regression
    feature_names=feature_names,
    fact_layer_type='beta'
)
# Training: Use a regression loss like nn.MSELoss
# loss_fn = torch.nn.MSELoss()
# ... (standard training loop omitted)
model.eval()

# 3. Analyze a specific house
x_sample = X[50]
predicted_price = model(x_sample).item()
print(f"Model's predicted price for house #50: ${predicted_price:,.2f}")

# 4. Get the Step-by-Step Reasoning
graph = trace_decision_graph(model, x_sample)
top_facts = sorted(graph['trace']['Atomic Facts'].items(), key=lambda i: i[1]['value'], reverse=True)
fact_to_analyze = top_facts[0][0]
print(f"\nTop activated fact influencing the price: '{fact_to_analyze}'")

# 5. Decode the Learned Fact
details_df = explain_fact(model, fact_name=fact_to_analyze)
print(f"\nDecoding '{fact_to_analyze}':")
display(details_df.head())

# 6. Get an Actionable Recommendation
# What's the smallest change to increase the predicted price to $150,000?
recommendation = find_counterfactual(
    model,
    x_sample,
    target_output=150.0,  # Target value for regression
    task='regression'
)
print("\nRecommendation to increase value to $150k:")
for feature, old_val, new_val in recommendation['changes']:
    print(f"- Change '{feature}' from {old_val:.2f} to {new_val:.2f}")
```

## Choosing a `fact_layer_type`

- `'beta'` (**Default, Recommended**): Best performance and flexibility.
- `'sigmoid'`: A robust and reliable alternative.
- `'exhaustive'`: Maximum transparency. Best for low-dimensional problems (<15 features).

## License

This project is licensed under the MIT License.
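The quickstart in the deleted README leaves the training loop to the reader ("standard training loop omitted"). A minimal loop matching its regression setup might look like this (editor's sketch reusing the quickstart's `model`, `X`, and `y`; not part of the package):

```python
import torch

# Plain full-batch MSE training for the quickstart's regression setup.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = torch.nn.MSELoss()

model.train()
for epoch in range(200):
    optimizer.zero_grad()
    pred = model(X)          # (1000, 1) predictions
    loss = loss_fn(pred, y)  # y was reshaped to (1000, 1) in step 1
    loss.backward()
    optimizer.step()
    if epoch % 50 == 0:
        print(f"epoch {epoch}: mse={loss.item():.2f}")
model.eval()
```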
nous-0.1.0.dist-info/RECORD
DELETED
@@ -1,10 +0,0 @@

```
nous/__init__.py,sha256=-nnKnlgTh2wEqPP4Cz3zUFY0jrU6Y6BGrd-4mMDu6IE,545
nous/causal.py,sha256=U2_pQYpIyM7VhV0mlmmD-kQgyNLMYEH1MnN5-dLZiZA,2488
nous/interpret.py,sha256=QcBceWmGxvLLXvmTA1_T3G6MmyovPV8NA5sxu04CdUw,5721
nous/layers.py,sha256=4Uv0JkhK3EkPbZ1sdbpFK0AkU6IOsyQUjCoyjwz3ZOQ,5651
nous/models.py,sha256=qRhiN7_uAkmm7xIGgXR6gkzl4rArb6E-LkAEnREOYf4,2849
nous-0.1.0.dist-info/licenses/LICENSE,sha256=07nO-ZFpy_s_msfks8VsONyV2cBBggqsEQD2h5sdVRo,1069
nous-0.1.0.dist-info/METADATA,sha256=uZUy6b43xDtS_iHSugbjTZ8BmFwoRKn9mG1gT43VEIg,5992
nous-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
nous-0.1.0.dist-info/top_level.txt,sha256=yUcst4OAspsyKhX0y5ENzFkJKzR_gislA5MykV1pVbk,5
nous-0.1.0.dist-info/RECORD,,
```
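Each RECORD row is `path,sha256=<digest>,size-in-bytes`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with padding stripped (PEP 427). To check an unpacked wheel against these entries, a small standard-library-only sketch (editor's illustration):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Hash a file the way wheel RECORD entries do (PEP 427)."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. record_hash("nous/layers.py") on the unpacked 0.1.0 wheel should print
# "sha256=4Uv0JkhK3EkPbZ1sdbpFK0AkU6IOsyQUjCoyjwz3ZOQ"
```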
{nous-0.1.0.dist-info → nous-0.2.0.dist-info}/WHEEL
File without changes

{nous-0.1.0.dist-info → nous-0.2.0.dist-info}/licenses/LICENSE
File without changes

{nous-0.1.0.dist-info → nous-0.2.0.dist-info}/top_level.txt
File without changes