falseresmem 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- falseresmem-0.1.0/LICENSE +21 -0
- falseresmem-0.1.0/MANIFEST.in +2 -0
- falseresmem-0.1.0/PKG-INFO +57 -0
- falseresmem-0.1.0/README.md +44 -0
- falseresmem-0.1.0/falseresmem/__init__.py +5 -0
- falseresmem-0.1.0/falseresmem/inference.py +24 -0
- falseresmem-0.1.0/falseresmem/model.py +80 -0
- falseresmem-0.1.0/pyproject.toml +20 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Anastasiia Mikhailova
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: falseresmem
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A pip-installable PyTorch model package for False Image Memorability prediction.
|
|
5
|
+
Author-email: Anastasiia Mikhailova <amikhailova@uchicago.edu>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
License-File: LICENSE
|
|
8
|
+
Requires-Python: >=3.7
|
|
9
|
+
Requires-Dist: pillow
|
|
10
|
+
Requires-Dist: torch
|
|
11
|
+
Requires-Dist: torchvision
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
|
|
14
|
+
# FalseResMem Model Package
|
|
15
|
+
|
|
16
|
+
A pip-installable Python package containing a custom PyTorch model (`FalseResMem`) for prediction of false alarms in visual memory recognition tasks.
|
|
17
|
+
|
|
18
|
+
## Contact
|
|
19
|
+
|
|
20
|
+
Author: Anastasiia Mikhailova
|
|
21
|
+
Email: amikhailova@uchicago.edu
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
You can install the package via pip from source (or PyPI once published):
|
|
26
|
+
|
|
27
|
+
pip install .
|
|
28
|
+
|
|
29
|
+
or
|
|
30
|
+
|
|
31
|
+
pip install FalseResMem
|
|
32
|
+
## Usage
|
|
33
|
+
|
|
34
|
+
Here's a basic example of how to import and use the model after installation:
|
|
35
|
+
|
|
36
|
+
**Low-level (raw tensors):**
|
|
37
|
+
```python
|
|
38
|
+
import torch
|
|
39
|
+
from falseresmem import load_model
|
|
40
|
+
|
|
41
|
+
model = load_model()
|
|
42
|
+
model.eval()
|
|
43
|
+
input_tensor = torch.randn(1, 3, 227, 227)
|
|
44
|
+
output = model(input_tensor)
|
|
45
|
+
print(output)
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
**High-level (image file):**
|
|
49
|
+
```python
|
|
50
|
+
from falseresmem import predict_image
|
|
51
|
+
|
|
52
|
+
prob = predict_image("test.jpg").item()
|
|
53
|
+
print(f"False alarm probability: {prob:.3f}")
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## License
|
|
57
|
+
MIT
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# FalseResMem Model Package
|
|
2
|
+
|
|
3
|
+
A pip-installable Python package containing a custom PyTorch model (`FalseResMem`) for prediction of false alarms in visual memory recognition tasks.
|
|
4
|
+
|
|
5
|
+
## Contact
|
|
6
|
+
|
|
7
|
+
Author: Anastasiia Mikhailova
|
|
8
|
+
Email: amikhailova@uchicago.edu
|
|
9
|
+
|
|
10
|
+
## Installation
|
|
11
|
+
|
|
12
|
+
You can install the package via pip from source (or PyPI once published):
|
|
13
|
+
|
|
14
|
+
pip install .
|
|
15
|
+
|
|
16
|
+
or
|
|
17
|
+
|
|
18
|
+
pip install FalseResMem
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
Here's a basic example of how to import and use the model after installation:
|
|
22
|
+
|
|
23
|
+
**Low-level (raw tensors):**
|
|
24
|
+
```python
|
|
25
|
+
import torch
|
|
26
|
+
from falseresmem import load_model
|
|
27
|
+
|
|
28
|
+
model = load_model()
|
|
29
|
+
model.eval()
|
|
30
|
+
input_tensor = torch.randn(1, 3, 227, 227)
|
|
31
|
+
output = model(input_tensor)
|
|
32
|
+
print(output)
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
**High-level (image file):**
|
|
36
|
+
```python
|
|
37
|
+
from falseresmem import predict_image
|
|
38
|
+
|
|
39
|
+
prob = predict_image("test.jpg").item()
|
|
40
|
+
print(f"False alarm probability: {prob:.3f}")
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
## License
|
|
44
|
+
MIT
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from .model import load_model
|
|
3
|
+
from PIL import Image
|
|
4
|
+
import torchvision.transforms as transforms
|
|
5
|
+
|
|
6
|
+
def predict_image(image_path: str, device: str = "cpu") -> torch.Tensor:
    """Predict the false-alarm probability for a single image file.

    Args:
        image_path: Path to an image file readable by PIL.
        device: Torch device string (e.g. "cpu" or "cuda") to run inference on.

    Returns:
        The model's raw output tensor of shape (1, 1): a sigmoid probability.
    """
    # load_model() already moves the model to `device` and puts it in eval
    # mode, so the previous extra .to(device) / .eval() calls were redundant.
    model = load_model(device)

    # Preprocessing: resize then center-crop to 227x227 — the spatial size the
    # model's AlexNet-style conv branch requires to flatten to 9216 features.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(227),
        transforms.ToTensor(),
    ])

    image = Image.open(image_path).convert("RGB")
    input_tensor = transform(image).unsqueeze(0).to(device)

    # Inference only — no gradients needed.
    with torch.no_grad():
        output = model(input_tensor)

    return output
|
|
24
|
+
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
from torch import nn, cat
|
|
2
|
+
from torchvision.models import resnet50, ResNet50_Weights
|
|
3
|
+
import torch.nn.functional as F
|
|
4
|
+
import torch
|
|
5
|
+
import importlib.resources
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class FalseResMem(nn.Module):
    """Two-branch network predicting false-alarm probability for an image.

    Features from an AlexNet-style convolutional branch (flattened to 9216
    values; requires 227x227 RGB input) are concatenated with 2048-d features
    from a ResNet-50 backbone truncated before its final fc layer, then passed
    through a fully connected head ending in a sigmoid, so ``forward`` returns
    a probability in (0, 1).
    """

    def __init__(self, learning_rate=1e-5, momentum=.9, cruise_altitude=384):
        """Build both branches and the fully connected head.

        Args:
            learning_rate: Unused inside the module; kept for backward
                compatibility with callers that pass it.
            momentum: Unused inside the module; kept for backward compatibility.
            cruise_altitude: Channel width of conv3/conv4 (default 384).
        """
        super().__init__()
        # All ResNet-50 children except the final Linear classifier
        # (children 0..8: stem, four stages, adaptive average pool).
        rn = list(resnet50(weights=ResNet50_Weights.DEFAULT).children())
        self.features = nn.Sequential(*rn[:9])

        # AlexNet-style convolutional branch (3x227x227 -> 256x6x6 = 9216).
        self.conv1 = nn.Conv2d(3, 48, kernel_size=11, stride=4)
        self.pool1 = nn.MaxPool2d(3, 2)
        self.lrn1 = nn.LocalResponseNorm(5)
        self.conv2 = nn.Conv2d(48, 256, kernel_size=5, stride=1, padding=2, groups=2)
        self.pool2 = nn.MaxPool2d(3, 2)
        self.lrn2 = nn.LocalResponseNorm(5)
        self.conv3 = nn.Conv2d(256, cruise_altitude, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2)
        self.conv4 = nn.Conv2d(cruise_altitude, cruise_altitude, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1),
                               groups=2)
        self.conv5 = nn.Conv2d(cruise_altitude, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)

        # Fully connected head over the concatenated 9216 + 2048 features.
        self.fc6 = nn.Linear(in_features=9216 + 2048, out_features=4096, bias=True)
        self.drp6 = nn.Dropout(p=0.5, inplace=False)
        self.fc7 = nn.Linear(in_features=4096, out_features=4096, bias=True)
        self.drp7 = nn.Dropout(p=0.5, inplace=False)
        self.fc8 = nn.Linear(in_features=4096, out_features=2048, bias=True)
        self.drp8 = nn.Dropout(p=0.5, inplace=False)
        self.fc9 = nn.Linear(in_features=2048, out_features=1024, bias=True)
        # drp9 is not used in forward() (matching the original's effective
        # behavior of three dropout applications) but is kept so the module's
        # attribute layout is unchanged; Dropout holds no parameters.
        self.drp9 = nn.Dropout(p=0.5, inplace=False)
        self.fc10 = nn.Linear(in_features=1024, out_features=512, bias=True)
        self.fc11 = nn.Linear(in_features=512, out_features=256, bias=True)
        self.fc12 = nn.Linear(in_features=256, out_features=1, bias=True)

    def forward(self, x):
        """Return false-alarm probabilities for a batch of images.

        Args:
            x: Float tensor of shape (N, 3, 227, 227). 227 is required so the
                conv branch flattens to exactly 9216 features per sample.

        Returns:
            Tensor of shape (N, 1) with sigmoid probabilities.
        """
        # AlexNet-style branch.
        cnv = F.relu(self.conv1(x))
        cnv = self.pool1(cnv)
        cnv = self.lrn1(cnv)
        cnv = F.relu(self.conv2(cnv))
        cnv = self.pool2(cnv)
        cnv = self.lrn2(cnv)
        cnv = F.relu(self.conv3(cnv))
        cnv = F.relu(self.conv4(cnv))
        cnv = F.relu(self.conv5(cnv))
        cnv = self.pool5(cnv)
        feat = cnv.view(-1, 9216)

        # ResNet-50 branch (globally pooled 2048-d features).
        resfeat = self.features(x).view(-1, 2048)

        catfeat = cat((feat, resfeat), 1)

        # FIX (consistency): the original applied drp8 after fc7 and drp9
        # after fc8, skipping drp7 entirely. Every dropout here is an
        # identical Dropout(p=0.5), so using the correspondingly named
        # modules is numerically identical (in both train and eval modes)
        # while keeping layer names aligned with the layers they follow.
        hid = F.relu(self.fc6(catfeat))
        hid = self.drp6(hid)
        hid = F.relu(self.fc7(hid))
        hid = self.drp7(hid)
        hid = F.relu(self.fc8(hid))
        hid = self.drp8(hid)
        hid = F.relu(self.fc9(hid))
        hid = F.relu(self.fc10(hid))
        hid = F.relu(self.fc11(hid))

        pry = torch.sigmoid(self.fc12(hid))
        return pry
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def load_model(device: str = "cpu") -> FalseResMem:
    """Load the pretrained FalseResMem model bundled with the package.

    Args:
        device: Torch device string the model and weights are placed on.

    Returns:
        A ``FalseResMem`` instance in eval mode with the packaged weights
        loaded (non-matching keys are ignored via ``strict=False``).
    """
    model = FalseResMem().to(device)
    model.eval()

    # FIX: the resource lives in the actual package, named "falseresmem"
    # (lowercase, per the package layout) — the original looked it up in
    # "FalseResMem", which is not an importable package and fails at runtime.
    with importlib.resources.path("falseresmem", "model.pt") as path:
        # SECURITY NOTE: weights_only=False unpickles arbitrary objects.
        # Acceptable only because the checkpoint ships inside this package;
        # never load untrusted files this way.
        checkpoint = torch.load(path, map_location=device, weights_only=False)

    if isinstance(checkpoint, dict):
        # Plain state_dict checkpoint.
        model.load_state_dict(checkpoint, strict=False)
    else:
        # FIX: the original's else branch passed a fully pickled nn.Module
        # straight to load_state_dict (which expects a mapping) — identical
        # to the if branch and guaranteed to fail. Extract its state_dict.
        model.load_state_dict(checkpoint.state_dict(), strict=False)
    return model
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "falseresmem"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A pip-installable PyTorch model package for False Image Memorability prediction."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.7"
|
|
11
|
+
license = "MIT"
|
|
12
|
+
dependencies = ["torch", "torchvision", "pillow"]
|
|
13
|
+
authors = [{name="Anastasiia Mikhailova", email="amikhailova@uchicago.edu"}]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
[tool.hatch.build.targets.wheel]
|
|
17
|
+
exclude = ["/falseresmem/model.pt"]  # NOTE(review): model.pt is excluded from the wheel, but load_model() expects it inside the installed package — confirm the weights are distributed some other way
|
|
18
|
+
|
|
19
|
+
[tool.hatch.build.targets.sdist]
|
|
20
|
+
exclude = ["/falseresmem/model.pt"]
|