deepf 0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepf-0.1/MANIFEST.in +1 -0
- deepf-0.1/PKG-INFO +3 -0
- deepf-0.1/deepf/__init__.py +2 -0
- deepf-0.1/deepf/data.txt +78 -0
- deepf-0.1/deepf.egg-info/PKG-INFO +3 -0
- deepf-0.1/deepf.egg-info/SOURCES.txt +9 -0
- deepf-0.1/deepf.egg-info/dependency_links.txt +1 -0
- deepf-0.1/deepf.egg-info/top_level.txt +1 -0
- deepf-0.1/pyproject.toml +3 -0
- deepf-0.1/setup.cfg +4 -0
- deepf-0.1/setup.py +9 -0
deepf-0.1/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
include deepf/data.txt
|
deepf-0.1/PKG-INFO
ADDED
deepf-0.1/deepf/data.txt
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
# --- Training-side setup: imports, device, preprocessing, data loading -----
import torch
import torch.nn as nn
import torchvision as tv
from torch.utils.data import DataLoader
from torch import optim
from PIL import Image

# Prefer the GPU when CUDA is available; otherwise run on the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Preprocessing pipeline: every image is resized to 224x224 and converted
# to a float tensor in [0, 1] before it reaches the network.
transform = tv.transforms.Compose([
    tv.transforms.Resize((224, 224)),
    tv.transforms.ToTensor(),
])

# ImageFolder expects one sub-directory per class (here: 'fake' and 'real');
# directory names become integer labels in alphabetical order.
dataset = tv.datasets.ImageFolder("/content/drive/MyDrive/MSCIT/SEM2/CF/content/train", transform=transform)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
|
|
19
|
+
|
|
20
|
+
# Simple CNN model
|
|
21
|
+
class DeepFakeNet(nn.Module):
    """Minimal CNN for binary fake/real image classification.

    Expects a (N, 3, 224, 224) input batch. Two conv + max-pool stages
    halve the spatial size twice (224 -> 112 -> 56), so the flattened
    feature vector entering the classifier has 16 * 56 * 56 elements.
    The forward pass returns raw logits of shape (N, 2).
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 3 -> 8 -> 16 channels, each stage followed
        # by ReLU and a 2x2 max-pool.
        self.conv = nn.Sequential(
            nn.Conv2d(3, 8, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(8, 16, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Classifier head: flatten the feature map, then one linear layer
        # mapping straight to the two class logits.
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(16 * 56 * 56, 2),
        )

    def forward(self, x):
        """Run the extractor, then the head; returns (N, 2) logits."""
        features = self.conv(x)
        return self.fc(features)
|
|
37
|
+
|
|
38
|
+
# --- Training --------------------------------------------------------------
model = DeepFakeNet().to(device)

# Cross-entropy over the two class logits; Adam with a 1e-3 learning rate.
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# One pass over the dataset: forward, loss, backward, parameter update.
for images, labels in loader:
    batch = images.to(device)
    targets = labels.to(device)

    optimizer.zero_grad()
    batch_loss = loss_fn(model(batch), targets)
    batch_loss.backward()
    optimizer.step()

print("Training Done!")

# Persist only the learned weights (state dict), not the whole module.
torch.save(model.state_dict(), "deepfake_model.pth")
|
|
58
|
+
|
|
59
|
+
# --- Inference -------------------------------------------------------------
import torch
from PIL import Image
import torchvision.transforms as T

# Rebuild the architecture and load the trained weights.
# FIX: map_location="cpu" makes the checkpoint loadable even when the
# weights were saved from a CUDA run and this script runs on a CPU-only
# machine (torch.load would otherwise raise on deserializing CUDA tensors).
model = DeepFakeNet()
model.load_state_dict(torch.load("deepfake_model.pth", map_location="cpu"))
model.eval()  # switch layers like dropout/batch-norm to inference mode

# Must match the training-time preprocessing exactly.
transform = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor()
])

img = Image.open("/content/drive/MyDrive/MSCIT/SEM2/CF/content/train/real/real1.jpg").convert("RGB")
img = transform(img).unsqueeze(0)  # add the batch dimension: (1, 3, 224, 224)

# FIX: inference needs no autograd graph — no_grad saves memory and time.
with torch.no_grad():
    output = model(img)
pred = torch.argmax(output)

# ImageFolder assigns labels alphabetically: 'fake' -> 0, 'real' -> 1.
print("Prediction:", "Fake" if pred == 0 else "Real")
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
deepf
|
deepf-0.1/pyproject.toml
ADDED
deepf-0.1/setup.cfg
ADDED