joonmyung 1.4.9__tar.gz → 1.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {joonmyung-1.4.9 → joonmyung-1.5.1}/PKG-INFO +1 -1
  2. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/analysis.py +88 -76
  3. joonmyung-1.5.1/joonmyung/analysis/dataset.py +129 -0
  4. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/draw.py +3 -1
  5. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/log.py +2 -0
  6. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/meta_data/utils.py +5 -1
  7. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/metric.py +2 -2
  8. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/script.py +2 -1
  9. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/PKG-INFO +1 -1
  10. {joonmyung-1.4.9 → joonmyung-1.5.1}/setup.py +2 -1
  11. joonmyung-1.4.9/joonmyung/analysis/dataset.py +0 -108
  12. {joonmyung-1.4.9 → joonmyung-1.5.1}/LICENSE.txt +0 -0
  13. {joonmyung-1.4.9 → joonmyung-1.5.1}/README.md +0 -0
  14. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/__init__.py +0 -0
  15. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/__init__.py +0 -0
  16. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/hook.py +0 -0
  17. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/metric.py +0 -0
  18. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/model.py +0 -0
  19. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/utils.py +0 -0
  20. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/app.py +0 -0
  21. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/data.py +0 -0
  22. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/dummy.py +0 -0
  23. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/file.py +0 -0
  24. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/gradcam.py +0 -0
  25. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/meta_data/__init__.py +0 -0
  26. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/meta_data/label.py +0 -0
  27. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/status.py +0 -0
  28. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/utils.py +0 -0
  29. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/SOURCES.txt +0 -0
  30. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/dependency_links.txt +0 -0
  31. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/not-zip-safe +0 -0
  32. {joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/top_level.txt +0 -0
  33. {joonmyung-1.4.9 → joonmyung-1.5.1}/setup.cfg +0 -0
{joonmyung-1.4.9 → joonmyung-1.5.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: joonmyung
-Version: 1.4.9
+Version: 1.5.1
 Summary: JoonMyung's Library
 Home-page: https://github.com/pizard/JoonMyung.git
 Author: JoonMyung Choi
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/analysis/analysis.py
@@ -1,5 +1,5 @@
 import os
-os.environ["CUDA_VISIBLE_DEVICES"] = "4"
+os.environ["CUDA_VISIBLE_DEVICES"] = "1"
 from joonmyung.analysis.dataset import JDataset
 from joonmyung.analysis.model import JModel
 from joonmyung.draw import saliency, overlay, drawImgPlot, drawHeatmap, unNormalize
@@ -160,86 +160,98 @@ class Analysis:
 if __name__ == '__main__':
     # Section A. Data
     dataset_name, device, amp_autocast, debug = "imagenet", 'cuda', torch.cuda.amp.autocast, True
-    data_path, _, _ = data2path(dataset_name)
-    data_num, batch_size, bs = [[0, 0], [1, 0], [2, 0], [3, 0], [0, 1], [1, 1], [2, 1], [3, 1]], 16, []
-    view, activate = [False, True, False, False, False], [True, False, False] #
-    # VIEW : IMG, SALIENCY(M), SALIENCY(D), SALIENCY(S), ATTN. MOVEMENT
+    data_path, num_classes, _, _ = data2path(dataset_name)
+    view, activate = [False, True, False, False, False], [True, False, False]
+    # VIEW : IMG, SALIENCY:ATTN, SALIENCY:OPENCV, SALIENCY:GRAD, ATTN. MOVEMENT
     # ACTIVATE : ATTN, QKV, HEAD
-    analysis = [2]
-    # [0] : INPUT TYPE, [0 : SAMPLE + POS, 1 : SAMPLE, 2 : POS]
-    if not debug:
-        dataset = JDataset(data_path, dataset_name, device=device)
-        samples, targets, imgs, label_names = dataset.getItems(data_num)
-        loader = dataset.getAllItems(batch_size)
-        num_classes = dataset.num_classes
-    else:
-        transform = getTransform(False, True)
-        img = PIL.Image.open('/hub_data1/joonmyung/data/imagenet/train/n01440764/n01440764_10026.JPEG')
-        samples, targets, label_names = transform(img)[None].to(device), torch.tensor([0]).to(device)[None].to(device), 'tench, Tinca tinca'
-        num_classes = 1000
+    analysis = [0] # [0] : INPUT TYPE, [0 : SAMPLE + POS, 1 : SAMPLE, 2 : POS]
+
+    dataset = JDataset(data_path, dataset_name, device=device)
+    data_idxs = [[c, i] for i in range(1000) for c in range(50)]
+    # data_idxs = [[21, 0], [22, 0], [2, 0], [0, 0], [0, 1], [1, 1], [2, 1], [3, 1]]
 
     # Section B. Model
-    model_number, model_name = 0, "deit_tiny_patch16_224" # deit_tiny_patch16_224, deit_small_patch16_224, deit_base_patch16_224
-    # model_number, model_name = 0, "vit_tiny_patch16_224" # vit_tiny_patch16_224, vit_small_patch16_224, vit_base_patch16_224
+    model_number, model_name = 0, "deit_tiny_patch16_224" # deit, vit | tiny, small, base
     # model_number, model_name = 1, "deit_tiny_patch16_224"
 
+    # Section C. Setting
+    ls_rollout, ls_attentive, col = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12
+
     modelMaker = JModel(num_classes, device=device)
     model = modelMaker.getModel(model_number, model_name)
     model = Analysis(model, analysis = analysis, activate = activate, device=device)
-
-
-    samples_ = samples[bs] if bs else samples
-    targets_ = targets[bs] if bs else targets
-    output = model(samples_)
-    if view[0]:
-        drawImgPlot(unNormalize(samples_, "imagenet"))
-
-    if view[1]: # SALIENCY W/ MODEL
-        ls_rollout, ls_attentive, col = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12
-        # discard_ratios, v_ratio, head_fusion, data_from = [0.0, 0.4, 0.8], 0.1, "mean", "cls" # Attention, Gradient
-        discard_ratios, v_ratio, head_fusion, data_from = [0.0], 0.0, "mean", "patch" # Attention, Gradient
-        rollout, attentive = model.anaSaliency(True, False, output, discard_ratios=discard_ratios,
-                                               ls_attentive=ls_attentive, ls_rollout=ls_rollout,
-                                               head_fusion=head_fusion, index=targets_, data_from=data_from,
-                                               reshape=True) # (12(L), 8(B), 14(H), 14(W))
-        print(1)
-        # datas_rollout = overlay(samples_, rollout, dataset_name)
-        # drawImgPlot(datas_rollout, col=col)
-        # datas_attn = overlay(samples_, attentive, dataset_name)
-        # drawImgPlot(datas_attn, col=col)
-
-    if view[2]: # SALIENCY W/ DATA
-        img = np.array(imgs[0])
-
-        saliency = cv2.saliency.StaticSaliencySpectralResidual_create()
-        (success, saliencyMap) = saliency.computeSaliency(img)
-        saliencyMap = (saliencyMap * 255).astype("uint8")
-
-        saliency = cv2.saliency.StaticSaliencyFineGrained_create()
-        (success, saliencyFineMap) = saliency.computeSaliency(img)
-        threshMap = cv2.threshold((saliencyFineMap * 255).astype("uint8"), 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
-        # plt.imshow(threshMap)
-        # plt.show()
-
-    if view[3]: # SALIENCY FOR INPUT
-        samples_.requires_grad, model.detach, k = True, False, 3
-        output = model(samples_)
-        attn = torch.stack(model.info["attn"]["f"], dim=1).mean(dim=[2,3])[0,-2]
-        topK = attn[1:].topk(k, -1, True)[1]
-        # a = torch.autograd.grad(attn.sum(), samples, retain_graph=True)[0].sum(dim=1)
-        a = torch.autograd.grad(output[:,3], samples_, retain_graph=True)[0].sum(dim=1)
-        b = F.interpolate(a.unsqueeze(0), scale_factor=0.05, mode='nearest')[0]
-        # drawHeatmap(b)
-        print(1)
-        # to_np(torch.stack([attn[:, :, 0], attn[:, :, 1:].sum(dim=-1)], -1)[0])
-
-    if view[4]: # ATTENTION MOVEMENT (FROM / TO)
-        attn = torch.stack(model.info["attn"]["f"]).mean(dim=2).transpose(0,1) # (8(B), 12(L), 197(T_Q), 197(T_K))
-        cls2cls = attn[:, :, :1, 0].mean(dim=2) # (8(B), 12(L))
-        patch2cls = attn[:, :, :1, 1:].mean(dim=2).sum(dim=-1) # (8(B), 12(L))
-        to_np(torch.stack([cls2cls.mean(dim=0), patch2cls.mean(dim=0)]))
-
-        cls2patch = attn[:, :, 1:, 0].mean(dim=2)
-        patch2patch = attn[:, :, 1:, 1:].mean(dim=2).sum(dim=-1)
-        to_np(torch.stack([cls2patch.mean(dim=0), patch2patch.mean(dim=0)]))
-        print(1)
+    for idx, data_idx in enumerate(data_idxs):
+        print(f"------------------------- [{data_idx[0]}]/[{data_idx[1]}] -------------------------")
+
+        sample, target, label_name = dataset[data_idx[0], data_idx[1]]
+        # sample, _, img, _ = dataset.getItemPath('/hub_data1/joonmyung/data/imagenet/train/n01440764/n01440764_39.JPEG')
+        output = model(sample)
+        if view[0]:
+            drawImgPlot(unNormalize(sample, "imagenet"))
+
+        if view[1]: # SALIENCY W/ MODEL
+            # ls_rollout, ls_attentive, col = [], [0, 2, 4, 6, 8, 10], 6
+            # discard_ratios, v_ratio, head_fusion, data_from = [0.0, 0.4, 0.8], 0.1, "mean", "cls"
+            discard_ratios, v_ratio, head_fusion, data_from = [0.0], 0.0, "mean", "patch" # Attention, Gradient
+            rollout, attentive = model.anaSaliency(True, False, output, discard_ratios=discard_ratios,
+                                                   ls_attentive=ls_attentive, ls_rollout=ls_rollout,
+                                                   head_fusion=head_fusion, index=target, data_from=data_from,
+                                                   reshape=True) # (12(L), 8(B), 14(H), 14(W))
+            datas_attn = overlay(sample, attentive, dataset_name)
+            drawImgPlot(datas_attn, col=col)
+
+            discard_ratios, v_ratio, head_fusion, data_from = [0.0], 0.0, "mean", "cls" # Attention, Gradient
+            rollout, attentive = model.anaSaliency(True, False, output, discard_ratios=discard_ratios,
+                                                   ls_attentive=ls_attentive, ls_rollout=ls_rollout,
+                                                   head_fusion=head_fusion, index=target, data_from=data_from,
+                                                   reshape=True) # (12(L), 8(B), 14(H), 14(W))
+            datas_attn = overlay(sample, attentive, dataset_name)
+            drawImgPlot(datas_attn, col=col)
+
+
+            # datas_rollout = overlay(sample, rollout, dataset_name)
+            # drawImgPlot(datas_rollout, col=col)
+
+            # datas_attn = overlay(sample, attentive, dataset_name)
+            # drawImgPlot(datas_attn, col=col)
+
+            # a = attentive[5]
+            # b = torch.stack([a.clamp(max=a.quantile(1 - v_ratio)) for v_ratio in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55]])
+            # datas_attn = overlay(sample, b, dataset_name)
+            # drawImgPlot(datas_attn, col=col)
+            # print(1)
+
+        if view[2]: # SALIENCY W/ DATA
+            img = np.array(dataset[data_idx[0], data_idx[1], 2][0])
+
+            saliency = cv2.saliency.StaticSaliencySpectralResidual_create()
+            (success, saliencyMap) = saliency.computeSaliency(img)
+            saliencyMap = (saliencyMap * 255).astype("uint8")
+
+            saliency = cv2.saliency.StaticSaliencyFineGrained_create()
+            (success, saliencyFineMap) = saliency.computeSaliency(img)
+            threshMap = cv2.threshold((saliencyFineMap * 255).astype("uint8"), 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
+            # plt.imshow(threshMap)
+            # plt.show()
+
+        if view[3]: # SALIENCY FOR INPUT
+            sample.requires_grad, model.detach, k = True, False, 3
+            output = model(sample)
+            attn = torch.stack(model.info["attn"]["f"], dim=1).mean(dim=[2,3])[0,-2]
+            topK = attn[1:].topk(k, -1, True)[1]
+            # a = torch.autograd.grad(attn.sum(), samples, retain_graph=True)[0].sum(dim=1)
+            a = torch.autograd.grad(output[:,3], sample, retain_graph=True)[0].sum(dim=1)
+            b = F.interpolate(a.unsqueeze(0), scale_factor=0.05, mode='nearest')[0]
+            # drawHeatmap(b)
+            print(1)
+            # to_np(torch.stack([attn[:, :, 0], attn[:, :, 1:].sum(dim=-1)], -1)[0])
+
+        if view[4]: # ATTENTION MOVEMENT (FROM / TO)
+            attn = torch.stack(model.info["attn"]["f"]).mean(dim=2).transpose(0,1) # (8(B), 12(L), 197(T_Q), 197(T_K))
+            cls2cls = attn[:, :, :1, 0].mean(dim=2) # (8(B), 12(L))
+            patch2cls = attn[:, :, :1, 1:].mean(dim=2).sum(dim=-1) # (8(B), 12(L))
+            # how much attention the PATCH tokens receive
+            cls2patch = attn[:, :, 1:, 0].mean(dim=2)
+            patch2patch = attn[:, :, 1:, 1:].mean(dim=2).sum(dim=-1)
+            # to_np(torch.stack([cls2cls.mean(dim=0), patch2cls.mean(dim=0), cls2patch.mean(dim=0), patch2patch.mean(dim=0)]))
+            print(1)
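Note: the rewritten driver above replaces the old debug/batch branches with per-sample iteration, relying on the new `JDataset.__getitem__`, which takes a `[class, image]` index pair and returns a single normalized sample already on the device. A minimal sketch of the access pattern, assuming a CUDA device and the ImageNet layout used throughout this diff (loop body elided):

    from joonmyung.analysis.dataset import JDataset

    dataset = JDataset("/hub_data1/joonmyung/data/imagenet", "imagenet", device="cuda")
    data_idxs = [[c, i] for i in range(1000) for c in range(50)]  # [class, image] pairs
    for c, i in data_idxs[:3]:
        sample, target, label_name = dataset[c, i]  # sample: (1, 3, 224, 224), on device
        print(label_name, tuple(sample.shape))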
joonmyung-1.5.1/joonmyung/analysis/dataset.py (new file)
@@ -0,0 +1,129 @@
+from joonmyung.meta_data.label import imnet_label, cifar_label
+from timm.data import create_dataset, create_loader
+from torchvision import transforms
+from joonmyung.utils import getDir
+import torch
+import copy
+import glob
+import PIL
+import os
+
+
+# CIFAR Setting
+# pip install cifar2png
+# cifar2png cifar100 ./cifar100
+# cifar2png cifar10 ./cifar10
+
+class JDataset():
+    # transforms.Resize(int((256 / 224) * input_size), interpolation=InterpolationMode.BICUBIC),
+    settings = {"imagenet": {
+                    "num_classes": 1000,
+                    "data_types": ["val", "train"],
+                    "label_name": imnet_label,
+                    "distributions": {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]},
+                    "size": (224, 224)
+                },
+                "cifar100": {
+                    "num_classes": 100,
+                    "data_types": ["test", "train"],
+                    "label_name": cifar_label,
+                    "distributions": {"mean": [0.4914, 0.4822, 0.4465], "std": [0.2023, 0.1994, 0.2010]},
+                    "size": (32, 32)
+                }
+                }
+
+    def __init__(self, data_path="/hub_data1/joonmyung/data", dataset="imagenet", train=False, transform_type=0,
+                 distribution=None, size=None, device="cuda"):
+        self.dataset = dataset.lower()
+        setting = self.settings[self.dataset]
+        self.label_name = setting["label_name"]
+        self.data_type = setting["data_types"][train]
+        self.transform_type = transform_type
+        self.distribution = distribution if distribution else setting["distributions"]
+        size = size if size else setting["size"]
+
+        self.transform = [
+            transforms.Compose([transforms.Resize(size, interpolation=3), transforms.ToTensor(), transforms.Normalize(self.distribution["mean"], self.distribution["std"])]),
+            transforms.Compose([transforms.Resize(size, interpolation=3), transforms.ToTensor()]),
+            transforms.Compose([transforms.ToTensor()])
+        ]
+
+        self.device = device
+        self.data_path = data_path
+        self.label_paths = sorted(getDir(os.path.join(self.data_path, self.data_type)))
+        self.img_paths = [sorted(glob.glob(os.path.join(self.data_path, self.data_type, label_path, "*"))) for label_path in self.label_paths]
+        # self.img_paths = [sorted(glob.glob(os.path.join(self.data_path, self.data_type, "*", "*")))]
+
+
+    def __getitem__(self, idx):
+        if len(idx) == 2:
+            label_num, img_num = idx
+            transform_type = self.transform_type
+        elif len(idx) == 3:
+            label_num, img_num, transform_type = idx
+
+        img_path = self.img_paths[label_num][img_num]
+        img = PIL.Image.open(img_path)
+        data = self.transform[transform_type](img)
+
+        return data.unsqueeze(0).to(self.device), torch.tensor(label_num).to(self.device), self.label_name[int(label_num)]
+
+    def getItems(self, indexs):
+        ds, ls, lns = [], [], []
+        for index in indexs:
+            d, l, ln = self.__getitem__(index)
+            ds.append(d)
+            ls.append(l)
+            lns.append(ln)
+        return torch.cat(ds, dim=0), torch.stack(ls, dim=0), lns
+
+    def getItemPath(self, img_path):
+        img = PIL.Image.open(img_path)
+        data = self.transform[self.transform_type](img)  # self.transform is a list of Composes; index it
+        return data.unsqueeze(0).to(self.device)
+
+    def __len__(self):
+        return sum(len(paths) for paths in self.img_paths)  # total number of images on disk
+
+    def getAllItems(self, batch_size=32):
+        dataset = create_dataset(
+            root=self.data_path, name="IMNET",
+            split='validation', is_training=False,
+            load_bytes=False, class_map='')
+
+        loader = create_loader(
+            dataset,
+            input_size=(3, 224, 224),
+            batch_size=batch_size,
+            use_prefetcher=True,
+            interpolation='bicubic',
+            mean=self.distribution["mean"],
+            std=self.distribution["std"],
+            num_workers=8,
+            crop_pct=0.9,
+            pin_memory=False,
+            tf_preprocessing=False)
+        return loader
+
+    def validation(self, data):
+        return data.lower()
+
+    def unNormalize(self, image):
+        result = torch.zeros_like(image)
+        for c, (m, s) in enumerate(zip(self.distribution["mean"], self.distribution["std"])):
+            result[:, c] = image[:, c].mul(s).add(m)
+        return result
+
+    def normalize(self, image):
+        result = copy.deepcopy(image)
+        for c, (m, s) in enumerate(zip(self.distribution["mean"], self.distribution["std"])):
+            result[:, c].sub_(m).div_(s)
+        return result
+
+if __name__ == "__main__":
+    root_path = "/hub_data1/joonmyung/data"
+    dataset = "imagenet"
+    dataset = JDataset(root_path, dataset, train=True)
+    d, l, l_n = dataset[0, 1]
+    samples = dataset.getItems([[0, 1], [0, 2], [0, 3]])  # method is getItems (was misspelled getitems)
+    print(1)
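Note: `__getitem__` accepts an optional third index element that overrides the transform, which is what the `view[2]` branch in analysis.py uses to fetch an un-normalized image for OpenCV. A usage sketch, assuming the `data_path/<split>/<class_dir>/*` layout the class globs for:

    dataset = JDataset("/hub_data1/joonmyung/data", "imagenet", train=False)

    sample, target, name = dataset[0, 1]   # default transform_type 0: resize + normalize
    raw, _, _ = dataset[0, 1, 2]           # transform_type 2: ToTensor() only, no resize or normalize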
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/draw.py
@@ -212,8 +212,10 @@ def saliency(attentions=None, gradients=None, head_fusion="mean",
         rollouts = rollouts.reshape(-1, B, H, W) # L, B, H, W
 
     if ls_attentive:
+        # attentive = saliencys[ls_attentive, :, 0] \
+        #     if data_from == "cls" else saliencys[ls_attentive, :, 1:].mean(dim=2) # (L, B, T)
         attentive = saliencys[ls_attentive, :, 0] \
-            if data_from == "cls" else saliencys[ls_attentive, :, 1:].mean(dim=2) # (L, B, T)
+            if data_from == "cls" else saliencys[ls_attentive].mean(dim=2) # (L, B, T)
         attentive = attentive[:, :, 1:]
         if reshape:
             attentive = attentive.reshape(-1, B, H, W)
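Note: the behavioral change in `saliency` is confined to the `data_from == "patch"` branch: the map is now averaged over every query row, CLS included, rather than over patch queries only; the CLS key column is still dropped afterwards. A toy-shape sketch, with L/B/T dimensions assumed from the `(12(L), 8(B), ...)` comments in analysis.py:

    import torch

    L, B, T = 12, 8, 197                     # layers, batch, 1 CLS + 196 patch tokens
    saliencys = torch.rand(L, B, T, T)
    ls_attentive = [0, 1, 2]

    old = saliencys[ls_attentive][:, :, 1:].mean(dim=2)  # average patch-query rows only
    new = saliencys[ls_attentive].mean(dim=2)            # average all query rows, CLS included
    attentive = new[:, :, 1:]                            # CLS key column still dropped
    print(old.shape, new.shape, attentive.shape)         # (3,8,197) (3,8,197) (3,8,196)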
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/log.py
@@ -1,3 +1,5 @@
+import pickle
+
 from joonmyung.utils import is_dist_avail_and_initialized
 from collections import defaultdict, deque
 from joonmyung.draw import data2PIL
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/meta_data/utils.py
@@ -31,9 +31,13 @@ def data2path(dataset,
 
     if dataset in ["imagenet", "IMNET"]:
         data_path = os.path.join(data_path, "imagenet") if "kakao" not in server else os.path.join(data_path, "imagenet-pytorch")
+        num_classes = 1000
+    else:
+        raise ValueError
+
     output_dir = os.path.join(output_dir, conference, wandb_version, wandb_name)
 
-    return data_path, output_dir, server
+    return data_path, num_classes, output_dir, server
 
 def get_label(key, d_name ="imagenet"):
     d_name = d_name.lower()
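Note: `data2path` now returns four values and raises for any dataset other than ImageNet, so existing three-value call sites break; analysis.py above is updated accordingly. The required change at a call site:

    # before: data_path, output_dir, server = data2path("imagenet")
    data_path, num_classes, output_dir, server = data2path("imagenet")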
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/metric.py
@@ -1,4 +1,4 @@
-from fvcore.nn import FlopCountAnalysis, flop_count_table, flop_count_str
+from fvcore.nn import FlopCountAnalysis, flop_count_table
 from torchprofile import profile_macs
 from typing import Tuple
 from thop import profile
@@ -24,7 +24,7 @@ def numel(model,
     return round(params, round_num)
 
 @torch.no_grad()
-def flops(model, size, round_num=1, eval=True, device="cuda", fp16=False, **kwargs):
+def flops(model, size, round_num=1, eval=True, fp16=False, device="cuda", **kwargs):
     if eval: model.eval()
     with torch.cuda.amp.autocast(enabled=fp16):
         inputs = torch.randn(size, device=device, requires_grad=True)
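Note: swapping the order of the `fp16` and `device` defaults in `flops` is invisible to keyword-style callers but silently re-binds positional ones; a sketch of the hazard (both call sites hypothetical):

    flops(model, (1, 3, 224, 224), fp16=True, device="cuda")  # unaffected: keyword arguments

    # flops(model, (1, 3, 224, 224), 1, True, "cuda")
    # the fifth positional argument used to bind to device; it now binds to fp16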
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung/script.py
@@ -3,6 +3,7 @@ from tqdm import tqdm
 import subprocess
 import time
 import pynvml
+import datetime
 
 class GPU_Worker():
     def __init__(self, gpus:list, waitTimeInit = 30, waitTime = 60, count = 0,
@@ -69,7 +70,7 @@ class GPU_Worker():
         if len(self.availGPUs) < self.need_gpu: self.setGPU()
         gpus, self.availGPUs = self.availGPUs[:self.need_gpu], self.availGPUs[self.need_gpu:]
 
-        return ', '.join(map(str, gpus))
+        return ','.join(map(str, gpus))
 
 
     def check_process(self):
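Note: dropping the space from the separator makes the returned GPU list directly usable as a CUDA_VISIBLE_DEVICES value; consumers of that variable may not strip whitespace, so the compact form is the safer one:

    gpus = [0, 2]
    print(','.join(map(str, gpus)))   # "0,2" – safe to export as CUDA_VISIBLE_DEVICES
    # ', '.join(map(str, gpus)) would yield "0, 2"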
{joonmyung-1.4.9 → joonmyung-1.5.1}/joonmyung.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: joonmyung
-Version: 1.4.9
+Version: 1.5.1
 Summary: JoonMyung's Library
 Home-page: https://github.com/pizard/JoonMyung.git
 Author: JoonMyung Choi
{joonmyung-1.4.9 → joonmyung-1.5.1}/setup.py
@@ -3,7 +3,7 @@ from setuptools import find_packages
 
 setuptools.setup(
     name="joonmyung",
-    version="1.4.9",
+    version="1.5.1",
     author="JoonMyung Choi",
     author_email="pizard@korea.ac.kr",
     description="JoonMyung's Library",
@@ -27,3 +27,4 @@ setuptools.setup(
 
 # python setup.py sdist; python -m twine upload dist/*
 # ID:JoonmyungChoi
+
joonmyung-1.4.9/joonmyung/analysis/dataset.py (removed)
@@ -1,108 +0,0 @@
-from joonmyung.meta_data.label import imnet_label, cifar_label
-from timm.data import create_dataset, create_loader
-from torchvision import transforms
-import torch
-import copy
-import glob
-import PIL
-import os
-
-from joonmyung.utils import getDir
-
-
-class JDataset():
-    distributions = {"imagenet": {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]},
-                     "cifar": {"mean": [0.4914, 0.4822, 0.4465], "std": [0.2023, 0.1994, 0.2010]}}
-    transform_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize(distributions["cifar"]["mean"], distributions["cifar"]["std"])])
-    # transform_imagenet_ = transforms.Compose([transforms.ToTensor(), transforms.Normalize(distributions["imagenet"]["mean"], distributions["imagenet"]["std"])])
-    # transform_imagenet_vis = transforms.Compose([transforms.Resize(256, interpolation=3), transforms.CenterCrop(224)])
-    transform_imagenet_vis = transforms.Compose([transforms.Resize((224, 224), interpolation=3)])
-    transform_imagenet_norm = transforms.Compose([transforms.ToTensor(), transforms.Normalize(distributions["imagenet"]["mean"], distributions["imagenet"]["std"])])
-
-    # transforms.Resize(int((256 / 224) * input_size), interpolation=InterpolationMode.BICUBIC),
-    transforms = {"imagenet": [transform_imagenet_vis, transform_imagenet_norm], "cifar": transform_cifar}
-
-    # CIFAR Setting
-    # pip install cifar2png
-    # cifar2png cifar100 ./cifar100
-    # cifar2png cifar10 ./cifar10
-    def validation(self, data):
-        return data.lower()
-
-    def unNormalize(self, image):
-        result = torch.zeros_like(image)
-        for c, (m, s) in enumerate(zip(self.distributions[self.d_kind]["mean"], self.distributions[self.d_kind]["std"])):
-            result[:, c] = image[:, c].mul(s).add(m)
-        return result
-
-    def normalize(self, image):
-        result = copy.deepcopy(image)
-        for c, (m, s) in enumerate(zip(self.distributions[self.d_kind]["mean"], self.distributions[self.d_kind]["std"])):
-            result[:, c].sub_(m).div_(s)
-        return result
-
-    def __init__(self, data_path="/hub_data1/joonmyung/data", dataset="imagenet", device="cuda", train = False):
-        dataset = dataset.lower()
-
-        self.d = dataset.lower()
-        self.num_classes = 1000 if self.d == "imagenet" else 100
-        if train:
-            [self.d_kind, self.d_type] = ["imagenet", "val"] if self.d == "imagenet" else ["cifar", "test"]
-        else:
-            [self.d_kind, self.d_type] = ["imagenet", "train"] if self.d == "imagenet" else ["cifar", "train"]
-        self.device = device
-
-        self.transform = self.transforms[self.d_kind]
-        self.data_path = data_path
-        self.label_name = imnet_label if self.d_kind == "imagenet" else cifar_label
-        self.label_paths = sorted(getDir(os.path.join(self.data_path, self.d_type)))
-        self.img_paths = [sorted(glob.glob(os.path.join(self.data_path, self.d_type, label_path, "*"))) for label_path in self.label_paths]
-
-    def __getitem__(self, index):
-        label_num, img_num = index
-        img_path = self.img_paths[label_num][img_num]
-        img = PIL.Image.open(img_path)
-
-        img = self.transform[0](img)
-        data = self.transform[1](img)
-        return data.unsqueeze(0).to(self.device), torch.tensor(label_num).to(self.device), \
-               img, self.label_name[int(label_num)]
-    def getItems(self, indexs):
-        ds, ls, ies, lns = [], [], [], []
-        for index in indexs:
-            d, l, i, ln = self.__getitem__(index)
-            ds.append(d)
-            ls.append(l)
-            ies.append(i)
-            lns.append(ln)
-        return torch.cat(ds, dim=0), torch.stack(ls, dim=0), ies, lns
-    def __len__(self):
-        return
-
-    def getAllItems(self, batch_size=32):
-
-        dataset = create_dataset(
-            root=self.data_path, name="IMNET" if self.d == "imagenet" else self.d.upper()
-            , split='validation', is_training=False
-            , load_bytes=False, class_map='')
-
-        loader = create_loader(
-            dataset,
-            input_size=(3, 224, 224),
-            batch_size=batch_size,
-            use_prefetcher=True,
-            interpolation='bicubic',
-            mean=self.distributions[self.d_kind]["mean"],
-            std=self.distributions[self.d_kind]["std"],
-            num_workers=8,
-            crop_pct=0.9,
-            pin_memory=False,
-            tf_preprocessing=False)
-        return loader
-
-# if __name__ == "__main__":
-#     root_path = "/hub_data1/joonmyung/data"
-#     dataset = "cifar100"
-#     dataset = JDataset(root_path, dataset)
-#     # sample = dataset[0, 1]
-#     samples = dataset.getitems([[0,1], [0,2], [0,3]])