joonmyung 1.5.13.tar.gz → 1.5.15.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {joonmyung-1.5.13 → joonmyung-1.5.15}/PKG-INFO +1 -1
  2. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/draw.py +10 -16
  3. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/script.py +18 -13
  4. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung.egg-info/PKG-INFO +1 -1
  5. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung.egg-info/SOURCES.txt +1 -14
  6. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung.egg-info/top_level.txt +0 -1
  7. {joonmyung-1.5.13 → joonmyung-1.5.15}/setup.py +1 -1
  8. joonmyung-1.5.13/models/SA/MHSA.py +0 -37
  9. joonmyung-1.5.13/models/SA/PVTSA.py +0 -90
  10. joonmyung-1.5.13/models/SA/TMSA.py +0 -37
  11. joonmyung-1.5.13/models/SA/__init__.py +0 -0
  12. joonmyung-1.5.13/models/__init__.py +0 -0
  13. joonmyung-1.5.13/models/deit.py +0 -372
  14. joonmyung-1.5.13/models/evit.py +0 -154
  15. joonmyung-1.5.13/models/modules/PE.py +0 -139
  16. joonmyung-1.5.13/models/modules/__init__.py +0 -0
  17. joonmyung-1.5.13/models/modules/blocks.py +0 -168
  18. joonmyung-1.5.13/models/pvt.py +0 -307
  19. joonmyung-1.5.13/models/pvt_v2.py +0 -202
  20. joonmyung-1.5.13/models/tome.py +0 -285
  21. {joonmyung-1.5.13 → joonmyung-1.5.15}/LICENSE.txt +0 -0
  22. {joonmyung-1.5.13 → joonmyung-1.5.15}/README.md +0 -0
  23. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/__init__.py +0 -0
  24. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/__init__.py +0 -0
  25. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/analysis.py +0 -0
  26. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/dataset.py +0 -0
  27. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/hook.py +0 -0
  28. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/metric.py +0 -0
  29. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/model.py +0 -0
  30. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/analysis/utils.py +0 -0
  31. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/app.py +0 -0
  32. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/data.py +0 -0
  33. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/dummy.py +0 -0
  34. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/file.py +0 -0
  35. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/gradcam.py +0 -0
  36. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/log.py +0 -0
  37. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/meta_data/__init__.py +0 -0
  38. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/meta_data/label.py +0 -0
  39. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/meta_data/utils.py +0 -0
  40. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/metric.py +0 -0
  41. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/models/__init__.py +0 -0
  42. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/models/tome.py +0 -0
  43. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/status.py +0 -0
  44. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung/utils.py +0 -0
  45. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung.egg-info/dependency_links.txt +0 -0
  46. {joonmyung-1.5.13 → joonmyung-1.5.15}/joonmyung.egg-info/not-zip-safe +0 -0
  47. {joonmyung-1.5.13 → joonmyung-1.5.15}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: joonmyung
-Version: 1.5.13
+Version: 1.5.15
 Summary: JoonMyung's Library
 Home-page: https://github.com/pizard/JoonMyung.git
 Author: JoonMyung Choi
@@ -241,28 +241,22 @@ def saliency(attentions=None, gradients=None, head_fusion="mean",
 
 
 
+
 def data2PIL(datas):
-    if type(datas) == torch.Tensor: # MAKE TO (..., H, W, 3)
-        if datas.shape[-1] == 3:
-            pils = datas
-        elif len(datas.shape) == 2:
+    if type(datas) == torch.Tensor:
+        if len(datas.shape) == 2:
            pils = datas.unsqueeze(-1).detach().cpu()
-        elif len(datas.shape) == 3:
+        if len(datas.shape) == 3:
            pils = datas.permute(1, 2, 0).detach().cpu()
        elif len(datas.shape) == 4:
            pils = datas.permute(0, 2, 3, 1).detach().cpu()
    elif type(datas) == np.ndarray:
-        if len(datas.shape) == 2:
-            datas = np.expand_dims(datas, axis=-1)
-        # TODO NEED TO CHECK
-        # if datas.max() <= 1:
-        #     pils = cv2.cvtColor(datas, cv2.COLOR_BGR2RGB) # 0.29ms
-        # else:
-        #     pils = datas
-
-        # image = Image.fromarray(image) # 0.32ms
-        pils = cv2.cvtColor(datas, cv2.COLOR_BGR2RGB) # 0.29ms
-
+        if len(datas.shape) == 3: datas = np.expand_dims(datas, axis=0)
+        if datas.max() <= 1:
+            # image = Image.fromarray(image) # 0.32ms
+            pils = cv2.cvtColor(datas, cv2.COLOR_BGR2RGB) # 0.29ms
+        else:
+            pils = datas
    elif type(datas) == PIL.JpegImagePlugin.JpegImageFile \
            or type(datas) == PIL.Image.Image:
        pils = datas
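For a sense of what this refactor does at call time, here is a minimal, self-contained sketch of the new branching, reconstructed from the hunk above; the packaged `data2PIL` in `joonmyung/draw.py` may differ in details such as the exact PIL type checks, and the helper name below is illustrative.

```python
# Sketch (not the packaged code) of the 1.5.15 data2PIL branching: torch tensors
# are detached, moved to CPU and put in channels-last layout; numpy arrays with
# max() <= 1 get a BGR -> RGB conversion; everything else passes through.
import numpy as np
import torch
import cv2


def data2pil_sketch(datas):
    if isinstance(datas, torch.Tensor):
        if len(datas.shape) == 2:                        # (H, W)       -> (H, W, 1)
            pils = datas.unsqueeze(-1).detach().cpu()
        if len(datas.shape) == 3:                        # (C, H, W)    -> (H, W, C)
            pils = datas.permute(1, 2, 0).detach().cpu()
        elif len(datas.shape) == 4:                      # (B, C, H, W) -> (B, H, W, C)
            pils = datas.permute(0, 2, 3, 1).detach().cpu()
    elif isinstance(datas, np.ndarray):
        if len(datas.shape) == 3:                        # add a leading batch axis
            datas = np.expand_dims(datas, axis=0)
        if datas.max() <= 1:
            pils = cv2.cvtColor(datas, cv2.COLOR_BGR2RGB)
        else:
            pils = datas
    else:                                                # e.g. PIL images: unchanged
        pils = datas
    return pils


print(data2pil_sketch(torch.rand(3, 32, 32)).shape)      # torch.Size([32, 32, 3])
print(data2pil_sketch(torch.rand(2, 3, 32, 32)).shape)   # torch.Size([2, 32, 32, 3])
```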
@@ -97,7 +97,7 @@ class GPU_Worker():
        return
 
    def message(self, text):
-        url = "https://hooks.slack.com/services/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"
+        url = "https://hooks.slack.com/services/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"
        payload = {"text": text}
        headers = {'Content-type': 'application/json'}
 
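This hunk only swaps the hard-coded incoming-webhook URL; the call that actually posts the payload sits outside the shown lines. For reference, a minimal Slack incoming-webhook post with `requests` looks roughly like the sketch below. The `send_slack_message` helper and the placeholder URL are illustrative, not part of the package; `GPU_Worker.message` presumably does something equivalent with its hard-coded URL.

```python
# Minimal sketch of posting a text message to a Slack incoming webhook.
import json
import requests


def send_slack_message(text, url):
    payload = {"text": text}
    headers = {"Content-type": "application/json"}
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    response.raise_for_status()      # Slack returns 200 with body "ok" on success
    return response


# send_slack_message("Experiments Finished", "https://hooks.slack.com/services/T.../B.../XXX")
```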
@@ -105,7 +105,7 @@ class GPU_Worker():
 
        return response
 
-def Process_Worker(processes, gpuWorker, id = "", p = True):
+def Process_Worker(processes, gpuWorker, m = None, p = False):
    # TODO : need to check the exact time at which the experiment fully finishes
    start = time.localtime()
    print("------ Start Running!! : {} ------".format(time2str(start)))
@@ -114,25 +114,30 @@ def Process_Worker(processes, gpuWorker, id = "", p = True):
        gpus = gpuWorker.getGPU()
        prefix = f"CUDA_VISIBLE_DEVICES={gpus} nohup sh -c \'"
        suffix = f"\' > {i+1}:gpu{gpus}.log 2>&1 "
-        # print("------ {}:GPU{} {} ------".format(i + 1, gpus, prefix + process + suffix))
-        p = subprocess.Popen(prefix + process + suffix, shell=True)
-        gpuWorker.register_process(gpus, p)
+        if p:
+            print("------ {}:GPU{} {} ------".format(i + 1, gpus, prefix + process + suffix))
+        session = subprocess.Popen(prefix + process + suffix, shell=True)
+        gpuWorker.register_process(gpus, session)
    gpuWorker.waitForEnd()
    end = time.localtime()
+
    print("------ End Running!! : {} ------".format(time2str(end)))
    training_time = datetime.timedelta(seconds=time.mktime(end) - time.mktime(start))
    print(f"Time 1/all : {training_time}/{training_time / len(processes)} ------")
-    gpuWorker.message(f"Experiments Finished"
-                      f"{id} : "
-                      f"Time 1/all : {training_time}/{training_time / len(processes)}"
-                      )
-
-
-
+    if m:
+        gpuWorker.message(f"Experiments Finished {m} : "
+                          f"Time 1/all : {training_time}/{training_time / len(processes)}"
+                          )
 
 
 if __name__ == '__main__':
    # Working Sample
    processes = [1,2,3,4,5]
    gpuWorker = GPU_Worker([0,1,2,3], 30, 120, checkType=1, utilRatio=50, need_gpu=4)
-    Process_Worker(processes, gpuWorker)
+    Process_Worker(processes, gpuWorker)
+
+
+
+    # Machine 1 : https://hooks.slack.com/services/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
+    # Machine 2 : https://hooks.slack.com/services/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
+    # Machine 3 : https://hooks.slack.com/services/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
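In short, the old `id`/`p` pair becomes `m` (an optional label included in the Slack notification; nothing is sent when it is `None`) and `p` (whether to echo each composed shell command; now off by default). A usage sketch under assumed command strings follows; only the `__main__` sample in the diff is authoritative, and the import path is inferred from the file list above.

```python
# Sketch of driving the 1.5.15 Process_Worker: m labels the Slack notification
# sent after all runs finish (skipped when None), p=True prints each composed
# CUDA_VISIBLE_DEVICES=... command. The command strings here are placeholders.
from joonmyung.script import GPU_Worker, Process_Worker

processes = [
    "python train.py --seed 0",
    "python train.py --seed 1",
]
gpuWorker = GPU_Worker([0, 1, 2, 3], 30, 120, checkType=1, utilRatio=50, need_gpu=4)
Process_Worker(processes, gpuWorker, m="machine-1", p=True)
```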
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: joonmyung
-Version: 1.5.13
+Version: 1.5.15
 Summary: JoonMyung's Library
 Home-page: https://github.com/pizard/JoonMyung.git
 Author: JoonMyung Choi
@@ -29,17 +29,4 @@ joonmyung/meta_data/__init__.py
 joonmyung/meta_data/label.py
 joonmyung/meta_data/utils.py
 joonmyung/models/__init__.py
-joonmyung/models/tome.py
-models/__init__.py
-models/deit.py
-models/evit.py
-models/pvt.py
-models/pvt_v2.py
-models/tome.py
-models/SA/MHSA.py
-models/SA/PVTSA.py
-models/SA/TMSA.py
-models/SA/__init__.py
-models/modules/PE.py
-models/modules/__init__.py
-models/modules/blocks.py
+joonmyung/models/tome.py
@@ -3,7 +3,7 @@ from setuptools import find_packages
 
 setuptools.setup(
    name="joonmyung",
-    version="1.5.13",
+    version="1.5.15",
    author="JoonMyung Choi",
    author_email="pizard@korea.ac.kr",
    description="JoonMyung's Library",
@@ -1,37 +0,0 @@
-import torch.nn as nn
-class MHSA(nn.Module):
-    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
-                 talking_head=False):
-        super().__init__()
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        self.scale = head_dim ** -0.5
-
-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-
-
-        self.proj_l = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-        self.proj_w = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-
-
-    def forward(self, x):
-        B, N, C = x.shape
-        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-        q, k, v = qkv[0], qkv[1], qkv[2]   # make torchscript happy (cannot use tensor as tuple)
-
-        attn = (q @ k.transpose(-2, -1)) * self.scale  # (B, H, T, D)
-        # attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-        attn = self.proj_l(attn.transpose(1, -1)).transpose(1, -1)
-
-        attn = attn.softmax(dim=-1)
-        attn = self.attn_drop(attn)
-        # attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-        attn = self.proj_w(attn.transpose(1, -1)).transpose(1, -1)
-
-        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-        return x
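The deleted `MHSA` is a standard multi-head self-attention block with optional talking-heads mixing (`proj_l`/`proj_w` applied across the head axis before and after the softmax). If you relied on it from 1.5.13, a shape-preserving smoke test looks like the sketch below; it assumes the class definition above has been copied into a local module, and the tensor sizes are arbitrary.

```python
# Smoke test for the removed MHSA block: (B, N, C) in, (B, N, C) out, with C
# divisible by num_heads. Requires the MHSA class above to be available locally
# (it is no longer shipped in joonmyung 1.5.15).
import torch

attn = MHSA(dim=64, num_heads=8, qkv_bias=True, talking_head=True)
x = torch.rand(2, 197, 64)        # (batch, tokens, channels), ViT-style token count
print(attn(x).shape)              # torch.Size([2, 197, 64])
```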
@@ -1,90 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from functools import partial
-
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-from timm.models.registry import register_model
-from timm.models.vision_transformer import _cfg
-import math
-
-class PVTSA(nn.Module):
-    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1, linear=False,
-                 talking_head=False):
-        super().__init__()
-        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
-
-        self.dim = dim
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        self.scale = qk_scale or head_dim ** -0.5
-
-        self.q = nn.Linear(dim, dim, bias=qkv_bias)
-        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-
-        self.proj_l = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-        self.proj_w = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-
-        self.linear = linear
-        self.sr_ratio = sr_ratio
-        if not linear:
-            if sr_ratio > 1:
-                self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
-                self.norm = nn.LayerNorm(dim)
-        else:
-            self.pool = nn.AdaptiveAvgPool2d(7)
-            self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
-            self.norm = nn.LayerNorm(dim)
-            self.act = nn.GELU()
-        self.apply(self._init_weights)
-
-    def _init_weights(self, m):
-        if isinstance(m, nn.Linear):
-            trunc_normal_(m.weight, std=.02)
-            if isinstance(m, nn.Linear) and m.bias is not None:
-                nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.LayerNorm):
-            nn.init.constant_(m.bias, 0)
-            nn.init.constant_(m.weight, 1.0)
-        elif isinstance(m, nn.Conv2d):
-            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-            fan_out //= m.groups
-            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
-            if m.bias is not None:
-                m.bias.data.zero_()
-
-    def forward(self, x, H, W):
-        B, N, C = x.shape
-        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
-
-        if not self.linear:
-            if self.sr_ratio > 1:
-                x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
-                x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
-                x_ = self.norm(x_)
-                kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-            else:
-                kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-        else:
-            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
-            x_ = self.sr(self.pool(x_)).reshape(B, C, -1).permute(0, 2, 1)
-            x_ = self.norm(x_)
-            x_ = self.act(x_)
-            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-        k, v = kv[0], kv[1]
-
-        attn = (q @ k.transpose(-2, -1)) * self.scale
-
-        attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-        attn = attn.softmax(dim=-1)
-        attn = self.attn_drop(attn)
-        attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-
-        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-
-        return x
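Unlike `MHSA`, the removed `PVTSA` follows PVT-style spatial-reduction attention: keys and values are computed from a strided-convolution (or pooled) downsampling of the token map, so `forward` additionally takes the token grid size `(H, W)`. A shape check under assumed sizes, again relying on the class definition above, which 1.5.15 no longer ships:

```python
# Shape check for the removed PVTSA block. With sr_ratio=2 the keys/values come
# from a 7x7 grid instead of 14x14, but the output keeps the full token count.
# Requires the PVTSA class above (and timm, which it imports) to be available.
import torch

H, W = 14, 14
x = torch.rand(2, H * W, 64)      # (batch, N = H*W, channels)
attn = PVTSA(dim=64, num_heads=8, sr_ratio=2)
print(attn(x, H, W).shape)        # torch.Size([2, 196, 64])
```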
@@ -1,37 +0,0 @@
-import torch.nn as nn
-class MHSA(nn.Module):
-    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
-                 talking_head=False):
-        super().__init__()
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        self.scale = head_dim ** -0.5
-
-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-
-
-        self.proj_l = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-        self.proj_w = nn.Linear(num_heads, num_heads) if talking_head else nn.Identity()
-
-
-    def forward(self, x):
-        B, N, C = x.shape
-        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-        q, k, v = qkv[0], qkv[1], qkv[2]   # make torchscript happy (cannot use tensor as tuple)
-
-        attn = (q @ k.transpose(-2, -1)) * self.scale  # (B, H, T, D)
-        # attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-        attn = self.proj_l(attn.transpose(1, -1)).transpose(1, -1)
-
-        attn = attn.softmax(dim=-1)
-        attn = self.attn_drop(attn)
-        # attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
-        attn = self.proj_w(attn.transpose(1, -1)).transpose(1, -1)
-
-        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-        return x, k.mean(dim=1)