foscat 3.0.9__py3-none-any.whl → 3.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foscat/CNN.py +151 -0
- foscat/CircSpline.py +102 -34
- foscat/FoCUS.py +2363 -1052
- foscat/GCNN.py +239 -0
- foscat/Softmax.py +29 -20
- foscat/Spline1D.py +86 -36
- foscat/Synthesis.py +335 -262
- foscat/alm.py +690 -0
- foscat/alm_tools.py +11 -0
- foscat/backend.py +933 -588
- foscat/backend_tens.py +63 -0
- foscat/loss_backend_tens.py +48 -38
- foscat/loss_backend_torch.py +35 -41
- foscat/scat.py +1639 -1015
- foscat/scat1D.py +1256 -774
- foscat/scat2D.py +9 -7
- foscat/scat_cov.py +3067 -1541
- foscat/scat_cov1D.py +11 -1467
- foscat/scat_cov2D.py +9 -7
- foscat/scat_cov_map.py +77 -51
- foscat/scat_cov_map2D.py +79 -49
- foscat-3.6.0.dist-info/LICENCE +13 -0
- foscat-3.6.0.dist-info/METADATA +184 -0
- foscat-3.6.0.dist-info/RECORD +27 -0
- {foscat-3.0.9.dist-info → foscat-3.6.0.dist-info}/WHEEL +1 -1
- foscat/GetGPUinfo.py +0 -36
- foscat-3.0.9.dist-info/METADATA +0 -23
- foscat-3.0.9.dist-info/RECORD +0 -22
- {foscat-3.0.9.dist-info → foscat-3.6.0.dist-info}/top_level.txt +0 -0
foscat/backend_tens.py
ADDED
@@ -0,0 +1,63 @@
+import sys
+
+import tensorflow as tf
+
+
+class foscat_backend_tens:
+
+    def __init__(self, backend):
+
+        self.bk = backend
+
+    # ---------------------------------------------−---------
+
+    @tf.function
+    def loss(self, x, batch, loss_function):
+
+        operation = loss_function.scat_operator
+
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
+
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run %d [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    loss_function.id_loss,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
+            sys.stdout.flush()
+
+            l_x = x
+            """
+            if nx>1:
+                l_x={}
+                for i in range(nx):
+            """
+
+            if nx == 1:
+                ndata = x.shape[0]
+            else:
+                ndata = x.shape[0] * x.shape[1]
+
+            if self.KEEP_TRACK is not None:
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
+            else:
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.backend.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
+            if self.KEEP_TRACK is not None:
+                return l_loss, g, linfo
+            else:
+                return l_loss, g
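The added module computes a loss and its gradient inside a graph-compiled function: `tf.gradients` is only valid in graph mode, which is why the method is wrapped in `@tf.function`. Below is a minimal, self-contained sketch of that same pattern with a hypothetical `quadratic_loss`; it is illustrative only and not code from the package.

```python
import tensorflow as tf

target = tf.constant([1.0, 2.0, 3.0])

def quadratic_loss(x):
    # Hypothetical stand-in for loss_function.eval(...)
    return tf.reduce_sum((x - target) ** 2)

@tf.function
def loss_and_grad(x):
    l = quadratic_loss(x)
    # tf.gradients works here because @tf.function runs this in graph mode,
    # mirroring the g = tf.gradients(l_loss, x)[0] call in the diff above.
    g = tf.gradients(l, x)[0]
    return l, g

x = tf.Variable([0.0, 0.0, 0.0])
l, g = loss_and_grad(x)
print(float(l), g.numpy())  # 14.0, [-2. -4. -6.]
```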
foscat/loss_backend_tens.py
CHANGED
@@ -1,60 +1,70 @@
-import tensorflow as tf
-import numpy as np
 import sys
 
+import tensorflow as tf
+
+
 class loss_backend:
-
-    def __init__(self,backend,curr_gpu,mpi_rank):
-
-        self.bk=backend
-        self.curr_gpu=curr_gpu
-        self.mpi_rank=mpi_rank
-
-
-    def check_dense(self,data,datasz):
+
+    def __init__(self, backend, curr_gpu, mpi_rank):
+
+        self.bk = backend
+        self.curr_gpu = curr_gpu
+        self.mpi_rank = mpi_rank
+
+    def check_dense(self, data, datasz):
         if isinstance(data, tf.Tensor):
             return data
-
+
         return data.to_dense()
-
+
     # ---------------------------------------------−---------
-
+
     @tf.function
-    def loss(self,x,batch,loss_function,KEEP_TRACK):
+    def loss(self, x, batch, loss_function, KEEP_TRACK):
 
-        operation=loss_function.scat_operator
+        operation = loss_function.scat_operator
 
-        nx=1
-        if len(x.shape)>1:
-            nx=x.shape[0]
-
-        with tf.device(
-
-
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
+
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
             sys.stdout.flush()
 
-            l_x=x
+            l_x = x
             """
            if nx>1:
                l_x={}
                for i in range(nx):
            """
-
-            if nx==1:
-                ndata=x.shape[0]
+
+            if nx == 1:
+                ndata = x.shape[0]
             else:
-                ndata=x.shape[0]*x.shape[1]
-
+                ndata = x.shape[0] * x.shape[1]
+
             if KEEP_TRACK is not None:
-
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
             else:
-
-
-            g=tf.gradients(
-            g=self.check_dense(g,ndata)
-            self.curr_gpu=self.curr_gpu+1
-
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
             if KEEP_TRACK is not None:
-                return
+                return l_loss, g, linfo
             else:
-                return
+                return l_loss, g
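The `loss` method places each evaluation with `tf.device(...)` using `(gpupos + curr_gpu) % ngpu`, so successive losses are rotated round-robin across the available devices. A hedged, self-contained sketch of that placement pattern follows; `gpulist`, `gpupos`, and `curr_gpu` mirror the attribute names in the diff, while the dummy computation and loop are illustrative assumptions.

```python
import tensorflow as tf

# Fall back to the CPU when no GPU is visible, so the sketch runs anywhere.
gpulist = [d.name for d in tf.config.list_logical_devices("GPU")] or ["/CPU:0"]
ngpu = len(gpulist)
gpupos = 0
curr_gpu = 0

for step in range(4):
    device = gpulist[(gpupos + curr_gpu) % ngpu]  # round-robin selection
    with tf.device(device):
        y = tf.reduce_sum(tf.random.normal([8]))  # stand-in for a loss evaluation
    print(step, device, float(y))
    curr_gpu += 1  # advance so the next loss lands on the next device
```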
foscat/loss_backend_torch.py
CHANGED
@@ -1,18 +1,15 @@
 import torch
-
-import numpy as np
-import sys
+
 
 class loss_backend:
-
-    def __init__(self,backend,curr_gpu,mpi_rank):
-
-        self.bk=backend
-        self.curr_gpu=curr_gpu
-        self.mpi_rank=mpi_rank
-
-
-    def check_dense(self,data,datasz):
+
+    def __init__(self, backend, curr_gpu, mpi_rank):
+
+        self.bk = backend
+        self.curr_gpu = curr_gpu
+        self.mpi_rank = mpi_rank
+
+    def check_dense(self, data, datasz):
         if isinstance(data, torch.Tensor):
             return data
         """
@@ -21,44 +18,41 @@ class loss_backend:
             minlength=datasz)
         """
         return data
-
+
     # ---------------------------------------------−---------
 
-    def loss(self,x,batch,loss_function,KEEP_TRACK):
+    def loss(self, x, batch, loss_function, KEEP_TRACK):
 
-        operation=loss_function.scat_operator
+        operation = loss_function.scat_operator
 
-
-
-        nx=x.shape[0]
+        if torch.cuda.is_available():
+            with torch.cuda.device((operation.gpupos + self.curr_gpu) % operation.ngpu):
 
-
-
-
-
+                l_x = x.clone().detach().requires_grad_(True)
+
+                if KEEP_TRACK is not None:
+                    l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
+                else:
+                    l_loss = loss_function.eval(l_x, batch)
+
+                l_loss.backward()
+
+                g = l_x.grad
+
+                self.curr_gpu = self.curr_gpu + 1
+        else:
+            l_x = x.clone().detach().requires_grad_(True)
 
-        l_x=x.clone().detach().requires_grad_(True)
-
-        if nx==1:
-            ndata=x.shape[0]
-        else:
-            ndata=x.shape[0]*x.shape[1]
-
            if KEEP_TRACK is not None:
-
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
            else:
-
+                l_loss = loss_function.eval(l_x, batch)
+
+            l_loss.backward()
 
-
-
-        g=l_x.grad
+            g = l_x.grad
 
-        print(g)
-
-        self.curr_gpu=self.curr_gpu+1
-
        if KEEP_TRACK is not None:
-            return
+            return l_loss.detach(), g, linfo
        else:
-            return
-
+            return l_loss.detach(), g
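The updated PyTorch backend follows the standard autograd recipe: evaluate the loss on a detached copy that requires gradients, call `backward()`, read the gradient from `.grad`, and return the detached loss. A minimal sketch of that pattern, with a hypothetical `my_loss` in place of `loss_function.eval`, is shown below purely as an illustration.

```python
import torch

def my_loss(t):
    # Hypothetical stand-in for loss_function.eval(...)
    return ((t - 1.0) ** 2).sum()

x = torch.zeros(3)
l_x = x.clone().detach().requires_grad_(True)  # leaf tensor tracked by autograd
l_loss = my_loss(l_x)
l_loss.backward()                              # populates l_x.grad
g = l_x.grad
print(l_loss.detach().item(), g)               # 3.0, tensor([-2., -2., -2.])
```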