foscat 3.1.6-py3-none-any.whl → 3.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
foscat/backend_tens.py CHANGED
@@ -1,49 +1,62 @@
+import sys
+
 import tensorflow as tf
-import numpy as np
+
 
 class foscat_backend_tens:
-
-    def __init__(self,backend):
-
-        self.bk=backend
+
+    def __init__(self, backend):
+
+        self.bk = backend
+
     # ---------------------------------------------−---------
-
+
     @tf.function
-    def loss(self,x,batch,loss_function):
+    def loss(self, x, batch, loss_function):
 
-        operation=loss_function.scat_operator
+        operation = loss_function.scat_operator
 
-        nx=1
-        if len(x.shape)>1:
-            nx=x.shape[0]
-
-        with tf.device(operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]):
-            print('%s Run [PROC=%04d] on GPU %s'%(loss_function.name,self.mpi_rank,
-                  operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]))
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
+
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
             sys.stdout.flush()
 
-            l_x=x
+            l_x = x
             """
             if nx>1:
                 l_x={}
                 for i in range(nx):
             """
-
-            if nx==1:
-                ndata=x.shape[0]
+
+            if nx == 1:
+                ndata = x.shape[0]
             else:
-                ndata=x.shape[0]*x.shape[1]
-
+                ndata = x.shape[0] * x.shape[1]
+
             if self.KEEP_TRACK is not None:
-                l,linfo=loss_function.eval(l_x,batch,return_all=True)
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
             else:
-                l=loss_function.eval(l_x,batch)
-
-            g=tf.gradients(l,x)[0]
-            g=self.backend.check_dense(g,ndata)
-            self.curr_gpu=self.curr_gpu+1
-
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.backend.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
         if self.KEEP_TRACK is not None:
-            return l,g,linfo
+            return l_loss, g, linfo
         else:
-            return l,g
+            return l_loss, g
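The refactored method keeps the same graph-mode autodiff pattern: tf.gradients only works inside a graph context, which is why loss is decorated with @tf.function. A minimal standalone sketch of that pattern, using a hypothetical quadratic loss in place of foscat's loss_function.eval:

import tensorflow as tf

@tf.function
def loss_and_grad(x):
    # toy quadratic loss; tf.gradients is graph-only, hence @tf.function
    l_loss = tf.reduce_sum((x - 1.0) ** 2)
    g = tf.gradients(l_loss, x)[0]
    return l_loss, g

l_loss, g = loss_and_grad(tf.constant([0.0, 2.0, 4.0]))
# l_loss = 11.0, g = 2 * (x - 1) = [-2., 2., 6.]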
@@ -1,60 +1,70 @@
-import tensorflow as tf
-import numpy as np
 import sys
 
+import tensorflow as tf
+
+
 class loss_backend:
-
-    def __init__(self,backend,curr_gpu,mpi_rank):
-
-        self.bk=backend
-        self.curr_gpu=curr_gpu
-        self.mpi_rank=mpi_rank
-
-
-    def check_dense(self,data,datasz):
+
+    def __init__(self, backend, curr_gpu, mpi_rank):
+
+        self.bk = backend
+        self.curr_gpu = curr_gpu
+        self.mpi_rank = mpi_rank
+
+    def check_dense(self, data, datasz):
         if isinstance(data, tf.Tensor):
             return data
-
+
         return data.to_dense()
-
+
     # ---------------------------------------------−---------
-
+
     @tf.function
-    def loss(self,x,batch,loss_function,KEEP_TRACK):
+    def loss(self, x, batch, loss_function, KEEP_TRACK):
 
-        operation=loss_function.scat_operator
+        operation = loss_function.scat_operator
 
-        nx=1
-        if len(x.shape)>1:
-            nx=x.shape[0]
-
-        with tf.device(operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]):
-            print('%s Run [PROC=%04d] on GPU %s'%(loss_function.name,self.mpi_rank,
-                  operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]))
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
+
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
             sys.stdout.flush()
 
-            l_x=x
+            l_x = x
             """
             if nx>1:
                 l_x={}
                 for i in range(nx):
             """
-
-            if nx==1:
-                ndata=x.shape[0]
+
+            if nx == 1:
+                ndata = x.shape[0]
             else:
-                ndata=x.shape[0]*x.shape[1]
-
+                ndata = x.shape[0] * x.shape[1]
+
             if KEEP_TRACK is not None:
-                l,linfo=loss_function.eval(l_x,batch,return_all=True)
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
             else:
-                l=loss_function.eval(l_x,batch)
-
-            g=tf.gradients(l,x)[0]
-            g=self.check_dense(g,ndata)
-            self.curr_gpu=self.curr_gpu+1
-
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
         if KEEP_TRACK is not None:
-            return l,g,linfo
+            return l_loss, g, linfo
         else:
-            return l,g
+            return l_loss, g
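Both TensorFlow variants pick the device for each call with the same round-robin index, (gpupos + curr_gpu) % ngpu, bumping curr_gpu after every evaluation so successive losses land on successive GPUs. A standalone sketch of that scheme (the gpulist contents and the run_round_robin helper are illustrative, not part of foscat's API):

import tensorflow as tf

gpulist = ["/GPU:0", "/GPU:1"]  # hypothetical device names
ngpu, gpupos, curr_gpu = len(gpulist), 0, 0

def run_round_robin(fn, *args):
    # same modulo scheme as loss(): rotate through the available devices
    global curr_gpu
    with tf.device(gpulist[(gpupos + curr_gpu) % ngpu]):
        out = fn(*args)
    curr_gpu += 1
    return out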
@@ -1,18 +1,15 @@
1
1
  import torch
2
- from torch.autograd import grad
3
- import numpy as np
4
- import sys
2
+
5
3
 
6
4
  class loss_backend:
7
-
8
- def __init__(self,backend,curr_gpu,mpi_rank):
9
-
10
- self.bk=backend
11
- self.curr_gpu=curr_gpu
12
- self.mpi_rank=mpi_rank
13
-
14
-
15
- def check_dense(self,data,datasz):
5
+
6
+ def __init__(self, backend, curr_gpu, mpi_rank):
7
+
8
+ self.bk = backend
9
+ self.curr_gpu = curr_gpu
10
+ self.mpi_rank = mpi_rank
11
+
12
+ def check_dense(self, data, datasz):
16
13
  if isinstance(data, torch.Tensor):
17
14
  return data
18
15
  """
@@ -21,64 +18,42 @@ class loss_backend:
21
18
  minlength=datasz)
22
19
  """
23
20
  return data
24
-
25
- # ---------------------------------------------−---------
26
21
 
27
- def loss(self,x,batch,loss_function,KEEP_TRACK):
22
+ # ---------------------------------------------−---------
28
23
 
29
- operation=loss_function.scat_operator
24
+ def loss(self, x, batch, loss_function, KEEP_TRACK):
30
25
 
31
- nx=1
32
- if len(x.shape)>1:
33
- nx=x.shape[0]
26
+ operation = loss_function.scat_operator
34
27
 
35
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
36
28
  if torch.cuda.is_available():
37
- with torch.cuda.device((operation.gpupos+self.curr_gpu)%operation.ngpu):
29
+ with torch.cuda.device((operation.gpupos + self.curr_gpu) % operation.ngpu):
30
+
31
+ l_x = x.clone().detach().requires_grad_(True)
38
32
 
39
- l_x=x.clone().detach().requires_grad_(True)
40
-
41
- if nx==1:
42
- ndata=x.shape[0]
43
- else:
44
- ndata=x.shape[0]*x.shape[1]
45
-
46
33
  if KEEP_TRACK is not None:
47
- l,linfo=loss_function.eval(l_x,batch,return_all=True)
34
+ l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
48
35
  else:
49
- l=loss_function.eval(l_x,batch)
36
+ l_loss = loss_function.eval(l_x, batch)
50
37
 
51
- l.backward()
52
-
53
- g=l_x.grad
54
-
55
- self.curr_gpu=self.curr_gpu+1
38
+ l_loss.backward()
39
+
40
+ g = l_x.grad
41
+
42
+ self.curr_gpu = self.curr_gpu + 1
56
43
  else:
57
- l_x=x.clone().detach().requires_grad_(True)
44
+ l_x = x.clone().detach().requires_grad_(True)
58
45
 
59
- if nx==1:
60
- ndata=x.shape[0]
61
- else:
62
- ndata=x.shape[0]*x.shape[1]
63
46
 
64
47
  if KEEP_TRACK is not None:
65
- l,linfo=loss_function.eval(l_x,batch,return_all=True)
48
+ l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
66
49
  else:
67
- """
68
- sx=operation.eval(l_x)
69
- tmp=(sx.C01-1.0)
70
-
71
- l=operation.backend.bk_reduce_sum(tmp*tmp) #loss_function.eval(l_x,batch)
72
- """
73
-
74
- l=loss_function.eval(l_x,batch)
75
-
76
- l.backward()
77
-
78
- g=l_x.grad
79
-
50
+ l_loss = loss_function.eval(l_x, batch)
51
+
52
+ l_loss.backward()
53
+
54
+ g = l_x.grad
55
+
80
56
  if KEEP_TRACK is not None:
81
- return l.detach(),g,linfo
57
+ return l_loss.detach(), g, linfo
82
58
  else:
83
- return l.detach(),g
84
-
59
+ return l_loss.detach(), g