foscat 3.0.8__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
foscat/backend_tens.py ADDED
@@ -0,0 +1,63 @@
+import sys
+
+import tensorflow as tf
+
+
+class foscat_backend_tens:
+
+    def __init__(self, backend):
+
+        self.bk = backend
+
+    # ---------------------------------------------−---------
+
+    @tf.function
+    def loss(self, x, batch, loss_function):
+
+        operation = loss_function.scat_operator
+
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
+
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run %d [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    loss_function.id_loss,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
+            sys.stdout.flush()
+
+            l_x = x
+            """
+            if nx>1:
+                l_x={}
+                for i in range(nx):
+            """
+
+            if nx == 1:
+                ndata = x.shape[0]
+            else:
+                ndata = x.shape[0] * x.shape[1]
+
+            if self.KEEP_TRACK is not None:
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
+            else:
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.backend.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
+            if self.KEEP_TRACK is not None:
+                return l_loss, g, linfo
+            else:
+                return l_loss, g
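Note: the added loss method takes gradients with tf.gradients, which only works in graph mode; the @tf.function decorator supplies that graph context. A minimal sketch of the same pattern, assuming an illustrative quadratic loss (none of the names below are part of the foscat API):

import tensorflow as tf

@tf.function
def loss_and_grad(x):
    # tf.gradients requires graph mode; @tf.function traces this into a graph.
    loss = tf.reduce_sum(tf.square(x))
    return loss, tf.gradients(loss, x)[0]

l, g = loss_and_grad(tf.constant([1.0, 2.0, 3.0]))
# l == 14.0, g == [2.0, 4.0, 6.0]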
@@ -1,49 +1,70 @@
+import sys
+
 import tensorflow as tf
-import numpy as np
+
 
 class loss_backend:
-
-    def __init__(self,backend):
-
-        self.bk=backend
+
+    def __init__(self, backend, curr_gpu, mpi_rank):
+
+        self.bk = backend
+        self.curr_gpu = curr_gpu
+        self.mpi_rank = mpi_rank
+
+    def check_dense(self, data, datasz):
+        if isinstance(data, tf.Tensor):
+            return data
+
+        return data.to_dense()
+
     # ---------------------------------------------−---------
-
+
     @tf.function
-    def loss(self,x,batch,loss_function):
+    def loss(self, x, batch, loss_function, KEEP_TRACK):
+
+        operation = loss_function.scat_operator
 
-        operation=loss_function.scat_operator
+        nx = 1
+        if len(x.shape) > 1:
+            nx = x.shape[0]
 
-        nx=1
-        if len(x.shape)>1:
-            nx=x.shape[0]
-
-        with tf.device(operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]):
-            print('%s Run [PROC=%04d] on GPU %s'%(loss_function.name,self.mpi_rank,
-                  operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]))
+        with tf.device(
+            operation.gpulist[(operation.gpupos + self.curr_gpu) % operation.ngpu]
+        ):
+            print(
+                "%s Run [PROC=%04d] on GPU %s"
+                % (
+                    loss_function.name,
+                    self.mpi_rank,
+                    operation.gpulist[
+                        (operation.gpupos + self.curr_gpu) % operation.ngpu
+                    ],
+                )
+            )
             sys.stdout.flush()
 
-            l_x=x
+            l_x = x
             """
             if nx>1:
                 l_x={}
                 for i in range(nx):
             """
-
-            if nx==1:
-                ndata=x.shape[0]
+
+            if nx == 1:
+                ndata = x.shape[0]
             else:
-                ndata=x.shape[0]*x.shape[1]
-
-            if self.KEEP_TRACK is not None:
-                l,linfo=loss_function.eval(l_x,batch,return_all=True)
+                ndata = x.shape[0] * x.shape[1]
+
+            if KEEP_TRACK is not None:
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
             else:
-                l=loss_function.eval(l_x,batch)
-
-            g=tf.gradients(l,x)[0]
-            g=self.backend.check_dense(g,ndata)
-            self.curr_gpu=self.curr_gpu+1
-
-            if self.KEEP_TRACK is not None:
-                return l,g,linfo
+                l_loss = loss_function.eval(l_x, batch)
+
+            g = tf.gradients(l_loss, x)[0]
+            g = self.check_dense(g, ndata)
+            self.curr_gpu = self.curr_gpu + 1
+
+            if KEEP_TRACK is not None:
+                return l_loss, g, linfo
             else:
-                return l,g
+                return l_loss, g
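Note: the new check_dense passes ordinary tensors through and densifies anything else. A TensorFlow gradient can come back sparse when the loss only touches part of the input, most commonly through tf.gather; a minimal sketch of that case, using tf.convert_to_tensor as one way to densify (which is not necessarily the object foscat's .to_dense() call sees):

import tensorflow as tf

params = tf.Variable([1.0, 2.0, 3.0, 4.0])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(tf.gather(params, [0, 2]))
g = tape.gradient(y, params)       # tf.IndexedSlices: only rows 0 and 2 carry gradient
g_dense = tf.convert_to_tensor(g)  # [1.0, 0.0, 1.0, 0.0]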
@@ -1,49 +1,58 @@
-import tensorflow as tf
-import numpy as np
+import torch
+
 
 class loss_backend:
-
-    def __init__(self,backend):
-
-        self.bk=backend
+
+    def __init__(self, backend, curr_gpu, mpi_rank):
+
+        self.bk = backend
+        self.curr_gpu = curr_gpu
+        self.mpi_rank = mpi_rank
+
+    def check_dense(self, data, datasz):
+        if isinstance(data, torch.Tensor):
+            return data
+        """
+        idx=tf.cast(data.indices, tf.int32)
+        data=tf.math.bincount(idx,weights=data.values,
+                              minlength=datasz)
+        """
+        return data
+
     # ---------------------------------------------−---------
-
-    @tf.function
-    def loss(self,x,batch,loss_function):
-
-        operation=loss_function.scat_operator
-
-        nx=1
-        if len(x.shape)>1:
-            nx=x.shape[0]
-
-        with tf.device(operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]):
-            print('%s Run [PROC=%04d] on GPU %s'%(loss_function.name,self.mpi_rank,
-                  operation.gpulist[(operation.gpupos+self.curr_gpu)%operation.ngpu]))
-            sys.stdout.flush()
-
-            l_x=x
-            """
-            if nx>1:
-                l_x={}
-                for i in range(nx):
-            """
-
-            if nx==1:
-                ndata=x.shape[0]
-            else:
-                ndata=x.shape[0]*x.shape[1]
-
-            if self.KEEP_TRACK is not None:
-                l,linfo=loss_function.eval(l_x,batch,return_all=True)
+
+    def loss(self, x, batch, loss_function, KEEP_TRACK):
+
+        operation = loss_function.scat_operator
+
+        if torch.cuda.is_available():
+            with torch.cuda.device((operation.gpupos + self.curr_gpu) % operation.ngpu):
+
+                l_x = x.clone().detach().requires_grad_(True)
+
+                if KEEP_TRACK is not None:
+                    l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
+                else:
+                    l_loss = loss_function.eval(l_x, batch)
+
+                l_loss.backward()
+
+                g = l_x.grad
+
+                self.curr_gpu = self.curr_gpu + 1
+        else:
+            l_x = x.clone().detach().requires_grad_(True)
+
+            if KEEP_TRACK is not None:
+                l_loss, linfo = loss_function.eval(l_x, batch, return_all=True)
             else:
-                l=loss_function.eval(l_x,batch)
-
-            g=tf.gradients(l,x)[0]
-            g=self.backend.check_dense(g,ndata)
-            self.curr_gpu=self.curr_gpu+1
-
-            if self.KEEP_TRACK is not None:
-                return l,g,linfo
+                l_loss = loss_function.eval(l_x, batch)
+
+            l_loss.backward()
+
+            g = l_x.grad
+
+        if KEEP_TRACK is not None:
+            return l_loss.detach(), g, linfo
         else:
-                return l,g
+            return l_loss.detach(), g
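Note: the torch version replaces tf.gradients with standard autograd: clone the input into a leaf tensor, evaluate the loss, call backward(), and read the gradient off the leaf's .grad. The same pattern in isolation, with an illustrative quadratic loss rather than the foscat one:

import torch

def loss_and_grad(x):
    # Leaf copy so autograd records gradients for it, as in the new backend.
    l_x = x.clone().detach().requires_grad_(True)
    loss = (l_x ** 2).sum()
    loss.backward()
    return loss.detach(), l_x.grad

l, g = loss_and_grad(torch.tensor([1.0, 2.0, 3.0]))
# l == 14.0, g == [2.0, 4.0, 6.0]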