sopy-quantum 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025-Present Jonathan Jerke at Quantum Galaxies Corporation
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,31 @@
1
+ Metadata-Version: 2.4
2
+ Name: sopy-quantum
3
+ Version: 1.0.0
4
+ Summary: Representation and Decomposition with Sums of Product for Operations in separated dimensions
5
+ Author-email: Jonathan Jerke <jonathan@quantumgalaxies.org>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://sopy.quantumgalaxies.org
8
+ Project-URL: Issues, https://github.com/quantumgalaxies/sopy/issues
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Intended Audience :: Science/Research
12
+ Classifier: Intended Audience :: Developers
13
+ Requires-Python: >=3.12
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE.txt
16
+ Requires-Dist: tensorflow>=2.16.1
17
+ Requires-Dist: scikit-learn>=1.6.1
18
+ Dynamic: license-file
19
+
20
+ # SoPy
21
+
22
+ ## Representation and Decomposition with Sums of Product for Operations in separated dimensions
23
+
24
+ ### How to Contribute
25
+ * Wrap work in a class for user friendliness
26
+ * Write to disk/database/json
27
+ * Develop amplitude/component to various non-local resources
28
+ * Engage with Quantum Galaxies deploying matrices in separated dimensions
29
+
30
+ ### Contact Info
31
+ sopy.quantumgalaxies.org
@@ -0,0 +1,12 @@
1
+ # SoPy
2
+
3
+ ## Representation and Decomposition with Sums of Product for Operations in separated dimensions
4
+
5
+ ### How to Contribute
6
+ * Wrap work in a class for user friendliness
7
+ * Write to disk/database/json
8
+ * Develop amplitude/component to various non-local resources
9
+ * Engage with Quantum Galaxies deploying matrices in separated dimensions
10
+
11
+ ### Contact Info
12
+ sopy.quantumgalaxies.org
@@ -0,0 +1,27 @@
1
+ [build-system]
2
+ requires = ["setuptools >= 77.0.3"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "sopy-quantum"
7
+ version = "1.0.0"
8
+ authors = [
9
+ { name="Jonathan Jerke", email="jonathan@quantumgalaxies.org" },
10
+ ]
11
+ description = "Representation and Decomposition with Sums of Product for Operations in separated dimensions"
12
+ readme = "README.md"
13
+ requires-python = ">=3.12"
14
+ classifiers = [
15
+ "Programming Language :: Python :: 3",
16
+ "Operating System :: OS Independent",
17
+ 'Intended Audience :: Science/Research',
18
+ 'Intended Audience :: Developers',
19
+ ]
20
+ dependencies = ['tensorflow>=2.16.1','scikit-learn>=1.6.1']
21
+ dynamic = []
22
+ license = "MIT"
23
+ license-files = ["LICENSE.txt"]
24
+
25
+ [project.urls]
26
+ Homepage = "https://sopy.quantumgalaxies.org"
27
+ Issues = "https://github.com/quantumgalaxies/sopy/issues"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,3 @@
1
+ from .component.component import component
2
+ from .amplitude.amplitude import amplitude
3
+ from .vector.vector import vector
@@ -0,0 +1 @@
1
+ from . import amplitude
@@ -0,0 +1,109 @@
1
+ #################### ##########################
2
+ ################ SoPy ##################
3
+ #################### ##########################
4
+
5
+ ################################################
6
+ ### by Quantum Galaxies Corp ###
7
+ ## (2023,2024,2025) ##
8
+ ################################################
9
+
10
+
11
+ import tensorflow as tf
12
+
13
class amplitude:
    """
    The d = 0 (weight) factor of a sum-of-products decomposition.

    Holds a rank-2 tensor of shape (canonRanks, 1) whose entries are the
    scalar weights multiplying each product term.
    """

    def __init__(self, a=1, contents=None):
        """
        Parameters
        ----------
        a : scalar weight used when no explicit contents are given.
        contents : optional tensor or list of shape (ranks, 1).  The legacy
            sentinel ``[[]]`` is still accepted and treated as "unset".
        """
        # BUG FIX: the original used a shared mutable default (contents=[[]])
        # and compared `contents == [[]]`, which is ambiguous (elementwise)
        # when a tensor or a list containing tensors is passed in.
        unset = (contents is None
                 or (isinstance(contents, list) and len(contents) == 1
                     and isinstance(contents[0], list) and len(contents[0]) == 0))
        if unset:
            self.contents = tf.constant([[a]], dtype=tf.float64)
        else:
            self.contents = contents

    def boost(self):
        # Amplitudes carry no spatial structure, so boosting is a no-op.
        return self

    def set_boost(self, transform=None):
        # No-op: kept so amplitude and component share a common interface.
        return self

    def unboost(self):
        # No-op counterpart to boost().
        return self

    def copy(self):
        """Return a new amplitude sharing the same contents tensor."""
        return amplitude(contents=self.contents)

    def __len__(self):
        """
        For an SoP like (canonRanks) * N*[R], return canonRanks — the
        number of product terms; 0 when contents has no length.
        """
        try:
            return len(self.values())
        except TypeError:
            # len() on an unsized object (e.g. a scalar tensor).
            return 0

    def normalize(self):
        """Replace contents with the per-row Euclidean norms of contents."""
        self.contents = tf.linalg.normalize(self.values(), axis=1)[1]
        return self

    def sample(self, num_samples):
        """
        Draw `num_samples` rank indices with probability proportional to
        the (shifted, normalized) amplitude values.

        Parameters
        ----------
        num_samples : number of indices to draw.

        Returns
        -------
        Tensor of sampled rank indices.
        """

        def discrete_inverse_transform_sampling(pdf_values):
            """
            Inverse-transform sampling over a discrete PDF.
            (advised by Gemini)
            """
            # Shift so the minimum is zero, then normalize to a PDF.
            pdf_values -= tf.math.reduce_min(pdf_values, axis=0)
            pdf_values = tf.math.abs(pdf_values) / tf.math.abs(tf.math.reduce_sum(pdf_values))
            # Cumulative distribution, flattened for searchsorted.
            cdf = tf.reshape(tf.math.cumsum(pdf_values), [-1])
            # Uniform draws, then inverse lookup via binary search.
            uniform_samples = tf.random.uniform(shape=(num_samples,), dtype=tf.float64)
            return tf.searchsorted(cdf, uniform_samples)

        return tf.convert_to_tensor(discrete_inverse_transform_sampling(self.values()))

    def __getitem__(self, r):
        # Single-rank view; returns None when r is out of range
        # (original behavior preserved).
        if r < len(self):
            return amplitude(contents=[self.contents[r]])

    def __imul__(self, m):
        """Scale all weights by m, in place."""
        self.contents *= m
        return self

    def values(self):
        """Return the raw contents."""
        return self.contents
@@ -0,0 +1 @@
1
+ from . import component
@@ -0,0 +1,154 @@
1
+ #################### ##########################
2
+ ################ SoPy ##################
3
+ #################### ##########################
4
+
5
+ ################################################
6
+ ### by Quantum Galaxies Corp ###
7
+ ## (2023,2024,2025) ##
8
+ ################################################
9
+
10
+
11
+ import tensorflow as tf
12
pi2 = 6.283185307179586476925286766559005768394338798  # 2*pi to full float64 precision

class component:
    """
    A spatial (d > 0) factor of a sum-of-products decomposition.

    Attributes
    ----------
    contents : tensor/list of shape (canonRanks, len(lattice)) — one row
        per product term, sampled on `lattice`.
    transform : orthogonal basis set via set_boost(), used by
        boost()/unboost(); the legacy sentinel [[]] means "unset".
    lattice : grid of positions the rows are sampled on.
    """

    def __init__(self, lattice, contents=None, transform=None):
        """Store lattice, contents and transform.

        NOTE: the original used shared mutable defaults ([[]]); fresh
        sentinels are created per instance instead, preserving the
        legacy [[]] value for callers that test it.
        """
        self.contents = [[]] if contents is None else contents
        self.transform = [[]] if transform is None else transform
        self.lattice = lattice

    def copy(self):
        """Shallow copy sharing the underlying tensors."""
        return component(lattice=self.lattice, contents=self.contents, transform=self.transform)

    def __len__(self):
        """
        For an SoP like (canonRanks) * N*[R], return canonRanks — the
        number of product terms; 0 when contents has no length.
        """
        try:
            return len(self.contents)
        except TypeError:
            return 0

    def inner(self, other):
        """
        Inner product on the naturalized collective space.

        Returns the (len(self), len(other)) Gram matrix across canon ranks.
        """
        assert isinstance(other, component)
        u = self.values()
        v = other.values()
        return tf.linalg.matmul(u, v, transpose_a=False, transpose_b=True)

    def normalize(self):
        """Scale each rank (row) to unit Euclidean norm, in place."""
        self.contents = tf.transpose(tf.linalg.normalize(tf.transpose(self.contents), axis=0)[0])
        return self

    def amplitude(self):
        """Return the per-rank Euclidean norms of the rows."""
        return tf.linalg.normalize(self.contents, axis=1)[1]

    def add(self, other):
        """Append the product terms of `other` after this component's, in place."""
        assert isinstance(other, component)
        u = self.values()
        v = other.values()
        # BUG FIX: the original read u[space]/v[space] with `space`
        # undefined, raising NameError; concatenate the full rank
        # blocks along axis 0 instead.
        self.contents = tf.concat([u, v], 0)
        return self

    def set_boost(self, transform=None):
        """
        Set the orthogonal transform used by boost()/unboost().

        With no argument (or the legacy [[]] sentinel) the transform is the
        Q factor of a thin QR decomposition of the transposed contents.
        """
        # BUG FIX: the original compared `transform == [[]]`, which is
        # elementwise/ambiguous when a tensor is passed in.
        unset = (transform is None
                 or (isinstance(transform, list) and len(transform) == 1
                     and isinstance(transform[0], list) and len(transform[0]) == 0))
        if unset:
            u = self.values()
            q, r = tf.linalg.qr(tf.transpose(u), full_matrices=False)
            self.transform = q
        else:
            self.transform = transform
        return self

    def boost(self):
        """Rotate contents into the transform basis (rows -> Q^T rows), in place."""
        u = self.values()
        self.contents = tf.transpose(tf.linalg.matmul(tf.transpose(self.transform), tf.transpose(u)))
        return self

    def unboost(self):
        """Rotate contents back out of the transform basis, in place."""
        u = self.values()
        self.contents = tf.transpose(tf.linalg.matmul(self.transform, tf.transpose(u)))
        return self

    def len(self):
        """Number of lattice points (columns) — distinct from __len__ (ranks)."""
        return len(tf.transpose(self.contents))

    def __getitem__(self, r):
        # Single-rank view; returns None when r is out of range
        # (original behavior preserved).
        if r < len(self):
            return component(lattice=self.lattice, contents=[self.contents[r]], transform=self.transform)

    def sample(self, sample_rank, num_samples=1):
        """
        Draw lattice positions distributed like the |values| of one rank.

        Parameters
        ----------
        sample_rank : index of the rank (row) to sample from.
        num_samples : number of positions to draw.

        Returns
        -------
        Tensor of sampled lattice positions.
        """
        u = self.values()

        def discrete_inverse_transform_sampling(pdf_values, pdf_domain):
            """
            Inverse-transform sampling over a discrete PDF.
            (advised by Gemini)
            """
            # Shift so the minimum is zero, then normalize to a PDF.
            pdf_values -= tf.math.reduce_min(pdf_values, axis=0)
            pdf_values = tf.math.abs(pdf_values) / tf.math.abs(tf.math.reduce_sum(pdf_values))
            # Cumulative distribution, flattened for searchsorted.
            cdf = tf.reshape(tf.math.cumsum(pdf_values), [-1])
            uniform_samples = tf.random.uniform(shape=(num_samples,), dtype=tf.float64)
            sampled_value = tf.searchsorted(cdf, uniform_samples)
            # NOTE(review): assumes lattice supports tensor indexing
            # (e.g. a tf/np array, not a plain list) — TODO confirm.
            return pdf_domain[sampled_value]

        return tf.convert_to_tensor(discrete_inverse_transform_sampling(u[sample_rank], self.lattice))

    def gaussian(self, position, sigma, l: int = 0):
        """
        Set contents to a single-rank Gaussian, times (x - position)**l,
        evaluated on the lattice; returns self for chaining.
        """
        position = tf.constant(position, dtype=tf.float64)
        sigma = tf.constant(sigma, dtype=tf.float64)

        self.contents = tf.convert_to_tensor([[ tf.math.sqrt( 1. / pi2 / sigma**2 ) * (tf.math.exp( -1/2. * ( x - position )**2/sigma**2 ))*(x-position)**l for x in self.lattice ]])
        return self

    def delta(self, position, spacing):
        """
        Set contents to a single-rank band-limited delta (sinc) centered
        at `position`; returns self for chaining.
        """
        position = tf.constant(position, dtype=tf.float64)
        spacing = tf.constant(spacing, dtype=tf.float64)

        # sinc is 1 at the center by continuity (avoids 0/0).
        self.contents = tf.convert_to_tensor([[ (tf.math.sin( pi2/2. * ( x - position )/ spacing )/( pi2/2. * ( x - position )/ spacing ) if x != position else 1 ) for x in self.lattice ]])
        return self

    def values(self):
        """Return the raw contents."""
        return self.contents
154
+
@@ -0,0 +1 @@
1
+ from . import vector
@@ -0,0 +1,249 @@
1
+ #################### ##########################
2
+ ################ SoPy ##################
3
+ #################### ##########################
4
+
5
+ ################################################
6
+ ### by Quantum Galaxies Corp ###
7
+ ## (2023,2024,2025) ##
8
+ ################################################
9
+
10
+ from .. import amplitude
11
+ from .. import component
12
+ import tensorflow as tf
13
+ from sklearn.cluster import KMeans
14
class vector:
    """
    A sum-of-products (SoP) state in separated dimensions.

    `contents` is a list of canonRanks terms; each term is a list
    [amplitude, component_1, ..., component_N] — dim 0 holds the weight.
    """

    def __init__(self):
        self.contents = []

    def __len__(self):
        """Number of product terms (canonRanks)."""
        return len(self.contents)

    def dist(self, other):
        """Euclidean distance between the two SoP states."""
        return tf.math.sqrt(tf.math.abs(self.dot(self) + other.dot(other) - self.dot(other) - other.dot(self)))

    def ld1(self):
        """L1 norm of the amplitude (dim-0) weights."""
        return tf.math.reduce_sum(tf.math.abs(self[0]))

    def ld2(self):
        """L2 norm of the amplitude (dim-0) weights."""
        return tf.math.sqrt(tf.math.reduce_sum(tf.math.abs(self[0])**2))

    def n(self):
        """Euclidean norm sqrt(<self, self>)."""
        return tf.math.sqrt(self.dot(self))

    def boost(self):
        """Return a new vector with every spatial dim rotated into its QR basis."""
        # transforms[0] is a placeholder for dim 0: the amplitude's
        # set_boost ignores its argument.
        transforms = [[]] + [component(contents=self[d], lattice=self.contents[0][d].lattice).set_boost().transform for d in self.dims(True)]
        new = vector()
        for r in range(len(self)):
            new.contents += [[self.contents[r][d].copy().set_boost(transform=transforms[d]).boost() for d in self.dims(False)]]
        return new

    def unboost(self):
        """Return a new vector with every factor rotated back to the lattice basis."""
        new = vector()
        for r in range(len(self)):
            new.contents += [[self.contents[r][d].copy().unboost() for d in self.dims(False)]]
        return new

    def dot(self, other, norm_=False, exclude_dim: int = -1, sum_=True):
        """
        Euclidean inner product between SoP states in [R]**N, same as
        (canonRanks) * N*[R].

        Parameters
        ----------
        norm_ : when True, skip dim 0 (the amplitude weights).
        exclude_dim : a dimension to leave out of the product; -1 for none.
        sum_ : when True return the scalar sum; otherwise the
            rank-by-rank overlap matrix.
        """
        assert isinstance(other, vector)

        def innert(vector1, vector2):
            # Hadamard (elementwise) product of per-dimension Gram matrices.
            uv = []
            for dim in vector1.dims(norm_):
                if dim != exclude_dim:
                    uv += [tf.matmul(vector1[dim], vector2[dim], transpose_b=True)]
            return tf.math.reduce_prod(tf.convert_to_tensor(uv), axis=0)

        uv = innert(self, other)
        if sum_:
            return tf.math.reduce_sum(uv)
        else:
            return tf.convert_to_tensor(uv)

    def copy(self, norm_=False):
        """
        Copy every term.

        norm_ : when True the spatial factors are normalized before copying.
            NOTE(review): normalize() mutates self's components in place
            before the copy — confirm this is intended.
        """
        other = vector()
        for r in range(len(self)):
            contents = [self.contents[r][0].copy()]
            for d in self.dims(True):
                if norm_:
                    contents += [self.contents[r][d].normalize().copy()]
                else:
                    contents += [self.contents[r][d].copy()]
            other.contents += [contents]
        return other

    def mul(self, m, norm_=False):
        """Return a copy with every amplitude scaled by m (norm_ is currently unused)."""
        other = self.copy()
        for r in range(len(self)):
            other.contents[r][0] *= m
        return other

    def learn(self, other, iterate=0, alpha=1e-9):
        """
        Fit this vector's ranks to reproduce `other` by regularized
        alternating least squares; recurses `iterate` extra rounds.

        Parameters
        ----------
        other : the origin vector to fit.
        iterate : number of additional refinement rounds.
        alpha : Tikhonov regularization added to the normal equations.
        """
        assert isinstance(other, vector)
        u = self    # trainee
        v = other   # origin
        eye = tf.linalg.eye(len(u), dtype=tf.float64)
        q = vector()
        # Per target dimension: solve (U'U + alpha I)^-1 U'V (v0 * v_dim),
        # the regularized normal equations with that dimension excluded.
        comps = [[]] + [component(contents=tf.linalg.matmul(
            tf.linalg.inv(u.dot(u, norm_=True, exclude_dim=target_dim, sum_=False) + alpha*eye),
            tf.linalg.matmul(u.dot(v, norm_=True, exclude_dim=target_dim, sum_=False),
                             tf.multiply(v[0], v[target_dim]), transpose_b=False))
            , lattice=u.contents[0][target_dim].lattice, transform=self.contents[0][target_dim].transform
        ) for target_dim in u.dims(True)]
        # Average the per-dimension norms into one amplitude per rank.
        amps = amplitude(contents=1./len(u.dims(True))*tf.math.reduce_sum([comps[d].amplitude() for d in u.dims(True)], axis=0))
        q.contents = [[amps[r]] + [comps[d][r].normalize() for d in u.dims(True)] for r in range(len(u))]
        if iterate == 0:
            return q
        else:
            return q.learn(other, iterate - 1, alpha=alpha)

    def decompose(self, partition, iterate=0, alpha=1e-9):
        """Seed with the `partition` largest-|amplitude| ranks, then learn self."""
        new = self.max(partition)
        return new.learn(self, iterate=iterate, alpha=alpha)

    def fibonacci(self, partition, iterate=0, total_iterate=0, alpha=1e-9, total_alpha=1e-9):
        """Cluster ranks into `partition` groups, decompose each to rank 1, then refit jointly."""
        Y = vector()
        for like_ranks in self.set(partition=partition):
            Y += like_ranks.decompose(partition=1, alpha=alpha, iterate=iterate)
        return Y.learn(self, iterate=total_iterate, alpha=total_alpha)

    def dims(self, norm=True):
        """
        An iterator over the N dimensions of a term, where
        SoP is like (canonRanks) * N*[R].

        norm=True starts at 1 (skips the amplitude slot);
        norm=False includes dim 0 (the weights) as well.
        """
        return range(norm == True, len(self.contents[0]))

    def __getitem__(self, dim):
        """Stack the values of dimension `dim` across all ranks into one tensor."""
        return tf.concat([(self.contents)[r][dim].values() for r in range(len(self))], 0)

    def __imul__(self, m):
        """Scale every amplitude by m, in place."""
        for r in range(len(self)):
            self.contents[r][0] *= m
        return self

    def __add__(self, other):
        """Concatenate the ranks of both vectors into a new vector."""
        new = self.copy()
        new.contents += other.contents
        return new

    def __iadd__(self, other):
        # Rebinds to a fresh concatenated vector (does not mutate in place).
        return self + other

    def __isub__(self, other):
        return self - other

    def __sub__(self, other):
        """
        Set-like difference: drop from self every rank whose k-means
        cluster (over the overlap matrix with `other`) also contains one
        of other's ranks.  NOT arithmetic subtraction.
        """
        kmeans = KMeans(n_clusters=min(len(self), len(other)+1), random_state=42, n_init="auto")
        M = (self+other).dot(other, sum_=False)
        kmeans.fit(M)
        new = vector()
        for i in range(len(M)):
            # Keep self's rank i only if its cluster contains none of
            # other's ranks (which occupy labels[len(self):]).
            if kmeans.labels_[i] not in kmeans.labels_[len(self):]:
                new.contents += [self.contents[i]]
        return new

    def max(self, num=1):
        """Return a new vector holding the `num` ranks with largest |amplitude|."""
        new = vector()

        def modify_tensor(x, i):
            """Zero out entry (i, 0) so it is not selected again."""
            indices = [[i, 0]]
            # Match x's dtype explicitly (x is float64; a bare [0.0]
            # would default to float32 and mismatch).
            updates = tf.constant([0.0], dtype=x.dtype)
            return tf.tensor_scatter_nd_update(x, indices, updates)

        args = tf.math.abs(self[0])
        for n in range(min(len(self), num)):
            i = tf.math.argmax(args)[0]
            new.contents += [self.contents[i]]
            args = modify_tensor(args, i)
        return new

    def min(self, num=1):
        """Return a new vector holding the `num` ranks with smallest |amplitude|."""
        new = vector()

        def modify_tensor(x, i):
            """Raise entry (i, 0) to the maximum so it is not selected again."""
            indices = [[i, 0]]
            # BUG FIX: the original called tf.math.max, which does not
            # exist; use tf.math.reduce_max and match x's dtype/shape.
            updates = tf.reshape(tf.math.reduce_max(x), [1])
            return tf.tensor_scatter_nd_update(x, indices, updates)

        args = tf.math.abs(self[0])
        for n in range(min(len(self), num)):
            i = tf.math.argmin(args)[0]
            new.contents += [self.contents[i]]
            args = modify_tensor(args, i)
        return new

    def gaussian(self, a, positions, sigmas, ls, lattices):
        """Append one rank: weight `a` times a normalized Gaussian per dimension."""
        lens = [len(x) for x in [ls, positions, sigmas, lattices]]
        assert min(lens) == max(lens)  # all per-dimension lists must align
        v = [amplitude(a)]
        for d, (l, position, sigma, lattice) in enumerate(zip(ls, positions, sigmas, lattices)):
            v += [component(lattice=lattice).gaussian(position=position, sigma=sigma, l=l).normalize()]
        self.contents += [v]
        return self

    def set(self, partition):
        """Record the number of k-means clusters used when iterating."""
        self.partition = partition
        return self

    def __iter__(self):
        """
        Cluster ranks by their self-overlap matrix; iterating then yields
        one sub-vector per cluster label (see __next__).
        """
        kmeans = KMeans(n_clusters=self.partition, random_state=42, n_init="auto")
        kmeans.fit(self.dot(self, sum_=False))
        labels = kmeans.labels_
        new = self.copy()
        new.index = 0
        new.labels = labels
        return new

    def __next__(self):
        """Yield the sub-vector whose ranks carry the current cluster label."""
        new = vector()
        for index in range(len(self)):
            if self.labels[index] == self.index:
                new.contents += [self.contents[index]]
        self.index += 1
        # NOTE(review): an empty intermediate cluster label ends iteration
        # early — confirm this is intended.
        if len(new) == 0:
            raise StopIteration
        return new

    def delta(self, a, positions, spacings, lattices):
        """Append one rank: weight `a` times a normalized band-limited delta per dimension."""
        lens = [len(x) for x in [positions, spacings, lattices]]
        assert min(lens) == max(lens)  # all per-dimension lists must align
        v = [amplitude(a)]
        for d, (position, spacing, lattice) in enumerate(zip(positions, spacings, lattices)):
            v += [component(lattice=lattice).delta(position=position, spacing=spacing).normalize()]
        self.contents += [v]
        return self

    def sample(self, num_samples):
        """Draw ranks by amplitude, then one lattice position per dimension of each drawn rank."""
        sample_ranks = amplitude(contents=self[0]).sample(num_samples)
        return tf.convert_to_tensor([[self.contents[r][d].sample(sample_rank=0, num_samples=1) for d in self.dims()] for r in sample_ranks])

    def transpose(self, tl):
        """
        Build a vector from a dict of per-dimension blocks.

        tl : dict with integer keys; key 0 is the amplitude block, every
        other key is a per-dimension contents block (lattice defaults to
        a simple index range).
        """
        comps = [amplitude(contents=tl[0])] + [component(contents=tl[key], lattice=range(len(tl[key]))) for key in tl if key != 0]
        other = vector()
        other.contents = [[comps[d][r] for d in range(len(comps))] for r in range(len(comps[0]))]
        return other
246
+
247
+
248
+
249
+
@@ -0,0 +1,31 @@
1
+ Metadata-Version: 2.4
2
+ Name: sopy-quantum
3
+ Version: 1.0.0
4
+ Summary: Representation and Decomposition with Sums of Product for Operations in separated dimensions
5
+ Author-email: Jonathan Jerke <jonathan@quantumgalaxies.org>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://sopy.quantumgalaxies.org
8
+ Project-URL: Issues, https://github.com/quantumgalaxies/sopy/issues
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Intended Audience :: Science/Research
12
+ Classifier: Intended Audience :: Developers
13
+ Requires-Python: >=3.12
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE.txt
16
+ Requires-Dist: tensorflow>=2.16.1
17
+ Requires-Dist: scikit-learn>=1.6.1
18
+ Dynamic: license-file
19
+
20
+ # SoPy
21
+
22
+ ## Representation and Decomposition with Sums of Product for Operations in separated dimensions
23
+
24
+ ### How to Contribute
25
+ * Wrap work in a class for user friendliness
26
+ * Write to disk/database/json
27
+ * Develop amplitude/component to various non-local resources
28
+ * Engage with Quantum Galaxies deploying matrices in separated dimensions
29
+
30
+ ### Contact Info
31
+ sopy.quantumgalaxies.org
@@ -0,0 +1,15 @@
1
+ LICENSE.txt
2
+ README.md
3
+ pyproject.toml
4
+ sopy/__init__.py
5
+ sopy/amplitude/__init__.py
6
+ sopy/amplitude/amplitude.py
7
+ sopy/component/__init__.py
8
+ sopy/component/component.py
9
+ sopy/vector/__init__.py
10
+ sopy/vector/vector.py
11
+ sopy_quantum.egg-info/PKG-INFO
12
+ sopy_quantum.egg-info/SOURCES.txt
13
+ sopy_quantum.egg-info/dependency_links.txt
14
+ sopy_quantum.egg-info/requires.txt
15
+ sopy_quantum.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ tensorflow>=2.16.1
2
+ scikit-learn>=1.6.1