parallel-sparse-tools 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23) hide show
  1. parallel-sparse-tools-0.1.0/LICENSE +28 -0
  2. parallel-sparse-tools-0.1.0/PKG-INFO +22 -0
  3. parallel-sparse-tools-0.1.0/README.md +2 -0
  4. parallel-sparse-tools-0.1.0/pyproject.toml +78 -0
  5. parallel-sparse-tools-0.1.0/setup.cfg +4 -0
  6. parallel-sparse-tools-0.1.0/setup.py +108 -0
  7. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/__init__.py +0 -0
  8. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/__init__.py +1 -0
  9. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py +354 -0
  10. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/expm_multiply_parallel_wrapper.cpp +40499 -0
  11. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/generate_source.py +209 -0
  12. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/source/csr_utils.h +164 -0
  13. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/expm_multiply_parallel_core/source/expm_multiply_parallel_impl.h +314 -0
  14. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/matvec/__init__.py +9 -0
  15. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/matvec/_oputils/oputils_impl.h +955 -0
  16. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/matvec/_oputils.cpp +14436 -0
  17. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/matvec/generate_oputils.py +533 -0
  18. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools/matvec/matvec_core.py +95 -0
  19. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools.egg-info/PKG-INFO +22 -0
  20. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools.egg-info/SOURCES.txt +21 -0
  21. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools.egg-info/dependency_links.txt +1 -0
  22. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools.egg-info/requires.txt +2 -0
  23. parallel-sparse-tools-0.1.0/src/parallel_sparse_tools.egg-info/top_level.txt +1 -0
@@ -0,0 +1,28 @@
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2023, QuSpin
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions are met:
7
+
8
+ 1. Redistributions of source code must retain the above copyright notice, this
9
+ list of conditions and the following disclaimer.
10
+
11
+ 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ this list of conditions and the following disclaimer in the documentation
13
+ and/or other materials provided with the distribution.
14
+
15
+ 3. Neither the name of the copyright holder nor the names of its
16
+ contributors may be used to endorse or promote products derived from
17
+ this software without specific prior written permission.
18
+
19
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,22 @@
1
+ Metadata-Version: 2.1
2
+ Name: parallel-sparse-tools
3
+ Version: 0.1.0
4
+ Summary: My package description
5
+ Author-email: Phillip Weinberg <weinbe58@gmail.com>
6
+ License: BSD-3-Clause
7
+ Classifier: Development Status :: 3 - Alpha
8
+ Classifier: Operating System :: Microsoft :: Windows
9
+ Classifier: Operating System :: MacOS
10
+ Classifier: Operating System :: POSIX :: Linux
11
+ Classifier: Programming Language :: Python :: 3.9
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Requires-Python: <3.13,>=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE
18
+ Requires-Dist: numpy
19
+ Requires-Dist: scipy
20
+
21
+ # parallel-sparse-tools
22
+ Repository for OpenMP parallelized Sparse tools
@@ -0,0 +1,2 @@
1
+ # parallel-sparse-tools
2
+ Repository for OpenMP parallelized Sparse tools
@@ -0,0 +1,78 @@
1
+ [build-system]
2
+ requires = ["setuptools", "wheel", "Cython", "numpy"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "parallel-sparse-tools"
7
+ version = "0.1.0"
8
+ authors = [
9
+ {name = "Phillip Weinberg", email = "weinbe58@gmail.com"},
10
+ ]
11
+ description = "My package description"
12
+ readme = "README.md"
13
+ requires-python = ">=3.9,<3.13"
14
+ license = {text = "BSD-3-Clause"}
15
+ classifiers = [
16
+ "Development Status :: 3 - Alpha",
17
+ "Operating System :: Microsoft :: Windows",
18
+ "Operating System :: MacOS",
19
+ "Operating System :: POSIX :: Linux",
20
+ "Programming Language :: Python :: 3.9",
21
+ "Programming Language :: Python :: 3.10",
22
+ "Programming Language :: Python :: 3.11",
23
+ "Programming Language :: Python :: 3.12",
24
+ ]
25
+ dependencies = [
26
+ "numpy",
27
+ "scipy",
28
+ ]
29
+
30
+
31
+ [tool.cibuildwheel]
32
+ skip = ""
33
+ test-skip = ""
34
+
35
+ build-frontend = "default"
36
+ config-settings = {}
37
+ dependency-versions = "pinned"
38
+ environment = {}
39
+ environment-pass = []
40
+ build-verbosity = 0
41
+
42
+ before-all = ""
43
+ before-build = ""
44
+ repair-wheel-command = ""
45
+
46
+ test-command = ""
47
+ before-test = ""
48
+ test-requires = []
49
+ test-extras = []
50
+
51
+ container-engine = "docker"
52
+
53
+ manylinux-x86_64-image = "manylinux2014"
54
+ manylinux-i686-image = "manylinux2014"
55
+ manylinux-aarch64-image = "manylinux2014"
56
+ manylinux-ppc64le-image = "manylinux2014"
57
+ manylinux-s390x-image = "manylinux2014"
58
+ manylinux-pypy_x86_64-image = "manylinux2014"
59
+ manylinux-pypy_i686-image = "manylinux2014"
60
+ manylinux-pypy_aarch64-image = "manylinux2014"
61
+
62
+ musllinux-x86_64-image = "musllinux_1_1"
63
+ musllinux-i686-image = "musllinux_1_1"
64
+ musllinux-aarch64-image = "musllinux_1_1"
65
+ musllinux-ppc64le-image = "musllinux_1_1"
66
+ musllinux-s390x-image = "musllinux_1_1"
67
+
68
+
69
+ [tool.cibuildwheel.linux]
70
+ archs = ["x86_64"]
71
+ repair-wheel-command = "auditwheel repair -w {dest_dir} {wheel}"
72
+
73
+ [tool.cibuildwheel.macos]
74
+ archs = ["universal2"]
75
+ repair-wheel-command = "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}"
76
+
77
+ [tool.cibuildwheel.windows]
78
+ archs = ["AMD64"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,108 @@
1
+ from setuptools import find_packages, setup, Extension
2
+ from Cython.Build import cythonize
3
+ from typing import List
4
+ import os
5
+ import sys
6
+ import glob
7
+ import subprocess
8
+ import numpy as np
9
+
10
+
11
def extra_compile_args() -> List[str]:
    """Return the platform-specific compiler flags that enable OpenMP.

    Returns
    -------
    List[str]
        Flags passed as ``extra_compile_args`` to every Extension.
    """
    if sys.platform in ["win32", "cygwin", "win64"]:
        # MSVC spelling of the OpenMP flag.
        extra_compile_args = ["/openmp"]
    elif sys.platform == "darwin":
        # Apple clang: OpenMP flags must be forwarded via the preprocessor.
        # NOTE(fix): the original had `if` instead of `elif` (so the trailing
        # `else` clobbered the Windows flags) and a missing comma that glued
        # "-fopenmp-version=50" and "-fopenmp" into one invalid flag.
        extra_compile_args = [
            "-DLLVM_ENABLE_PROJECTS",
            "-Xpreprocessor",
            "-fopenmp-version=50",
            "-fopenmp",
        ]
    else:
        # GCC/clang on Linux and other POSIX platforms.
        extra_compile_args = ["-fopenmp"]

    return extra_compile_args
25
+
26
+
27
def extra_link_args() -> List[str]:
    """Return the platform-specific linker flags that enable OpenMP.

    Returns
    -------
    List[str]
        Flags passed as ``extra_link_args`` to every Extension.
    """
    if sys.platform in ["win32", "cygwin", "win64"]:
        # MSVC spelling of the OpenMP flag.
        extra_link_args = ["/openmp"]
    elif sys.platform == "darwin":
        # Apple clang: OpenMP flags must be forwarded via the preprocessor.
        # NOTE(fix): the original had `if` instead of `elif` (so the trailing
        # `else` clobbered the Windows flags) and a missing comma that glued
        # "-fopenmp-version=50" and "-fopenmp" into one invalid flag.
        extra_link_args = [
            "-DLLVM_ENABLE_PROJECTS",
            "-Xpreprocessor",
            "-fopenmp-version=50",
            "-fopenmp",
        ]
    else:
        # GCC/clang on Linux and other POSIX platforms.
        extra_link_args = ["-fopenmp"]

    return extra_link_args
41
+
42
+
43
def matvec_extension() -> List[Extension]:
    """Build the Extension objects for the ``matvec`` subpackage.

    Runs the code generator first (as a subprocess, so it executes with the
    same interpreter) to ensure the ``.pyx`` sources exist, then collects
    and cythonizes them.
    """
    package_path = ("parallel_sparse_tools", "matvec")
    source_dir = os.path.join("src", *package_path)

    # Generate the _oputils sources before scanning for .pyx files.
    subprocess.check_call(
        [sys.executable, os.path.join(source_dir, "generate_oputils.py")]
    )

    include_dirs = [
        np.get_include(),
        os.path.join("src", "parallel_sparse_tools", "matvec", "_oputils"),
    ]

    return generate_extensions(package_path, include_dirs)
57
+
58
+
59
def expm_multiply_parallel_core_extension() -> List[Extension]:
    """Build the Extension objects for the ``expm_multiply_parallel_core`` subpackage.

    Runs the code generator first (as a subprocess, so it executes with the
    same interpreter) to ensure the ``.pyx`` sources exist, then collects
    and cythonizes them.
    """
    package_path = ("parallel_sparse_tools", "expm_multiply_parallel_core")
    source_dir = os.path.join("src", *package_path)

    # Generate the wrapper sources before scanning for .pyx files.
    subprocess.check_call(
        [sys.executable, os.path.join(source_dir, "generate_source.py")]
    )

    include_dirs = [
        np.get_include(),
        # shared headers from the matvec subpackage
        os.path.join("src", "parallel_sparse_tools", "matvec", "_oputils"),
        os.path.join(source_dir, "source"),
    ]

    return generate_extensions(package_path, include_dirs)
74
+
75
+
76
def generate_extensions(package_path, includes=None):
    """Create cythonized Extension objects for every ``.pyx`` file in a package.

    Parameters
    ----------
    package_path : tuple of str
        Dotted-package components, e.g. ``("parallel_sparse_tools", "matvec")``.
        The sources are looked up under ``src/<package_path...>``.
    includes : list of str, optional
        Extra include directories for both the C compiler and cythonize.

    Returns
    -------
    list
        The list returned by :func:`Cython.Build.cythonize`.
    """
    # NOTE(fix): the original used the mutable default `includes=[]`, which is
    # shared across calls; use None as the sentinel instead.
    includes = [] if includes is None else includes

    package_dir = os.path.join("src", *package_path)
    cython_src = glob.glob(os.path.join(package_dir, "*.pyx"))

    exts = []

    for cython_file in cython_src:
        # "foo.pyx" -> module name "foo" -> dotted path "pkg.sub.foo"
        module_name = os.path.split(cython_file)[-1].replace(".pyx", "")
        module_path = ".".join(package_path + (module_name,))

        exts.append(
            Extension(
                module_path,
                [cython_file],
                include_dirs=includes,
                extra_compile_args=extra_compile_args(),
                extra_link_args=extra_link_args(),
            )
        )

    return cythonize(exts, include_path=includes)
97
+
98
+
99
# Build every extension module up front; each helper also runs its
# source-code generator (a subprocess) as a side effect.
ext_modules = [
    *matvec_extension(),
    *expm_multiply_parallel_core_extension(),
]
# Static metadata (name, version, dependencies, ...) lives in pyproject.toml;
# only the dynamic pieces — src-layout package discovery and the compiled
# extensions — are configured here.
setup(
    include_package_data=True,
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    ext_modules=ext_modules,
)
@@ -0,0 +1 @@
1
+ from .expm_multiply_parallel_core import ExpmMultiplyParallel
@@ -0,0 +1,354 @@
1
import numpy as _np
import scipy.sparse as _sp
# NOTE(fix): `eye` was imported from `scipy.sparse.construct`, a private
# module removed from the public namespace in modern SciPy; import it from
# the public `scipy.sparse` package instead.
from scipy.sparse import eye
from scipy.sparse.linalg import LinearOperator, aslinearoperator, onenormest
from scipy.sparse.linalg._expm_multiply import _exact_1_norm, _fragment_3_1

from .expm_multiply_parallel_wrapper import (
    _wrapper_csr_1_norm,
    _wrapper_csr_trace,
    _wrapper_expm_multiply,
    _wrapper_expm_multiply_batch,
)
12
+
13
+
14
class ExpmMultiplyParallel(object):
    """Implements `scipy.sparse.linalg.expm_multiply()` for *openmp*.

    Notes
    -----
    * this is a wrapper over custom c++ code.
    * the `dtype` input need not be the same dtype as `A` or `a`; however, it must be possible to cast the result of `a*A` to this `dtype`.
    * consider the special case of real-time evolution with a purely-imaginary Hamiltonian, in which case `a=-1j*time` and `A` are both complex-valued, while the resulting matrix exponential is real-valued: in such cases, one can use either one of

    >>> expm_multiply_parallel( (1j*H.tocsr()).astype(np.float64), a=-1.0, dtype=np.float64)`

    and

    >>> expm_multiply_parallel( H.tocsr(), a=-1.0j, dtype=np.complex128)

    The more efficient way to compute the matrix exponential in this case is to use a real-valued `dtype`.


    Examples
    --------

    This example shows how to construct the `expm_multiply_parallel` object.

    Further code snippets can be found in the examples for the function methods of the class.
    The code snippet below initiates the class, and is required to run the example codes for the function methods.

    .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
        :linenos:
        :language: python
        :lines: 7-30

    """

    def __init__(self, A, a=1.0, dtype=None, copy=False):
        """Initializes `expm_multiply_parallel`.

        Parameters
        -----------
        A : {array_like, scipy.sparse matrix}
            The operator (matrix) whose exponential is to be calculated.
        a : scalar, optional
            scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`.
        dtype : numpy.dtype, optional
            data type specified for the total operator :math:`\\mathrm{e}^{aA}`. Default is: `numpy.result_type(A.dtype,min_scalar_type(a),float64)`.
        copy : bool, optional
            if `True` the matrix is copied otherwise the matrix is stored by reference.

        Raises
        ------
        ValueError
            if `a` is not a scalar, `A` is not square, or `dtype` cannot
            represent `a*A` to at least float32 precision.

        """
        if _np.array(a).ndim == 0:
            self._a = a
        else:
            raise ValueError("a must be scalar value.")

        self._A = _sp.csr_matrix(A, copy=copy)

        # NOTE(fix): validate the converted matrix — plain array_like input
        # (e.g. a nested list) has no `.shape`/`.dtype` attribute, so the
        # original checks on `A` raised AttributeError instead of ValueError.
        if self._A.shape[0] != self._A.shape[1]:
            raise ValueError("A must be a square matrix.")

        a_dtype_min = _np.min_scalar_type(self._a)

        # use double precision by default.
        if dtype is None:
            self._dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float64)
        else:
            min_dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float32)
            if not _np.can_cast(min_dtype, dtype):
                raise ValueError(
                    "dtype not sufficient to represent a*A to at least float32 precision."
                )

            self._dtype = dtype

        # half machine epsilon of the working dtype: truncation tolerance of
        # the Taylor expansion used by the c++ kernel.
        tol = _np.finfo(self._dtype).eps / 2
        tol_dtype = _np.finfo(self._dtype).eps.dtype
        self._tol = _np.array(tol, dtype=tol_dtype)

        # mu = trace(A)/n: spectral shift that reduces the 1-norm of the
        # shifted operator (the shift is applied inside the c++ kernels,
        # not to the stored matrix).
        mu = (
            _wrapper_csr_trace(self._A.indptr, self._A.indices, self._A.data)
            / self._A.shape[0]
        )
        self._mu = _np.array(mu, dtype=self._dtype)
        self._A_1_norm = _wrapper_csr_1_norm(
            self._A.indptr, self._A.indices, self._A.data, self._mu
        )
        self._calculate_partition()

        # shift = eye(A.shape[0],format="csr",dtype=A.dtype)
        # shift.data *= mu
        # self._A = self._A - shift

    @property
    def a(self):
        """scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`"""
        return self._a

    @property
    def A(self):
        """scipy.sparse.csr_matrix: csr_matrix to be exponentiated."""
        return self._A

    def set_a(self, a, dtype=None):
        """Sets the value of the property `a`.

        Parameters
        ----------
        a : scalar
            new value of `a`.
        dtype : numpy.dtype, optional
            dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)

        Raises
        ------
        ValueError
            if `a` is not a scalar, or `dtype` cannot represent `a*A` to at
            least float32 precision.

        Examples
        --------

        .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
            :linenos:
            :language: python
            :lines: 32-35

        """

        if _np.array(a).ndim == 0:
            self._a = a

            a_dtype_min = _np.min_scalar_type(self._a)

            # use double precision by default.
            if dtype is None:
                self._dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float64)
            else:
                # NOTE(fix): the original read `A.dtype`, but there is no
                # local name `A` in this method — any call with an explicit
                # dtype raised NameError.
                min_dtype = _np.result_type(self._A.dtype, a_dtype_min, _np.float32)
                if not _np.can_cast(min_dtype, dtype):
                    raise ValueError(
                        "dtype not sufficient to represent a*A to at least float32 precision."
                    )

                self._dtype = dtype

            tol = _np.finfo(self._dtype).eps / 2
            tol_dtype = _np.finfo(self._dtype).eps.dtype
            self._tol = _np.array(tol, dtype=tol_dtype)
            self._mu = _np.array(self._mu, dtype=self._dtype)

            # the Taylor-order/squaring partition depends on `a` and `tol`.
            self._calculate_partition()
        else:
            raise ValueError("expecting 'a' to be scalar.")

    def dot(self, v, work_array=None, overwrite_v=False, tol=None):
        """Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`.

        Examples
        --------

        .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
            :linenos:
            :language: python
            :lines: 37-

        Parameters
        -----------
        v : contiguous numpy.ndarray, 1d or 2d array
            array to apply :math:`\\mathrm{e}^{aA}` on.
        work_array : contiguous numpy.ndarray, optional
            array can be any shape but must contain 2*v.size contiguous elements.
            This array is used as temporary memory space for the underlying c-code. This saves extra memory allocation for function operations.
        overwrite_v : bool, optional
            if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results.
        tol: float, optional
            tolerance value used to truncate Taylor expansion of matrix exponential.

        Returns
        --------
        numpy.ndarray
            result of :math:`\\mathrm{e}^{aA}v`.

            If `overwrite_v = True` the function returns `v` with the data overwritten, otherwise the result is stored in a new array.

        """
        v = _np.asarray(v)

        if v.ndim > 2:
            raise ValueError("array must have ndim <= 2.")

        if v.shape[0] != self._A.shape[1]:
            raise ValueError("dimension mismatch {}, {}".format(self._A.shape, v.shape))

        # dtype of the result a*A @ v.
        v_dtype = _np.result_type(self._dtype, v.dtype)

        if overwrite_v:
            if v_dtype != v.dtype:
                raise ValueError(
                    "if overwrite_v is True, the input array must match correct output dtype for matrix multiplication."
                )

            # the c++ kernel writes into v in place, so it must be a
            # C-contiguous, writable buffer.
            if not v.flags["CARRAY"]:
                raise TypeError("input array must be contiguous and writable.")

        else:
            v = v.astype(v_dtype, order="C", copy=True)

        if work_array is None:
            # the kernel needs scratch space of 2*v.size elements.
            if v.ndim == 1:
                work_array = _np.zeros((2 * self._A.shape[0],), dtype=v.dtype)
            else:
                work_array = _np.zeros(
                    (2 * self._A.shape[0], v.shape[1]), dtype=v.dtype
                )
        else:
            work_array = work_array.ravel(order="A")

            if work_array.size != 2 * v.size:
                raise ValueError(
                    "work_array must have twice the number of elements as in v."
                )

            if work_array.dtype != v_dtype:
                raise ValueError(
                    "work_array must be array of dtype which matches the result of the matrix-vector multiplication."
                )

        a = _np.array(self._a, dtype=v_dtype)
        mu = _np.array(self._mu, dtype=v_dtype)
        # default truncation tolerance was fixed when dtype was chosen.
        if tol is not None:
            tol = _np.array(tol, dtype=mu.real.dtype)
        else:
            tol = _np.array(self._tol, dtype=mu.real.dtype)
        if v.ndim == 1:
            _wrapper_expm_multiply(
                self._A.indptr,
                self._A.indices,
                self._A.data,
                self._s,
                self._m_star,
                a,
                tol,
                mu,
                v,
                work_array.ravel(),
            )
        else:
            work_array = work_array.reshape((-1, v.shape[1]))
            _wrapper_expm_multiply_batch(
                self._A.indptr,
                self._A.indices,
                self._A.data,
                self._s,
                self._m_star,
                a,
                tol,
                mu,
                v,
                work_array,
            )

        return v

    def _calculate_partition(self):
        # Pick the Taylor order m_star and squaring count s following
        # Al-Mohy & Higham's "Computing the Action of the Matrix
        # Exponential" (fragment 3.1), using lazily-estimated norms of
        # powers of a*(A - mu*I).
        if _np.abs(self._a) * self._A_1_norm == 0:
            # zero operator: exp(0) = identity, nothing to expand.
            self._m_star, self._s = 0, 1
        else:
            ell = 2
            norm_info = LazyOperatorNormInfo(
                self._A, self._A_1_norm, self._a, self._mu, self._dtype, ell=ell
            )
            self._m_star, self._s = _fragment_3_1(norm_info, 1, self._tol, ell=ell)
278
+
279
+
280
+ ##### code below is copied from scipy.sparse.linalg._expm_multiply_core and modified slightly.
281
+
282
+
283
def matvec_p(v, A, a, mu, p):
    """Apply the shifted, scaled operator ``a*(A - mu*I)`` to ``v``, ``p`` times."""
    result = v
    for _ in range(p):
        result = a * (A.dot(result) - mu * result)

    return result
288
+
289
+
290
class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.

    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.

    """

    def __init__(self, A, A_1_norm, a, mu, dtype, ell=2):
        """
        Provide the operator and some norm-related information.

        Parameters
        -----------
        A : linear operator
            The operator of interest.
        A_1_norm : float
            The exact 1-norm of A.
        a : scalar
            Scalar multiplying the shifted operator.
        mu : scalar
            Spectral shift, so the effective operator is ``a*(A - mu*I)``.
        dtype : numpy.dtype
            Working dtype of the effective operator.
        ell : int, optional
            A technical parameter controlling norm estimation quality.

        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._a = a
        self._mu = mu
        self._dtype = dtype
        self._ell = ell
        # cache: p -> d_p estimate
        self._d = {}

    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        return _np.abs(self._a) * self._A_1_norm

    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p in self._d:
            return self._d[p]

        # Wrap a*(A - mu*I) as a LinearOperator so powers can be
        # estimated without forming the matrix explicitly.
        def forward(v):
            return self._a * (self._A.dot(v) - self._mu * v)

        def adjoint(v):
            return _np.conj(self._a) * (
                self._A.H.dot(v) - _np.conj(self._mu) * v
            )

        operator = LinearOperator(
            self._A.shape, dtype=self._dtype, matvec=forward, rmatvec=adjoint
        )

        estimate = onenormest(operator**p)

        # est = onenormest((self._a * aslinearoperator(self._A))**p)
        self._d[p] = estimate ** (1.0 / p)

        return self._d[p]

    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p + 1))