sparse-kappa 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sparse_kappa-0.0.1/PKG-INFO +192 -0
- sparse_kappa-0.0.1/README.md +161 -0
- sparse_kappa-0.0.1/pyproject.toml +40 -0
- sparse_kappa-0.0.1/setup.cfg +4 -0
- sparse_kappa-0.0.1/setup.py +43 -0
- sparse_kappa-0.0.1/sparse_kappa/__init__.py +11 -0
- sparse_kappa-0.0.1/sparse_kappa/norm1/__init__.py +7 -0
- sparse_kappa-0.0.1/sparse_kappa/norm1/hager_higham.py +198 -0
- sparse_kappa-0.0.1/sparse_kappa/norm1/inverse_norm.py +54 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/__init__.py +19 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/arnoldi.py +112 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/cupy_wrappers.py +418 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/golub_kahan.py +102 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/lanczos.py +143 -0
- sparse_kappa-0.0.1/sparse_kappa/norm2/power_method.py +199 -0
- sparse_kappa-0.0.1/sparse_kappa/sparse_kappa.py +232 -0
- sparse_kappa-0.0.1/sparse_kappa/utils.py +263 -0
- sparse_kappa-0.0.1/sparse_kappa.egg-info/PKG-INFO +192 -0
- sparse_kappa-0.0.1/sparse_kappa.egg-info/SOURCES.txt +35 -0
- sparse_kappa-0.0.1/sparse_kappa.egg-info/dependency_links.txt +1 -0
- sparse_kappa-0.0.1/sparse_kappa.egg-info/requires.txt +8 -0
- sparse_kappa-0.0.1/sparse_kappa.egg-info/top_level.txt +1 -0
- sparse_kappa-0.0.1/tests/test_accuracy.py +138 -0
- sparse_kappa-0.0.1/tests/test_norm1.py +49 -0
- sparse_kappa-0.0.1/tests/test_norm2.py +79 -0
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: sparse-kappa
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: GPU-accelerated sparse matrix condition number estimation using CuPy
|
|
5
|
+
Home-page: https://github.com/chenxinye/sparse-kappa
|
|
6
|
+
Author: Xinye Chen
|
|
7
|
+
Author-email: Xinye Chen <xinyechenai@gmail.com>
|
|
8
|
+
License: MIT
|
|
9
|
+
Project-URL: Homepage, https://github.com/chenxinye/sparse-kappa
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Science/Research
|
|
12
|
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Requires-Python: >=3.8
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
Requires-Dist: cupy>=10.0.0
|
|
22
|
+
Requires-Dist: numpy>=1.20.0
|
|
23
|
+
Provides-Extra: dev
|
|
24
|
+
Requires-Dist: pytest>=6.0; extra == "dev"
|
|
25
|
+
Requires-Dist: pytest-cov>=2.0; extra == "dev"
|
|
26
|
+
Requires-Dist: black>=22.0; extra == "dev"
|
|
27
|
+
Requires-Dist: flake8>=4.0; extra == "dev"
|
|
28
|
+
Dynamic: author
|
|
29
|
+
Dynamic: home-page
|
|
30
|
+
Dynamic: requires-python
|
|
31
|
+
|
|
32
|
+
# CuPy Sparse Condition Number Estimation
|
|
33
|
+
|
|
34
|
+
A GPU-accelerated library for estimating condition numbers of sparse matrices using CuPy.
|
|
35
|
+
|
|
36
|
+
## Features
|
|
37
|
+
|
|
38
|
+
- **GPU-Accelerated**: All computations run on NVIDIA GPUs via CuPy
|
|
39
|
+
- **Multiple Norms**: Support for 1-norm and 2-norm condition numbers
|
|
40
|
+
- **Rich Algorithm Suite**:
|
|
41
|
+
- **1-norm**: Hager-Higham algorithm
|
|
42
|
+
- **2-norm**: Power method, Lanczos, Arnoldi, Golub-Kahan bidiagonalization
|
|
43
|
+
- **CuPy integrations**: svds, eigsh, lobpcg wrappers
|
|
44
|
+
- **Automatic Method Selection**: Chooses optimal algorithm based on matrix properties
|
|
45
|
+
- **Memory Efficient**: Designed for large sparse matrices
|
|
46
|
+
|
|
47
|
+
## Installation
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
Install the latest release from PyPI via pip:
|
|
51
|
+
```bash
|
|
52
|
+
pip install sparse-kappa
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
```bash
|
|
56
|
+
git clone https://github.com/chenxinye/sparse-kappa
cd sparse-kappa
|
|
57
|
+
pip install cupy-cuda11x # or cupy-cuda12x for CUDA 12
|
|
58
|
+
pip install -e .
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Quick Start
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
import cupy as cp
|
|
65
|
+
import cupyx.scipy.sparse as sp
|
|
66
|
+
from sparse_kappa import cond_estimate
|
|
67
|
+
|
|
68
|
+
# Create sparse matrix on GPU
|
|
69
|
+
A = sp.random(10000, 10000, density=0.01, format='csr')
|
|
70
|
+
|
|
71
|
+
# Estimate condition number (automatic method selection)
|
|
72
|
+
cond = cond_estimate(A)
|
|
73
|
+
print(f"Condition number: {cond:.2e}")
|
|
74
|
+
|
|
75
|
+
# Use specific method
|
|
76
|
+
cond = cond_estimate(A, norm=2, method='lanczos')
|
|
77
|
+
|
|
78
|
+
# Get detailed results
|
|
79
|
+
result = cond_estimate(A, norm=2, method='svds', verbose=True)
|
|
80
|
+
print(f"Method: {result['method']}")
|
|
81
|
+
print(f"Iterations: {result['iterations']}")
|
|
82
|
+
print(f"σ_max: {result['sigma_max']:.4e}")
|
|
83
|
+
print(f"σ_min: {result['sigma_min']:.4e}")
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Available Methods
|
|
87
|
+
|
|
88
|
+
### 1-Norm Methods
|
|
89
|
+
|
|
90
|
+
| Method | Description | Best For |
|
|
91
|
+
|--------|-------------|----------|
|
|
92
|
+
| `hager-higham` | Iterative refinement algorithm | General matrices, fast estimation |
|
|
93
|
+
|
|
94
|
+
### 2-Norm Methods
|
|
95
|
+
|
|
96
|
+
| Method | Description | Best For | Complexity |
|
|
97
|
+
|--------|-------------|----------|------------|
|
|
98
|
+
| `svds` | Partial SVD (most accurate) | Small-medium matrices (<5k) | O(k·nnz) |
|
|
99
|
+
| `eigsh` | Symmetric eigenvalue solver | Symmetric matrices | O(k·nnz) |
|
|
100
|
+
| `lobpcg` | Block preconditioned CG | Large matrices | O(k·nnz) |
|
|
101
|
+
| `power` | Power iteration | Quick estimates | O(k·nnz) |
|
|
102
|
+
| `lanczos` | Lanczos tridiagonalization | Medium matrices | O(k²·nnz) |
|
|
103
|
+
| `arnoldi` | Arnoldi iteration | Non-symmetric | O(k²·nnz) |
|
|
104
|
+
| `golub-kahan` | Bidiagonalization | Numerically stable | O(k·nnz) |
|
|
105
|
+
| `auto` | Automatic selection | All cases | - |
|
|
106
|
+
|
|
107
|
+
## Examples
|
|
108
|
+
|
|
109
|
+
### Example 1: Compare Methods
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
import cupyx.scipy.sparse as sp
|
|
113
|
+
from sparse_kappa import cond_estimate
|
|
114
|
+
|
|
115
|
+
A = sp.random(2000, 2000, density=0.005, format='csr')
|
|
116
|
+
|
|
117
|
+
methods = ['power', 'lanczos', 'svds', 'golub-kahan']
|
|
118
|
+
for method in methods:
|
|
119
|
+
cond = cond_estimate(A, norm=2, method=method)
|
|
120
|
+
print(f"{method:12s}: {cond:.4e}")
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
### Example 2: 1-Norm Estimation
|
|
124
|
+
|
|
125
|
+
```python
|
|
126
|
+
result = cond_estimate(A, norm=1, method='hager-higham', verbose=True)
|
|
127
|
+
print(f"κ₁(A) = {result['condition_number']:.4e}")
|
|
128
|
+
print(f"||A||₁ = {result['norm_A']:.4e}")
|
|
129
|
+
print(f"||A⁻¹||₁ = {result['norm_Ainv']:.4e}")
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
### Example 3: Symmetric Matrix
|
|
133
|
+
|
|
134
|
+
```python
|
|
135
|
+
# Create symmetric matrix
|
|
136
|
+
A = sp.random(1000, 1000, density=0.01, format='csr')
|
|
137
|
+
A = (A + A.T) / 2
|
|
138
|
+
|
|
139
|
+
# Use eigsh (optimized for symmetric)
|
|
140
|
+
cond = cond_estimate(A, norm=2, method='eigsh')
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## Performance Tips
|
|
144
|
+
|
|
145
|
+
1. **Auto mode is recommended** for first-time usage
|
|
146
|
+
2. **For symmetric matrices**, use `eigsh` or `lanczos`
|
|
147
|
+
3. **For large sparse matrices** (>10k), use `golub-kahan` or `lobpcg`
|
|
148
|
+
4. **For highest accuracy on small matrices**, use `svds`
|
|
149
|
+
5. **Increase `max_iter`** if convergence fails
|
|
150
|
+
|
|
151
|
+
## Testing
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
# Run all tests
|
|
155
|
+
pytest tests/ -v
|
|
156
|
+
|
|
157
|
+
# Run specific test file
|
|
158
|
+
pytest tests/test_norm2.py -v
|
|
159
|
+
|
|
160
|
+
# Run with coverage
|
|
161
|
+
pytest tests/ --cov=sparse_kappa
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
## License
|
|
166
|
+
|
|
167
|
+
MIT License
|
|
168
|
+
|
|
169
|
+
## Contributing
|
|
170
|
+
|
|
171
|
+
Contributions welcome! Please submit issues and pull requests on GitHub.
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
## References
|
|
175
|
+
|
|
176
|
+
1. Hager, W. W. (1984). "Condition estimates." SIAM J. Sci. Stat. Comput.
|
|
177
|
+
2. Higham, N. J., & Tisseur, F. (2000). "A block algorithm for matrix 1-norm estimation." SIAM J. Matrix Anal. Appl.
|
|
178
|
+
3. Golub, G. H., & Van Loan, C. F. (2013). "Matrix Computations" (4th ed.)
|
|
179
|
+
4. Saad, Y. (2011). "Numerical Methods for Large Eigenvalue Problems" (2nd ed.)
|
|
180
|
+
|
|
181
|
+
## Citation
|
|
182
|
+
|
|
183
|
+
If you use this library in your research, please cite:
|
|
184
|
+
|
|
185
|
+
```bibtex
|
|
186
|
+
@software{sparse_kappa,
|
|
187
|
+
title={Sparse Matrices Condition Number Estimation on GPUs},
|
|
188
|
+
author={Xinye Chen},
|
|
189
|
+
year={2026},
|
|
190
|
+
url={https://github.com/chenxinye/sparse-kappa}
|
|
191
|
+
}
|
|
192
|
+
```
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
# CuPy Sparse Condition Number Estimation
|
|
2
|
+
|
|
3
|
+
A GPU-accelerated library for estimating condition numbers of sparse matrices using CuPy.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **GPU-Accelerated**: All computations run on NVIDIA GPUs via CuPy
|
|
8
|
+
- **Multiple Norms**: Support for 1-norm and 2-norm condition numbers
|
|
9
|
+
- **Rich Algorithm Suite**:
|
|
10
|
+
- **1-norm**: Hager-Higham algorithm
|
|
11
|
+
- **2-norm**: Power method, Lanczos, Arnoldi, Golub-Kahan bidiagonalization
|
|
12
|
+
- **CuPy integrations**: svds, eigsh, lobpcg wrappers
|
|
13
|
+
- **Automatic Method Selection**: Chooses optimal algorithm based on matrix properties
|
|
14
|
+
- **Memory Efficient**: Designed for large sparse matrices
|
|
15
|
+
|
|
16
|
+
## Installation
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
Install the latest release from PyPI via pip:
|
|
20
|
+
```bash
|
|
21
|
+
pip install sparse-kappa
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
git clone https://github.com/chenxinye/sparse-kappa
cd sparse-kappa
|
|
26
|
+
pip install cupy-cuda11x # or cupy-cuda12x for CUDA 12
|
|
27
|
+
pip install -e .
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Quick Start
|
|
31
|
+
|
|
32
|
+
```python
|
|
33
|
+
import cupy as cp
|
|
34
|
+
import cupyx.scipy.sparse as sp
|
|
35
|
+
from sparse_kappa import cond_estimate
|
|
36
|
+
|
|
37
|
+
# Create sparse matrix on GPU
|
|
38
|
+
A = sp.random(10000, 10000, density=0.01, format='csr')
|
|
39
|
+
|
|
40
|
+
# Estimate condition number (automatic method selection)
|
|
41
|
+
cond = cond_estimate(A)
|
|
42
|
+
print(f"Condition number: {cond:.2e}")
|
|
43
|
+
|
|
44
|
+
# Use specific method
|
|
45
|
+
cond = cond_estimate(A, norm=2, method='lanczos')
|
|
46
|
+
|
|
47
|
+
# Get detailed results
|
|
48
|
+
result = cond_estimate(A, norm=2, method='svds', verbose=True)
|
|
49
|
+
print(f"Method: {result['method']}")
|
|
50
|
+
print(f"Iterations: {result['iterations']}")
|
|
51
|
+
print(f"σ_max: {result['sigma_max']:.4e}")
|
|
52
|
+
print(f"σ_min: {result['sigma_min']:.4e}")
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Available Methods
|
|
56
|
+
|
|
57
|
+
### 1-Norm Methods
|
|
58
|
+
|
|
59
|
+
| Method | Description | Best For |
|
|
60
|
+
|--------|-------------|----------|
|
|
61
|
+
| `hager-higham` | Iterative refinement algorithm | General matrices, fast estimation |
|
|
62
|
+
|
|
63
|
+
### 2-Norm Methods
|
|
64
|
+
|
|
65
|
+
| Method | Description | Best For | Complexity |
|
|
66
|
+
|--------|-------------|----------|------------|
|
|
67
|
+
| `svds` | Partial SVD (most accurate) | Small-medium matrices (<5k) | O(k·nnz) |
|
|
68
|
+
| `eigsh` | Symmetric eigenvalue solver | Symmetric matrices | O(k·nnz) |
|
|
69
|
+
| `lobpcg` | Block preconditioned CG | Large matrices | O(k·nnz) |
|
|
70
|
+
| `power` | Power iteration | Quick estimates | O(k·nnz) |
|
|
71
|
+
| `lanczos` | Lanczos tridiagonalization | Medium matrices | O(k²·nnz) |
|
|
72
|
+
| `arnoldi` | Arnoldi iteration | Non-symmetric | O(k²·nnz) |
|
|
73
|
+
| `golub-kahan` | Bidiagonalization | Numerically stable | O(k·nnz) |
|
|
74
|
+
| `auto` | Automatic selection | All cases | - |
|
|
75
|
+
|
|
76
|
+
## Examples
|
|
77
|
+
|
|
78
|
+
### Example 1: Compare Methods
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
import cupyx.scipy.sparse as sp
|
|
82
|
+
from sparse_kappa import cond_estimate
|
|
83
|
+
|
|
84
|
+
A = sp.random(2000, 2000, density=0.005, format='csr')
|
|
85
|
+
|
|
86
|
+
methods = ['power', 'lanczos', 'svds', 'golub-kahan']
|
|
87
|
+
for method in methods:
|
|
88
|
+
cond = cond_estimate(A, norm=2, method=method)
|
|
89
|
+
print(f"{method:12s}: {cond:.4e}")
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### Example 2: 1-Norm Estimation
|
|
93
|
+
|
|
94
|
+
```python
|
|
95
|
+
result = cond_estimate(A, norm=1, method='hager-higham', verbose=True)
|
|
96
|
+
print(f"κ₁(A) = {result['condition_number']:.4e}")
|
|
97
|
+
print(f"||A||₁ = {result['norm_A']:.4e}")
|
|
98
|
+
print(f"||A⁻¹||₁ = {result['norm_Ainv']:.4e}")
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
### Example 3: Symmetric Matrix
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
# Create symmetric matrix
|
|
105
|
+
A = sp.random(1000, 1000, density=0.01, format='csr')
|
|
106
|
+
A = (A + A.T) / 2
|
|
107
|
+
|
|
108
|
+
# Use eigsh (optimized for symmetric)
|
|
109
|
+
cond = cond_estimate(A, norm=2, method='eigsh')
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
## Performance Tips
|
|
113
|
+
|
|
114
|
+
1. **Auto mode is recommended** for first-time usage
|
|
115
|
+
2. **For symmetric matrices**, use `eigsh` or `lanczos`
|
|
116
|
+
3. **For large sparse matrices** (>10k), use `golub-kahan` or `lobpcg`
|
|
117
|
+
4. **For highest accuracy on small matrices**, use `svds`
|
|
118
|
+
5. **Increase `max_iter`** if convergence fails
|
|
119
|
+
|
|
120
|
+
## Testing
|
|
121
|
+
|
|
122
|
+
```bash
|
|
123
|
+
# Run all tests
|
|
124
|
+
pytest tests/ -v
|
|
125
|
+
|
|
126
|
+
# Run specific test file
|
|
127
|
+
pytest tests/test_norm2.py -v
|
|
128
|
+
|
|
129
|
+
# Run with coverage
|
|
130
|
+
pytest tests/ --cov=sparse_kappa
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
## License
|
|
135
|
+
|
|
136
|
+
MIT License
|
|
137
|
+
|
|
138
|
+
## Contributing
|
|
139
|
+
|
|
140
|
+
Contributions welcome! Please submit issues and pull requests on GitHub.
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
## References
|
|
144
|
+
|
|
145
|
+
1. Hager, W. W. (1984). "Condition estimates." SIAM J. Sci. Stat. Comput.
|
|
146
|
+
2. Higham, N. J., & Tisseur, F. (2000). "A block algorithm for matrix 1-norm estimation." SIAM J. Matrix Anal. Appl.
|
|
147
|
+
3. Golub, G. H., & Van Loan, C. F. (2013). "Matrix Computations" (4th ed.)
|
|
148
|
+
4. Saad, Y. (2011). "Numerical Methods for Large Eigenvalue Problems" (2nd ed.)
|
|
149
|
+
|
|
150
|
+
## Citation
|
|
151
|
+
|
|
152
|
+
If you use this library in your research, please cite:
|
|
153
|
+
|
|
154
|
+
```bibtex
|
|
155
|
+
@software{sparse_kappa,
|
|
156
|
+
title={Sparse Matrices Condition Number Estimation on GPUs},
|
|
157
|
+
author={Xinye Chen},
|
|
158
|
+
year={2026},
|
|
159
|
+
url={https://github.com/chenxinye/sparse-kappa}
|
|
160
|
+
}
|
|
161
|
+
```
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=45", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "sparse-kappa"
|
|
7
|
+
version = "0.0.1"
|
|
8
|
+
description = "GPU-accelerated sparse matrix condition number estimation using CuPy"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
authors = [
|
|
11
|
+
{name = "Xinye Chen", email = "xinyechenai@gmail.com"}
|
|
12
|
+
]
|
|
13
|
+
license = {text = "MIT"}
|
|
14
|
+
requires-python = ">=3.8"
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Development Status :: 3 - Alpha",
|
|
17
|
+
"Intended Audience :: Science/Research",
|
|
18
|
+
"Topic :: Scientific/Engineering :: Mathematics",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Programming Language :: Python :: 3.8",
|
|
22
|
+
"Programming Language :: Python :: 3.9",
|
|
23
|
+
"Programming Language :: Python :: 3.10",
|
|
24
|
+
"Programming Language :: Python :: 3.11",
|
|
25
|
+
]
|
|
26
|
+
dependencies = [
|
|
27
|
+
"cupy>=10.0.0",
|
|
28
|
+
"numpy>=1.20.0",
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
[project.optional-dependencies]
|
|
32
|
+
dev = [
|
|
33
|
+
"pytest>=6.0",
|
|
34
|
+
"pytest-cov>=2.0",
|
|
35
|
+
"black>=22.0",
|
|
36
|
+
"flake8>=4.0",
|
|
37
|
+
]
|
|
38
|
+
|
|
39
|
+
[project.urls]
|
|
40
|
+
Homepage = "https://github.com/chenxinye/sparse-kappa"
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from setuptools import setup, find_packages
|
|
2
|
+
|
|
3
|
+
with open("README.md", "r", encoding="utf-8") as fh:
|
|
4
|
+
long_description = fh.read()
|
|
5
|
+
|
|
6
|
+
setup(
|
|
7
|
+
name="sparse-kappa",
|
|
8
|
+
version="0.0.1",
|
|
9
|
+
author="Xinye Chen",
|
|
10
|
+
author_email="xinyechenai@gmail.com",
|
|
11
|
+
description="GPU-accelerated sparse matrix condition number estimation using CuPy",
|
|
12
|
+
long_description=long_description,
|
|
13
|
+
long_description_content_type="text/markdown",
|
|
14
|
+
url="https://github.com/chenxinye/sparse-kappa",
|
|
15
|
+
|
|
16
|
+
packages=find_packages(include=["sparse_kappa", "sparse_kappa.*"]),
|
|
17
|
+
package_dir={"": "."},
|
|
18
|
+
|
|
19
|
+
classifiers=[
|
|
20
|
+
"Development Status :: 3 - Alpha",
|
|
21
|
+
"Intended Audience :: Science/Research",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Mathematics",
|
|
23
|
+
"License :: OSI Approved :: MIT License",
|
|
24
|
+
"Programming Language :: Python :: 3",
|
|
25
|
+
"Programming Language :: Python :: 3.8",
|
|
26
|
+
"Programming Language :: Python :: 3.9",
|
|
27
|
+
"Programming Language :: Python :: 3.10",
|
|
28
|
+
"Programming Language :: Python :: 3.11",
|
|
29
|
+
],
|
|
30
|
+
python_requires=">=3.8",
|
|
31
|
+
install_requires=[
|
|
32
|
+
"cupy>=10.0.0",
|
|
33
|
+
"numpy>=1.20.0",
|
|
34
|
+
],
|
|
35
|
+
extras_require={
|
|
36
|
+
"dev": [
|
|
37
|
+
"pytest>=6.0",
|
|
38
|
+
"pytest-cov>=2.0",
|
|
39
|
+
"black>=22.0",
|
|
40
|
+
"flake8>=4.0",
|
|
41
|
+
],
|
|
42
|
+
},
|
|
43
|
+
)
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CuPy Sparse Condition Number Estimation Library
|
|
3
|
+
|
|
4
|
+
A comprehensive GPU-accelerated library for estimating condition numbers
|
|
5
|
+
of sparse matrices using various iterative methods.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from .sparse_kappa import cond_estimate, ConditionNumberEstimator
|
|
9
|
+
|
|
10
|
+
__version__ = "0.0.1"
|
|
11
|
+
__all__ = ["cond_estimate", "ConditionNumberEstimator"]
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hager-Higham algorithm for 1-norm condition number estimation.
|
|
3
|
+
|
|
4
|
+
References
|
|
5
|
+
----------
|
|
6
|
+
- Hager, W. W. (1984). Condition estimates. SIAM Journal on
|
|
7
|
+
Scientific and Statistical Computing, 5(2), 311-316.
|
|
8
|
+
- Higham, N. J., & Tisseur, F. (2000). A block algorithm for matrix
|
|
9
|
+
1-norm estimation, with an application to 1-norm pseudospectra.
|
|
10
|
+
SIAM Journal on Matrix Analysis and Applications, 21(4), 1185-1201.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import cupy as cp
|
|
14
|
+
import cupyx.scipy.sparse as sp
|
|
15
|
+
from typing import Dict, Any
|
|
16
|
+
from ..utils import sparse_matrix_norm, apply_inverse, print_iteration
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def hager_higham_norm1(
    A: sp.spmatrix,
    max_iter: int = 5,
    tol: float = 1e-6,
    verbose: bool = False,
    variant: str = 'higham',
    **kwargs
) -> Dict[str, Any]:
    """
    Estimate the 1-norm condition number with the Hager-Higham algorithm.

    Computes κ₁(A) = ||A||₁ · ||A⁻¹||₁, where ||A||₁ is evaluated exactly
    (maximum absolute column sum) and ||A⁻¹||₁ is estimated iteratively
    without ever forming A⁻¹.

    Parameters
    ----------
    A : sparse matrix
        Square input matrix (n x n).
    max_iter : int, default=5
        Maximum number of refinement iterations for the inverse-norm
        estimate.
    tol : float, default=1e-6
        Relative tolerance used by the convergence test.
    verbose : bool, default=False
        If True, print per-iteration progress information.
    variant : str, default='higham'
        Algorithm variant identifier ('hager' or 'higham'); forwarded to
        the inverse-norm estimator.
    **kwargs
        Accepted for interface compatibility; currently ignored.

    Returns
    -------
    dict
        Keys: 'condition_number' (estimated κ₁(A)), 'norm_A' (||A||₁),
        'norm_Ainv' (estimated ||A⁻¹||₁), 'iterations' (iteration count),
        'converged' (bool convergence status).

    Notes
    -----
    Cost is O(k · nnz(A)) where k is the number of iterations actually
    performed by the inverse-norm estimator.
    """
    n = A.shape[0]

    # ||A||₁ is cheap and exact for a sparse matrix: max column sum.
    norm_A = sparse_matrix_norm(A, ord=1)

    if verbose:
        print("Hager-Higham 1-norm condition number estimation")
        print(f"Matrix size: {n} x {n}")
        print(f"||A||₁ = {norm_A:.6e}")
        print("Estimating ||A⁻¹||₁...")

    # The inverse norm can only be estimated; delegate to the iterative
    # Hager-Higham sweep.
    norm_Ainv, iterations, converged = estimate_inverse_norm1(
        A, max_iter, tol, verbose, variant
    )

    # κ₁(A) = ||A||₁ · ||A⁻¹||₁.
    cond = norm_A * norm_Ainv

    if verbose:
        print(f"\n||A⁻¹||₁ = {norm_Ainv:.6e}")
        print(f"κ₁(A) = {cond:.6e}")
        print(f"Converged: {converged} after {iterations} iterations")

    return {
        'condition_number': float(cond),
        'norm_A': float(norm_A),
        'norm_Ainv': float(norm_Ainv),
        'iterations': iterations,
        'converged': converged,
    }
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def estimate_inverse_norm1(
    A: sp.spmatrix,
    max_iter: int,
    tol: float,
    verbose: bool,
    variant: str
) -> tuple:
    """
    Estimate ||A⁻¹||₁ via the Hager-Higham power-like iteration.

    Alternates a dual solve (A^T z = sign(x), whose largest entry picks a
    promising column index) with a primal solve (x = A⁻¹ e_j, whose 1-norm
    is a direct lower bound on ||A⁻¹||₁), refining until the estimate
    stagnates or `max_iter` sweeps are exhausted.

    Parameters
    ----------
    A : sparse matrix
        Square input matrix.
    max_iter : int
        Maximum number of sweeps.
    tol : float
        Relative stagnation tolerance for the convergence test.
    verbose : bool
        If True, print per-iteration estimates.
    variant : str
        NOTE(review): accepted for interface compatibility but not
        consulted anywhere in this function.

    Returns
    -------
    norm_estimate : float
        Estimated ||A⁻¹||₁ (a lower bound in general).
    iterations : int
        Number of sweeps performed.
    converged : bool
        True if the stagnation test fired before `max_iter`.
    """
    n = A.shape[0]

    # Start from the uniform vector e/n, which has unit 1-norm.
    x = cp.ones(n, dtype=A.dtype) / n

    norm_estimate = 0.0
    converged = False
    iterations = max_iter  # overwritten if we exit early on convergence

    for step in range(max_iter):
        # Dual step: build sign(x) (zero entries treated as +1) and solve
        # A^T z = sign(x) with an iterative least-squares solver.
        direction = cp.sign(cp.real(x))
        direction[direction == 0] = 1
        z = apply_inverse(A.T, direction, method='lsmr', atol=1e-10, btol=1e-10)

        # The largest |z_i| both supplies the running estimate and selects
        # the pivot column for the primal step.
        z_mag = cp.abs(z)
        pivot = cp.argmax(z_mag)
        current = float(z_mag[pivot])

        if verbose:
            print_iteration(step + 1, current)

        # Relative stagnation test (skipped on the very first sweep, when
        # there is no previous estimate to compare against).
        if step > 0 and abs(current - norm_estimate) < tol * norm_estimate:
            norm_estimate = current
            iterations = step + 1
            converged = True
            break

        norm_estimate = current

        # Primal step: x <- A⁻¹ e_pivot. Its 1-norm is itself a lower
        # bound on ||A⁻¹||₁, so keep whichever estimate is larger.
        unit = cp.zeros(n, dtype=A.dtype)
        unit[pivot] = 1.0
        x = apply_inverse(A, unit, method='lsmr', atol=1e-10, btol=1e-10)

        x_norm1 = float(cp.sum(cp.abs(x)))
        norm_estimate = max(norm_estimate, x_norm1)

    return norm_estimate, iterations, converged
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def inverse_norm1_direct(A: sp.spmatrix, num_samples: int = 10) -> float:
    """
    Estimate ||A⁻¹||₁ by random probing (simpler but less accurate).

    Draws `num_samples` Gaussian vectors, rescales each to unit 1-norm,
    solves A x = b for every probe, and returns the largest observed
    ||x||₁. Since each probe has ||b||₁ = 1, every solution norm is a
    lower bound on ||A⁻¹||₁.

    Parameters
    ----------
    A : sparse matrix
        Square input matrix.
    num_samples : int, default=10
        Number of random probe vectors.

    Returns
    -------
    float
        Estimated (lower bound on) ||A⁻¹||₁.
    """
    n = A.shape[0]
    best = 0.0

    for _ in range(num_samples):
        # Gaussian probe, normalized to the unit sphere of the 1-norm.
        probe = cp.random.randn(n).astype(A.dtype)
        probe /= cp.linalg.norm(probe, ord=1)

        # Solve A x = probe without forming A⁻¹.
        solution = apply_inverse(A, probe, method='lsmr')

        best = max(best, float(cp.linalg.norm(solution, ord=1)))

    return best
|