passagemath-modules 10.6.31 (cp314-cp314-musllinux_1_2_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of passagemath-modules has been flagged as potentially problematic.
- passagemath_modules-10.6.31.dist-info/METADATA +281 -0
- passagemath_modules-10.6.31.dist-info/RECORD +808 -0
- passagemath_modules-10.6.31.dist-info/WHEEL +5 -0
- passagemath_modules-10.6.31.dist-info/top_level.txt +2 -0
- passagemath_modules.libs/libgcc_s-0cd532bd.so.1 +0 -0
- passagemath_modules.libs/libgfortran-2c33b284.so.5.0.0 +0 -0
- passagemath_modules.libs/libgmp-0e7fc84e.so.10.5.0 +0 -0
- passagemath_modules.libs/libgsl-42cda06f.so.28.0.0 +0 -0
- passagemath_modules.libs/libmpc-d8ebe4b5.so.3.3.1 +0 -0
- passagemath_modules.libs/libmpfr-aaecbfc0.so.6.2.1 +0 -0
- passagemath_modules.libs/libopenblasp-r0-905cb27d.3.29.so +0 -0
- passagemath_modules.libs/libquadmath-bb76a5fc.so.0.0.0 +0 -0
- sage/algebras/all__sagemath_modules.py +20 -0
- sage/algebras/catalog.py +148 -0
- sage/algebras/clifford_algebra.py +3107 -0
- sage/algebras/clifford_algebra_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/algebras/clifford_algebra_element.pxd +16 -0
- sage/algebras/clifford_algebra_element.pyx +997 -0
- sage/algebras/commutative_dga.py +4252 -0
- sage/algebras/exterior_algebra_groebner.cpython-314-x86_64-linux-musl.so +0 -0
- sage/algebras/exterior_algebra_groebner.pxd +55 -0
- sage/algebras/exterior_algebra_groebner.pyx +727 -0
- sage/algebras/finite_dimensional_algebras/all.py +2 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra.py +1029 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.pxd +12 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.pyx +706 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_ideal.py +196 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_morphism.py +255 -0
- sage/algebras/finite_gca.py +528 -0
- sage/algebras/group_algebra.py +232 -0
- sage/algebras/lie_algebras/abelian.py +197 -0
- sage/algebras/lie_algebras/affine_lie_algebra.py +1213 -0
- sage/algebras/lie_algebras/all.py +25 -0
- sage/algebras/lie_algebras/all__sagemath_modules.py +1 -0
- sage/algebras/lie_algebras/bch.py +177 -0
- sage/algebras/lie_algebras/bgg_dual_module.py +1184 -0
- sage/algebras/lie_algebras/bgg_resolution.py +232 -0
- sage/algebras/lie_algebras/center_uea.py +767 -0
- sage/algebras/lie_algebras/classical_lie_algebra.py +2516 -0
- sage/algebras/lie_algebras/examples.py +683 -0
- sage/algebras/lie_algebras/free_lie_algebra.py +973 -0
- sage/algebras/lie_algebras/heisenberg.py +820 -0
- sage/algebras/lie_algebras/lie_algebra.py +1562 -0
- sage/algebras/lie_algebras/lie_algebra_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/algebras/lie_algebras/lie_algebra_element.pxd +68 -0
- sage/algebras/lie_algebras/lie_algebra_element.pyx +2122 -0
- sage/algebras/lie_algebras/morphism.py +661 -0
- sage/algebras/lie_algebras/nilpotent_lie_algebra.py +457 -0
- sage/algebras/lie_algebras/onsager.py +1324 -0
- sage/algebras/lie_algebras/poincare_birkhoff_witt.py +816 -0
- sage/algebras/lie_algebras/quotient.py +462 -0
- sage/algebras/lie_algebras/rank_two_heisenberg_virasoro.py +355 -0
- sage/algebras/lie_algebras/representation.py +1040 -0
- sage/algebras/lie_algebras/structure_coefficients.py +459 -0
- sage/algebras/lie_algebras/subalgebra.py +967 -0
- sage/algebras/lie_algebras/symplectic_derivation.py +289 -0
- sage/algebras/lie_algebras/verma_module.py +1630 -0
- sage/algebras/lie_algebras/virasoro.py +1186 -0
- sage/algebras/octonion_algebra.cpython-314-x86_64-linux-musl.so +0 -0
- sage/algebras/octonion_algebra.pxd +20 -0
- sage/algebras/octonion_algebra.pyx +987 -0
- sage/algebras/orlik_solomon.py +907 -0
- sage/algebras/orlik_terao.py +779 -0
- sage/algebras/steenrod/all.py +7 -0
- sage/algebras/steenrod/steenrod_algebra.py +4258 -0
- sage/algebras/steenrod/steenrod_algebra_bases.py +1179 -0
- sage/algebras/steenrod/steenrod_algebra_misc.py +1167 -0
- sage/algebras/steenrod/steenrod_algebra_mult.py +954 -0
- sage/algebras/weyl_algebra.py +1126 -0
- sage/all__sagemath_modules.py +62 -0
- sage/calculus/all__sagemath_modules.py +19 -0
- sage/calculus/expr.py +205 -0
- sage/calculus/integration.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/integration.pyx +698 -0
- sage/calculus/interpolation.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/interpolation.pxd +13 -0
- sage/calculus/interpolation.pyx +387 -0
- sage/calculus/interpolators.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/interpolators.pyx +326 -0
- sage/calculus/ode.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/ode.pxd +5 -0
- sage/calculus/ode.pyx +610 -0
- sage/calculus/riemann.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/riemann.pyx +1521 -0
- sage/calculus/test_sympy.py +201 -0
- sage/calculus/transforms/all.py +7 -0
- sage/calculus/transforms/dft.py +844 -0
- sage/calculus/transforms/dwt.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/transforms/dwt.pxd +7 -0
- sage/calculus/transforms/dwt.pyx +160 -0
- sage/calculus/transforms/fft.cpython-314-x86_64-linux-musl.so +0 -0
- sage/calculus/transforms/fft.pxd +12 -0
- sage/calculus/transforms/fft.pyx +487 -0
- sage/calculus/wester.py +662 -0
- sage/coding/abstract_code.py +1108 -0
- sage/coding/ag_code.py +868 -0
- sage/coding/ag_code_decoders.cpython-314-x86_64-linux-musl.so +0 -0
- sage/coding/ag_code_decoders.pyx +2639 -0
- sage/coding/all.py +15 -0
- sage/coding/bch_code.py +494 -0
- sage/coding/binary_code.cpython-314-x86_64-linux-musl.so +0 -0
- sage/coding/binary_code.pxd +124 -0
- sage/coding/binary_code.pyx +4139 -0
- sage/coding/bounds_catalog.py +43 -0
- sage/coding/channel.py +819 -0
- sage/coding/channels_catalog.py +29 -0
- sage/coding/code_bounds.py +755 -0
- sage/coding/code_constructions.py +804 -0
- sage/coding/codes_catalog.py +111 -0
- sage/coding/cyclic_code.py +1329 -0
- sage/coding/databases.py +316 -0
- sage/coding/decoder.py +373 -0
- sage/coding/decoders_catalog.py +88 -0
- sage/coding/delsarte_bounds.py +709 -0
- sage/coding/encoder.py +390 -0
- sage/coding/encoders_catalog.py +64 -0
- sage/coding/extended_code.py +468 -0
- sage/coding/gabidulin_code.py +1058 -0
- sage/coding/golay_code.py +404 -0
- sage/coding/goppa_code.py +441 -0
- sage/coding/grs_code.py +2371 -0
- sage/coding/guava.py +107 -0
- sage/coding/guruswami_sudan/all.py +1 -0
- sage/coding/guruswami_sudan/gs_decoder.py +897 -0
- sage/coding/guruswami_sudan/interpolation.py +409 -0
- sage/coding/guruswami_sudan/utils.py +176 -0
- sage/coding/hamming_code.py +176 -0
- sage/coding/information_set_decoder.py +1032 -0
- sage/coding/kasami_codes.cpython-314-x86_64-linux-musl.so +0 -0
- sage/coding/kasami_codes.pyx +351 -0
- sage/coding/linear_code.py +3067 -0
- sage/coding/linear_code_no_metric.py +1354 -0
- sage/coding/linear_rank_metric.py +961 -0
- sage/coding/parity_check_code.py +353 -0
- sage/coding/punctured_code.py +719 -0
- sage/coding/reed_muller_code.py +999 -0
- sage/coding/self_dual_codes.py +942 -0
- sage/coding/source_coding/all.py +2 -0
- sage/coding/source_coding/huffman.py +553 -0
- sage/coding/subfield_subcode.py +423 -0
- sage/coding/two_weight_db.py +399 -0
- sage/combinat/all__sagemath_modules.py +7 -0
- sage/combinat/cartesian_product.py +347 -0
- sage/combinat/family.py +11 -0
- sage/combinat/free_module.py +1977 -0
- sage/combinat/root_system/all.py +147 -0
- sage/combinat/root_system/ambient_space.py +527 -0
- sage/combinat/root_system/associahedron.py +471 -0
- sage/combinat/root_system/braid_move_calculator.py +143 -0
- sage/combinat/root_system/braid_orbit.cpython-314-x86_64-linux-musl.so +0 -0
- sage/combinat/root_system/braid_orbit.pyx +144 -0
- sage/combinat/root_system/branching_rules.py +2301 -0
- sage/combinat/root_system/cartan_matrix.py +1245 -0
- sage/combinat/root_system/cartan_type.py +3069 -0
- sage/combinat/root_system/coxeter_group.py +162 -0
- sage/combinat/root_system/coxeter_matrix.py +1261 -0
- sage/combinat/root_system/coxeter_type.py +681 -0
- sage/combinat/root_system/dynkin_diagram.py +900 -0
- sage/combinat/root_system/extended_affine_weyl_group.py +2993 -0
- sage/combinat/root_system/fundamental_group.py +795 -0
- sage/combinat/root_system/hecke_algebra_representation.py +1203 -0
- sage/combinat/root_system/integrable_representations.py +1227 -0
- sage/combinat/root_system/non_symmetric_macdonald_polynomials.py +1965 -0
- sage/combinat/root_system/pieri_factors.py +1147 -0
- sage/combinat/root_system/plot.py +1615 -0
- sage/combinat/root_system/root_lattice_realization_algebras.py +1214 -0
- sage/combinat/root_system/root_lattice_realizations.py +4628 -0
- sage/combinat/root_system/root_space.py +487 -0
- sage/combinat/root_system/root_system.py +882 -0
- sage/combinat/root_system/type_A.py +348 -0
- sage/combinat/root_system/type_A_affine.py +227 -0
- sage/combinat/root_system/type_A_infinity.py +241 -0
- sage/combinat/root_system/type_B.py +347 -0
- sage/combinat/root_system/type_BC_affine.py +287 -0
- sage/combinat/root_system/type_B_affine.py +216 -0
- sage/combinat/root_system/type_C.py +317 -0
- sage/combinat/root_system/type_C_affine.py +188 -0
- sage/combinat/root_system/type_D.py +357 -0
- sage/combinat/root_system/type_D_affine.py +208 -0
- sage/combinat/root_system/type_E.py +641 -0
- sage/combinat/root_system/type_E_affine.py +231 -0
- sage/combinat/root_system/type_F.py +387 -0
- sage/combinat/root_system/type_F_affine.py +137 -0
- sage/combinat/root_system/type_G.py +293 -0
- sage/combinat/root_system/type_G_affine.py +132 -0
- sage/combinat/root_system/type_H.py +105 -0
- sage/combinat/root_system/type_I.py +110 -0
- sage/combinat/root_system/type_Q.py +150 -0
- sage/combinat/root_system/type_affine.py +509 -0
- sage/combinat/root_system/type_dual.py +704 -0
- sage/combinat/root_system/type_folded.py +301 -0
- sage/combinat/root_system/type_marked.py +748 -0
- sage/combinat/root_system/type_reducible.py +601 -0
- sage/combinat/root_system/type_relabel.py +730 -0
- sage/combinat/root_system/type_super_A.py +837 -0
- sage/combinat/root_system/weight_lattice_realizations.py +1188 -0
- sage/combinat/root_system/weight_space.py +639 -0
- sage/combinat/root_system/weyl_characters.py +2238 -0
- sage/crypto/__init__.py +4 -0
- sage/crypto/all.py +28 -0
- sage/crypto/block_cipher/all.py +7 -0
- sage/crypto/block_cipher/des.py +1065 -0
- sage/crypto/block_cipher/miniaes.py +2171 -0
- sage/crypto/block_cipher/present.py +909 -0
- sage/crypto/block_cipher/sdes.py +1527 -0
- sage/crypto/boolean_function.cpython-314-x86_64-linux-musl.so +0 -0
- sage/crypto/boolean_function.pxd +10 -0
- sage/crypto/boolean_function.pyx +1487 -0
- sage/crypto/cipher.py +78 -0
- sage/crypto/classical.py +3668 -0
- sage/crypto/classical_cipher.py +569 -0
- sage/crypto/cryptosystem.py +387 -0
- sage/crypto/key_exchange/all.py +7 -0
- sage/crypto/key_exchange/catalog.py +24 -0
- sage/crypto/key_exchange/diffie_hellman.py +323 -0
- sage/crypto/key_exchange/key_exchange_scheme.py +107 -0
- sage/crypto/lattice.py +312 -0
- sage/crypto/lfsr.py +295 -0
- sage/crypto/lwe.py +840 -0
- sage/crypto/mq/__init__.py +4 -0
- sage/crypto/mq/mpolynomialsystemgenerator.py +204 -0
- sage/crypto/mq/rijndael_gf.py +2345 -0
- sage/crypto/mq/sbox.py +7 -0
- sage/crypto/mq/sr.py +3344 -0
- sage/crypto/public_key/all.py +5 -0
- sage/crypto/public_key/blum_goldwasser.py +776 -0
- sage/crypto/sbox.cpython-314-x86_64-linux-musl.so +0 -0
- sage/crypto/sbox.pyx +2090 -0
- sage/crypto/sboxes.py +2090 -0
- sage/crypto/stream.py +390 -0
- sage/crypto/stream_cipher.py +297 -0
- sage/crypto/util.py +519 -0
- sage/ext/all__sagemath_modules.py +1 -0
- sage/ext/interpreters/__init__.py +1 -0
- sage/ext/interpreters/all__sagemath_modules.py +2 -0
- sage/ext/interpreters/wrapper_cc.cpython-314-x86_64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_cc.pxd +30 -0
- sage/ext/interpreters/wrapper_cc.pyx +252 -0
- sage/ext/interpreters/wrapper_cdf.cpython-314-x86_64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_cdf.pxd +26 -0
- sage/ext/interpreters/wrapper_cdf.pyx +245 -0
- sage/ext/interpreters/wrapper_rdf.cpython-314-x86_64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_rdf.pxd +23 -0
- sage/ext/interpreters/wrapper_rdf.pyx +221 -0
- sage/ext/interpreters/wrapper_rr.cpython-314-x86_64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_rr.pxd +28 -0
- sage/ext/interpreters/wrapper_rr.pyx +335 -0
- sage/geometry/all__sagemath_modules.py +5 -0
- sage/geometry/toric_lattice.py +1745 -0
- sage/geometry/toric_lattice_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/geometry/toric_lattice_element.pyx +432 -0
- sage/groups/abelian_gps/abelian_group.py +1925 -0
- sage/groups/abelian_gps/abelian_group_element.py +164 -0
- sage/groups/abelian_gps/all__sagemath_modules.py +5 -0
- sage/groups/abelian_gps/dual_abelian_group.py +421 -0
- sage/groups/abelian_gps/dual_abelian_group_element.py +179 -0
- sage/groups/abelian_gps/element_base.py +341 -0
- sage/groups/abelian_gps/values.py +488 -0
- sage/groups/additive_abelian/additive_abelian_group.py +476 -0
- sage/groups/additive_abelian/additive_abelian_wrapper.py +857 -0
- sage/groups/additive_abelian/all.py +4 -0
- sage/groups/additive_abelian/qmodnz.py +231 -0
- sage/groups/additive_abelian/qmodnz_element.py +349 -0
- sage/groups/affine_gps/affine_group.py +535 -0
- sage/groups/affine_gps/all.py +1 -0
- sage/groups/affine_gps/catalog.py +17 -0
- sage/groups/affine_gps/euclidean_group.py +246 -0
- sage/groups/affine_gps/group_element.py +562 -0
- sage/groups/all__sagemath_modules.py +12 -0
- sage/groups/galois_group.py +479 -0
- sage/groups/matrix_gps/all.py +4 -0
- sage/groups/matrix_gps/all__sagemath_modules.py +13 -0
- sage/groups/matrix_gps/catalog.py +26 -0
- sage/groups/matrix_gps/coxeter_group.py +927 -0
- sage/groups/matrix_gps/finitely_generated.py +487 -0
- sage/groups/matrix_gps/group_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/groups/matrix_gps/group_element.pxd +11 -0
- sage/groups/matrix_gps/group_element.pyx +431 -0
- sage/groups/matrix_gps/linear.py +440 -0
- sage/groups/matrix_gps/matrix_group.py +617 -0
- sage/groups/matrix_gps/named_group.py +296 -0
- sage/groups/matrix_gps/orthogonal.py +544 -0
- sage/groups/matrix_gps/symplectic.py +251 -0
- sage/groups/matrix_gps/unitary.py +436 -0
- sage/groups/misc_gps/all__sagemath_modules.py +1 -0
- sage/groups/misc_gps/argument_groups.py +1905 -0
- sage/groups/misc_gps/imaginary_groups.py +479 -0
- sage/groups/perm_gps/all__sagemath_modules.py +1 -0
- sage/groups/perm_gps/partn_ref/all__sagemath_modules.py +1 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.cpython-314-x86_64-linux-musl.so +0 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.pxd +41 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.pyx +1167 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.cpython-314-x86_64-linux-musl.so +0 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.pxd +31 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.pyx +385 -0
- sage/homology/algebraic_topological_model.py +595 -0
- sage/homology/all.py +2 -0
- sage/homology/all__sagemath_modules.py +8 -0
- sage/homology/chain_complex.py +2148 -0
- sage/homology/chain_complex_homspace.py +165 -0
- sage/homology/chain_complex_morphism.py +629 -0
- sage/homology/chain_homotopy.py +604 -0
- sage/homology/chains.py +653 -0
- sage/homology/free_resolution.py +923 -0
- sage/homology/graded_resolution.py +567 -0
- sage/homology/hochschild_complex.py +756 -0
- sage/homology/homology_group.py +188 -0
- sage/homology/homology_morphism.py +422 -0
- sage/homology/homology_vector_space_with_basis.py +1454 -0
- sage/homology/koszul_complex.py +169 -0
- sage/homology/matrix_utils.py +205 -0
- sage/libs/all__sagemath_modules.py +1 -0
- sage/libs/gsl/__init__.py +1 -0
- sage/libs/gsl/airy.pxd +56 -0
- sage/libs/gsl/all.pxd +66 -0
- sage/libs/gsl/array.cpython-314-x86_64-linux-musl.so +0 -0
- sage/libs/gsl/array.pxd +5 -0
- sage/libs/gsl/array.pyx +102 -0
- sage/libs/gsl/bessel.pxd +208 -0
- sage/libs/gsl/blas.pxd +116 -0
- sage/libs/gsl/blas_types.pxd +34 -0
- sage/libs/gsl/block.pxd +52 -0
- sage/libs/gsl/chebyshev.pxd +37 -0
- sage/libs/gsl/clausen.pxd +12 -0
- sage/libs/gsl/combination.pxd +47 -0
- sage/libs/gsl/complex.pxd +151 -0
- sage/libs/gsl/coulomb.pxd +30 -0
- sage/libs/gsl/coupling.pxd +21 -0
- sage/libs/gsl/dawson.pxd +12 -0
- sage/libs/gsl/debye.pxd +24 -0
- sage/libs/gsl/dilog.pxd +14 -0
- sage/libs/gsl/eigen.pxd +46 -0
- sage/libs/gsl/elementary.pxd +12 -0
- sage/libs/gsl/ellint.pxd +48 -0
- sage/libs/gsl/elljac.pxd +8 -0
- sage/libs/gsl/erf.pxd +32 -0
- sage/libs/gsl/errno.pxd +26 -0
- sage/libs/gsl/exp.pxd +44 -0
- sage/libs/gsl/expint.pxd +44 -0
- sage/libs/gsl/fermi_dirac.pxd +44 -0
- sage/libs/gsl/fft.pxd +121 -0
- sage/libs/gsl/fit.pxd +50 -0
- sage/libs/gsl/gamma.pxd +94 -0
- sage/libs/gsl/gegenbauer.pxd +26 -0
- sage/libs/gsl/histogram.pxd +176 -0
- sage/libs/gsl/hyperg.pxd +52 -0
- sage/libs/gsl/integration.pxd +69 -0
- sage/libs/gsl/interp.pxd +109 -0
- sage/libs/gsl/laguerre.pxd +24 -0
- sage/libs/gsl/lambert.pxd +16 -0
- sage/libs/gsl/legendre.pxd +90 -0
- sage/libs/gsl/linalg.pxd +185 -0
- sage/libs/gsl/log.pxd +26 -0
- sage/libs/gsl/math.pxd +43 -0
- sage/libs/gsl/matrix.pxd +143 -0
- sage/libs/gsl/matrix_complex.pxd +130 -0
- sage/libs/gsl/min.pxd +67 -0
- sage/libs/gsl/monte.pxd +56 -0
- sage/libs/gsl/ntuple.pxd +32 -0
- sage/libs/gsl/odeiv.pxd +70 -0
- sage/libs/gsl/permutation.pxd +78 -0
- sage/libs/gsl/poly.pxd +40 -0
- sage/libs/gsl/pow_int.pxd +12 -0
- sage/libs/gsl/psi.pxd +28 -0
- sage/libs/gsl/qrng.pxd +29 -0
- sage/libs/gsl/random.pxd +257 -0
- sage/libs/gsl/rng.pxd +100 -0
- sage/libs/gsl/roots.pxd +72 -0
- sage/libs/gsl/sort.pxd +36 -0
- sage/libs/gsl/statistics.pxd +59 -0
- sage/libs/gsl/sum.pxd +55 -0
- sage/libs/gsl/synchrotron.pxd +16 -0
- sage/libs/gsl/transport.pxd +24 -0
- sage/libs/gsl/trig.pxd +58 -0
- sage/libs/gsl/types.pxd +137 -0
- sage/libs/gsl/vector.pxd +101 -0
- sage/libs/gsl/vector_complex.pxd +83 -0
- sage/libs/gsl/wavelet.pxd +49 -0
- sage/libs/gsl/zeta.pxd +28 -0
- sage/libs/mpc/__init__.pxd +114 -0
- sage/libs/mpc/types.pxd +28 -0
- sage/libs/mpfr/__init__.pxd +299 -0
- sage/libs/mpfr/types.pxd +26 -0
- sage/libs/mpmath/__init__.py +1 -0
- sage/libs/mpmath/all.py +27 -0
- sage/libs/mpmath/all__sagemath_modules.py +1 -0
- sage/libs/mpmath/utils.cpython-314-x86_64-linux-musl.so +0 -0
- sage/libs/mpmath/utils.pxd +4 -0
- sage/libs/mpmath/utils.pyx +319 -0
- sage/matrix/action.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/action.pxd +26 -0
- sage/matrix/action.pyx +596 -0
- sage/matrix/all.py +9 -0
- sage/matrix/args.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/args.pxd +144 -0
- sage/matrix/args.pyx +1668 -0
- sage/matrix/benchmark.py +1258 -0
- sage/matrix/berlekamp_massey.py +95 -0
- sage/matrix/compute_J_ideal.py +926 -0
- sage/matrix/constructor.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/constructor.pyx +750 -0
- sage/matrix/docs.py +430 -0
- sage/matrix/echelon_matrix.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/echelon_matrix.pyx +155 -0
- sage/matrix/matrix.pxd +2 -0
- sage/matrix/matrix0.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix0.pxd +68 -0
- sage/matrix/matrix0.pyx +6324 -0
- sage/matrix/matrix1.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix1.pxd +8 -0
- sage/matrix/matrix1.pyx +2851 -0
- sage/matrix/matrix2.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix2.pxd +25 -0
- sage/matrix/matrix2.pyx +20181 -0
- sage/matrix/matrix_cdv.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_cdv.pxd +4 -0
- sage/matrix/matrix_cdv.pyx +93 -0
- sage/matrix/matrix_complex_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_complex_double_dense.pxd +5 -0
- sage/matrix/matrix_complex_double_dense.pyx +98 -0
- sage/matrix/matrix_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_dense.pxd +5 -0
- sage/matrix/matrix_dense.pyx +343 -0
- sage/matrix/matrix_domain_dense.pxd +5 -0
- sage/matrix/matrix_domain_sparse.pxd +5 -0
- sage/matrix/matrix_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_double_dense.pxd +7 -0
- sage/matrix/matrix_double_dense.pyx +3906 -0
- sage/matrix/matrix_double_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_double_sparse.pxd +6 -0
- sage/matrix/matrix_double_sparse.pyx +248 -0
- sage/matrix/matrix_generic_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_generic_dense.pxd +7 -0
- sage/matrix/matrix_generic_dense.pyx +354 -0
- sage/matrix/matrix_generic_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_generic_sparse.pxd +7 -0
- sage/matrix/matrix_generic_sparse.pyx +461 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.pxd +5 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.pyx +115 -0
- sage/matrix/matrix_misc.py +313 -0
- sage/matrix/matrix_numpy_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_numpy_dense.pxd +14 -0
- sage/matrix/matrix_numpy_dense.pyx +450 -0
- sage/matrix/matrix_numpy_integer_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_numpy_integer_dense.pxd +7 -0
- sage/matrix/matrix_numpy_integer_dense.pyx +59 -0
- sage/matrix/matrix_polynomial_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_polynomial_dense.pxd +5 -0
- sage/matrix/matrix_polynomial_dense.pyx +5341 -0
- sage/matrix/matrix_real_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_real_double_dense.pxd +7 -0
- sage/matrix/matrix_real_double_dense.pyx +122 -0
- sage/matrix/matrix_space.py +2848 -0
- sage/matrix/matrix_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_sparse.pxd +5 -0
- sage/matrix/matrix_sparse.pyx +1222 -0
- sage/matrix/matrix_window.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/matrix_window.pxd +37 -0
- sage/matrix/matrix_window.pyx +242 -0
- sage/matrix/misc_mpfr.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/misc_mpfr.pyx +80 -0
- sage/matrix/operation_table.py +1182 -0
- sage/matrix/special.py +3666 -0
- sage/matrix/strassen.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matrix/strassen.pyx +851 -0
- sage/matrix/symplectic_basis.py +541 -0
- sage/matrix/template.pxd +6 -0
- sage/matrix/tests.py +71 -0
- sage/matroids/advanced.py +77 -0
- sage/matroids/all.py +13 -0
- sage/matroids/basis_exchange_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/basis_exchange_matroid.pxd +96 -0
- sage/matroids/basis_exchange_matroid.pyx +2344 -0
- sage/matroids/basis_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/basis_matroid.pxd +45 -0
- sage/matroids/basis_matroid.pyx +1217 -0
- sage/matroids/catalog.py +44 -0
- sage/matroids/chow_ring.py +473 -0
- sage/matroids/chow_ring_ideal.py +849 -0
- sage/matroids/circuit_closures_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/circuit_closures_matroid.pxd +16 -0
- sage/matroids/circuit_closures_matroid.pyx +559 -0
- sage/matroids/circuits_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/circuits_matroid.pxd +38 -0
- sage/matroids/circuits_matroid.pyx +947 -0
- sage/matroids/constructor.py +1086 -0
- sage/matroids/database_collections.py +365 -0
- sage/matroids/database_matroids.py +5338 -0
- sage/matroids/dual_matroid.py +583 -0
- sage/matroids/extension.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/extension.pxd +34 -0
- sage/matroids/extension.pyx +519 -0
- sage/matroids/flats_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/flats_matroid.pxd +28 -0
- sage/matroids/flats_matroid.pyx +715 -0
- sage/matroids/gammoid.py +600 -0
- sage/matroids/graphic_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/graphic_matroid.pxd +39 -0
- sage/matroids/graphic_matroid.pyx +2024 -0
- sage/matroids/lean_matrix.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/lean_matrix.pxd +126 -0
- sage/matroids/lean_matrix.pyx +3667 -0
- sage/matroids/linear_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/linear_matroid.pxd +180 -0
- sage/matroids/linear_matroid.pyx +6649 -0
- sage/matroids/matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/matroid.pxd +243 -0
- sage/matroids/matroid.pyx +8759 -0
- sage/matroids/matroids_catalog.py +190 -0
- sage/matroids/matroids_plot_helpers.py +890 -0
- sage/matroids/minor_matroid.py +480 -0
- sage/matroids/minorfix.h +9 -0
- sage/matroids/named_matroids.py +5 -0
- sage/matroids/rank_matroid.py +268 -0
- sage/matroids/set_system.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/set_system.pxd +38 -0
- sage/matroids/set_system.pyx +800 -0
- sage/matroids/transversal_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/transversal_matroid.pxd +14 -0
- sage/matroids/transversal_matroid.pyx +893 -0
- sage/matroids/union_matroid.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/union_matroid.pxd +20 -0
- sage/matroids/union_matroid.pyx +331 -0
- sage/matroids/unpickling.cpython-314-x86_64-linux-musl.so +0 -0
- sage/matroids/unpickling.pyx +843 -0
- sage/matroids/utilities.py +809 -0
- sage/misc/all__sagemath_modules.py +20 -0
- sage/misc/c3.cpython-314-x86_64-linux-musl.so +0 -0
- sage/misc/c3.pyx +238 -0
- sage/misc/compat.py +87 -0
- sage/misc/element_with_label.py +173 -0
- sage/misc/func_persist.py +79 -0
- sage/misc/pickle_old.cpython-314-x86_64-linux-musl.so +0 -0
- sage/misc/pickle_old.pyx +19 -0
- sage/misc/proof.py +7 -0
- sage/misc/replace_dot_all.py +472 -0
- sage/misc/sagedoc_conf.py +168 -0
- sage/misc/sphinxify.py +167 -0
- sage/misc/test_class_pickling.py +85 -0
- sage/modules/all.py +42 -0
- sage/modules/complex_double_vector.py +25 -0
- sage/modules/diamond_cutting.py +380 -0
- sage/modules/fg_pid/all.py +1 -0
- sage/modules/fg_pid/fgp_element.py +456 -0
- sage/modules/fg_pid/fgp_module.py +2091 -0
- sage/modules/fg_pid/fgp_morphism.py +550 -0
- sage/modules/filtered_vector_space.py +1271 -0
- sage/modules/finite_submodule_iter.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/finite_submodule_iter.pxd +27 -0
- sage/modules/finite_submodule_iter.pyx +452 -0
- sage/modules/fp_graded/all.py +1 -0
- sage/modules/fp_graded/element.py +346 -0
- sage/modules/fp_graded/free_element.py +298 -0
- sage/modules/fp_graded/free_homspace.py +53 -0
- sage/modules/fp_graded/free_module.py +1060 -0
- sage/modules/fp_graded/free_morphism.py +217 -0
- sage/modules/fp_graded/homspace.py +563 -0
- sage/modules/fp_graded/module.py +1340 -0
- sage/modules/fp_graded/morphism.py +1990 -0
- sage/modules/fp_graded/steenrod/all.py +1 -0
- sage/modules/fp_graded/steenrod/homspace.py +65 -0
- sage/modules/fp_graded/steenrod/module.py +477 -0
- sage/modules/fp_graded/steenrod/morphism.py +404 -0
- sage/modules/fp_graded/steenrod/profile.py +241 -0
- sage/modules/free_module.py +8447 -0
- sage/modules/free_module_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/free_module_element.pxd +22 -0
- sage/modules/free_module_element.pyx +5445 -0
- sage/modules/free_module_homspace.py +369 -0
- sage/modules/free_module_integer.py +896 -0
- sage/modules/free_module_morphism.py +823 -0
- sage/modules/free_module_pseudohomspace.py +352 -0
- sage/modules/free_module_pseudomorphism.py +578 -0
- sage/modules/free_quadratic_module.py +1706 -0
- sage/modules/free_quadratic_module_integer_symmetric.py +1790 -0
- sage/modules/matrix_morphism.py +1745 -0
- sage/modules/misc.py +103 -0
- sage/modules/module_functors.py +192 -0
- sage/modules/multi_filtered_vector_space.py +719 -0
- sage/modules/ore_module.py +2208 -0
- sage/modules/ore_module_element.py +178 -0
- sage/modules/ore_module_homspace.py +147 -0
- sage/modules/ore_module_morphism.py +968 -0
- sage/modules/quotient_module.py +699 -0
- sage/modules/real_double_vector.py +22 -0
- sage/modules/submodule.py +255 -0
- sage/modules/tensor_operations.py +567 -0
- sage/modules/torsion_quadratic_module.py +1352 -0
- sage/modules/tutorial_free_modules.py +248 -0
- sage/modules/vector_complex_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_complex_double_dense.pxd +6 -0
- sage/modules/vector_complex_double_dense.pyx +117 -0
- sage/modules/vector_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_double_dense.pxd +6 -0
- sage/modules/vector_double_dense.pyx +604 -0
- sage/modules/vector_integer_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_integer_dense.pxd +15 -0
- sage/modules/vector_integer_dense.pyx +361 -0
- sage/modules/vector_integer_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_integer_sparse.pxd +29 -0
- sage/modules/vector_integer_sparse.pyx +406 -0
- sage/modules/vector_modn_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_modn_dense.pxd +12 -0
- sage/modules/vector_modn_dense.pyx +394 -0
- sage/modules/vector_modn_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_modn_sparse.pxd +21 -0
- sage/modules/vector_modn_sparse.pyx +298 -0
- sage/modules/vector_numpy_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_numpy_dense.pxd +15 -0
- sage/modules/vector_numpy_dense.pyx +304 -0
- sage/modules/vector_numpy_integer_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_numpy_integer_dense.pxd +7 -0
- sage/modules/vector_numpy_integer_dense.pyx +54 -0
- sage/modules/vector_rational_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_rational_dense.pxd +15 -0
- sage/modules/vector_rational_dense.pyx +387 -0
- sage/modules/vector_rational_sparse.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_rational_sparse.pxd +30 -0
- sage/modules/vector_rational_sparse.pyx +413 -0
- sage/modules/vector_real_double_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/vector_real_double_dense.pxd +6 -0
- sage/modules/vector_real_double_dense.pyx +126 -0
- sage/modules/vector_space_homspace.py +430 -0
- sage/modules/vector_space_morphism.py +989 -0
- sage/modules/with_basis/all.py +15 -0
- sage/modules/with_basis/cell_module.py +494 -0
- sage/modules/with_basis/indexed_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/modules/with_basis/indexed_element.pxd +13 -0
- sage/modules/with_basis/indexed_element.pyx +1058 -0
- sage/modules/with_basis/invariant.py +1075 -0
- sage/modules/with_basis/morphism.py +1636 -0
- sage/modules/with_basis/representation.py +2939 -0
- sage/modules/with_basis/subquotient.py +685 -0
- sage/numerical/all__sagemath_modules.py +6 -0
- sage/numerical/gauss_legendre.cpython-314-x86_64-linux-musl.so +0 -0
- sage/numerical/gauss_legendre.pyx +381 -0
- sage/numerical/optimize.py +910 -0
- sage/probability/all.py +10 -0
- sage/probability/probability_distribution.cpython-314-x86_64-linux-musl.so +0 -0
- sage/probability/probability_distribution.pyx +1242 -0
- sage/probability/random_variable.py +411 -0
- sage/quadratic_forms/all.py +4 -0
- sage/quadratic_forms/all__sagemath_modules.py +15 -0
- sage/quadratic_forms/binary_qf.py +2042 -0
- sage/quadratic_forms/bqf_class_group.py +748 -0
- sage/quadratic_forms/constructions.py +93 -0
- sage/quadratic_forms/count_local_2.cpython-314-x86_64-linux-musl.so +0 -0
- sage/quadratic_forms/count_local_2.pyx +365 -0
- sage/quadratic_forms/extras.py +195 -0
- sage/quadratic_forms/quadratic_form.py +1753 -0
- sage/quadratic_forms/quadratic_form__count_local_2.py +221 -0
- sage/quadratic_forms/quadratic_form__equivalence_testing.py +708 -0
- sage/quadratic_forms/quadratic_form__evaluate.cpython-314-x86_64-linux-musl.so +0 -0
- sage/quadratic_forms/quadratic_form__evaluate.pyx +139 -0
- sage/quadratic_forms/quadratic_form__local_density_congruence.py +977 -0
- sage/quadratic_forms/quadratic_form__local_field_invariants.py +1072 -0
- sage/quadratic_forms/quadratic_form__neighbors.py +424 -0
- sage/quadratic_forms/quadratic_form__reduction_theory.py +488 -0
- sage/quadratic_forms/quadratic_form__split_local_covering.py +416 -0
- sage/quadratic_forms/quadratic_form__ternary_Tornaria.py +657 -0
- sage/quadratic_forms/quadratic_form__theta.py +352 -0
- sage/quadratic_forms/quadratic_form__variable_substitutions.py +370 -0
- sage/quadratic_forms/random_quadraticform.py +209 -0
- sage/quadratic_forms/ternary.cpython-314-x86_64-linux-musl.so +0 -0
- sage/quadratic_forms/ternary.pyx +1154 -0
- sage/quadratic_forms/ternary_qf.py +2027 -0
- sage/rings/all__sagemath_modules.py +28 -0
- sage/rings/asymptotic/all__sagemath_modules.py +1 -0
- sage/rings/asymptotic/misc.py +1252 -0
- sage/rings/cc.py +4 -0
- sage/rings/cfinite_sequence.py +1306 -0
- sage/rings/complex_conversion.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/complex_conversion.pxd +8 -0
- sage/rings/complex_conversion.pyx +23 -0
- sage/rings/complex_double.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/complex_double.pxd +21 -0
- sage/rings/complex_double.pyx +2654 -0
- sage/rings/complex_mpc.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/complex_mpc.pxd +21 -0
- sage/rings/complex_mpc.pyx +2576 -0
- sage/rings/complex_mpfr.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/complex_mpfr.pxd +18 -0
- sage/rings/complex_mpfr.pyx +3602 -0
- sage/rings/derivation.py +2334 -0
- sage/rings/finite_rings/all__sagemath_modules.py +1 -0
- sage/rings/finite_rings/maps_finite_field.py +191 -0
- sage/rings/function_field/all__sagemath_modules.py +8 -0
- sage/rings/function_field/derivations.py +102 -0
- sage/rings/function_field/derivations_rational.py +132 -0
- sage/rings/function_field/differential.py +853 -0
- sage/rings/function_field/divisor.py +1107 -0
- sage/rings/function_field/drinfeld_modules/action.py +199 -0
- sage/rings/function_field/drinfeld_modules/all.py +1 -0
- sage/rings/function_field/drinfeld_modules/charzero_drinfeld_module.py +673 -0
- sage/rings/function_field/drinfeld_modules/drinfeld_module.py +2087 -0
- sage/rings/function_field/drinfeld_modules/finite_drinfeld_module.py +1131 -0
- sage/rings/function_field/drinfeld_modules/homset.py +420 -0
- sage/rings/function_field/drinfeld_modules/morphism.py +820 -0
- sage/rings/function_field/hermite_form_polynomial.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/function_field/hermite_form_polynomial.pyx +188 -0
- sage/rings/function_field/khuri_makdisi.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/function_field/khuri_makdisi.pyx +935 -0
- sage/rings/invariants/all.py +4 -0
- sage/rings/invariants/invariant_theory.py +4597 -0
- sage/rings/invariants/reconstruction.py +395 -0
- sage/rings/polynomial/all__sagemath_modules.py +17 -0
- sage/rings/polynomial/integer_valued_polynomials.py +1230 -0
- sage/rings/polynomial/laurent_polynomial_mpair.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/laurent_polynomial_mpair.pxd +15 -0
- sage/rings/polynomial/laurent_polynomial_mpair.pyx +2023 -0
- sage/rings/polynomial/ore_function_element.py +952 -0
- sage/rings/polynomial/ore_function_field.py +1028 -0
- sage/rings/polynomial/ore_polynomial_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/ore_polynomial_element.pxd +48 -0
- sage/rings/polynomial/ore_polynomial_element.pyx +3145 -0
- sage/rings/polynomial/ore_polynomial_ring.py +1334 -0
- sage/rings/polynomial/polynomial_real_mpfr_dense.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/polynomial_real_mpfr_dense.pyx +788 -0
- sage/rings/polynomial/q_integer_valued_polynomials.py +1264 -0
- sage/rings/polynomial/skew_polynomial_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_element.pxd +9 -0
- sage/rings/polynomial/skew_polynomial_element.pyx +684 -0
- sage/rings/polynomial/skew_polynomial_finite_field.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_finite_field.pxd +19 -0
- sage/rings/polynomial/skew_polynomial_finite_field.pyx +1093 -0
- sage/rings/polynomial/skew_polynomial_finite_order.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_finite_order.pxd +10 -0
- sage/rings/polynomial/skew_polynomial_finite_order.pyx +567 -0
- sage/rings/polynomial/skew_polynomial_ring.py +908 -0
- sage/rings/real_double_element_gsl.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/real_double_element_gsl.pxd +8 -0
- sage/rings/real_double_element_gsl.pyx +794 -0
- sage/rings/real_field.py +58 -0
- sage/rings/real_mpfr.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/real_mpfr.pxd +29 -0
- sage/rings/real_mpfr.pyx +6122 -0
- sage/rings/ring_extension.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/ring_extension.pxd +42 -0
- sage/rings/ring_extension.pyx +2779 -0
- sage/rings/ring_extension_conversion.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/ring_extension_conversion.pxd +16 -0
- sage/rings/ring_extension_conversion.pyx +462 -0
- sage/rings/ring_extension_element.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/ring_extension_element.pxd +21 -0
- sage/rings/ring_extension_element.pyx +1635 -0
- sage/rings/ring_extension_homset.py +64 -0
- sage/rings/ring_extension_morphism.cpython-314-x86_64-linux-musl.so +0 -0
- sage/rings/ring_extension_morphism.pxd +35 -0
- sage/rings/ring_extension_morphism.pyx +920 -0
- sage/schemes/all__sagemath_modules.py +1 -0
- sage/schemes/projective/all__sagemath_modules.py +1 -0
- sage/schemes/projective/coherent_sheaf.py +300 -0
- sage/schemes/projective/cohomology.py +510 -0
- sage/stats/all.py +15 -0
- sage/stats/basic_stats.py +489 -0
- sage/stats/distributions/all.py +7 -0
- sage/stats/distributions/catalog.py +34 -0
- sage/stats/distributions/dgs.h +50 -0
- sage/stats/distributions/dgs.pxd +111 -0
- sage/stats/distributions/dgs_bern.h +400 -0
- sage/stats/distributions/dgs_gauss.h +614 -0
- sage/stats/distributions/dgs_misc.h +104 -0
- sage/stats/distributions/discrete_gaussian_integer.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/distributions/discrete_gaussian_integer.pxd +14 -0
- sage/stats/distributions/discrete_gaussian_integer.pyx +498 -0
- sage/stats/distributions/discrete_gaussian_lattice.py +908 -0
- sage/stats/distributions/discrete_gaussian_polynomial.py +141 -0
- sage/stats/hmm/all.py +15 -0
- sage/stats/hmm/chmm.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/hmm/chmm.pyx +1595 -0
- sage/stats/hmm/distributions.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/hmm/distributions.pxd +29 -0
- sage/stats/hmm/distributions.pyx +531 -0
- sage/stats/hmm/hmm.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/hmm/hmm.pxd +17 -0
- sage/stats/hmm/hmm.pyx +1388 -0
- sage/stats/hmm/util.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/hmm/util.pxd +7 -0
- sage/stats/hmm/util.pyx +165 -0
- sage/stats/intlist.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/intlist.pxd +14 -0
- sage/stats/intlist.pyx +588 -0
- sage/stats/r.py +49 -0
- sage/stats/time_series.cpython-314-x86_64-linux-musl.so +0 -0
- sage/stats/time_series.pxd +6 -0
- sage/stats/time_series.pyx +2546 -0
- sage/tensor/all.py +2 -0
- sage/tensor/modules/all.py +8 -0
- sage/tensor/modules/alternating_contr_tensor.py +761 -0
- sage/tensor/modules/comp.py +5598 -0
- sage/tensor/modules/ext_pow_free_module.py +824 -0
- sage/tensor/modules/finite_rank_free_module.py +3589 -0
- sage/tensor/modules/format_utilities.py +333 -0
- sage/tensor/modules/free_module_alt_form.py +858 -0
- sage/tensor/modules/free_module_automorphism.py +1207 -0
- sage/tensor/modules/free_module_basis.py +1074 -0
- sage/tensor/modules/free_module_element.py +284 -0
- sage/tensor/modules/free_module_homset.py +652 -0
- sage/tensor/modules/free_module_linear_group.py +564 -0
- sage/tensor/modules/free_module_morphism.py +1581 -0
- sage/tensor/modules/free_module_tensor.py +3289 -0
- sage/tensor/modules/reflexive_module.py +386 -0
- sage/tensor/modules/tensor_free_module.py +780 -0
- sage/tensor/modules/tensor_free_submodule.py +538 -0
- sage/tensor/modules/tensor_free_submodule_basis.py +140 -0
- sage/tensor/modules/tensor_with_indices.py +1043 -0
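The listing above is the complete manifest of the wheel: dist-info metadata, bundled shared libraries under passagemath_modules.libs/, and the sage/ packages shipped both as compiled extension modules (.so) and as their Cython (.pyx/.pxd) and Python sources. A similar manifest can be reproduced locally by opening the wheel as the zip archive it is; the sketch below uses only the Python standard library and assumes the wheel has been downloaded to the hypothetical local path given in WHEEL (adjust as needed). Nothing in it is specific to passagemath; any wheel can be inspected the same way.

import zipfile

# Hypothetical local path; adjust to wherever the wheel was downloaded.
WHEEL = "passagemath_modules-10.6.31-cp314-cp314-musllinux_1_2_x86_64.whl"

with zipfile.ZipFile(WHEEL) as wheel:
    names = wheel.namelist()
    # Tally the same kinds of files that appear in the listing above.
    extensions = [n for n in names if n.endswith(".so")]
    cython_sources = [n for n in names if n.endswith((".pyx", ".pxd"))]
    print(f"{len(names)} files, {len(extensions)} compiled extensions, "
          f"{len(cython_sources)} Cython sources/headers")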
sage/matrix/matrix_double_dense.pyx +3906 -0
@@ -0,0 +1,3906 @@
+# sage_setup: distribution = sagemath-modules
+# sage.doctest: optional - numpy
+"""
+Dense matrices using a NumPy backend
+
+This serves as a base class for dense matrices over
+Real Double Field and Complex Double Field.
+
+AUTHORS:
+
+- Jason Grout, Sep 2008: switch to NumPy backend, factored out the Matrix_double_dense class
+
+- Josh Kantor
+
+- William Stein: many bug fixes and touch ups.
+
+EXAMPLES::
+
+    sage: b = Mat(RDF,2,3).basis()
+    sage: b[0,0]
+    [1.0 0.0 0.0]
+    [0.0 0.0 0.0]
+
+We deal with the case of zero rows or zero columns::
+
+    sage: m = MatrixSpace(RDF,0,3)
+    sage: m.zero_matrix()
+    []
+
+TESTS::
+
+    sage: a = matrix(RDF,2,range(4), sparse=False)
+    sage: TestSuite(a).run()
+    sage: a = matrix(CDF,2,range(4), sparse=False)
+    sage: TestSuite(a).run()
+"""
+
+# ****************************************************************************
+# Copyright (C) 2004-2006 Joshua Kantor <kantor.jm@gmail.com>
+# Copyright (C) 2008 Georg S. Weber
+# Copyright (C) 2008-2011 Mike Hansen
+# Copyright (C) 2008-2012 Jason Grout
+# Copyright (C) 2009 Dag Sverre Seljebotn
+# Copyright (C) 2009 Yann Laigle-Chapuy
+# Copyright (C) 2009-2010 Florent Hivert
+# Copyright (C) 2010-2012 Rob Beezer
+# Copyright (C) 2011 Martin Raum
+# Copyright (C) 2011-2012 J. H. Palmieri
+# Copyright (C) 2011-2014 André Apitzsch
+# Copyright (C) 2011-2018 Jeroen Demeyer
+# Copyright (C) 2012 Kenneth Smith
+# Copyright (C) 2016-2019 Frédéric Chapoton
+# Copyright (C) 2017 Kiran Kedlaya
+# Copyright (C) 2019 Chaman Agrawal
+# Copyright (C) 2019-2021 Markus Wageringel
+# Copyright (C) 2020 Michael Orlitzky
+# Copyright (C) 2020 Victor Santos
+# Copyright (C) 2021 Jonathan Kliem
+# Copyright (C) 2021 Travis Scrimshaw
+#
+# Distributed under the terms of the GNU General Public License (GPL)
+# as published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+# https://www.gnu.org/licenses/
+# ****************************************************************************
+
+import math
+
+import sage.rings.real_double
+import sage.rings.complex_double
+
+from sage.structure.element cimport Vector
+from sage.matrix.constructor import matrix
+cimport sage.structure.element
+
+cimport numpy as cnumpy
+
+numpy = None
+scipy = None
+
+# This is for the Numpy C API to work
+cnumpy.import_array()
+
+
+cdef class Matrix_double_dense(Matrix_numpy_dense):
+    """
+    Base class for matrices over the Real Double Field and the Complex
+    Double Field. These are supposed to be fast matrix operations
+    using C doubles. Most operations are implemented using numpy which
+    will call the underlying BLAS on the system.
+
+    This class cannot be instantiated on its own. The numpy matrix
+    creation depends on several variables that are set in the
+    subclasses.
+
+    EXAMPLES::
+
+        sage: m = Matrix(RDF, [[1,2],[3,4]])
+        sage: m**2
+        [ 7.0 10.0]
+        [15.0 22.0]
+        sage: m^(-1)  # rel tol 1e-15  # needs scipy
+        [-1.9999999999999996 0.9999999999999998]
+        [ 1.4999999999999998 -0.4999999999999999]
+
+    TESTS:
+
+    Test hashing::
+
+        sage: A = matrix(RDF, 3, range(1,10))
+        sage: hash(A)
+        Traceback (most recent call last):
+        ...
+        TypeError: mutable matrices are unhashable
+        sage: A.set_immutable()
+        sage: hash(A)
+        6694819972852100501  # 64-bit
+        1829383573           # 32-bit
+        sage: A = matrix(CDF, 3, range(1,10))
+        sage: hash(A)
+        Traceback (most recent call last):
+        ...
+        TypeError: mutable matrices are unhashable
+        sage: A.set_immutable()
+        sage: hash(A)
+        6694819972852100501  # 64-bit
+        1829383573           # 32-bit
+    """
+
+    def LU_valid(self):
+        r"""
+        Return ``True`` if the LU form of this matrix has
+        already been computed.
+
+        EXAMPLES::
+
+            sage: # needs scipy
+            sage: A = random_matrix(RDF, 3); A.LU_valid()
+            False
+            sage: P, L, U = A.LU()
+            sage: A.LU_valid()
+            True
+        """
+        return self.fetch('PLU_factors') is not None
+
+    ########################################################################
+    # LEVEL 2 functionality
+    #   * def _pickle
+    #   * def _unpickle
+    cpdef _add_(self, right):
+        """
+        Add two matrices together.
+
+        EXAMPLES::
+
+            sage: A = matrix(RDF,3,range(1,10))
+            sage: A+A
+            [ 2.0  4.0  6.0]
+            [ 8.0 10.0 12.0]
+            [14.0 16.0 18.0]
+        """
+        if self._nrows == 0 or self._ncols == 0:
+            return self.__copy__()
+
+        cdef Matrix_double_dense M, _right, _left
+        _right = right
+        _left = self
+
+        M = self._new()
+        M._matrix_numpy = _left._matrix_numpy + _right._matrix_numpy
+        return M
+
+    cpdef _sub_(self, right):
+        """
+        Return ``self - right``.
+
+        EXAMPLES::
+
+            sage: A = matrix(RDF,3,range(1,10))
+            sage: (A-A).is_zero()
+            True
+        """
+        if self._nrows == 0 or self._ncols == 0:
+            return self.__copy__()
+
+        cdef Matrix_double_dense M, _right, _left
+        _right = right
+        _left = self
+
+        M = self._new()
+        M._matrix_numpy = _left._matrix_numpy - _right._matrix_numpy
+        return M
+
+    def __neg__(self):
+        """
+        Negate this matrix.
+
+        EXAMPLES::
+
+            sage: A = matrix(RDF,3,range(1,10))
+            sage: -A
+            [-1.0 -2.0 -3.0]
+            [-4.0 -5.0 -6.0]
+            [-7.0 -8.0 -9.0]
+            sage: B = -A ; (A+B).is_zero()
+            True
+        """
+        if self._nrows == 0 or self._ncols == 0:
+            return self.__copy__()
+
+        cdef Matrix_double_dense M
+        M = self._new()
+        M._matrix_numpy = -self._matrix_numpy
+        return M
+
+    #   x * __copy__
+    #   * _list -- list of underlying elements (need not be a copy)
+    #   * _dict -- sparse dictionary of underlying elements (need not be a copy)
+    ########################################################################
+    # def _pickle(self):  #unsure how to implement
+    # def _unpickle(self, data, int version):  # use version >= 0  #unsure how to implement
+    ######################################################################
+    cdef sage.structure.element.Matrix _matrix_times_matrix_(self, sage.structure.element.Matrix right):
+        r"""
+        Multiply ``self * right`` as matrices.
+
+        EXAMPLES::
+
+            sage: A = matrix(RDF,3,range(1,10))
+            sage: B = matrix(RDF,3,range(1,13))
+            sage: A*B
+            [ 38.0  44.0  50.0  56.0]
+            [ 83.0  98.0 113.0 128.0]
+            [128.0 152.0 176.0 200.0]
+
+        TESTS:
+
+        Check that :issue:`31234` is fixed::
+
+            sage: matrix.identity(QQ, 4) * matrix(RDF, 4, 0)
+            []
+
+        Check that an empty matrix is initialized correctly; see :issue:`27366`::
+
+            sage: A = matrix(RDF, 3, 0)
+            sage: A*A.transpose()
+            [0.0 0.0 0.0]
+            [0.0 0.0 0.0]
+            [0.0 0.0 0.0]
+        """
+        if self._ncols != right._nrows:
+            raise IndexError("Number of columns of self must equal number of rows of right")
+
+        cdef Matrix_double_dense M, _right, _left
+
+        if self._nrows == 0 or self._ncols == 0 or right._nrows == 0 or right._ncols == 0:
+            M = self._new(self._nrows, right._ncols)
+            M._matrix_numpy.fill(0)
+            return M
+
+        M = self._new(self._nrows, right._ncols)
+        _right = right
+        _left = self
+        global numpy
+        if numpy is None:
+            import numpy
+
+        M._matrix_numpy = numpy.dot(_left._matrix_numpy, _right._matrix_numpy)
+        return M
+
+    def __invert__(self):
+        """
+        Invert this matrix.
+
+        EXAMPLES::
+
+            sage: # needs scipy
+            sage: A = Matrix(RDF, [[10, 0], [0, 100]])
+            sage: (~A).det()
+            0.001
+
+            sage: # needs scipy
+            sage: A = matrix(RDF, 3, [2,3,5,7,8,9,11,13,17]); A
+            [ 2.0  3.0  5.0]
+            [ 7.0  8.0  9.0]
+            [11.0 13.0 17.0]
+            sage: ~A  # tol 1e-14
+            [-2.7142857142857184  -2.000000000000004  1.8571428571428603]
+            [  2.857142857142863   3.000000000000006 -2.4285714285714333]
+            [-0.4285714285714305  -1.000000000000002  0.7142857142857159]
+
+        Note that if this matrix is (nearly) singular, finding
+        its inverse will not help much and will give slightly different
+        answers on similar platforms depending on the hardware
+        and other factors::
+
+            sage: A = matrix(RDF,3,range(1,10));A
+            [1.0 2.0 3.0]
+            [4.0 5.0 6.0]
+            [7.0 8.0 9.0]
+
+            sage: A.determinant() < 10e-12  # needs scipy
+            True
+
+        TESTS::
+
+            sage: # needs scipy
+            sage: ~Matrix(RDF, 0,0)
+            []
+            sage: ~Matrix(RDF, 0,3)
+            Traceback (most recent call last):
+            ...
+            ArithmeticError: self must be a square matrix
+        """
+        # see github issue 4502 --- there is an issue with the "#random" pragma that needs to be fixed
+        # as for the mathematical side, scipy v0.7 is expected to fix the invertibility failures
+        #
+        # sage: A = Matrix(RDF, [[1, 0], [0, 0]])
+        # sage: A.inverse().det()  # random - on some computers, this will be invertible due to numerical error.
+        # Traceback (most recent call last):
+        # ...
+        # LinAlgError: singular matrix
+        # sage: A = matrix(RDF,3,range(1,10));A
+        # [1.0 2.0 3.0]
+        # [4.0 5.0 6.0]
+        # [7.0 8.0 9.0]
+        #
+        # sage: A.determinant() < 10e-12
+        # True
+        # sage: ~A  # random - on some computers, this will be invertible due to numerical error.
+        # Traceback (most recent call last):
+        # ...
+        # ZeroDivisionError: singular matrix
+        #
+        if self._nrows != self._ncols:
+            raise ArithmeticError("self must be a square matrix")
+        if self._nrows == 0 and self._ncols == 0:
+            return self.__copy__()
+
+        # Maybe we should cache the (P)LU decomposition and use scipy.lu_solve?
+        cdef Matrix_double_dense M
+        M = self._new()
+        global scipy
+        if scipy is None:
+            import scipy
+        import scipy.linalg
+        from numpy.linalg import LinAlgError
+        try:  # Standard error reporting for Sage.
+            M._matrix_numpy = scipy.linalg.inv(self._matrix_numpy)
+        except LinAlgError:
+            raise ZeroDivisionError("input matrix must be nonsingular")
+        return M
+
+    # def _list(self):
+    # def _dict(self):
+
+    ########################################################################
+    # LEVEL 3 functionality (Optional)
+    #    * cdef _sub_
+    #    * __deepcopy__
+    #    * __invert__
+    #    * Matrix windows -- only if you need strassen for that base
+    #    * Other functions (list them here):
+    #
+    #        compute_LU(self)
+    #
+    ########################################################################
+
+    def condition(self, p='frob'):
+        r"""
+        Return the condition number of a square nonsingular matrix.
+
+        Roughly speaking, this is a measure of how sensitive
+        the matrix is to round-off errors in numerical computations.
+        The minimum possible value is 1.0, and larger numbers indicate
+        greater sensitivity.
+
+        INPUT:
+
+        - ``p`` -- (default: ``'frob'``) controls which norm is used
+          to compute the condition number, allowable values are
+          'frob' (for the Frobenius norm), integers -2, -1, 1, 2,
+          positive and negative infinity. See output discussion
+          for specifics.
+
+        OUTPUT:
+
+        The condition number of a matrix is the product of a norm
+        of the matrix times the norm of the inverse of the matrix.
+        This requires that the matrix be square and invertible
+        (nonsingular, full rank).
+
+        Returned value is a double precision floating point value
+        in ``RDF``, or ``Infinity``. Row and column sums described below are
+        sums of the absolute values of the entries, where the
+        absolute value of the complex number `a+bi` is `\sqrt{a^2+b^2}`.
+        Singular values are the "diagonal" entries of the "S" matrix in
+        the singular value decomposition.
+
+        - ``p = 'frob'``: the default norm employed in computing
+          the condition number, the Frobenius norm, which for a
+          matrix `A=(a_{ij})` computes
+
+          .. MATH::
+
+                \left(\sum_{i,j}\left\lvert{a_{i,j}}\right\rvert^2\right)^{1/2}
+
+        - ``p = 'sv'``: the quotient of the maximal and minimal singular value.
+        - ``p = Infinity`` or ``p = oo``: the maximum row sum.
+        - ``p = -Infinity`` or ``p = -oo``: the minimum column sum.
+        - ``p = 1``: the maximum column sum.
+        - ``p = -1``: the minimum column sum.
+        - ``p = 2``: the 2-norm, equal to the maximum singular value.
+        - ``p = -2``: the minimum singular value.
+
+        ALGORITHM:
+
+        Computation is performed by the ``cond()`` function of
+        the SciPy/NumPy library.
+
+        EXAMPLES:
+
+        First over the reals. ::
+
+            sage: A = matrix(RDF, 4, [(1/4)*x^3 for x in range(16)]); A
+            [   0.0   0.25    2.0   6.75]
+            [  16.0  31.25   54.0  85.75]
+            [ 128.0 182.25  250.0 332.75]
+            [ 432.0 549.25  686.0 843.75]
+            sage: A.condition()
+            9923.88955...
+            sage: A.condition(p='frob')
+            9923.88955...
+            sage: A.condition(p=Infinity)  # tol 3e-14
+            22738.50000000045
+            sage: A.condition(p=-Infinity)  # tol 2e-14
+            17.50000000000028
+            sage: A.condition(p=1)
+            12139.21...
+            sage: A.condition(p=-1)  # tol 2e-14
+            550.0000000000093
+            sage: A.condition(p=2)
+            9897.8088...
+            sage: A.condition(p=-2)
+            0.000101032462...
+
+        And over the complex numbers. ::
+
+            sage: # needs sage.symbolic
+            sage: B = matrix(CDF, 3, [x + x^2*I for x in range(9)]); B
+            [         0.0  1.0 + 1.0*I  2.0 + 4.0*I]
+            [ 3.0 + 9.0*I 4.0 + 16.0*I 5.0 + 25.0*I]
+            [6.0 + 36.0*I 7.0 + 49.0*I 8.0 + 64.0*I]
+            sage: B.condition()
+            203.851798...
+            sage: B.condition(p='frob')
+            203.851798...
+            sage: B.condition(p=Infinity)
+            369.55630...
+            sage: B.condition(p=-Infinity)
+            5.46112969...
+            sage: B.condition(p=1)
+            289.251481...
+            sage: B.condition(p=-1)
+            20.4566639...
+            sage: B.condition(p=2)
+            202.653543...
+            sage: B.condition(p=-2)
+            0.00493453005...
+
+        Hilbert matrices are famously ill-conditioned, while
+        an identity matrix can hit the minimum with the right norm. ::
+
+            sage: A = matrix(RDF, 10, [1/(i+j+1) for i in range(10) for j in range(10)])
+            sage: A.condition()  # tol 2e-4
+            16332197709146.014
+            sage: id = identity_matrix(CDF, 10)
+            sage: id.condition(p=1)
+            1.0
+
+        Return values are in `RDF`. ::
+
+            sage: A = matrix(CDF, 2, range(1,5))
+            sage: A.condition() in RDF
+            True
+
+        Rectangular and singular matrices raise errors if p is not 'sv'. ::
+
+            sage: A = matrix(RDF, 2, 3, range(6))
+            sage: A.condition()
+            Traceback (most recent call last):
+            ...
+            TypeError: matrix must be square if p is not 'sv', not 2 x 3
+
+            sage: A.condition('sv')
+            7.34...
+
+            sage: A = matrix(QQ, 5, range(25))
+            sage: A.is_singular()
+            True
+            sage: B = A.change_ring(CDF)
+            sage: B.condition()
+            +Infinity
+
+        Improper values of ``p`` are caught. ::
+
+            sage: A = matrix(CDF, 2, range(1,5))
+            sage: A.condition(p='bogus')
+            Traceback (most recent call last):
+            ...
+            ValueError: condition number 'p' must be +/- infinity, 'frob', 'sv' or an integer, not bogus
+            sage: A.condition(p=632)
+            Traceback (most recent call last):
+            ...
+            ValueError: condition number integer values of 'p' must be -2, -1, 1 or 2, not 632
+
+        TESTS:
+
+        Some condition numbers, first by the definition which also exercises
+        :meth:`norm`, then by this method. ::
+
+            sage: # needs scipy
+            sage: A = matrix(CDF, [[1,2,4],[5,3,9],[7,8,6]])
+            sage: c = A.norm(2)*A.inverse().norm(2)
+            sage: d = A.condition(2)
+            sage: abs(c-d) < 1.0e-12
+            True
+            sage: c = A.norm(1)*A.inverse().norm(1)
+            sage: d = A.condition(1)
+            sage: abs(c-d) < 1.0e-12
+            True
+        """
+        if not self.is_square() and p != 'sv':
+            raise TypeError("matrix must be square if p is not 'sv', not %s x %s" % (self.nrows(), self.ncols()))
+        global numpy
|
|
536
|
+
if numpy is None:
|
|
537
|
+
import numpy
|
|
538
|
+
import sage.rings.infinity
|
|
539
|
+
import sage.rings.integer
|
|
540
|
+
from sage.rings.real_double import RDF
|
|
541
|
+
if p == sage.rings.infinity.Infinity:
|
|
542
|
+
p = numpy.inf
|
|
543
|
+
elif p == -sage.rings.infinity.Infinity:
|
|
544
|
+
p = -numpy.inf
|
|
545
|
+
elif p == 'frob':
|
|
546
|
+
p = 'fro'
|
|
547
|
+
elif p == 'sv':
|
|
548
|
+
p = None
|
|
549
|
+
else:
|
|
550
|
+
try:
|
|
551
|
+
p = sage.rings.integer.Integer(p)
|
|
552
|
+
except TypeError:
|
|
553
|
+
raise ValueError("condition number 'p' must be +/- infinity, 'frob', 'sv' or an integer, not %s" % p)
|
|
554
|
+
if p not in [-2, -1, 1, 2]:
|
|
555
|
+
raise ValueError("condition number integer values of 'p' must be -2, -1, 1 or 2, not %s" % p)
|
|
556
|
+
# may raise a LinAlgError if matrix is singular
|
|
557
|
+
c = numpy.linalg.cond(self._matrix_numpy, p=p)
|
|
558
|
+
if c == numpy.inf:
|
|
559
|
+
return sage.rings.infinity.Infinity
|
|
560
|
+
else:
|
|
561
|
+
return RDF(c.real if numpy.iscomplexobj(c) else c)
|
|
562
|
+
|
|
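    # Illustrative sketch, not part of the original source: the condition
    # number computed above is, by definition, a matrix norm times the norm
    # of the inverse.  A minimal NumPy-only check of that identity (the
    # array values are an arbitrary example):
    #
    #     import numpy as np
    #     A = np.array([[1.0, 2.0], [3.0, 4.0]])
    #     c = np.linalg.cond(A, p='fro')
    #     assert np.isclose(c, np.linalg.norm(A, 'fro')
    #                          * np.linalg.norm(np.linalg.inv(A), 'fro'))
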
    def norm(self, p=2):
        r"""
        Return the norm of the matrix.

        INPUT:

        - ``p`` -- (default: 2) controls which norm is computed,
          allowable values are 'frob' (for the Frobenius norm),
          integers -2, -1, 1, 2, positive and negative infinity. See
          output discussion for specifics.

        OUTPUT:

        Returned value is a double precision floating point value
        in ``RDF``. Row and column sums described below are
        sums of the absolute values of the entries, where the
        absolute value of the complex number `a+bi` is `\sqrt{a^2+b^2}`.
        Singular values are the "diagonal" entries of the "S" matrix in
        the singular value decomposition.

        - ``p = 'frob'``: the Frobenius norm, which for
          a matrix `A=(a_{ij})` computes

          .. MATH::

                \left(\sum_{i,j}\left\lvert{a_{i,j}}\right\rvert^2\right)^{1/2}

        - ``p = Infinity`` or ``p = oo``: the maximum row sum.
        - ``p = -Infinity`` or ``p = -oo``: the minimum column sum.
        - ``p = 1``: the maximum column sum.
        - ``p = -1``: the minimum column sum.
        - ``p = 2``: the induced 2-norm, equal to the maximum singular value.
        - ``p = -2``: the minimum singular value.

        ALGORITHM:

        Computation is performed by the :func:`~scipy:scipy.linalg.norm`
        function of the SciPy/NumPy library.

        EXAMPLES:

        First over the reals. ::

            sage: A = matrix(RDF, 3, range(-3, 6)); A
            [-3.0 -2.0 -1.0]
            [ 0.0 1.0 2.0]
            [ 3.0 4.0 5.0]
            sage: A.norm()
            7.99575670...
            sage: A.norm(p='frob')
            8.30662386...
            sage: A.norm(p=Infinity)
            12.0
            sage: A.norm(p=-Infinity)
            3.0
            sage: A.norm(p=1)
            8.0
            sage: A.norm(p=-1)
            6.0
            sage: A.norm(p=2)
            7.99575670...
            sage: A.norm(p=-2) < 10^-15
            True

        And over the complex numbers. ::

            sage: # needs sage.symbolic
            sage: B = matrix(CDF, 2, [[1+I, 2+3*I],[3+4*I,3*I]]); B
            [1.0 + 1.0*I 2.0 + 3.0*I]
            [3.0 + 4.0*I 3.0*I]
            sage: B.norm()
            6.66189877...
            sage: B.norm(p='frob')
            7.0
            sage: B.norm(p=Infinity)
            8.0
            sage: B.norm(p=-Infinity)
            5.01976483...
            sage: B.norm(p=1)
            6.60555127...
            sage: B.norm(p=-1)
            6.41421356...
            sage: B.norm(p=2)
            6.66189877...
            sage: B.norm(p=-2)
            2.14921023...

        Since it is invariant under unitary multiplication, the
        Frobenius norm is equal to the square root of the sum of
        squares of the singular values. ::

            sage: # needs scipy
            sage: A = matrix(RDF, 5, range(1,26))
            sage: f = A.norm(p='frob')
            sage: U, S, V = A.SVD()
            sage: s = sqrt(sum([S[i,i]^2 for i in range(5)]))
            sage: abs(f-s) < 1.0e-12
            True

        Return values are in `RDF`. ::

            sage: A = matrix(CDF, 2, range(4))
            sage: A.norm() in RDF
            True

        Improper values of ``p`` are caught. ::

            sage: A.norm(p='bogus')
            Traceback (most recent call last):
            ...
            ValueError: matrix norm 'p' must be +/- infinity, 'frob' or an integer, not bogus
            sage: A.norm(p=632)
            Traceback (most recent call last):
            ...
            ValueError: matrix norm integer values of 'p' must be -2, -1, 1 or 2, not 632
        """
        global numpy
        if numpy is None:
            import numpy

        import sage.rings.infinity
        import sage.rings.integer
        import sage.rings.real_double
        if p == sage.rings.infinity.Infinity:
            p = numpy.inf
        elif p == -sage.rings.infinity.Infinity:
            p = -numpy.inf
        elif p == 'frob':
            p = 'fro'
        else:
            try:
                p = sage.rings.integer.Integer(p)
            except TypeError:
                raise ValueError("matrix norm 'p' must be +/- infinity, 'frob' or an integer, not %s" % p)
            if p not in [-2, -1, 1, 2]:
                raise ValueError("matrix norm integer values of 'p' must be -2, -1, 1 or 2, not %s" % p)
        return sage.rings.real_double.RDF(numpy.linalg.norm(self._matrix_numpy, ord=p))

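    # Illustrative sketch, not part of the original source: the ``ord``
    # values forwarded to NumPy above correspond to the norms listed in the
    # docstring.  A minimal NumPy-only illustration on the same 3x3 example:
    #
    #     import numpy as np
    #     A = np.arange(-3.0, 6.0).reshape(3, 3)
    #     np.linalg.norm(A, ord='fro')   # Frobenius norm
    #     np.linalg.norm(A, ord=np.inf)  # maximum absolute row sum (12.0)
    #     np.linalg.norm(A, ord=1)       # maximum absolute column sum (8.0)
    #     np.linalg.norm(A, ord=2)       # largest singular value
    #     np.linalg.norm(A, ord=-2)      # smallest singular value
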
    def singular_values(self, eps=None):
        r"""
        Return a sorted list of the singular values of the matrix.

        INPUT:

        - ``eps`` -- (default: ``None``) the largest number which
          will be considered to be zero. May also be set to the
          string 'auto'. See the discussion below.

        OUTPUT:

        A sorted list of the singular values of the matrix, which are the
        diagonal entries of the "S" matrix in the SVD decomposition. As such,
        the values are real and are returned as elements of ``RDF``. The
        list is sorted with larger values first, and since theory predicts
        these values are always positive, for a rank-deficient matrix the
        list should end in zeros (but in practice may not). The length of
        the list is the minimum of the row count and column count for the
        matrix.

        The number of nonzero singular values will be the rank of the
        matrix. However, as a numerical matrix, it is impossible to
        control the difference between zero entries and very small
        nonzero entries. As an informed consumer it is up to you
        to use the output responsibly. We will do our best, and give
        you the tools to work with the output, but we cannot
        give you a guarantee.

        With ``eps`` set to ``None`` you will get the raw singular
        values and can manage them as you see fit. You may also set
        ``eps`` to any positive floating point value you wish. If you
        set ``eps`` to 'auto' this routine will compute a reasonable
        cutoff value, based on the size of the matrix, the largest
        singular value and the smallest nonzero value representable
        by the 53-bit precision values used. See the discussion
        at page 268 of [Wat2010]_.

        See the examples for a way to use the "verbose" facility
        to easily watch the zero cutoffs in action.

        ALGORITHM:

        The singular values come from the SVD decomposition
        computed by SciPy/NumPy using :func:`scipy:scipy.linalg.svd`.

        EXAMPLES:

        Singular values close to zero have trailing digits that may vary
        on different hardware. For exact matrices, the number of nonzero
        singular values will equal the rank of the matrix. So for some of
        the doctests we round the small singular values that ideally would
        be zero, to control the variability across hardware.

        This matrix has a determinant of one. A chain of two or
        three theorems implies the product of the singular values
        must also be one. ::

            sage: # needs scipy
            sage: A = matrix(QQ, [[ 1, 0, 0, 0, 0, 1, 3],
            ....:                 [-2, 1, 1, -2, 0, -4, 0],
            ....:                 [ 1, 0, 1, -4, -6, -3, 7],
            ....:                 [-2, 2, 1, 1, 7, 1, -1],
            ....:                 [-1, 0, -1, 5, 8, 4, -6],
            ....:                 [ 4, -2, -2, 1, -3, 0, 8],
            ....:                 [-2, 1, 0, 2, 7, 3, -4]])
            sage: A.determinant()
            1
            sage: B = A.change_ring(RDF)
            sage: sv = B.singular_values(); sv  # tol 1e-12
            [20.523980658874265, 8.486837028536643, 5.86168134845073, 2.4429165899286978, 0.5831970144724045, 0.26933287286576313, 0.0025524488076110402]
            sage: prod(sv)  # tol 1e-12
            0.9999999999999525

        An exact matrix that is obviously not of full rank, and then
        a computation of the singular values after conversion
        to an approximate matrix. ::

            sage: # needs scipy
            sage: A = matrix(QQ, [[1/3, 2/3, 11/3],
            ....:                 [2/3, 1/3, 7/3],
            ....:                 [2/3, 5/3, 27/3]])
            sage: A.rank()
            2
            sage: B = A.change_ring(CDF)
            sage: sv = B.singular_values()
            sage: sv[0:2]
            [10.1973039..., 0.487045871...]
            sage: sv[2] < 1e-14
            True

        A matrix of rank 3 over the complex numbers. ::

            sage: # needs scipy
            sage: A = matrix(CDF, [[46*I - 28, -47*I - 50, 21*I + 51, -62*I - 782, 13*I + 22],
            ....:                  [35*I - 20, -32*I - 46, 18*I + 43, -57*I - 670, 7*I + 3],
            ....:                  [22*I - 13, -23*I - 23, 9*I + 24, -26*I - 347, 7*I + 13],
            ....:                  [-44*I + 23, 41*I + 57, -19*I - 54, 60*I + 757, -11*I - 9],
            ....:                  [30*I - 18, -30*I - 34, 14*I + 34, -42*I - 522, 8*I + 12]])
            sage: sv = A.singular_values()
            sage: sv[0:3]  # tol 1e-14
            [1440.7336659952966, 18.404403413369227, 6.839707797136151]
            sage: (sv[3] < 10^-13) or sv[3]
            True
            sage: (sv[4] < 10^-14) or sv[4]
            True

        A full-rank matrix that is ill-conditioned. We use this to
        illustrate ways of using the various possibilities for ``eps``,
        including one that is ill-advised. Notice that the automatically
        computed cutoff gets this (difficult) example slightly wrong.
        This illustrates the impossibility of any automated process always
        getting this right. Use with caution and judgement. ::

            sage: entries = [1/(i+j+1) for i in range(12) for j in range(12)]
            sage: B = matrix(QQ, 12, 12, entries)
            sage: B.rank()
            12
            sage: A = B.change_ring(RDF)
            sage: A.condition() > 1.59e16 or A.condition()
            True

            sage: # needs scipy
            sage: A.singular_values(eps=None)  # abs tol 7e-16
            [1.7953720595619975, 0.38027524595503703, 0.04473854875218107, 0.0037223122378911614, 0.0002330890890217751, 1.116335748323284e-05, 4.082376110397296e-07, 1.1228610675717613e-08, 2.2519645713496478e-10, 3.1113486853814003e-12, 2.6500422260778388e-14, 9.87312834948426e-17]
            sage: A.singular_values(eps='auto')  # abs tol 7e-16
            [1.7953720595619975, 0.38027524595503703, 0.04473854875218107, 0.0037223122378911614, 0.0002330890890217751, 1.116335748323284e-05, 4.082376110397296e-07, 1.1228610675717613e-08, 2.2519645713496478e-10, 3.1113486853814003e-12, 2.6500422260778388e-14, 0.0]
            sage: A.singular_values(eps=1e-4)  # abs tol 7e-16
            [1.7953720595619975, 0.38027524595503703, 0.04473854875218107, 0.0037223122378911614, 0.0002330890890217751, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

        With Sage's "verbose" facility, you can compactly see the cutoff
        at work. In any application of this routine, or those that build upon
        it, it would be a good idea to conduct this exercise on samples.
        We also test here that all the values are returned in `RDF` since
        singular values are always real. ::

            sage: # needs scipy
            sage: A = matrix(CDF, 4, range(16))
            sage: from sage.misc.verbose import set_verbose
            sage: set_verbose(1)
            sage: sv = A.singular_values(eps='auto'); sv
            verbose 1 (<module>) singular values,
            smallest-non-zero:cutoff:largest-zero,
            2.2766...:6.2421...e-14:...
            [35.13996365902..., 2.27661020871472..., 0.0, 0.0]
            sage: set_verbose(0)
            sage: all(s in RDF for s in sv)
            True

        TESTS:

        Bogus values of the ``eps`` keyword will be caught::

            sage: A.singular_values(eps='junk')  # needs scipy
            Traceback (most recent call last):
            ...
            ValueError: could not convert string to float: ...

        AUTHOR:

        - Rob Beezer - (2011-02-18)
        """
        from sage.misc.verbose import verbose
        from sage.rings.real_double import RDF
        global scipy
        # get SVD decomposition, which is a cached quantity
        _, S, _ = self.SVD()
        diag = min(self._nrows, self._ncols)
        sv = [RDF(S[i,i]) for i in range(diag)]
        # no cutoff, send raw data back
        if eps is None:
            verbose("singular values, no zero cutoff specified", level=1)
            return sv
        # set cutoff as RDF element
        if eps == 'auto':
            if scipy is None: import scipy
            eps = 2*max(self._nrows, self._ncols)*numpy.finfo(float).eps*sv[0]
        eps = RDF(eps)
        # locate nonzero entries
        rank = 0
        while rank < diag and sv[rank] > eps:
            rank = rank + 1
        # capture info for watching zero cutoff behavior at verbose level 1
        if rank == 0:
            small_nonzero = None
        else:
            small_nonzero = sv[rank-1]
        if rank < diag:
            large_zero = sv[rank]
        else:
            large_zero = None
        # convert small values to zero, then done
        for i in range(rank, diag):
            sv[i] = RDF(0)
        verbose("singular values, smallest-non-zero:cutoff:largest-zero, %s:%s:%s" % (small_nonzero, eps, large_zero), level=1)
        return sv

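    # Illustrative sketch, not part of the original source: the 'auto'
    # cutoff used above is 2 * max(nrows, ncols) * machine epsilon * largest
    # singular value.  The same rule expressed with plain NumPy:
    #
    #     import numpy as np
    #     A = np.array([[1.0 / (i + j + 1) for j in range(12)] for i in range(12)])
    #     sv = np.linalg.svd(A, compute_uv=False)
    #     cutoff = 2 * max(A.shape) * np.finfo(float).eps * sv[0]
    #     cleaned = [s if s > cutoff else 0.0 for s in sv]
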
    def LU(self):
        r"""
        Return a decomposition of the (row-permuted) matrix as a product of
        a lower-triangular matrix ("L") and an upper-triangular matrix ("U").

        OUTPUT:

        For an `m\times n` matrix ``A`` this method returns a triple of
        immutable matrices ``P, L, U`` such that

        - ``A = P*L*U``
        - ``P`` is a square permutation matrix, of size `m\times m`,
          so is all zeroes, but with exactly a single one in each
          row and each column
        - ``L`` is lower-triangular, square of size `m\times m`,
          with every diagonal entry equal to one
        - ``U`` is upper-triangular with size `m\times n`, i.e.
          entries below the "diagonal" are all zero

        The computed decomposition is cached and returned on
        subsequent calls, thus requiring the results to be immutable.

        Effectively, ``P`` permutes the rows of ``A``. Then ``L``
        can be viewed as a sequence of row operations on this matrix,
        where each operation is adding a multiple of a row to a
        subsequent row. There is no scaling (thus 1s on the diagonal
        of ``L``) and no row-swapping (``P`` does that). As a result
        ``U`` is close to being the result of Gaussian-elimination.
        However, round-off errors can make it hard to determine
        the zero entries of ``U``.

        .. NOTE::
            The behaviour of ``LU()`` has changed in Sage version 9.1.
            Earlier, ``LU()`` returned ``P,L,U`` such that ``P*A=L*U``,
            where ``P`` represents the permutation and is
            the matrix inverse of the ``P`` returned by this method.
            The computation of this matrix inverse can be accomplished
            quickly with just a transpose as the matrix is orthogonal/unitary.

            For details see :issue:`18365`.

        EXAMPLES::

            sage: # needs scipy
            sage: m = matrix(RDF,4,range(16))
            sage: P,L,U = m.LU()
            sage: P*L*U  # rel tol 2e-16
            [ 0.0 1.0 2.0 3.0]
            [ 4.0 5.0 6.0 7.0]
            [ 8.0 9.0 10.0 11.0]
            [12.0 13.0 14.0 15.0]

        Below example illustrates the change in behaviour of ``LU()``. ::

            sage: # needs scipy
            sage: (m - P*L*U).norm() < 1e-14
            True
            sage: (P*m - L*U).norm() < 1e-14
            False

        :issue:`10839` made this routine available for rectangular matrices. ::

            sage: # needs scipy
            sage: A = matrix(RDF, 5, 6, range(30)); A
            [ 0.0 1.0 2.0 3.0 4.0 5.0]
            [ 6.0 7.0 8.0 9.0 10.0 11.0]
            [12.0 13.0 14.0 15.0 16.0 17.0]
            [18.0 19.0 20.0 21.0 22.0 23.0]
            [24.0 25.0 26.0 27.0 28.0 29.0]
            sage: P, L, U = A.LU()
            sage: P
            [0.0 1.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0 1.0]
            [0.0 0.0 1.0 0.0 0.0]
            [0.0 0.0 0.0 1.0 0.0]
            [1.0 0.0 0.0 0.0 0.0]
            sage: L.zero_at(0)   # Use zero_at(0) to get rid of signed zeros
            [ 1.0 0.0 0.0 0.0 0.0]
            [ 0.0 1.0 0.0 0.0 0.0]
            [ 0.5 0.5 1.0 0.0 0.0]
            [0.75 0.25 0.0 1.0 0.0]
            [0.25 0.75 0.0 0.0 1.0]
            sage: U.zero_at(0)   # Use zero_at(0) to get rid of signed zeros
            [24.0 25.0 26.0 27.0 28.0 29.0]
            [ 0.0 1.0 2.0 3.0 4.0 5.0]
            [ 0.0 0.0 0.0 0.0 0.0 0.0]
            [ 0.0 0.0 0.0 0.0 0.0 0.0]
            [ 0.0 0.0 0.0 0.0 0.0 0.0]
            sage: P.transpose()*A-L*U
            [0.0 0.0 0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0 0.0 0.0]
            sage: P*L*U
            [ 0.0 1.0 2.0 3.0 4.0 5.0]
            [ 6.0 7.0 8.0 9.0 10.0 11.0]
            [12.0 13.0 14.0 15.0 16.0 17.0]
            [18.0 19.0 20.0 21.0 22.0 23.0]
            [24.0 25.0 26.0 27.0 28.0 29.0]

        Trivial cases return matrices of the right size and
        characteristics. ::

            sage: # needs scipy
            sage: A = matrix(RDF, 5, 0)
            sage: P, L, U = A.LU()
            sage: P.parent()
            Full MatrixSpace of 5 by 5 dense matrices over Real Double Field
            sage: L.parent()
            Full MatrixSpace of 5 by 5 dense matrices over Real Double Field
            sage: U.parent()
            Full MatrixSpace of 5 by 0 dense matrices over Real Double Field
            sage: A-P*L*U
            []

        The results are immutable since they are cached. ::

            sage: # needs scipy
            sage: P, L, U = matrix(RDF, 2, 2, range(4)).LU()
            sage: L[0,0] = 0
            Traceback (most recent call last):
            ...
            ValueError: matrix is immutable; please change a copy instead (i.e., use copy(M) to change a copy of M).
            sage: P[0,0] = 0
            Traceback (most recent call last):
            ...
            ValueError: matrix is immutable; please change a copy instead (i.e., use copy(M) to change a copy of M).
            sage: U[0,0] = 0
            Traceback (most recent call last):
            ...
            ValueError: matrix is immutable; please change a copy instead (i.e., use copy(M) to change a copy of M).
        """
        global scipy, numpy
        cdef Matrix_double_dense P, L, U
        m = self._nrows
        n = self._ncols

        # scipy fails on trivial cases
        if m == 0 or n == 0:
            P = self._new(m, m)
            for i in range(m):
                P[i,i]=1
            P.set_immutable()
            L = P
            U = self._new(m,n)
            U.set_immutable()
            return P, L, U

        PLU = self.fetch('PLU_factors')
        if PLU is not None:
            return PLU
        if scipy is None:
            import scipy
            import scipy.linalg
        if numpy is None:
            import numpy
        PM, LM, UM = scipy.linalg.lu(self._matrix_numpy)
        # TODO: It's an awful waste to store a huge matrix for P, which
        # is just a simple permutation, really.
        P = self._new(m, m)
        L = self._new(m, m)
        U = self._new(m, n)
        P._matrix_numpy = numpy.ascontiguousarray(PM)
        L._matrix_numpy = numpy.ascontiguousarray(LM)
        U._matrix_numpy = numpy.ascontiguousarray(UM)
        PLU = (P, L, U)
        for M in PLU:
            M.set_immutable()
        self.cache('PLU_factors', PLU)
        return PLU

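    # Illustrative sketch, not part of the original source: the factors
    # returned above mirror scipy.linalg.lu, whose permutation matrix P
    # satisfies A = P*L*U (not P*A = L*U).  A plain SciPy check:
    #
    #     import numpy as np
    #     from scipy.linalg import lu
    #     A = np.arange(16.0).reshape(4, 4)
    #     P, L, U = lu(A)            # permutation, unit lower, upper triangular
    #     assert np.allclose(A, P @ L @ U)
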
    def eigenvalues(self, other=None, algorithm='default', tol=None, *,
                    homogeneous=False):
        r"""
        Return a list of ordinary or generalized eigenvalues.

        INPUT:

        - ``self`` -- a square matrix

        - ``other`` -- a square matrix `B` (default: ``None``) in a generalized
          eigenvalue problem; if ``None``, an ordinary eigenvalue problem is
          solved; if ``algorithm`` is ``'symmetric'`` or ``'hermitian'``, `B`
          must be real symmetric or hermitian positive definite, respectively

        - ``algorithm`` -- (default: ``'default'``)

          - ``'default'`` -- applicable to any matrix
            with double-precision floating point entries.
            Uses the :func:`~scipy:scipy.linalg.eigvals` function from SciPy.

          - ``'symmetric'`` -- converts the matrix into a real matrix
            (i.e. with entries from :class:`~sage.rings.real_double.RDF`),
            then applies the algorithm for Hermitian matrices. This
            algorithm can be significantly faster than the
            ``'default'`` algorithm.

          - ``'hermitian'`` -- uses the :func:`~scipy:scipy.linalg.eigh`
            function from SciPy, which applies only to real symmetric or
            complex Hermitian matrices. Since Hermitian is defined as a matrix
            equaling its conjugate-transpose, for a matrix with real
            entries this property is equivalent to being symmetric.
            This algorithm can be significantly faster than the
            ``'default'`` algorithm.

        - ``'tol'`` -- (default: ``None``) if set to a value other than
          ``None``, this is interpreted as a small real number used to aid in
          grouping eigenvalues that are numerically similar, but is ignored
          when ``homogeneous`` is set. See the output description for more
          information.

        - ``homogeneous`` -- boolean (default: ``False``); if ``True``, use
          homogeneous coordinates for the output
          (see :meth:`eigenvectors_right` for details)

        .. WARNING::

            When using the ``'symmetric'`` or ``'hermitian'`` algorithms,
            no check is made on the input matrix, and only the entries below,
            and on, the main diagonal are employed in the computation.

            Methods such as :meth:`is_symmetric` and :meth:`is_hermitian`
            could be used to verify this beforehand.

        OUTPUT:

        Default output for a square matrix of size `n` is a list of `n`
        eigenvalues from the complex double field,
        :class:`~sage.rings.complex_double.CDF`. If the ``'symmetric'``
        or ``'hermitian'`` algorithms are chosen, the returned eigenvalues
        are from the real double field,
        :class:`~sage.rings.real_double.RDF`.

        If a tolerance is specified, an attempt is made to group eigenvalues
        that are numerically similar. The return is then a list of pairs,
        where each pair is an eigenvalue followed by its multiplicity.
        The eigenvalue reported is the mean of the eigenvalues computed,
        and these eigenvalues are contained in an interval (or disk) whose
        radius is less than ``5*tol`` for `n < 10,000` in the worst case.

        More precisely, for an `n\times n` matrix, the diameter of the
        interval containing similar eigenvalues could be as large as sum
        of the reciprocals of the first `n` integers times ``tol``.

        .. WARNING::

            Use caution when using the ``tol`` parameter to group
            eigenvalues. See the examples below to see how this can go wrong.

        EXAMPLES::

            sage: # needs scipy
            sage: m = matrix(RDF, 2, 2, [1,2,3,4])
            sage: ev = m.eigenvalues(); ev
            [-0.372281323..., 5.37228132...]
            sage: ev[0].parent()
            Complex Double Field

            sage: # needs scipy
            sage: m = matrix(RDF, 2, 2, [0,1,-1,0])
            sage: m.eigenvalues(algorithm='default')
            [1.0*I, -1.0*I]

            sage: m = matrix(CDF, 2, 2, [I,1,-I,0])  # needs sage.symbolic
            sage: m.eigenvalues()  # needs scipy sage.symbolic
            [-0.624810533... + 1.30024259...*I, 0.624810533... - 0.30024259...*I]

        The adjacency matrix of a graph will be symmetric, and the
        eigenvalues will be real. ::

            sage: # needs sage.graphs
            sage: A = graphs.PetersenGraph().adjacency_matrix()
            sage: A = A.change_ring(RDF)
            sage: ev = A.eigenvalues(algorithm='symmetric'); ev  # tol 1e-14
            [-2.0, -2.0, -2.0, -2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0]
            sage: ev[0].parent()
            Real Double Field

        The matrix ``A`` is "random", but the construction of ``C``
        provides a positive-definite Hermitian matrix. Note that
        the eigenvalues of a Hermitian matrix are real, and the
        eigenvalues of a positive-definite matrix will be positive. ::

            sage: # needs sage.symbolic
            sage: A = matrix([[ 4*I + 5, 8*I + 1, 7*I + 5, 3*I + 5],
            ....:             [ 7*I - 2, -4*I + 7, -2*I + 4, 8*I + 8],
            ....:             [-2*I + 1, 6*I + 6, 5*I + 5, -I - 4],
            ....:             [ 5*I + 1, 6*I + 2, I - 4, -I + 3]])
            sage: C = (A*A.conjugate_transpose()).change_ring(CDF)
            sage: ev = C.eigenvalues(algorithm='hermitian'); ev  # needs scipy
            [2.68144025..., 49.5167998..., 274.086188..., 390.71557...]
            sage: ev[0].parent()  # needs scipy
            Real Double Field

        A tolerance can be given to aid in grouping eigenvalues that
        are similar numerically. However, if the parameter is too small
        it might split too finely. Too large, and it can go wrong very
        badly. Use with care. ::

            sage: # needs sage.graphs
            sage: G = graphs.PetersenGraph()
            sage: G.spectrum()
            [3, 1, 1, 1, 1, 1, -2, -2, -2, -2]
            sage: A = G.adjacency_matrix().change_ring(RDF)
            sage: A.eigenvalues(algorithm='symmetric', tol=1.0e-5)  # tol 1e-15
            [(-2.0, 4), (1.0, 5), (3.0, 1)]
            sage: A.eigenvalues(algorithm='symmetric', tol=2.5)  # tol 1e-15
            [(-2.0, 4), (1.3333333333333333, 6)]

        An (extreme) example of properly grouping similar eigenvalues. ::

            sage: # needs sage.graphs
            sage: G = graphs.HigmanSimsGraph()
            sage: A = G.adjacency_matrix().change_ring(RDF)
            sage: A.eigenvalues(algorithm='symmetric', tol=1.0e-5)  # tol 2e-15
            [(-8.0, 22), (2.0, 77), (22.0, 1)]

        In this generalized eigenvalue problem, the homogeneous coordinates
        explain the output obtained for the eigenvalues::

            sage: # needs scipy
            sage: A = matrix.identity(RDF, 2)
            sage: B = matrix(RDF, [[3, 5], [6, 10]])
            sage: A.eigenvalues(B)  # tol 1e-14
            [0.0769230769230769, +infinity]
            sage: E = A.eigenvalues(B, homogeneous=True); E  # random
            [(0.9999999999999999, 13.000000000000002), (0.9999999999999999, 0.0)]
            sage: [alpha/beta for alpha, beta in E]  # tol 1e-14
            [0.0769230769230769, NaN + NaN*I]

        .. SEEALSO::

            :meth:`eigenvectors_left`,
            :meth:`eigenvectors_right`,
            :meth:`.Matrix.eigenmatrix_left`,
            :meth:`.Matrix.eigenmatrix_right`.

        TESTS:

        Testing bad input. ::

            sage: A = matrix(CDF, 2, range(4))
            sage: A.eigenvalues(algorithm='junk')
            Traceback (most recent call last):
            ...
            ValueError: algorithm must be 'default', 'symmetric', or 'hermitian', not junk

            sage: A = matrix(CDF, 2, 3, range(6))
            sage: A.eigenvalues()
            Traceback (most recent call last):
            ...
            ValueError: matrix must be square, not 2 x 3
            sage: matrix.identity(CDF, 2).eigenvalues(A)
            Traceback (most recent call last):
            ...
            ValueError: other matrix must be square, not 2 x 3

            sage: A = matrix(CDF, 2, [1, 2, 3, 4*I])  # needs sage.symbolic
            sage: A.eigenvalues(algorithm='symmetric')  # needs sage.symbolic
            Traceback (most recent call last):
            ...
            TypeError: cannot apply symmetric algorithm to matrix with complex entries

            sage: A = matrix(CDF, 2, 2, range(4))
            sage: A.eigenvalues(tol='junk')
            Traceback (most recent call last):
            ...
            TypeError: tolerance parameter must be a real number, not junk

            sage: A = matrix(CDF, 2, 2, range(4))
            sage: A.eigenvalues(tol=-0.01)
            Traceback (most recent call last):
            ...
            ValueError: tolerance parameter must be positive, not -0.01

        A very small matrix. ::

            sage: matrix(CDF,0,0).eigenvalues()
            []

        Check that homogeneous coordinates work for hermitian positive definite
        input::

            sage: A = matrix.identity(CDF, 2)
            sage: B = matrix(CDF, [[2, 1 + I], [1 - I, 3]])  # needs sage.symbolic
            sage: A.eigenvalues(B, algorithm='hermitian', homogeneous=True)  # tol 1e-14  # needs scipy sage.symbolic
            [(0.25, 1.0), (1.0, 1.0)]

        Test the deprecation::

            sage: # needs sage.graphs
            sage: A = graphs.PetersenGraph().adjacency_matrix().change_ring(RDF)
            sage: ev = A.eigenvalues('symmetric', 1e-13)
            doctest:...: DeprecationWarning: "algorithm" and "tol" should be used
            as keyword argument only
            See https://github.com/sagemath/sage/issues/29243 for details.
            sage: ev  # tol 1e-13
            [(-2.0, 4), (1.0, 5), (3.0, 1)]
            sage: A.eigenvalues('symmetric', 1e-13, tol=1e-12)
            Traceback (most recent call last):
            ...
            TypeError: eigenvalues() got multiple values for keyword argument 'tol'
            sage: A.eigenvalues('symmetric', algorithm='hermitian')
            Traceback (most recent call last):
            ...
            TypeError: eigenvalues() got multiple values for keyword argument 'algorithm'
        """
        from sage.rings.real_double import RDF
        from sage.rings.complex_double import CDF
        if isinstance(other, str):
            # for backward compatibility, allow algorithm to be passed as first
            # positional argument and tol as second positional argument
            from sage.misc.superseded import deprecation
            deprecation(29243, '"algorithm" and "tol" should be used as '
                        'keyword argument only')
            if algorithm != 'default':
                if isinstance(algorithm, str):
                    raise TypeError("eigenvalues() got multiple values for "
                                    "keyword argument 'algorithm'")
                if tol is not None:
                    raise TypeError("eigenvalues() got multiple values for "
                                    "keyword argument 'tol'")
                tol = algorithm
            algorithm = other
            other = None
        if algorithm not in ['default', 'symmetric', 'hermitian']:
            msg = "algorithm must be 'default', 'symmetric', or 'hermitian', not {0}"
            raise ValueError(msg.format(algorithm))
        if not self.is_square():
            raise ValueError('matrix must be square, not %s x %s'
                             % (self.nrows(), self.ncols()))
        if other is not None and not other.is_square():
            raise ValueError('other matrix must be square, not %s x %s'
                             % (other.nrows(), other.ncols()))
        if algorithm == 'symmetric':
            if self.base_ring() != RDF:
                try:
                    self = self.change_ring(RDF)  # check side effect
                except TypeError:
                    raise TypeError('cannot apply symmetric algorithm to matrix with complex entries')
            if other is not None and other.base_ring() != RDF:
                try:
                    other = other.change_ring(RDF)  # check side effect
                except TypeError:
                    raise TypeError('cannot apply symmetric algorithm to matrix with complex entries')
            algorithm = 'hermitian'
        if homogeneous:
            tol = None
        multiplicity = (tol is not None)
        if multiplicity:
            try:
                tol = float(tol)
            except (ValueError, TypeError):
                msg = 'tolerance parameter must be a real number, not {0}'
                raise TypeError(msg.format(tol))
            if tol < 0:
                msg = 'tolerance parameter must be positive, not {0}'
                raise ValueError(msg.format(tol))

        if self._nrows == 0:
            return []
        global scipy
        if scipy is None:
            import scipy
            import scipy.linalg
        global numpy
        if numpy is None:
            import numpy
        other_numpy = None if other is None else other.numpy()
        # generic eigenvalues, or real eigenvalues for Hermitian
        if algorithm == 'default':
            return_class = CDF
            evalues = scipy.linalg.eigvals(self._matrix_numpy, other_numpy,
                                           homogeneous_eigvals=homogeneous)
        elif algorithm == 'hermitian':
            return_class = RDF
            evalues = scipy.linalg.eigh(self._matrix_numpy, other_numpy,
                                        eigvals_only=True)
            if homogeneous:
                # eigh does not support homogeneous output
                evalues = evalues, [RDF.one()] * len(evalues)

        if homogeneous:
            return [(return_class(a), return_class(b))
                    for a, b in zip(*evalues)]
        elif not multiplicity:
            return [return_class(e) for e in evalues]
        else:
            # pairs in ev_group are
            # slot 0: the sum of "equal" eigenvalues, "s"
            # slot 1: number of eigenvalues in this sum, "m"
            # slot 2: average of these eigenvalues, "avg"
            # we test if "new" eigenvalues are close to the group average
            ev_group = []
            for e in evalues:
                location = None
                best_fit = tol
                for i in range(len(ev_group)):
                    _, m, avg = ev_group[i]
                    d = numpy.abs(avg - e)
                    if d < best_fit:
                        best_fit = d
                        location = i
                if location is None:
                    ev_group.append([e, 1, e])
                else:
                    ev_group[location][0] += e
                    ev_group[location][1] += 1
                    ev_group[location][2] = ev_group[location][0]/ev_group[location][1]
            return [(return_class(avg), m) for _, m, avg in ev_group]

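    # Illustrative sketch, not part of the original source: the ``tol``
    # grouping above keeps a running (sum, count, mean) per group and merges
    # each eigenvalue into the nearest group mean within ``tol``.  A
    # standalone version of that bookkeeping:
    #
    #     def group_eigenvalues(evalues, tol):
    #         groups = []                     # entries: [sum, count, mean]
    #         for e in evalues:
    #             best, where = tol, None
    #             for i, (_, m, avg) in enumerate(groups):
    #                 d = abs(avg - e)
    #                 if d < best:
    #                     best, where = d, i
    #             if where is None:
    #                 groups.append([e, 1, e])
    #             else:
    #                 s, m, _ = groups[where]
    #                 groups[where] = [s + e, m + 1, (s + e) / (m + 1)]
    #         return [(avg, m) for _, m, avg in groups]
    #
    #     group_eigenvalues([3.0, 1.0, 1.0, -2.0, -2.0], 1e-8)
    #     # -> [(3.0, 1), (1.0, 2), (-2.0, 2)]
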
    def eigenvectors_left(self, other=None, *, algorithm=None, homogeneous=False):
        r"""
        Compute the ordinary or generalized left eigenvectors of a matrix of
        double precision real or complex numbers (i.e. ``RDF`` or ``CDF``).

        INPUT:

        - ``other`` -- a square matrix `B` (default: ``None``) in a generalized
          eigenvalue problem; if ``None``, an ordinary eigenvalue problem is
          solved

        - ``algorithm`` (default: ``None``); for compatibility with
          :meth:`sage.matrix.matrix2.Matrix.eigenvectors_left`, supported options
          are ``None`` (select automatically) or ``'scipy'``

        - ``homogeneous`` -- boolean (default: ``False``); if ``True``, use
          homogeneous coordinates for the eigenvalues in the output

        OUTPUT:

        A list of triples, each of the form ``(e,[v],1)``,
        where ``e`` is the eigenvalue, and ``v`` is an associated
        left eigenvector such that

        .. MATH::

            v A = e v.

        If the matrix `A` is of size `n`, then there are `n` triples.

        If a matrix `B` is passed as optional argument, the output is a
        solution to the generalized eigenvalue problem such that

        .. MATH::

            v A = e v B.

        If ``homogeneous`` is set, each eigenvalue is returned as a tuple
        `(\alpha, \beta)` of homogeneous coordinates such that

        .. MATH::

            \beta v A = \alpha v B.

        The format of the output is designed to match the format
        for exact results. However, since matrices here have numerical
        entries, the resulting eigenvalues will also be numerical. No
        attempt is made to determine if two eigenvalues are equal, or if
        eigenvalues might actually be zero. So the algebraic multiplicity
        of each eigenvalue is reported as 1. Decisions about equal
        eigenvalues or zero eigenvalues should be addressed in the
        calling routine.

        The SciPy routines used for these computations produce eigenvectors
        normalized to have length 1, but on different hardware they may vary
        by a complex sign. So for doctests we have normalized output by forcing
        their eigenvectors to have their first nonzero entry equal to one.

        ALGORITHM:

        Values are computed with the SciPy library using
        :func:`scipy:scipy.linalg.eig`.

        EXAMPLES::

            sage: # needs scipy
            sage: m = matrix(RDF, [[-5, 3, 2, 8],[10, 2, 4, -2],[-1, -10, -10, -17],[-2, 7, 6, 13]])
            sage: m
            [ -5.0 3.0 2.0 8.0]
            [ 10.0 2.0 4.0 -2.0]
            [ -1.0 -10.0 -10.0 -17.0]
            [ -2.0 7.0 6.0 13.0]
            sage: spectrum = m.left_eigenvectors()
            sage: for i in range(len(spectrum)):
            ....:     spectrum[i][1][0] = matrix(RDF, spectrum[i][1]).echelon_form()[0]
            sage: spectrum[0]  # tol 1e-13
            (2.0, [(1.0, 1.0, 1.0, 1.0)], 1)
            sage: spectrum[1]  # tol 1e-13
            (1.0, [(1.0, 0.8, 0.8, 0.6)], 1)
            sage: spectrum[2]  # tol 1e-13
            (-2.0, [(1.0, 0.4, 0.6, 0.2)], 1)
            sage: spectrum[3]  # tol 1e-13
            (-1.0, [(1.0, 1.0, 2.0, 2.0)], 1)

        A generalized eigenvalue problem::

            sage: # needs scipy
            sage: A = matrix(CDF, [[1+I, -2], [3, 4]])
            sage: B = matrix(CDF, [[0, 7-I], [2, -3]])
            sage: E = A.eigenvectors_left(B)
            sage: all((v * A - e * v * B).norm() < 1e-14 for e, [v], _ in E)
            True

        In a generalized eigenvalue problem with a singular matrix `B`, we can
        check the eigenvector property using homogeneous coordinates, even
        though the quotient `\alpha/\beta` is not always defined::

            sage: # needs scipy
            sage: A = matrix.identity(CDF, 2)
            sage: B = matrix(CDF, [[2, 1+I], [4, 2+2*I]])
            sage: E = A.eigenvectors_left(B, homogeneous=True)
            sage: all((beta * v * A - alpha * v * B).norm() < 1e-14
            ....:     for (alpha, beta), [v], _ in E)
            True

        .. SEEALSO::

            :meth:`eigenvalues`,
            :meth:`eigenvectors_right`,
            :meth:`.Matrix.eigenmatrix_left`.

        TESTS:

        The following example shows that :issue:`20439` has been resolved::

            sage: # needs scipy
            sage: A = matrix(CDF, [[-2.53634347567, 2.04801738686, -0.0, -62.166145304],
            ....:                  [ 0.7, -0.6, 0.0, 0.0],
            ....:                  [0.547271128842, 0.0, -0.3015, -21.7532081652],
            ....:                  [0.0, 0.0, 0.3, -0.4]])
            sage: spectrum = A.left_eigenvectors()
            sage: all((Matrix(spectrum[i][1])*(A - spectrum[i][0])).norm() < 10^(-2)
            ....:     for i in range(A.nrows()))
            True

        The following example shows that the fix for :issue:`20439` (conjugating
        eigenvectors rather than eigenvalues) is the correct one::

            sage: # needs scipy
            sage: A = Matrix(CDF,[[I,0],[0,1]])
            sage: spectrum = A.left_eigenvectors()
            sage: for i in range(len(spectrum)):
            ....:     spectrum[i][1][0] = matrix(CDF, spectrum[i][1]).echelon_form()[0]
            sage: spectrum
            [(1.0*I, [(1.0, 0.0)], 1), (1.0, [(0.0, 1.0)], 1)]
        """
        if algorithm not in (None, "scipy"):
            raise NotImplementedError(f"algorithm {algorithm} not implemented for matrix over {self.base_ring()}")
        if not self.is_square():
            raise ArithmeticError("self must be a square matrix")
        if other is not None and not other.is_square():
            raise ArithmeticError("other must be a square matrix")
        if self._nrows == 0:
            return [], self.__copy__()
        global scipy
        if scipy is None:
            import scipy
            import scipy.linalg
        v, eig = scipy.linalg.eig(self._matrix_numpy,
                                  None if other is None else other.numpy(),
                                  right=False, left=True,
                                  homogeneous_eigvals=homogeneous)
        # scipy puts eigenvectors in columns, we will extract from rows
        eig = matrix(eig.T)
        if other is not None:
            # scipy fails to normalize generalized left eigenvectors
            # (see https://github.com/scipy/scipy/issues/11550),
            # FIXME: remove this normalization step once that issue is resolved
            eig = [v.normalized() for v in eig]
        from sage.rings.complex_double import CDF
        if homogeneous:
            v = [(CDF(a), CDF(b)) for a, b in v.T]
        else:
            v = [CDF(e) for e in v]
        return [(v[i], [eig[i].conjugate()], 1) for i in range(len(v))]

    left_eigenvectors = eigenvectors_left

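    # Illustrative sketch, not part of the original source: SciPy's left
    # eigenvectors satisfy conj(vl[:, i]) @ A == w[i] * conj(vl[:, i]), which
    # is why the extracted rows are conjugated above.  A plain SciPy check:
    #
    #     import numpy as np
    #     from scipy.linalg import eig
    #     A = np.array([[1.0j, 0.0], [0.0, 1.0]])
    #     w, vl = eig(A, left=True, right=False)
    #     for i in range(len(w)):
    #         u = vl[:, i].conj()
    #         assert np.allclose(u @ A, w[i] * u)
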
    def eigenvectors_right(self, other=None, *, homogeneous=False):
        r"""
        Compute the ordinary or generalized right eigenvectors of a matrix of
        double precision real or complex numbers (i.e. ``RDF`` or ``CDF``).

        INPUT:

        - ``other`` -- a square matrix `B` (default: ``None``) in a generalized
          eigenvalue problem; if ``None``, an ordinary eigenvalue problem is
          solved

        - ``homogeneous`` -- boolean (default: ``False``); if ``True``, use
          homogeneous coordinates for the eigenvalues in the output

        OUTPUT:

        A list of triples, each of the form ``(e,[v],1)``,
        where ``e`` is the eigenvalue, and ``v`` is an associated
        right eigenvector such that

        .. MATH::

            A v = e v.

        If the matrix `A` is of size `n`, then there are `n` triples.

        If a matrix `B` is passed as optional argument, the output is a
        solution to the generalized eigenvalue problem such that

        .. MATH::

            A v = e B v.

        If ``homogeneous`` is set, each eigenvalue is returned as a tuple
        `(\alpha, \beta)` of homogeneous coordinates such that

        .. MATH::

            \beta A v = \alpha B v.

        The format of the output is designed to match the format
        for exact results. However, since matrices here have numerical
        entries, the resulting eigenvalues will also be numerical. No
        attempt is made to determine if two eigenvalues are equal, or if
        eigenvalues might actually be zero. So the algebraic multiplicity
        of each eigenvalue is reported as 1. Decisions about equal
        eigenvalues or zero eigenvalues should be addressed in the
        calling routine.

        The SciPy routines used for these computations produce eigenvectors
        normalized to have length 1, but on different hardware they may vary
        by a complex sign. So for doctests we have normalized output by forcing
        their eigenvectors to have their first nonzero entry equal to one.

        ALGORITHM:

        Values are computed with the SciPy library using
        :func:`scipy:scipy.linalg.eig`.

        EXAMPLES::

            sage: # needs scipy
            sage: m = matrix(RDF, [[-9, -14, 19, -74],[-1, 2, 4, -11],[-4, -12, 6, -32],[0, -2, -1, 1]])
            sage: m
            [ -9.0 -14.0 19.0 -74.0]
            [ -1.0 2.0 4.0 -11.0]
            [ -4.0 -12.0 6.0 -32.0]
            [ 0.0 -2.0 -1.0 1.0]
            sage: spectrum = m.right_eigenvectors()
            sage: for i in range(len(spectrum)):
            ....:     spectrum[i][1][0] = matrix(RDF, spectrum[i][1]).echelon_form()[0]
            sage: spectrum[0]  # tol 1e-13
            (2.0, [(1.0, -2.0, 3.0, 1.0)], 1)
            sage: spectrum[1]  # tol 1e-13
            (1.0, [(1.0, -0.666666666666633, 1.333333333333286, 0.33333333333331555)], 1)
            sage: spectrum[2]  # tol 1e-13
            (-2.0, [(1.0, -0.2, 1.0, 0.2)], 1)
            sage: spectrum[3]  # tol 1e-12
            (-1.0, [(1.0, -0.5, 2.0, 0.5)], 1)

        A generalized eigenvalue problem::

            sage: # needs scipy
            sage: A = matrix(CDF, [[1+I, -2], [3, 4]])
            sage: B = matrix(CDF, [[0, 7-I], [2, -3]])
            sage: E = A.eigenvectors_right(B)
            sage: all((A * v - e * B * v).norm() < 1e-14 for e, [v], _ in E)
            True

        In a generalized eigenvalue problem with a singular matrix `B`, we can
        check the eigenvector property using homogeneous coordinates, even
        though the quotient `\alpha/\beta` is not always defined::

            sage: # needs scipy
            sage: A = matrix.identity(RDF, 2)
            sage: B = matrix(RDF, [[3, 5], [6, 10]])
            sage: E = A.eigenvectors_right(B, homogeneous=True)
            sage: all((beta * A * v - alpha * B * v).norm() < 1e-14
            ....:     for (alpha, beta), [v], _ in E)
            True

        .. SEEALSO::

            :meth:`eigenvalues`,
            :meth:`eigenvectors_left`,
            :meth:`.Matrix.eigenmatrix_right`.

        TESTS:

        The following example shows that :issue:`20439` has been resolved::

            sage: # needs scipy
            sage: A = matrix(CDF, [[-2.53634347567, 2.04801738686, -0.0, -62.166145304],
            ....:                  [ 0.7, -0.6, 0.0, 0.0],
            ....:                  [0.547271128842, 0.0, -0.3015, -21.7532081652],
            ....:                  [0.0, 0.0, 0.3, -0.4]])
            sage: spectrum = A.right_eigenvectors()
            sage: all(((A - spectrum[i][0]) * Matrix(spectrum[i][1]).transpose()).norm() < 10^(-2)
            ....:     for i in range(A.nrows()))
            True

        The following example shows that the fix for :issue:`20439` (conjugating
        eigenvectors rather than eigenvalues) is the correct one::

            sage: # needs scipy
            sage: A = Matrix(CDF,[[I,0],[0,1]])
            sage: spectrum = A.right_eigenvectors()
            sage: for i in range(len(spectrum)):
            ....:     spectrum[i][1][0] = matrix(CDF, spectrum[i][1]).echelon_form()[0]
            sage: spectrum
            [(1.0*I, [(1.0, 0.0)], 1), (1.0, [(0.0, 1.0)], 1)]
        """
        if not self.is_square():
            raise ArithmeticError("self must be a square matrix")
        if other is not None and not other.is_square():
            raise ArithmeticError("other must be a square matrix")
        if self._nrows == 0:
            return [], self.__copy__()
        global scipy
        if scipy is None:
            import scipy
            import scipy.linalg
        v, eig = scipy.linalg.eig(self._matrix_numpy,
                                  None if other is None else other.numpy(),
                                  right=True, left=False,
                                  homogeneous_eigvals=homogeneous)
        # scipy puts eigenvectors in columns, we will extract from rows
        eig = matrix(eig.T)
        from sage.rings.complex_double import CDF
        if homogeneous:
            v = [(CDF(a), CDF(b)) for a, b in v.T]
        else:
            v = [CDF(e) for e in v]
        return [(v[i], [eig[i]], 1) for i in range(len(v))]

    right_eigenvectors = eigenvectors_right

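    # Illustrative sketch, not part of the original source: the generalized
    # problem solved above is A v = e B v; with plain SciPy the pencil is
    # passed as a second argument:
    #
    #     import numpy as np
    #     from scipy.linalg import eig
    #     A = np.array([[1.0 + 1.0j, -2.0], [3.0, 4.0]])
    #     B = np.array([[0.0, 7.0 - 1.0j], [2.0, -3.0]])
    #     w, vr = eig(A, B)
    #     for i in range(len(w)):
    #         assert np.allclose(A @ vr[:, i], w[i] * (B @ vr[:, i]))
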
    def _solve_right_nonsingular_square(self, B, check_rank=False):
        """
        Find a solution `X` to the equation `A X = B` if ``self`` is a square
        matrix `A`.

        ALGORITHM:

        Uses the function :func:`scipy:scipy.linalg.solve` from SciPy.

        TESTS::

            sage: # needs scipy sage.symbolic
            sage: A = matrix(CDF, [[1, 2], [3, 3+I]])
            sage: b = matrix(CDF, [[1, 0], [2, 1]])
            sage: x = A._solve_right_nonsingular_square(b)
            sage: (A * x - b).norm() < 1e-14
            True
        """
        global scipy
        if scipy is None:
            import scipy
            import scipy.linalg
        X = self._new(self._ncols, B.ncols())
        # may raise a LinAlgError for a singular matrix
        X._matrix_numpy = scipy.linalg.solve(self._matrix_numpy, B.numpy())
        return X

1762
|
+
def _solve_right_general(self, B, check=False):
|
|
1763
|
+
"""
|
|
1764
|
+
Compute a least-squares solution `X` to the equation `A X = B` where
|
|
1765
|
+
``self`` is the matrix `A`.
|
|
1766
|
+
|
|
1767
|
+
ALGORITHM:
|
|
1768
|
+
|
|
1769
|
+
Uses the function :func:`scipy:scipy.linalg.lstsq` from SciPy.
|
|
1770
|
+
|
|
1771
|
+
TESTS::
|
|
1772
|
+
|
|
1773
|
+
sage: # needs scipy
|
|
1774
|
+
sage: A = matrix(RDF, 3, 2, [1, 3, 4, 2, 0, -3])
|
|
1775
|
+
sage: b = matrix(RDF, 3, 2, [5, 6, 1, 0, 0, 2])
|
|
1776
|
+
sage: x = A._solve_right_general(b)
|
|
1777
|
+
sage: y = ~(A.T * A) * A.T * b # closed form solution
|
|
1778
|
+
sage: (x - y).norm() < 1e-14
|
|
1779
|
+
True
|
|
1780
|
+
"""
|
|
1781
|
+
global scipy
|
|
1782
|
+
if scipy is None:
|
|
1783
|
+
import scipy
|
|
1784
|
+
import scipy.linalg
|
|
1785
|
+
X = self._new(self._ncols, B.ncols())
|
|
1786
|
+
arr = scipy.linalg.lstsq(self._matrix_numpy, B.numpy())[0]
|
|
1787
|
+
X._matrix_numpy = arr
|
|
1788
|
+
return X
|
|
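# A small sketch (plain NumPy/SciPy, outside Sage) relating scipy.linalg.lstsq
# to the closed-form normal-equations solution (A^T A)^{-1} A^T b used in the
# test above; assumes A has full column rank.
import numpy as np
from scipy.linalg import lstsq

A = np.array([[1.0, 3.0], [4.0, 2.0], [0.0, -3.0]])
b = np.array([[5.0, 6.0], [1.0, 0.0], [0.0, 2.0]])

x = lstsq(A, b)[0]                        # least-squares solution
y = np.linalg.solve(A.T @ A, A.T @ b)     # normal equations, closed form
assert np.linalg.norm(x - y) < 1e-12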
1789
|
+
|
|
1790
|
+
def determinant(self):
|
|
1791
|
+
"""
|
|
1792
|
+
Return the determinant of ``self``.
|
|
1793
|
+
|
|
1794
|
+
ALGORITHM:
|
|
1795
|
+
|
|
1796
|
+
Uses :func:`scipy:scipy.linalg.det`.
|
|
1797
|
+
|
|
1798
|
+
EXAMPLES::
|
|
1799
|
+
|
|
1800
|
+
sage: # needs scipy
|
|
1801
|
+
sage: m = matrix(RDF,2,range(4)); m.det()
|
|
1802
|
+
-2.0
|
|
1803
|
+
sage: m = matrix(RDF,0,[]); m.det()
|
|
1804
|
+
1.0
|
|
1805
|
+
sage: m = matrix(RDF, 2, range(6)); m.det()
|
|
1806
|
+
Traceback (most recent call last):
|
|
1807
|
+
...
|
|
1808
|
+
ValueError: self must be a square matrix
|
|
1809
|
+
"""
|
|
1810
|
+
if not self.is_square():
|
|
1811
|
+
raise ValueError("self must be a square matrix")
|
|
1812
|
+
if self._nrows == 0 or self._ncols == 0:
|
|
1813
|
+
return self._sage_dtype(1)
|
|
1814
|
+
global scipy
|
|
1815
|
+
if scipy is None:
|
|
1816
|
+
import scipy
|
|
1817
|
+
import scipy.linalg
|
|
1818
|
+
|
|
1819
|
+
return self._sage_dtype(scipy.linalg.det(self._matrix_numpy))
|
|
1820
|
+
|
|
1821
|
+
def log_determinant(self):
|
|
1822
|
+
"""
|
|
1823
|
+
Compute the log of the absolute value of the determinant
|
|
1824
|
+
using LU decomposition.
|
|
1825
|
+
|
|
1826
|
+
.. NOTE::
|
|
1827
|
+
|
|
1828
|
+
This is useful if the usual determinant overflows.
|
|
1829
|
+
|
|
1830
|
+
EXAMPLES::
|
|
1831
|
+
|
|
1832
|
+
sage: # needs scipy
|
|
1833
|
+
sage: m = matrix(RDF,2,2,range(4)); m
|
|
1834
|
+
[0.0 1.0]
|
|
1835
|
+
[2.0 3.0]
|
|
1836
|
+
sage: RDF(log(abs(m.determinant())))
|
|
1837
|
+
0.6931471805599453
|
|
1838
|
+
sage: m.log_determinant()
|
|
1839
|
+
0.6931471805599453
|
|
1840
|
+
sage: m = matrix(RDF,0,0,[]); m
|
|
1841
|
+
[]
|
|
1842
|
+
sage: m.log_determinant()
|
|
1843
|
+
0.0
|
|
1844
|
+
sage: m = matrix(CDF,2,2,range(4)); m
|
|
1845
|
+
[0.0 1.0]
|
|
1846
|
+
[2.0 3.0]
|
|
1847
|
+
sage: RDF(log(abs(m.determinant())))
|
|
1848
|
+
0.6931471805599453
|
|
1849
|
+
sage: m.log_determinant()
|
|
1850
|
+
0.6931471805599453
|
|
1851
|
+
sage: m = matrix(CDF,0,0,[]); m
|
|
1852
|
+
[]
|
|
1853
|
+
sage: m.log_determinant()
|
|
1854
|
+
0.0
|
|
1855
|
+
"""
|
|
1856
|
+
global numpy
|
|
1857
|
+
cdef Matrix_double_dense U
|
|
1858
|
+
|
|
1859
|
+
if self._nrows == 0 or self._ncols == 0:
|
|
1860
|
+
return sage.rings.real_double.RDF(0)
|
|
1861
|
+
|
|
1862
|
+
if not self.is_square():
|
|
1863
|
+
raise ArithmeticError("self must be a square matrix")
|
|
1864
|
+
|
|
1865
|
+
_, _, U = self.LU()
|
|
1866
|
+
if numpy is None:
|
|
1867
|
+
import numpy
|
|
1868
|
+
|
|
1869
|
+
return sage.rings.real_double.RDF(sum(numpy.log(abs(numpy.diag(U._matrix_numpy)))))
|
|
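# A sketch of the identity behind log_determinant(): writing A = P*L*U with a
# permutation P (|det P| = 1) and unit-diagonal L gives
# log|det A| = sum(log|u_ii|), which avoids overflow in the determinant itself.
# Plain NumPy/SciPy, outside Sage.
import numpy as np
from scipy.linalg import lu

A = np.array([[0.0, 1.0], [2.0, 3.0]])
P, L, U = lu(A)
log_det = np.sum(np.log(np.abs(np.diag(U))))
assert abs(log_det - np.log(abs(np.linalg.det(A)))) < 1e-12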
1870
|
+
|
|
1871
|
+
def conjugate(self):
|
|
1872
|
+
r"""
|
|
1873
|
+
Return the conjugate of this matrix, i.e. the matrix whose entries are
|
|
1874
|
+
the conjugates of the entries of ``self``.
|
|
1875
|
+
|
|
1876
|
+
EXAMPLES::
|
|
1877
|
+
|
|
1878
|
+
sage: # needs sage.symbolic
|
|
1879
|
+
sage: A = matrix(CDF, [[1+I, 3-I], [0, 2*I]])
|
|
1880
|
+
sage: A.conjugate()
|
|
1881
|
+
[1.0 - 1.0*I 3.0 + 1.0*I]
|
|
1882
|
+
[ 0.0 -2.0*I]
|
|
1883
|
+
|
|
1884
|
+
There is a shorthand notation::
|
|
1885
|
+
|
|
1886
|
+
sage: A.conjugate() == A.C # needs sage.symbolic
|
|
1887
|
+
True
|
|
1888
|
+
|
|
1889
|
+
Conjugates work (trivially) for real matrices::
|
|
1890
|
+
|
|
1891
|
+
sage: B = matrix.random(RDF, 3)
|
|
1892
|
+
sage: B == B.conjugate()
|
|
1893
|
+
True
|
|
1894
|
+
|
|
1895
|
+
TESTS::
|
|
1896
|
+
|
|
1897
|
+
sage: matrix(CDF, 0).conjugate()
|
|
1898
|
+
[]
|
|
1899
|
+
"""
|
|
1900
|
+
cdef Matrix_double_dense A
|
|
1901
|
+
A = self._new(self._nrows, self._ncols)
|
|
1902
|
+
A._matrix_numpy = self._matrix_numpy.conjugate()
|
|
1903
|
+
if self._subdivisions is not None:
|
|
1904
|
+
A.subdivide(*self.subdivisions())
|
|
1905
|
+
return A
|
|
1906
|
+
|
|
1907
|
+
def SVD(self):
|
|
1908
|
+
r"""
|
|
1909
|
+
Return the singular value decomposition of this matrix.
|
|
1910
|
+
|
|
1911
|
+
The `U` and `V` matrices are not unique and may be returned with different
|
|
1912
|
+
values in the future or on different systems. The `S` matrix is unique
|
|
1913
|
+
and contains the singular values in descending order.
|
|
1914
|
+
|
|
1915
|
+
The computed decomposition is cached and returned on subsequent calls.
|
|
1916
|
+
|
|
1917
|
+
INPUT:
|
|
1918
|
+
|
|
1919
|
+
- ``A`` -- a matrix
|
|
1920
|
+
|
|
1921
|
+
OUTPUT:
|
|
1922
|
+
|
|
1923
|
+
``U, S, V`` -- immutable matrices such that ``A = U*S*V.conjugate_transpose()``
|
|
1924
|
+
where `U` and `V` are unitary (orthogonal in the real case) and `S` is zero off the diagonal
|
|
1925
|
+
|
|
1926
|
+
Note that if ``self`` is m-by-n, then the dimensions of the
|
|
1927
|
+
matrices that this returns are (m, m), (m, n), and (n, n).
|
|
1928
|
+
|
|
1929
|
+
.. NOTE::
|
|
1930
|
+
|
|
1931
|
+
If all you need is the singular values of the matrix, see
|
|
1932
|
+
the more convenient :meth:`singular_values`.
|
|
1933
|
+
|
|
1934
|
+
EXAMPLES::
|
|
1935
|
+
|
|
1936
|
+
sage: # needs scipy
|
|
1937
|
+
sage: m = matrix(RDF,4,range(1,17))
|
|
1938
|
+
sage: U,S,V = m.SVD()
|
|
1939
|
+
sage: U*S*V.transpose() # tol 1e-14
|
|
1940
|
+
[0.9999999999999993 1.9999999999999987 3.000000000000001 4.000000000000002]
|
|
1941
|
+
[ 4.999999999999998 5.999999999999998 6.999999999999998 8.0]
|
|
1942
|
+
[ 8.999999999999998 9.999999999999996 10.999999999999998 12.0]
|
|
1943
|
+
[12.999999999999998 14.0 15.0 16.0]
|
|
1944
|
+
|
|
1945
|
+
A non-square example::
|
|
1946
|
+
|
|
1947
|
+
sage: # needs scipy
|
|
1948
|
+
sage: m = matrix(RDF, 2, range(1,7)); m
|
|
1949
|
+
[1.0 2.0 3.0]
|
|
1950
|
+
[4.0 5.0 6.0]
|
|
1951
|
+
sage: U, S, V = m.SVD()
|
|
1952
|
+
sage: U*S*V.transpose() # tol 1e-14
|
|
1953
|
+
[0.9999999999999994 1.9999999999999998 2.999999999999999]
|
|
1954
|
+
[ 4.000000000000001 5.000000000000002 6.000000000000001]
|
|
1955
|
+
|
|
1956
|
+
S contains the singular values::
|
|
1957
|
+
|
|
1958
|
+
sage: # needs scipy
|
|
1959
|
+
sage: S.round(4)
|
|
1960
|
+
[ 9.508 0.0 0.0]
|
|
1961
|
+
[ 0.0 0.7729 0.0]
|
|
1962
|
+
sage: [N(sqrt(abs(x)), digits=4) for x in (S*S.transpose()).eigenvalues()]
|
|
1963
|
+
[9.508, 0.7729]
|
|
1964
|
+
|
|
1965
|
+
U and V are orthogonal matrices::
|
|
1966
|
+
|
|
1967
|
+
sage: # needs scipy
|
|
1968
|
+
sage: U # random, SVD is not unique
|
|
1969
|
+
[-0.386317703119 -0.922365780077]
|
|
1970
|
+
[-0.922365780077 0.386317703119]
|
|
1971
|
+
[-0.274721127897 -0.961523947641]
|
|
1972
|
+
[-0.961523947641 0.274721127897]
|
|
1973
|
+
sage: (U*U.transpose()) # tol 1e-15
|
|
1974
|
+
[ 1.0 0.0]
|
|
1975
|
+
[ 0.0 1.0000000000000004]
|
|
1976
|
+
sage: V # random, SVD is not unique
|
|
1977
|
+
[-0.428667133549 0.805963908589 0.408248290464]
|
|
1978
|
+
[-0.566306918848 0.112382414097 -0.816496580928]
|
|
1979
|
+
[-0.703946704147 -0.581199080396 0.408248290464]
|
|
1980
|
+
sage: (V*V.transpose()) # tol 1e-15
|
|
1981
|
+
[0.9999999999999999 0.0 0.0]
|
|
1982
|
+
[ 0.0 1.0 0.0]
|
|
1983
|
+
[ 0.0 0.0 0.9999999999999999]
|
|
1984
|
+
|
|
1985
|
+
TESTS::
|
|
1986
|
+
|
|
1987
|
+
sage: # needs scipy
|
|
1988
|
+
sage: m = matrix(RDF,3,2,range(1, 7)); m
|
|
1989
|
+
[1.0 2.0]
|
|
1990
|
+
[3.0 4.0]
|
|
1991
|
+
[5.0 6.0]
|
|
1992
|
+
sage: U,S,V = m.SVD()
|
|
1993
|
+
sage: U*S*V.transpose() # tol 1e-15
|
|
1994
|
+
[0.9999999999999996 1.9999999999999998]
|
|
1995
|
+
[ 3.0 3.9999999999999996]
|
|
1996
|
+
[ 4.999999999999999 6.000000000000001]
|
|
1997
|
+
|
|
1998
|
+
sage: # needs scipy
|
|
1999
|
+
sage: m = matrix(RDF, 3, 0, []); m
|
|
2000
|
+
[]
|
|
2001
|
+
sage: m.SVD()
|
|
2002
|
+
([], [], [])
|
|
2003
|
+
sage: m = matrix(RDF, 0, 3, []); m
|
|
2004
|
+
[]
|
|
2005
|
+
sage: m.SVD()
|
|
2006
|
+
([], [], [])
|
|
2007
|
+
sage: def shape(x): return (x.nrows(), x.ncols())
|
|
2008
|
+
sage: m = matrix(RDF, 2, 3, range(6))
|
|
2009
|
+
sage: list(map(shape, m.SVD()))
|
|
2010
|
+
[(2, 2), (2, 3), (3, 3)]
|
|
2011
|
+
sage: for x in m.SVD(): x.is_immutable()
|
|
2012
|
+
True
|
|
2013
|
+
True
|
|
2014
|
+
True
|
|
2015
|
+
"""
|
|
2016
|
+
global scipy, numpy
|
|
2017
|
+
cdef Py_ssize_t i
|
|
2018
|
+
cdef Matrix_double_dense U, S, V
|
|
2019
|
+
|
|
2020
|
+
if self._nrows == 0 or self._ncols == 0:
|
|
2021
|
+
U_t = self.new_matrix(self._nrows, self._ncols)
|
|
2022
|
+
S_t = self.new_matrix(self._nrows, self._ncols)
|
|
2023
|
+
V_t = self.new_matrix(self._ncols, self._nrows)
|
|
2024
|
+
return U_t, S_t, V_t
|
|
2025
|
+
|
|
2026
|
+
USV = self.fetch('SVD_factors')
|
|
2027
|
+
if USV is None:
|
|
2028
|
+
# TODO: More efficient representation of non-square diagonal matrix S
|
|
2029
|
+
if scipy is None:
|
|
2030
|
+
import scipy
|
|
2031
|
+
import scipy.linalg
|
|
2032
|
+
if numpy is None:
|
|
2033
|
+
import numpy
|
|
2034
|
+
U_mat, S_diagonal, V_mat = scipy.linalg.svd(self._matrix_numpy)
|
|
2035
|
+
|
|
2036
|
+
U = self._new(self._nrows, self._nrows)
|
|
2037
|
+
S = self._new(self._nrows, self._ncols)
|
|
2038
|
+
V = self._new(self._ncols, self._ncols)
|
|
2039
|
+
|
|
2040
|
+
S_mat = numpy.zeros((self._nrows, self._ncols), dtype=self._numpy_dtype)
|
|
2041
|
+
for i in range(S_diagonal.shape[0]):
|
|
2042
|
+
S_mat[i,i] = S_diagonal[i]
|
|
2043
|
+
|
|
2044
|
+
U._matrix_numpy = numpy.ascontiguousarray(U_mat)
|
|
2045
|
+
S._matrix_numpy = S_mat
|
|
2046
|
+
V._matrix_numpy = numpy.ascontiguousarray(V_mat.conj().T)
|
|
2047
|
+
USV = U, S, V
|
|
2048
|
+
for M in USV: M.set_immutable()
|
|
2049
|
+
self.cache('SVD_factors', USV)
|
|
2050
|
+
|
|
2051
|
+
return USV
|
|
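# A sketch of how the three factors above are assembled from scipy.linalg.svd,
# which returns U, a 1-D array of singular values s, and Vh = V^*: the values
# s are embedded into a rectangular "diagonal" S so that A = U*S*V^*.
# Plain NumPy/SciPy, outside Sage.
import numpy as np
from scipy.linalg import svd

A = np.arange(6, dtype=float).reshape(2, 3)
U, s, Vh = svd(A)

S = np.zeros(A.shape)
for i, sv in enumerate(s):      # mirror the loop used in the method above
    S[i, i] = sv
V = Vh.conj().T

assert np.linalg.norm(U @ S @ V.conj().T - A) < 1e-12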
2052
|
+
|
|
2053
|
+
def QR(self):
|
|
2054
|
+
r"""
|
|
2055
|
+
Return a factorization into a unitary matrix and an
|
|
2056
|
+
upper-triangular matrix.
|
|
2057
|
+
|
|
2058
|
+
Applies to any matrix over ``RDF`` or ``CDF``.
|
|
2059
|
+
|
|
2060
|
+
OUTPUT:
|
|
2061
|
+
|
|
2062
|
+
``Q``, ``R`` -- a pair of matrices such that if `A`
|
|
2063
|
+
is the original matrix, then
|
|
2064
|
+
|
|
2065
|
+
.. MATH::
|
|
2066
|
+
|
|
2067
|
+
A = QR, \quad Q^\ast Q = I
|
|
2068
|
+
|
|
2069
|
+
where `R` is upper-triangular. `Q^\ast` is the
|
|
2070
|
+
conjugate-transpose in the complex case, and just
|
|
2071
|
+
the transpose in the real case. So `Q` is a unitary
|
|
2072
|
+
matrix (or rather, orthogonal, in the real case),
|
|
2073
|
+
or equivalently `Q` has orthonormal columns. For a
|
|
2074
|
+
matrix of full rank this factorization is unique
|
|
2075
|
+
up to rescaling the columns of `Q` and the rows of `R`
|
|
2076
|
+
by scalars of modulus `1`. So
|
|
2077
|
+
in the full-rank case, `R` is unique if the diagonal
|
|
2078
|
+
entries are required to be positive real numbers.
|
|
2079
|
+
|
|
2080
|
+
The resulting decomposition is cached.
|
|
2081
|
+
|
|
2082
|
+
ALGORITHM:
|
|
2083
|
+
|
|
2084
|
+
Calls :func:`scipy:scipy.linalg.qr` from SciPy, which is in turn an
|
|
2085
|
+
interface to LAPACK routines.
|
|
2086
|
+
|
|
2087
|
+
EXAMPLES:
|
|
2088
|
+
|
|
2089
|
+
Over the reals, the inverse of ``Q`` is its transpose,
|
|
2090
|
+
since including a conjugate has no effect. In the real
|
|
2091
|
+
case, we say ``Q`` is orthogonal. ::
|
|
2092
|
+
|
|
2093
|
+
sage: # needs scipy
|
|
2094
|
+
sage: A = matrix(RDF, [[-2, 0, -4, -1, -1],
|
|
2095
|
+
....: [-2, 1, -6, -3, -1],
|
|
2096
|
+
....: [1, 1, 7, 4, 5],
|
|
2097
|
+
....: [3, 0, 8, 3, 3],
|
|
2098
|
+
....: [-1, 1, -6, -6, 5]])
|
|
2099
|
+
sage: Q, R = A.QR()
|
|
2100
|
+
|
|
2101
|
+
At this point, ``Q`` is only well-defined up to the
|
|
2102
|
+
signs of its columns, and similarly for ``R`` and its
|
|
2103
|
+
rows, so we normalize them::
|
|
2104
|
+
|
|
2105
|
+
sage: # needs scipy
|
|
2106
|
+
sage: Qnorm = Q._normalize_columns()
|
|
2107
|
+
sage: Rnorm = R._normalize_rows()
|
|
2108
|
+
sage: Qnorm.round(6).zero_at(10^-6)
|
|
2109
|
+
[ 0.458831 0.126051 0.381212 0.394574 0.68744]
|
|
2110
|
+
[ 0.458831 -0.47269 -0.051983 -0.717294 0.220963]
|
|
2111
|
+
[-0.229416 -0.661766 0.661923 0.180872 -0.196411]
|
|
2112
|
+
[-0.688247 -0.189076 -0.204468 -0.09663 0.662889]
|
|
2113
|
+
[ 0.229416 -0.535715 -0.609939 0.536422 -0.024551]
|
|
2114
|
+
sage: Rnorm.round(6).zero_at(10^-6)
|
|
2115
|
+
[ 4.358899 -0.458831 13.076697 6.194225 2.982405]
|
|
2116
|
+
[ 0.0 1.670172 0.598741 -1.29202 6.207997]
|
|
2117
|
+
[ 0.0 0.0 5.444402 5.468661 -0.682716]
|
|
2118
|
+
[ 0.0 0.0 0.0 1.027626 -3.6193]
|
|
2119
|
+
[ 0.0 0.0 0.0 0.0 0.024551]
|
|
2120
|
+
sage: (Q*Q.transpose()) # tol 1e-14
|
|
2121
|
+
[0.9999999999999994 0.0 0.0 0.0 0.0]
|
|
2122
|
+
[ 0.0 1.0 0.0 0.0 0.0]
|
|
2123
|
+
[ 0.0 0.0 0.9999999999999999 0.0 0.0]
|
|
2124
|
+
[ 0.0 0.0 0.0 0.9999999999999998 0.0]
|
|
2125
|
+
[ 0.0 0.0 0.0 0.0 1.0000000000000002]
|
|
2126
|
+
sage: (Q*R - A).zero_at(10^-14)
|
|
2127
|
+
[0.0 0.0 0.0 0.0 0.0]
|
|
2128
|
+
[0.0 0.0 0.0 0.0 0.0]
|
|
2129
|
+
[0.0 0.0 0.0 0.0 0.0]
|
|
2130
|
+
[0.0 0.0 0.0 0.0 0.0]
|
|
2131
|
+
[0.0 0.0 0.0 0.0 0.0]
|
|
2132
|
+
|
|
2133
|
+
Now over the complex numbers, demonstrating that the SciPy libraries
|
|
2134
|
+
are (properly) using the Hermitian inner product, so that ``Q`` is
|
|
2135
|
+
a unitary matrix (its inverse is the conjugate-transpose). ::
|
|
2136
|
+
|
|
2137
|
+
sage: # needs scipy
|
|
2138
|
+
sage: A = matrix(CDF, [[-8, 4*I + 1, -I + 2, 2*I + 1],
|
|
2139
|
+
....: [1, -2*I - 1, -I + 3, -I + 1],
|
|
2140
|
+
....: [I + 7, 2*I + 1, -2*I + 7, -I + 1],
|
|
2141
|
+
....: [I + 2, 0, I + 12, -1]])
|
|
2142
|
+
sage: Q, R = A.QR()
|
|
2143
|
+
sage: Q._normalize_columns() # tol 1e-6
|
|
2144
|
+
[ 0.7302967433402214 0.20705664550556482 + 0.5383472783144685*I 0.24630498099986423 - 0.07644563587232917*I 0.23816176831943323 - 0.10365960327796941*I]
|
|
2145
|
+
[ -0.09128709291752768 -0.20705664550556482 - 0.37787837804765584*I 0.37865595338630315 - 0.19522214955246678*I 0.7012444502144682 - 0.36437116509865947*I]
|
|
2146
|
+
[ -0.6390096504226938 - 0.09128709291752768*I 0.17082173254209104 + 0.6677576817554466*I -0.03411475806452064 + 0.040901987417671426*I 0.31401710855067644 - 0.08251917187054114*I]
|
|
2147
|
+
[ -0.18257418583505536 - 0.09128709291752768*I -0.03623491296347384 + 0.07246982592694771*I 0.8632284069415112 + 0.06322839976356195*I -0.44996948676115206 - 0.01161191812089182*I]
|
|
2148
|
+
sage: R._normalize_rows().zero_at(1e-15) # tol 1e-6
|
|
2149
|
+
[ 10.954451150103322 -1.9170289512680814*I 5.385938482134133 - 2.1908902300206643*I -0.2738612787525829 - 2.1908902300206643*I]
|
|
2150
|
+
[ 0.0 4.8295962564173 -0.8696379111233719 - 5.864879483945123*I 0.993871898426711 - 0.30540855212070794*I]
|
|
2151
|
+
[ 0.0 0.0 12.00160760935814 -0.2709533402297273 + 0.4420629644486325*I]
|
|
2152
|
+
[ 0.0 0.0 0.0 1.9429639442589917]
|
|
2153
|
+
sage: (Q.conjugate().transpose()*Q).zero_at(1e-15) # tol 1e-15
|
|
2154
|
+
[ 1.0 0.0 0.0 0.0]
|
|
2155
|
+
[ 0.0 0.9999999999999994 0.0 0.0]
|
|
2156
|
+
[ 0.0 0.0 1.0000000000000002 0.0]
|
|
2157
|
+
[ 0.0 0.0 0.0 1.0000000000000004]
|
|
2158
|
+
sage: (Q*R - A).zero_at(10^-14)
|
|
2159
|
+
[0.0 0.0 0.0 0.0]
|
|
2160
|
+
[0.0 0.0 0.0 0.0]
|
|
2161
|
+
[0.0 0.0 0.0 0.0]
|
|
2162
|
+
[0.0 0.0 0.0 0.0]
|
|
2163
|
+
|
|
2164
|
+
An example of a rectangular matrix that is also rank-deficient.
|
|
2165
|
+
If you run this example yourself, you may see a very small, nonzero
|
|
2166
|
+
entry in the third row, third column, even though the exact
|
|
2167
|
+
version of the matrix has rank 2. The final two columns of ``Q``
|
|
2168
|
+
span the left kernel of ``A`` (as evidenced by the two zero rows of
|
|
2169
|
+
``R``). Different platforms will compute different bases for this
|
|
2170
|
+
left kernel, so we do not exhibit the actual matrix. ::
|
|
2171
|
+
|
|
2172
|
+
sage: # needs scipy
|
|
2173
|
+
sage: Arat = matrix(QQ, [[2, -3, 3],
|
|
2174
|
+
....: [-1, 1, -1],
|
|
2175
|
+
....: [-1, 3, -3],
|
|
2176
|
+
....: [-5, 1, -1]])
|
|
2177
|
+
sage: Arat.rank()
|
|
2178
|
+
2
|
|
2179
|
+
sage: A = Arat.change_ring(CDF)
|
|
2180
|
+
sage: Q, R = A.QR()
|
|
2181
|
+
sage: R._normalize_rows() # abs tol 1e-14
|
|
2182
|
+
[ 5.567764362830022 -2.6940795304016243 2.6940795304016243]
|
|
2183
|
+
[ 0.0 3.5695847775155825 -3.5695847775155825]
|
|
2184
|
+
[ 0.0 0.0 2.4444034681064287e-16]
|
|
2185
|
+
[ 0.0 0.0 0.0]
|
|
2186
|
+
sage: (Q.conjugate_transpose()*Q) # abs tol 1e-14
|
|
2187
|
+
[ 1.0000000000000002 -5.185196889911925e-17 -4.1457180570414476e-17 -2.909388767229071e-17]
|
|
2188
|
+
[ -5.185196889911925e-17 1.0000000000000002 -9.286869233696149e-17 -1.1035822863186828e-16]
|
|
2189
|
+
[-4.1457180570414476e-17 -9.286869233696149e-17 1.0 4.4159215672155694e-17]
|
|
2190
|
+
[ -2.909388767229071e-17 -1.1035822863186828e-16 4.4159215672155694e-17 1.0]
|
|
2191
|
+
|
|
2192
|
+
Results are cached, meaning they are immutable matrices.
|
|
2193
|
+
Make a copy if you need to manipulate a result. ::
|
|
2194
|
+
|
|
2195
|
+
sage: # needs scipy
|
|
2196
|
+
sage: A = random_matrix(CDF, 2, 2)
|
|
2197
|
+
sage: Q, R = A.QR()
|
|
2198
|
+
sage: Q.is_mutable()
|
|
2199
|
+
False
|
|
2200
|
+
sage: R.is_mutable()
|
|
2201
|
+
False
|
|
2202
|
+
sage: Q[0,0] = 0
|
|
2203
|
+
Traceback (most recent call last):
|
|
2204
|
+
...
|
|
2205
|
+
ValueError: matrix is immutable; please change a copy instead (i.e., use copy(M) to change a copy of M).
|
|
2206
|
+
sage: Qcopy = copy(Q)
|
|
2207
|
+
sage: Qcopy[0,0] = 679
|
|
2208
|
+
sage: Qcopy[0,0]
|
|
2209
|
+
679.0
|
|
2210
|
+
|
|
2211
|
+
TESTS:
|
|
2212
|
+
|
|
2213
|
+
Trivial cases return trivial results of the correct size,
|
|
2214
|
+
and we check ``Q`` itself in one case, verifying a fix for
|
|
2215
|
+
:issue:`10795`. ::
|
|
2216
|
+
|
|
2217
|
+
sage: # needs scipy
|
|
2218
|
+
sage: A = zero_matrix(RDF, 0, 10)
|
|
2219
|
+
sage: Q, R = A.QR()
|
|
2220
|
+
sage: Q.nrows(), Q.ncols()
|
|
2221
|
+
(0, 0)
|
|
2222
|
+
sage: R.nrows(), R.ncols()
|
|
2223
|
+
(0, 10)
|
|
2224
|
+
sage: A = zero_matrix(RDF, 3, 0)
|
|
2225
|
+
sage: Q, R = A.QR()
|
|
2226
|
+
sage: Q.nrows(), Q.ncols()
|
|
2227
|
+
(3, 3)
|
|
2228
|
+
sage: R.nrows(), R.ncols()
|
|
2229
|
+
(3, 0)
|
|
2230
|
+
sage: Q
|
|
2231
|
+
[1.0 0.0 0.0]
|
|
2232
|
+
[0.0 1.0 0.0]
|
|
2233
|
+
[0.0 0.0 1.0]
|
|
2234
|
+
"""
|
|
2235
|
+
global scipy
|
|
2236
|
+
cdef Matrix_double_dense Q,R
|
|
2237
|
+
|
|
2238
|
+
if self._nrows == 0 or self._ncols == 0:
|
|
2239
|
+
return self.new_matrix(self._nrows, self._nrows, entries=1), self.new_matrix()
|
|
2240
|
+
|
|
2241
|
+
QR = self.fetch('QR_factors')
|
|
2242
|
+
if QR is None:
|
|
2243
|
+
Q = self._new(self._nrows, self._nrows)
|
|
2244
|
+
R = self._new(self._nrows, self._ncols)
|
|
2245
|
+
if scipy is None:
|
|
2246
|
+
import scipy
|
|
2247
|
+
import scipy.linalg
|
|
2248
|
+
Q._matrix_numpy, R._matrix_numpy = scipy.linalg.qr(self._matrix_numpy)
|
|
2249
|
+
Q.set_immutable()
|
|
2250
|
+
R.set_immutable()
|
|
2251
|
+
QR = (Q, R)
|
|
2252
|
+
self.cache('QR_factors', QR)
|
|
2253
|
+
return QR
|
|
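# A sketch of the normalization mentioned in the OUTPUT block above: for a
# full-rank matrix, rescaling the columns of Q and the rows of R by scalars of
# modulus 1 so that diag(R) is positive pins down the factorization.  Plain
# NumPy/SciPy, outside Sage; this is illustrative, not the module's
# _normalize_columns()/_normalize_rows() helpers.
import numpy as np
from scipy.linalg import qr

A = np.array([[-2.0, 0.0], [-2.0, 1.0], [1.0, 1.0]])
Q, R = qr(A, mode='economic')

d = np.sign(np.diag(R))          # unit-modulus factors (here +/-1)
Q, R = Q * d, d[:, None] * R     # Q*R is unchanged since d*d = 1

assert np.all(np.diag(R) > 0)
assert np.linalg.norm(Q @ R - A) < 1e-12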
2254
|
+
|
|
2255
|
+
def is_unitary(self, tol=1e-12, algorithm='orthonormal'):
|
|
2256
|
+
r"""
|
|
2257
|
+
Return ``True`` if the columns of the matrix are an orthonormal basis.
|
|
2258
|
+
|
|
2259
|
+
For a matrix with real entries this determines if a matrix is
|
|
2260
|
+
"orthogonal" and for a matrix with complex entries this determines
|
|
2261
|
+
if the matrix is "unitary."
|
|
2262
|
+
|
|
2263
|
+
INPUT:
|
|
2264
|
+
|
|
2265
|
+
- ``tol`` -- (default: ``1e-12``) the largest value of the
|
|
2266
|
+
absolute value of the difference between two matrix entries
|
|
2267
|
+
for which they will still be considered equal
|
|
2268
|
+
|
|
2269
|
+
- ``algorithm`` -- (default: ``'orthonormal'``) set to
|
|
2270
|
+
``'orthonormal'`` for a stable procedure and set to ``'naive'`` for a
|
|
2271
|
+
fast procedure
|
|
2272
|
+
|
|
2273
|
+
OUTPUT:
|
|
2274
|
+
|
|
2275
|
+
``True`` if the matrix is square and its conjugate-transpose is
|
|
2276
|
+
its inverse, and ``False`` otherwise. In other words, a matrix
|
|
2277
|
+
is orthogonal or unitary if the product of its conjugate-transpose
|
|
2278
|
+
times the matrix is the identity matrix.
|
|
2279
|
+
|
|
2280
|
+
The tolerance parameter is used to allow for numerical values
|
|
2281
|
+
to be equal if there is a slight difference due to round-off
|
|
2282
|
+
and other imprecisions.
|
|
2283
|
+
|
|
2284
|
+
The result is cached, on a per-tolerance and per-algorithm basis.
|
|
2285
|
+
|
|
2286
|
+
ALGORITHMS:
|
|
2287
|
+
|
|
2288
|
+
The naive algorithm simply computes the product of the
|
|
2289
|
+
conjugate-transpose with the matrix and compares the entries
|
|
2290
|
+
to the identity matrix, with equality controlled by the
|
|
2291
|
+
tolerance parameter.
|
|
2292
|
+
|
|
2293
|
+
The orthonormal algorithm first computes a Schur decomposition
|
|
2294
|
+
(via the :meth:`schur` method) and checks that the result is a
|
|
2295
|
+
diagonal matrix with entries of modulus 1, which is equivalent to
|
|
2296
|
+
being unitary.
|
|
2297
|
+
|
|
2298
|
+
So the naive algorithm might finish fairly quickly for a matrix
|
|
2299
|
+
that is not unitary, once the product has been computed.
|
|
2300
|
+
However, the orthonormal algorithm will compute a Schur
|
|
2301
|
+
decomposition before going through a similar check of a
|
|
2302
|
+
matrix entry-by-entry.
|
|
2303
|
+
|
|
2304
|
+
EXAMPLES:
|
|
2305
|
+
|
|
2306
|
+
A matrix that is far from unitary. ::
|
|
2307
|
+
|
|
2308
|
+
sage: # needs scipy
|
|
2309
|
+
sage: A = matrix(RDF, 4, range(16))
|
|
2310
|
+
sage: A.conjugate().transpose()*A
|
|
2311
|
+
[224.0 248.0 272.0 296.0]
|
|
2312
|
+
[248.0 276.0 304.0 332.0]
|
|
2313
|
+
[272.0 304.0 336.0 368.0]
|
|
2314
|
+
[296.0 332.0 368.0 404.0]
|
|
2315
|
+
sage: A.is_unitary()
|
|
2316
|
+
False
|
|
2317
|
+
sage: A.is_unitary(algorithm='naive')
|
|
2318
|
+
False
|
|
2319
|
+
sage: A.is_unitary(algorithm='orthonormal')
|
|
2320
|
+
False
|
|
2321
|
+
|
|
2322
|
+
The QR decomposition will produce a unitary matrix as Q and the
|
|
2323
|
+
SVD decomposition will create two unitary matrices, U and V. ::
|
|
2324
|
+
|
|
2325
|
+
sage: # needs scipy sage.symbolic
|
|
2326
|
+
sage: A = matrix(CDF, [[ 1 - I, -3*I, -2 + I, 1, -2 + 3*I],
|
|
2327
|
+
....: [ 1 - I, -2 + I, 1 + 4*I, 0, 2 + I],
|
|
2328
|
+
....: [ -1, -5 + I, -2 + I, 1 + I, -5 - 4*I],
|
|
2329
|
+
....: [-2 + 4*I, 2 - I, 8 - 4*I, 1 - 8*I, 3 - 2*I]])
|
|
2330
|
+
sage: Q, R = A.QR()
|
|
2331
|
+
sage: Q.is_unitary()
|
|
2332
|
+
True
|
|
2333
|
+
sage: U, S, V = A.SVD()
|
|
2334
|
+
sage: U.is_unitary(algorithm='naive')
|
|
2335
|
+
True
|
|
2336
|
+
sage: U.is_unitary(algorithm='orthonormal')
|
|
2337
|
+
True
|
|
2338
|
+
sage: V.is_unitary(algorithm='naive')
|
|
2339
|
+
True
|
|
2340
|
+
|
|
2341
|
+
If we make the tolerance too strict we can get misleading results. ::
|
|
2342
|
+
|
|
2343
|
+
sage: # needs scipy
|
|
2344
|
+
sage: A = matrix(RDF, 10, 10, [1/(i+j+1) for i in range(10) for j in range(10)])
|
|
2345
|
+
sage: Q, R = A.QR()
|
|
2346
|
+
sage: Q.is_unitary(algorithm='naive', tol=1e-16)
|
|
2347
|
+
False
|
|
2348
|
+
sage: Q.is_unitary(algorithm='orthonormal', tol=1e-17)
|
|
2349
|
+
False
|
|
2350
|
+
|
|
2351
|
+
Rectangular matrices are not unitary/orthogonal, even if their
|
|
2352
|
+
columns form an orthonormal set. ::
|
|
2353
|
+
|
|
2354
|
+
sage: A = matrix(CDF, [[1,0], [0,0], [0,1]])
|
|
2355
|
+
sage: A.is_unitary() # needs scipy
|
|
2356
|
+
False
|
|
2357
|
+
|
|
2358
|
+
The smallest cases::
|
|
2359
|
+
|
|
2360
|
+
sage: P = matrix(CDF, 0, 0)
|
|
2361
|
+
sage: P.is_unitary(algorithm='naive') # needs scipy
|
|
2362
|
+
True
|
|
2363
|
+
|
|
2364
|
+
sage: P = matrix(CDF, 1, 1, [1])
|
|
2365
|
+
sage: P.is_unitary(algorithm='orthonormal') # needs scipy
|
|
2366
|
+
True
|
|
2367
|
+
|
|
2368
|
+
sage: P = matrix(CDF, 0, 0)
|
|
2369
|
+
sage: P.is_unitary(algorithm='orthonormal') # needs scipy
|
|
2370
|
+
True
|
|
2371
|
+
|
|
2372
|
+
TESTS::
|
|
2373
|
+
|
|
2374
|
+
sage: P = matrix(CDF, 2, 2)
|
|
2375
|
+
sage: P.is_unitary(tol='junk')
|
|
2376
|
+
Traceback (most recent call last):
|
|
2377
|
+
...
|
|
2378
|
+
TypeError: tolerance must be a real number, not junk
|
|
2379
|
+
|
|
2380
|
+
sage: P.is_unitary(tol=-0.3)
|
|
2381
|
+
Traceback (most recent call last):
|
|
2382
|
+
...
|
|
2383
|
+
ValueError: tolerance must be positive, not -0.3
|
|
2384
|
+
|
|
2385
|
+
sage: P.is_unitary(algorithm='junk')
|
|
2386
|
+
Traceback (most recent call last):
|
|
2387
|
+
...
|
|
2388
|
+
ValueError: algorithm must be 'naive' or 'orthonormal', not junk
|
|
2389
|
+
|
|
2390
|
+
|
|
2391
|
+
AUTHOR:
|
|
2392
|
+
|
|
2393
|
+
- Rob Beezer (2011-05-04)
|
|
2394
|
+
"""
|
|
2395
|
+
if self.dimensions() == (0,0):
|
|
2396
|
+
# The "orthonormal" algorithm would otherwise fail in this
|
|
2397
|
+
# corner case. Returning ``True`` is consistent with the
|
|
2398
|
+
# other implementations of this method.
|
|
2399
|
+
return True
|
|
2400
|
+
|
|
2401
|
+
global numpy
|
|
2402
|
+
try:
|
|
2403
|
+
tol = float(tol)
|
|
2404
|
+
except Exception:
|
|
2405
|
+
raise TypeError('tolerance must be a real number, not {0}'.format(tol))
|
|
2406
|
+
if tol <= 0:
|
|
2407
|
+
raise ValueError('tolerance must be positive, not {0}'.format(tol))
|
|
2408
|
+
if algorithm not in ['naive', 'orthonormal']:
|
|
2409
|
+
raise ValueError("algorithm must be 'naive' or 'orthonormal', not {0}".format(algorithm))
|
|
2410
|
+
key = 'unitary_{0}_{1}'.format(algorithm, tol)
|
|
2411
|
+
b = self.fetch(key)
|
|
2412
|
+
if b is not None:
|
|
2413
|
+
return b
|
|
2414
|
+
if not self.is_square():
|
|
2415
|
+
self.cache(key, False)
|
|
2416
|
+
return False
|
|
2417
|
+
if numpy is None:
|
|
2418
|
+
import numpy
|
|
2419
|
+
cdef Py_ssize_t i, j
|
|
2420
|
+
cdef Matrix_double_dense T, P
|
|
2421
|
+
if algorithm == 'orthonormal':
|
|
2422
|
+
# Schur decomposition over CDF will be unitary
|
|
2423
|
+
# iff diagonal with unit modulus entries
|
|
2424
|
+
_, T = self.schur(base_ring=sage.rings.complex_double.CDF)
|
|
2425
|
+
unitary = T._is_lower_triangular(tol)
|
|
2426
|
+
if unitary:
|
|
2427
|
+
for 0 <= i < self._nrows:
|
|
2428
|
+
if abs(abs(T.get_unsafe(i,i)) - 1) > tol:
|
|
2429
|
+
unitary = False
|
|
2430
|
+
break
|
|
2431
|
+
elif algorithm == 'naive':
|
|
2432
|
+
P = self.conjugate().transpose()*self
|
|
2433
|
+
unitary = True
|
|
2434
|
+
for i from 0 <= i < self._nrows:
|
|
2435
|
+
# off-diagonal, since P is Hermitian
|
|
2436
|
+
for j from 0 <= j < i:
|
|
2437
|
+
if abs(P.get_unsafe(i,j)) > tol:
|
|
2438
|
+
unitary = False
|
|
2439
|
+
break
|
|
2440
|
+
# at diagonal
|
|
2441
|
+
if abs(P.get_unsafe(i,i) - 1) > tol:
|
|
2442
|
+
unitary = False
|
|
2443
|
+
if not unitary:
|
|
2444
|
+
break
|
|
2445
|
+
self.cache(key, unitary)
|
|
2446
|
+
return unitary
|
|
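# A minimal sketch of the 'naive' unitarity test described above: form Q^H * Q
# and compare it with the identity entry by entry within the tolerance.  Plain
# NumPy, outside Sage; is_unitary_naive is an illustrative helper, not part of
# this module.
import numpy as np

def is_unitary_naive(Q, tol=1e-12):
    Q = np.asarray(Q)
    if Q.ndim != 2 or Q.shape[0] != Q.shape[1]:
        return False               # rectangular matrices are never unitary
    P = Q.conj().T @ Q
    return bool(np.all(np.abs(P - np.eye(Q.shape[0])) <= tol))

theta = 0.3
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
assert is_unitary_naive(rot)
assert not is_unitary_naive(np.arange(4.0).reshape(2, 2))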
2447
|
+
|
|
2448
|
+
def _is_hermitian_orthonormal(self, tol=1e-12, skew=False):
|
|
2449
|
+
r"""
|
|
2450
|
+
Return ``True`` if the matrix is (skew-)Hermitian.
|
|
2451
|
+
|
|
2452
|
+
For internal use. This helper is called by the ``is_hermitian``
|
|
2453
|
+
and ``is_skew_hermitian`` methods.
|
|
2454
|
+
|
|
2455
|
+
INPUT:
|
|
2456
|
+
|
|
2457
|
+
- ``tol`` -- (default: ``1e-12``) the largest value of the
|
|
2458
|
+
absolute value of the difference between two matrix entries
|
|
2459
|
+
for which they will still be considered equal
|
|
2460
|
+
|
|
2461
|
+
- ``skew`` -- (default: ``False``) specifies the type of the
|
|
2462
|
+
test. Set to ``True`` to check whether the matrix is
|
|
2463
|
+
skew-Hermitian.
|
|
2464
|
+
|
|
2465
|
+
OUTPUT:
|
|
2466
|
+
|
|
2467
|
+
``True`` if the matrix is square and (skew-)Hermitian, and
|
|
2468
|
+
``False`` otherwise.
|
|
2469
|
+
|
|
2470
|
+
|
|
2471
|
+
Note that if conjugation has no effect on elements of the base
|
|
2472
|
+
ring (such as for integers), then the :meth:`is_(skew_)symmetric`
|
|
2473
|
+
method is equivalent and faster.
|
|
2474
|
+
|
|
2475
|
+
The tolerance parameter is used to allow for numerical values
|
|
2476
|
+
to be equal if there is a slight difference due to round-off
|
|
2477
|
+
and other imprecisions.
|
|
2478
|
+
|
|
2479
|
+
The result is cached, on a per-tolerance basis.
|
|
2480
|
+
|
|
2481
|
+
ALGORITHMS:
|
|
2482
|
+
|
|
2483
|
+
The orthonormal algorithm first computes a Schur decomposition
|
|
2484
|
+
(via the :meth:`schur` method) and checks that the result is a
|
|
2485
|
+
diagonal matrix with real entries.
|
|
2486
|
+
|
|
2487
|
+
EXAMPLES::
|
|
2488
|
+
|
|
2489
|
+
sage: # needs scipy sage.symbolic
|
|
2490
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2491
|
+
....: [-3 - I, -4*I, -2],
|
|
2492
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2493
|
+
sage: A._is_hermitian_orthonormal()
|
|
2494
|
+
False
|
|
2495
|
+
sage: B = A*A.conjugate_transpose()
|
|
2496
|
+
sage: B._is_hermitian_orthonormal()
|
|
2497
|
+
True
|
|
2498
|
+
|
|
2499
|
+
A matrix that is nearly Hermitian, but for one non-real
|
|
2500
|
+
diagonal entry::
|
|
2501
|
+
|
|
2502
|
+
sage: # needs scipy sage.symbolic
|
|
2503
|
+
sage: A = matrix(CDF, [[ 2, 2-I, 1+4*I],
|
|
2504
|
+
....: [ 2+I, 3+I, 2-6*I],
|
|
2505
|
+
....: [1-4*I, 2+6*I, 5]])
|
|
2506
|
+
sage: A._is_hermitian_orthonormal()
|
|
2507
|
+
False
|
|
2508
|
+
sage: A[1,1] = 132
|
|
2509
|
+
sage: A._is_hermitian_orthonormal()
|
|
2510
|
+
True
|
|
2511
|
+
|
|
2512
|
+
A square, empty matrix is trivially Hermitian::
|
|
2513
|
+
|
|
2514
|
+
sage: A = matrix(RDF, 0, 0)
|
|
2515
|
+
sage: A._is_hermitian_orthonormal() # needs scipy sage.symbolic
|
|
2516
|
+
True
|
|
2517
|
+
|
|
2518
|
+
Rectangular matrices are never Hermitian::
|
|
2519
|
+
|
|
2520
|
+
sage: A = matrix(CDF, 3, 4)
|
|
2521
|
+
sage: A._is_hermitian_orthonormal() # needs scipy sage.symbolic
|
|
2522
|
+
False
|
|
2523
|
+
|
|
2524
|
+
A matrix that is skew-Hermitian::
|
|
2525
|
+
|
|
2526
|
+
sage: # needs scipy sage.symbolic
|
|
2527
|
+
sage: A = matrix(CDF, [[-I, 2.0+I], [-2.0+I, 0.0]])
|
|
2528
|
+
sage: A._is_hermitian_orthonormal()
|
|
2529
|
+
False
|
|
2530
|
+
sage: A._is_hermitian_orthonormal(skew=True)
|
|
2531
|
+
True
|
|
2532
|
+
|
|
2533
|
+
AUTHOR:
|
|
2534
|
+
|
|
2535
|
+
- Rob Beezer (2011-03-30)
|
|
2536
|
+
"""
|
|
2537
|
+
import sage.rings.complex_double
|
|
2538
|
+
global numpy
|
|
2539
|
+
tol = float(tol)
|
|
2540
|
+
|
|
2541
|
+
key = ("_is_hermitian_orthonormal", tol, skew)
|
|
2542
|
+
h = self.fetch(key)
|
|
2543
|
+
if h is not None:
|
|
2544
|
+
return h
|
|
2545
|
+
if not self.is_square():
|
|
2546
|
+
self.cache(key, False)
|
|
2547
|
+
return False
|
|
2548
|
+
if self._nrows == 0:
|
|
2549
|
+
self.cache(key, True)
|
|
2550
|
+
return True
|
|
2551
|
+
if numpy is None:
|
|
2552
|
+
import numpy
|
|
2553
|
+
cdef Py_ssize_t i
|
|
2554
|
+
cdef Matrix_double_dense T
|
|
2555
|
+
# A matrix M is skew-hermitian iff I*M is hermitian
|
|
2556
|
+
T = self.__mul__(1j) if skew else self.__copy__()
|
|
2557
|
+
|
|
2558
|
+
# Schur decomposition over CDF will be diagonal and real iff Hermitian
|
|
2559
|
+
_, T = T.schur(base_ring=sage.rings.complex_double.CDF)
|
|
2560
|
+
hermitian = T._is_lower_triangular(tol)
|
|
2561
|
+
if hermitian:
|
|
2562
|
+
for i in range(T._nrows):
|
|
2563
|
+
if abs(T.get_unsafe(i, i).imag()) > tol:
|
|
2564
|
+
hermitian = False
|
|
2565
|
+
break
|
|
2566
|
+
self.cache(key, hermitian)
|
|
2567
|
+
return hermitian
|
|
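# A sketch of the identity used above: M is skew-Hermitian exactly when i*M is
# Hermitian, since (i*M)^H = -i*M^H equals i*M iff M^H = -M.  Plain NumPy,
# outside Sage.
import numpy as np

M = np.array([[-1j, 2.0 + 1j],
              [-2.0 + 1j, 0.0]])                    # skew-Hermitian

assert np.allclose(M.conj().T, -M)                  # M^H = -M
assert np.allclose((1j * M).conj().T, 1j * M)       # i*M is Hermitian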
2568
|
+
|
|
2569
|
+
def is_hermitian(self, tol=1e-12, algorithm="naive"):
|
|
2570
|
+
r"""
|
|
2571
|
+
Return ``True`` if the matrix is equal to its conjugate-transpose.
|
|
2572
|
+
|
|
2573
|
+
INPUT:
|
|
2574
|
+
|
|
2575
|
+
- ``tol`` -- (default: ``1e-12``) the largest value of the
|
|
2576
|
+
absolute value of the difference between two matrix entries
|
|
2577
|
+
for which they will still be considered equal.
|
|
2578
|
+
|
|
2579
|
+
- ``algorithm`` -- string (default: ``'naive'``); either ``'naive'``
|
|
2580
|
+
or ``'orthonormal'``
|
|
2581
|
+
|
|
2582
|
+
OUTPUT:
|
|
2583
|
+
|
|
2584
|
+
``True`` if the matrix is square and equal to the transpose with
|
|
2585
|
+
every entry conjugated, and ``False`` otherwise.
|
|
2586
|
+
|
|
2587
|
+
Note that if conjugation has no effect on elements of the base
|
|
2588
|
+
ring (such as for integers), then the :meth:`is_symmetric`
|
|
2589
|
+
method is equivalent and faster.
|
|
2590
|
+
|
|
2591
|
+
The tolerance parameter is used to allow for numerical values
|
|
2592
|
+
to be equal if there is a slight difference due to round-off
|
|
2593
|
+
and other imprecisions.
|
|
2594
|
+
|
|
2595
|
+
The result is cached, on a per-tolerance and per-algorithm basis.
|
|
2596
|
+
|
|
2597
|
+
ALGORITHMS:
|
|
2598
|
+
|
|
2599
|
+
The naive algorithm simply compares corresponding entries on either
|
|
2600
|
+
side of the diagonal (and on the diagonal itself) to see if they are
|
|
2601
|
+
conjugates, with equality controlled by the tolerance parameter.
|
|
2602
|
+
|
|
2603
|
+
The orthonormal algorithm first computes a Schur decomposition
|
|
2604
|
+
(via the :meth:`schur` method) and checks that the result is a
|
|
2605
|
+
diagonal matrix with real entries.
|
|
2606
|
+
|
|
2607
|
+
So the naive algorithm can finish quickly for a matrix that is not
|
|
2608
|
+
Hermitian, while the orthonormal algorithm will always compute a
|
|
2609
|
+
Schur decomposition before going through a similar check of the matrix
|
|
2610
|
+
entry-by-entry.
|
|
2611
|
+
|
|
2612
|
+
EXAMPLES::
|
|
2613
|
+
|
|
2614
|
+
sage: # needs scipy sage.symbolic
|
|
2615
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2616
|
+
....: [-3 - I, -4*I, -2],
|
|
2617
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2618
|
+
sage: A.is_hermitian(algorithm='orthonormal')
|
|
2619
|
+
False
|
|
2620
|
+
sage: A.is_hermitian(algorithm='naive')
|
|
2621
|
+
False
|
|
2622
|
+
sage: B = A*A.conjugate_transpose()
|
|
2623
|
+
sage: B.is_hermitian(algorithm='orthonormal')
|
|
2624
|
+
True
|
|
2625
|
+
sage: B.is_hermitian(algorithm='naive')
|
|
2626
|
+
True
|
|
2627
|
+
|
|
2628
|
+
A matrix that is nearly Hermitian, but for one non-real
|
|
2629
|
+
diagonal entry. ::
|
|
2630
|
+
|
|
2631
|
+
sage: # needs scipy sage.symbolic
|
|
2632
|
+
sage: A = matrix(CDF, [[ 2, 2-I, 1+4*I],
|
|
2633
|
+
....: [ 2+I, 3+I, 2-6*I],
|
|
2634
|
+
....: [1-4*I, 2+6*I, 5]])
|
|
2635
|
+
sage: A.is_hermitian(algorithm='orthonormal')
|
|
2636
|
+
False
|
|
2637
|
+
sage: A[1,1] = 132
|
|
2638
|
+
sage: A.is_hermitian(algorithm='orthonormal')
|
|
2639
|
+
True
|
|
2640
|
+
|
|
2641
|
+
We get a unitary matrix from the SVD routine and use this
|
|
2642
|
+
numerical matrix to create a matrix that should be Hermitian
|
|
2643
|
+
(indeed it should be the identity matrix), but with some
|
|
2644
|
+
imprecision. We use this to illustrate that if the tolerance
|
|
2645
|
+
is set too small, then we can be too strict about the equality
|
|
2646
|
+
of entries and may achieve the wrong result (depending on
|
|
2647
|
+
the system)::
|
|
2648
|
+
|
|
2649
|
+
sage: # needs scipy sage.symbolic
|
|
2650
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2651
|
+
....: [-3 - I, -4*I, -2],
|
|
2652
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2653
|
+
sage: U, _, _ = A.SVD()
|
|
2654
|
+
sage: B = U*U.conjugate_transpose()
|
|
2655
|
+
sage: B.is_hermitian(algorithm='naive')
|
|
2656
|
+
True
|
|
2657
|
+
sage: B.is_hermitian(algorithm='naive', tol=1.0e-17) # random
|
|
2658
|
+
False
|
|
2659
|
+
sage: B.is_hermitian(algorithm='naive', tol=1.0e-15)
|
|
2660
|
+
True
|
|
2661
|
+
|
|
2662
|
+
A square, empty matrix is trivially Hermitian. ::
|
|
2663
|
+
|
|
2664
|
+
sage: A = matrix(RDF, 0, 0)
|
|
2665
|
+
sage: A.is_hermitian() # needs scipy
|
|
2666
|
+
True
|
|
2667
|
+
|
|
2668
|
+
Rectangular matrices are never Hermitian, no matter which
|
|
2669
|
+
algorithm is requested. ::
|
|
2670
|
+
|
|
2671
|
+
sage: A = matrix(CDF, 3, 4)
|
|
2672
|
+
sage: A.is_hermitian() # needs scipy
|
|
2673
|
+
False
|
|
2674
|
+
|
|
2675
|
+
TESTS:
|
|
2676
|
+
|
|
2677
|
+
The ``algorithm`` keyword gets checked. ::
|
|
2678
|
+
|
|
2679
|
+
sage: A = matrix(RDF, 2, range(4))
|
|
2680
|
+
sage: A.is_hermitian(algorithm='junk')
|
|
2681
|
+
Traceback (most recent call last):
|
|
2682
|
+
...
|
|
2683
|
+
ValueError: algorithm must be 'naive' or 'orthonormal', not junk
|
|
2684
|
+
|
|
2685
|
+
AUTHOR:
|
|
2686
|
+
|
|
2687
|
+
- Rob Beezer (2011-03-30)
|
|
2688
|
+
"""
|
|
2689
|
+
if algorithm == "naive":
|
|
2690
|
+
return super()._is_hermitian(skew=False, tolerance=tol)
|
|
2691
|
+
elif algorithm == "orthonormal":
|
|
2692
|
+
return self._is_hermitian_orthonormal(tol=tol, skew=False)
|
|
2693
|
+
else:
|
|
2694
|
+
raise ValueError("algorithm must be 'naive' or 'orthonormal', not {0}".format(algorithm))
|
|
2695
|
+
|
|
2696
|
+
def is_skew_hermitian(self, tol=1e-12, algorithm='orthonormal'):
|
|
2697
|
+
r"""
|
|
2698
|
+
Return ``True`` if the matrix is equal to the negative of its
|
|
2699
|
+
conjugate transpose.
|
|
2700
|
+
|
|
2701
|
+
INPUT:
|
|
2702
|
+
|
|
2703
|
+
- ``tol`` -- (default: ``1e-12``) the largest value of the
|
|
2704
|
+
absolute value of the difference between two matrix entries
|
|
2705
|
+
for which they will still be considered equal.
|
|
2706
|
+
|
|
2707
|
+
- ``algorithm`` -- (default: ``'orthonormal'``) set to
|
|
2708
|
+
``'orthonormal'`` for a stable procedure and set to ``'naive'`` for a
|
|
2709
|
+
fast procedure
|
|
2710
|
+
|
|
2711
|
+
OUTPUT:
|
|
2712
|
+
|
|
2713
|
+
``True`` if the matrix is square and equal to the negative of
|
|
2714
|
+
its conjugate transpose, and ``False`` otherwise.
|
|
2715
|
+
|
|
2716
|
+
Note that if conjugation has no effect on elements of the base
|
|
2717
|
+
ring (such as for integers), then the :meth:`is_skew_symmetric`
|
|
2718
|
+
method is equivalent and faster.
|
|
2719
|
+
|
|
2720
|
+
The tolerance parameter is used to allow for numerical values
|
|
2721
|
+
to be equal if there is a slight difference due to round-off
|
|
2722
|
+
and other imprecisions.
|
|
2723
|
+
|
|
2724
|
+
The result is cached, on a per-tolerance and per-algorithm basis.
|
|
2725
|
+
|
|
2726
|
+
ALGORITHMS:
|
|
2727
|
+
|
|
2728
|
+
The naive algorithm simply compares corresponding entries on either
|
|
2729
|
+
side of the diagonal (and on the diagonal itself) to see if they are
|
|
2730
|
+
conjugates, with equality controlled by the tolerance parameter.
|
|
2731
|
+
|
|
2732
|
+
The orthonormal algorithm first computes a Schur decomposition
|
|
2733
|
+
(via the :meth:`schur` method) and checks that the result is a
|
|
2734
|
+
diagonal matrix with real entries.
|
|
2735
|
+
|
|
2736
|
+
So the naive algorithm can finish quickly for a matrix that is not
|
|
2737
|
+
Hermitian, while the orthonormal algorithm will always compute a
|
|
2738
|
+
Schur decomposition before going through a similar check of the matrix
|
|
2739
|
+
entry-by-entry.
|
|
2740
|
+
|
|
2741
|
+
EXAMPLES::
|
|
2742
|
+
|
|
2743
|
+
sage: # needs scipy
|
|
2744
|
+
sage: A = matrix(CDF, [[0, -1],
|
|
2745
|
+
....: [1, 0]])
|
|
2746
|
+
sage: A.is_skew_hermitian(algorithm='orthonormal')
|
|
2747
|
+
True
|
|
2748
|
+
sage: A.is_skew_hermitian(algorithm='naive')
|
|
2749
|
+
True
|
|
2750
|
+
|
|
2751
|
+
A matrix that is nearly skew-Hermitian, but for a diagonal entry
|
|
2752
|
+
that is not purely imaginary. ::
|
|
2753
|
+
|
|
2754
|
+
sage: # needs scipy sage.symbolic
|
|
2755
|
+
sage: A = matrix(CDF, [[ -I, -1, 1-I],
|
|
2756
|
+
....: [ 1, 1, -1],
|
|
2757
|
+
....: [-1-I, 1, -I]])
|
|
2758
|
+
sage: A.is_skew_hermitian()
|
|
2759
|
+
False
|
|
2760
|
+
sage: A[1,1] = -I
|
|
2761
|
+
sage: A.is_skew_hermitian()
|
|
2762
|
+
True
|
|
2763
|
+
|
|
2764
|
+
We get a unitary matrix from the SVD routine and use this
|
|
2765
|
+
numerical matrix to create a matrix that should be
|
|
2766
|
+
skew-Hermitian (indeed it should be the identity matrix
|
|
2767
|
+
multiplied by `I`), but with some imprecision. We use this to
|
|
2768
|
+
illustrate that if the tolerance is set too small, then we can
|
|
2769
|
+
be too strict about the equality of entries and may achieve
|
|
2770
|
+
the wrong result (depending on the system)::
|
|
2771
|
+
|
|
2772
|
+
sage: # needs scipy sage.symbolic
|
|
2773
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2774
|
+
....: [-3 - I, -4*I, -2],
|
|
2775
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2776
|
+
sage: U, _, _ = A.SVD()
|
|
2777
|
+
sage: B = 1j*U*U.conjugate_transpose()
|
|
2778
|
+
sage: B.is_skew_hermitian(algorithm='naive')
|
|
2779
|
+
True
|
|
2780
|
+
sage: B.is_skew_hermitian(algorithm='naive', tol=1.0e-17) # random
|
|
2781
|
+
False
|
|
2782
|
+
sage: B.is_skew_hermitian(algorithm='naive', tol=1.0e-15)
|
|
2783
|
+
True
|
|
2784
|
+
|
|
2785
|
+
A square, empty matrix is trivially skew-Hermitian. ::
|
|
2786
|
+
|
|
2787
|
+
sage: A = matrix(RDF, 0, 0)
|
|
2788
|
+
sage: A.is_skew_hermitian() # needs scipy
|
|
2789
|
+
True
|
|
2790
|
+
|
|
2791
|
+
Rectangular matrices are never skew-Hermitian, no matter which
|
|
2792
|
+
algorithm is requested. ::
|
|
2793
|
+
|
|
2794
|
+
sage: A = matrix(CDF, 3, 4)
|
|
2795
|
+
sage: A.is_skew_hermitian() # needs scipy
|
|
2796
|
+
False
|
|
2797
|
+
|
|
2798
|
+
TESTS:
|
|
2799
|
+
|
|
2800
|
+
The ``algorithm`` keyword gets checked. ::
|
|
2801
|
+
|
|
2802
|
+
sage: A = matrix(RDF, 2, range(4))
|
|
2803
|
+
sage: A.is_skew_hermitian(algorithm='junk')
|
|
2804
|
+
Traceback (most recent call last):
|
|
2805
|
+
...
|
|
2806
|
+
ValueError: algorithm must be 'naive' or 'orthonormal', not junk
|
|
2807
|
+
|
|
2808
|
+
AUTHOR:
|
|
2809
|
+
|
|
2810
|
+
- Rob Beezer (2011-03-30)
|
|
2811
|
+
"""
|
|
2812
|
+
if algorithm == "naive":
|
|
2813
|
+
return super()._is_hermitian(skew=True, tolerance=tol)
|
|
2814
|
+
elif algorithm == "orthonormal":
|
|
2815
|
+
return self._is_hermitian_orthonormal(tol=tol, skew=True)
|
|
2816
|
+
else:
|
|
2817
|
+
raise ValueError("algorithm must be 'naive' or 'orthonormal', not {0}".format(algorithm))
|
|
2818
|
+
|
|
2819
|
+
def is_normal(self, tol=1e-12, algorithm='orthonormal'):
|
|
2820
|
+
r"""
|
|
2821
|
+
Return ``True`` if the matrix commutes with its conjugate-transpose.
|
|
2822
|
+
|
|
2823
|
+
INPUT:
|
|
2824
|
+
|
|
2825
|
+
- ``tol`` -- (default: ``1e-12``) the largest value of the
|
|
2826
|
+
absolute value of the difference between two matrix entries
|
|
2827
|
+
for which they will still be considered equal.
|
|
2828
|
+
|
|
2829
|
+
- ``algorithm`` -- (default: ``'orthonormal'``) set to
|
|
2830
|
+
``'orthonormal'`` for a stable procedure and set to ``'naive'`` for a
|
|
2831
|
+
fast procedure
|
|
2832
|
+
|
|
2833
|
+
OUTPUT:
|
|
2834
|
+
|
|
2835
|
+
``True`` if the matrix is square and commutes with its
|
|
2836
|
+
conjugate-transpose, and ``False`` otherwise.
|
|
2837
|
+
|
|
2838
|
+
Normal matrices are precisely those that can be diagonalized
|
|
2839
|
+
by a unitary matrix.
|
|
2840
|
+
|
|
2841
|
+
The tolerance parameter is used to allow for numerical values
|
|
2842
|
+
to be equal if there is a slight difference due to round-off
|
|
2843
|
+
and other imprecisions.
|
|
2844
|
+
|
|
2845
|
+
The result is cached, on a per-tolerance and per-algorithm basis.
|
|
2846
|
+
|
|
2847
|
+
ALGORITHMS:
|
|
2848
|
+
|
|
2849
|
+
The naive algorithm simply compares entries of the two possible
|
|
2850
|
+
products of the matrix with its conjugate-transpose, with equality
|
|
2851
|
+
controlled by the tolerance parameter.
|
|
2852
|
+
|
|
2853
|
+
The orthonormal algorithm first computes a Schur decomposition
|
|
2854
|
+
(via the :meth:`schur` method) and checks that the result is a
|
|
2855
|
+
diagonal matrix. An orthonormal diagonalization
|
|
2856
|
+
is equivalent to being normal.
|
|
2857
|
+
|
|
2858
|
+
So the naive algorithm can finish fairly quickly for a matrix
|
|
2859
|
+
that is not normal, once the products have been computed.
|
|
2860
|
+
However, the orthonormal algorithm will compute a Schur
|
|
2861
|
+
decomposition before going through a similar check of a
|
|
2862
|
+
matrix entry-by-entry.
|
|
2863
|
+
|
|
2864
|
+
EXAMPLES:
|
|
2865
|
+
|
|
2866
|
+
First over the complexes. ``B`` is Hermitian, hence normal. ::
|
|
2867
|
+
|
|
2868
|
+
sage: # needs scipy sage.symbolic
|
|
2869
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2870
|
+
....: [-3 - I, -4*I, -2],
|
|
2871
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2872
|
+
sage: B = A*A.conjugate_transpose()
|
|
2873
|
+
sage: B.is_hermitian()
|
|
2874
|
+
True
|
|
2875
|
+
sage: B.is_normal(algorithm='orthonormal')
|
|
2876
|
+
True
|
|
2877
|
+
sage: B.is_normal(algorithm='naive')
|
|
2878
|
+
True
|
|
2879
|
+
sage: B[0,0] = I
|
|
2880
|
+
sage: B.is_normal(algorithm='orthonormal')
|
|
2881
|
+
False
|
|
2882
|
+
sage: B.is_normal(algorithm='naive')
|
|
2883
|
+
False
|
|
2884
|
+
|
|
2885
|
+
Now over the reals. Circulant matrices are normal. ::
|
|
2886
|
+
|
|
2887
|
+
sage: # needs scipy sage.graphs
|
|
2888
|
+
sage: G = graphs.CirculantGraph(20, [3, 7])
|
|
2889
|
+
sage: D = digraphs.Circuit(20)
|
|
2890
|
+
sage: A = 3*D.adjacency_matrix() - 5*G.adjacency_matrix()
|
|
2891
|
+
sage: A = A.change_ring(RDF)
|
|
2892
|
+
sage: A.is_normal()
|
|
2893
|
+
True
|
|
2894
|
+
sage: A.is_normal(algorithm='naive')
|
|
2895
|
+
True
|
|
2896
|
+
sage: A[19,0] = 4.0
|
|
2897
|
+
sage: A.is_normal()
|
|
2898
|
+
False
|
|
2899
|
+
sage: A.is_normal(algorithm='naive')
|
|
2900
|
+
False
|
|
2901
|
+
|
|
2902
|
+
Skew-Hermitian matrices are normal. ::
|
|
2903
|
+
|
|
2904
|
+
sage: # needs scipy sage.symbolic
|
|
2905
|
+
sage: A = matrix(CDF, [[ 1 + I, 1 - 6*I, -1 - I],
|
|
2906
|
+
....: [-3 - I, -4*I, -2],
|
|
2907
|
+
....: [-1 + I, -2 - 8*I, 2 + I]])
|
|
2908
|
+
sage: B = A - A.conjugate_transpose()
|
|
2909
|
+
sage: B.is_hermitian()
|
|
2910
|
+
False
|
|
2911
|
+
sage: B.is_normal()
|
|
2912
|
+
True
|
|
2913
|
+
sage: B.is_normal(algorithm='naive')
|
|
2914
|
+
True
|
|
2915
|
+
|
|
2916
|
+
A small matrix that does not fit into any of the usual categories
|
|
2917
|
+
of normal matrices. ::
|
|
2918
|
+
|
|
2919
|
+
sage: # needs scipy
|
|
2920
|
+
sage: A = matrix(RDF, [[1, -1],
|
|
2921
|
+
....: [1, 1]])
|
|
2922
|
+
sage: A.is_normal()
|
|
2923
|
+
True
|
|
2924
|
+
sage: not A.is_hermitian() and not A.is_skew_symmetric()
|
|
2925
|
+
True
|
|
2926
|
+
|
|
2927
|
+
Sage has several fields besides the entire complex numbers
|
|
2928
|
+
where conjugation is non-trivial. ::
|
|
2929
|
+
|
|
2930
|
+
sage: # needs sage.rings.number_field
|
|
2931
|
+
sage: F.<b> = QuadraticField(-7)
|
|
2932
|
+
sage: C = matrix(F, [[-2*b - 3, 7*b - 6, -b + 3],
|
|
2933
|
+
....: [-2*b - 3, -3*b + 2, -2*b],
|
|
2934
|
+
....: [ b + 1, 0, -2]])
|
|
2935
|
+
sage: C = C*C.conjugate_transpose()
|
|
2936
|
+
sage: C.is_normal()
|
|
2937
|
+
True
|
|
2938
|
+
|
|
2939
|
+
A square, empty matrix is trivially normal. ::
|
|
2940
|
+
|
|
2941
|
+
sage: A = matrix(CDF, 0, 0)
|
|
2942
|
+
sage: A.is_normal()
|
|
2943
|
+
True
|
|
2944
|
+
|
|
2945
|
+
Rectangular matrices are never normal, no matter which
|
|
2946
|
+
algorithm is requested. ::
|
|
2947
|
+
|
|
2948
|
+
sage: A = matrix(CDF, 3, 4)
|
|
2949
|
+
sage: A.is_normal()
|
|
2950
|
+
False
|
|
2951
|
+
|
|
2952
|
+
TESTS:
|
|
2953
|
+
|
|
2954
|
+
The tolerance must be strictly positive. ::
|
|
2955
|
+
|
|
2956
|
+
sage: A = matrix(RDF, 2, range(4))
|
|
2957
|
+
sage: A.is_normal(tol = -3.1)
|
|
2958
|
+
Traceback (most recent call last):
|
|
2959
|
+
...
|
|
2960
|
+
ValueError: tolerance must be positive, not -3.1
|
|
2961
|
+
|
|
2962
|
+
The ``algorithm`` keyword gets checked. ::
|
|
2963
|
+
|
|
2964
|
+
sage: A = matrix(RDF, 2, range(4))
|
|
2965
|
+
sage: A.is_normal(algorithm='junk')
|
|
2966
|
+
Traceback (most recent call last):
|
|
2967
|
+
...
|
|
2968
|
+
ValueError: algorithm must be 'naive' or 'orthonormal', not junk
|
|
2969
|
+
|
|
2970
|
+
AUTHOR:
|
|
2971
|
+
|
|
2972
|
+
- Rob Beezer (2011-03-31)
|
|
2973
|
+
"""
|
|
2974
|
+
import sage.rings.complex_double
|
|
2975
|
+
global numpy
|
|
2976
|
+
tol = float(tol)
|
|
2977
|
+
if tol <= 0:
|
|
2978
|
+
raise ValueError('tolerance must be positive, not {0}'.format(tol))
|
|
2979
|
+
if algorithm not in ['naive', 'orthonormal']:
|
|
2980
|
+
raise ValueError("algorithm must be 'naive' or 'orthonormal', not {0}".format(algorithm))
|
|
2981
|
+
|
|
2982
|
+
key = 'normal_{0}_{1}'.format(algorithm, tol)
|
|
2983
|
+
b = self.fetch(key)
|
|
2984
|
+
if b is not None:
|
|
2985
|
+
return b
|
|
2986
|
+
if not self.is_square():
|
|
2987
|
+
self.cache(key, False)
|
|
2988
|
+
return False
|
|
2989
|
+
if self._nrows == 0:
|
|
2990
|
+
self.cache(key, True)
|
|
2991
|
+
return True
|
|
2992
|
+
cdef Py_ssize_t i, j
|
|
2993
|
+
cdef Matrix_double_dense T, left, right
|
|
2994
|
+
if algorithm == 'orthonormal':
|
|
2995
|
+
# Schur decomposition over CDF will be diagonal iff normal
|
|
2996
|
+
_, T = self.schur(base_ring=sage.rings.complex_double.CDF)
|
|
2997
|
+
normal = T._is_lower_triangular(tol)
|
|
2998
|
+
elif algorithm == 'naive':
|
|
2999
|
+
if numpy is None:
|
|
3000
|
+
import numpy
|
|
3001
|
+
CT = self.conjugate_transpose()
|
|
3002
|
+
left = self*CT
|
|
3003
|
+
right = CT*self
|
|
3004
|
+
normal = True
|
|
3005
|
+
# two products are Hermitian, need only check lower triangle
|
|
3006
|
+
for i in range(self._nrows):
|
|
3007
|
+
for j in range(i+1):
|
|
3008
|
+
if abs(left.get_unsafe(i,j) - right.get_unsafe(i,j)) > tol:
|
|
3009
|
+
normal = False
|
|
3010
|
+
break
|
|
3011
|
+
if not normal:
|
|
3012
|
+
break
|
|
3013
|
+
self.cache(key, normal)
|
|
3014
|
+
return normal
|
|
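# A sketch of the 'naive' normality test described above: A is normal when
# A*A^H and A^H*A agree entrywise within the tolerance (both products are
# Hermitian, so checking one triangle suffices).  Plain NumPy, outside Sage;
# is_normal_naive is an illustrative helper, not part of this module.
import numpy as np

def is_normal_naive(A, tol=1e-12):
    A = np.asarray(A)
    left = A @ A.conj().T
    right = A.conj().T @ A
    return bool(np.all(np.abs(left - right) <= tol))

A = np.array([[1.0, -1.0], [1.0, 1.0]])   # normal, yet neither Hermitian
assert is_normal_naive(A)                 # nor skew-Hermitian
A[1, 0] = 4.0
assert not is_normal_naive(A)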
3015
|
+
|
|
3016
|
+
def schur(self, base_ring=None):
|
|
3017
|
+
r"""
|
|
3018
|
+
Return the Schur decomposition of the matrix.
|
|
3019
|
+
|
|
3020
|
+
INPUT:
|
|
3021
|
+
|
|
3022
|
+
- ``base_ring`` -- defaults to the base ring of ``self``; use this to
|
|
3023
|
+
request the base ring of the returned matrices, which will affect the
|
|
3024
|
+
format of the results
|
|
3025
|
+
|
|
3026
|
+
OUTPUT:
|
|
3027
|
+
|
|
3028
|
+
A pair of immutable matrices. The first is a unitary matrix `Q`.
|
|
3029
|
+
The second, `T`, is upper-triangular when returned over the complex
|
|
3030
|
+
numbers, while it is almost upper-triangular over the reals. In the
|
|
3031
|
+
latter case, there can be some `2\times 2` blocks on the diagonal
|
|
3032
|
+
which represent a pair of conjugate complex eigenvalues of ``self``.
|
|
3033
|
+
|
|
3034
|
+
If ``self`` is the matrix `A`, then
|
|
3035
|
+
|
|
3036
|
+
.. MATH::
|
|
3037
|
+
|
|
3038
|
+
A = QT({\overline Q})^t
|
|
3039
|
+
|
|
3040
|
+
where the latter matrix is the conjugate-transpose of ``Q``, which
|
|
3041
|
+
is also the inverse of ``Q``, since ``Q`` is unitary.
|
|
3042
|
+
|
|
3043
|
+
Note that in the case of a normal matrix (Hermitian, symmetric, and
|
|
3044
|
+
others), the upper-triangular matrix is a diagonal matrix with
|
|
3045
|
+
eigenvalues of ``self`` on the diagonal, and the unitary matrix
|
|
3046
|
+
has columns that form an orthonormal basis composed of eigenvectors
|
|
3047
|
+
of ``self``. This is known as "orthonormal diagonalization".
|
|
3048
|
+
|
|
3049
|
+
.. WARNING::
|
|
3050
|
+
|
|
3051
|
+
The Schur decomposition is not unique, as there may be numerous
|
|
3052
|
+
choices for the vectors of the orthonormal basis, and consequently
|
|
3053
|
+
different possibilities for the upper-triangular matrix. However,
|
|
3054
|
+
the diagonal of the upper-triangular matrix will always contain the
|
|
3055
|
+
eigenvalues of the matrix (in the complex version), or `2\times 2`
|
|
3056
|
+
block matrices in the real version representing pairs of conjugate
|
|
3057
|
+
complex eigenvalues.
|
|
3058
|
+
|
|
3059
|
+
In particular, results may vary across systems and processors.
|
|
3060
|
+
|
|
3061
|
+
EXAMPLES:
|
|
3062
|
+
|
|
3063
|
+
First over the complexes. The similar matrix is always
|
|
3064
|
+
upper-triangular in this case. ::
|
|
3065
|
+
|
|
3066
|
+
sage: # needs scipy sage.symbolic
|
|
3067
|
+
sage: A = matrix(CDF, 4, 4, range(16)) + matrix(CDF, 4, 4,
|
|
3068
|
+
....: [x^3*I for x in range(0, 16)])
|
|
3069
|
+
sage: Q, T = A.schur()
|
|
3070
|
+
sage: (Q*Q.conjugate().transpose()).zero_at(1e-12) # tol 1e-12
|
|
3071
|
+
[ 0.999999999999999 0.0 0.0 0.0]
|
|
3072
|
+
[ 0.0 0.9999999999999996 0.0 0.0]
|
|
3073
|
+
[ 0.0 0.0 0.9999999999999992 0.0]
|
|
3074
|
+
[ 0.0 0.0 0.0 0.9999999999999999]
|
|
3075
|
+
sage: all(T.zero_at(1.0e-12)[i,j] == 0 for i in range(4) for j in range(i))
|
|
3076
|
+
True
|
|
3077
|
+
sage: (Q*T*Q.conjugate().transpose() - A).zero_at(1.0e-11)
|
|
3078
|
+
[0.0 0.0 0.0 0.0]
|
|
3079
|
+
[0.0 0.0 0.0 0.0]
|
|
3080
|
+
[0.0 0.0 0.0 0.0]
|
|
3081
|
+
[0.0 0.0 0.0 0.0]
|
|
3082
|
+
sage: eigenvalues = [T[i,i] for i in range(4)]; eigenvalues
|
|
3083
|
+
[30.733... + 4648.541...*I, -0.184... - 159.057...*I, -0.523... + 11.158...*I, -0.025... - 0.642...*I]
|
|
3084
|
+
sage: A.eigenvalues()
|
|
3085
|
+
[30.733... + 4648.541...*I, -0.184... - 159.057...*I, -0.523... + 11.158...*I, -0.025... - 0.642...*I]
|
|
3086
|
+
sage: abs(A.norm()-T.norm()) < 1e-10
|
|
3087
|
+
True
|
|
3088
|
+
|
|
3089
|
+
We begin with a real matrix but ask for a decomposition over the
|
|
3090
|
+
complexes. The result will yield an upper-triangular matrix over
|
|
3091
|
+
the complex numbers for ``T``. ::
|
|
3092
|
+
|
|
3093
|
+
sage: # needs scipy
|
|
3094
|
+
sage: A = matrix(RDF, 4, 4, [x^3 for x in range(16)])
|
|
3095
|
+
sage: Q, T = A.schur(base_ring=CDF)
|
|
3096
|
+
sage: (Q*Q.conjugate().transpose()).zero_at(1e-12) # tol 1e-12
|
|
3097
|
+
[0.9999999999999987 0.0 0.0 0.0]
|
|
3098
|
+
[ 0.0 0.9999999999999999 0.0 0.0]
|
|
3099
|
+
[ 0.0 0.0 1.0000000000000013 0.0]
|
|
3100
|
+
[ 0.0 0.0 0.0 1.0000000000000007]
|
|
3101
|
+
sage: T.parent()
|
|
3102
|
+
Full MatrixSpace of 4 by 4 dense matrices over Complex Double Field
|
|
3103
|
+
sage: all(T.zero_at(1.0e-12)[i,j] == 0 for i in range(4) for j in range(i))
|
|
3104
|
+
True
|
|
3105
|
+
sage: (Q*T*Q.conjugate().transpose() - A).zero_at(1.0e-11)
|
|
3106
|
+
[0.0 0.0 0.0 0.0]
|
|
3107
|
+
[0.0 0.0 0.0 0.0]
|
|
3108
|
+
[0.0 0.0 0.0 0.0]
|
|
3109
|
+
[0.0 0.0 0.0 0.0]
|
|
3110
|
+
|
|
3111
|
+
Now totally over the reals. With complex eigenvalues, the
|
|
3112
|
+
similar matrix may not be upper-triangular, but "at worst" there
|
|
3113
|
+
may be some `2\times 2` blocks on the diagonal which represent
|
|
3114
|
+
a pair of conjugate complex eigenvalues. These blocks will then
|
|
3115
|
+
just interrupt the zeros below the main diagonal. This example
|
|
3116
|
+
has a pair of these blocks. ::
|
|
3117
|
+

            sage: # needs scipy
            sage: A = matrix(RDF, 4, 4, [[1, 0, -3, -1],
            ....:     [4, -16, -7, 0],
            ....:     [1, 21, 1, -2],
            ....:     [26, -1, -2, 1]])
            sage: Q, T = A.schur()
            sage: (Q*Q.conjugate().transpose())  # tol 1e-12
            [0.9999999999999994 0.0 0.0 0.0]
            [ 0.0 1.0000000000000013 0.0 0.0]
            [ 0.0 0.0 1.0000000000000004 0.0]
            [ 0.0 0.0 0.0 1.0000000000000016]
            sage: all(T.zero_at(1.0e-12)[i,j] == 0 for i in range(4) for j in range(i))
            False
            sage: all(T.zero_at(1.0e-12)[i,j] == 0 for i in range(4) for j in range(i-1))
            True
            sage: (Q*T*Q.conjugate().transpose() - A).zero_at(1.0e-11)
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            sage: sorted(T[0:2,0:2].eigenvalues() + T[2:4,2:4].eigenvalues())
            [-5.710... - 8.382...*I, -5.710... + 8.382...*I, -0.789... - 2.336...*I, -0.789... + 2.336...*I]
            sage: sorted(A.eigenvalues())
            [-5.710... - 8.382...*I, -5.710... + 8.382...*I, -0.789... - 2.336...*I, -0.789... + 2.336...*I]
            sage: abs(A.norm()-T.norm()) < 1e-12
            True

        Starting with complex numbers and requesting a result over the reals
        will never happen. ::

            sage: # needs scipy sage.symbolic
            sage: A = matrix(CDF, 2, 2, [[2+I, -1+3*I], [5-4*I, 2-7*I]])
            sage: A.schur(base_ring=RDF)
            Traceback (most recent call last):
            ...
            TypeError: unable to convert input matrix over CDF to a matrix over RDF

        If theory predicts your matrix is real, but it contains some
        very small imaginary parts, you can specify the cutoff for "small"
        imaginary parts, then request the output as real matrices, and let
        the routine do the rest. ::

            sage: # needs scipy
            sage: A = matrix(RDF, 2, 2, [1, 1, -1, 0]) + matrix(CDF, 2, 2, [1.0e-14*I]*4)
            sage: B = A.zero_at(1.0e-12)
            sage: B.parent()
            Full MatrixSpace of 2 by 2 dense matrices over Complex Double Field
            sage: Q, T = B.schur(RDF)
            sage: Q.parent()
            Full MatrixSpace of 2 by 2 dense matrices over Real Double Field
            sage: T.parent()
            Full MatrixSpace of 2 by 2 dense matrices over Real Double Field
            sage: Q.round(6)
            [ 0.707107 0.707107]
            [-0.707107 0.707107]
            sage: T.round(6)
            [ 0.5 1.5]
            [-0.5 0.5]
            sage: (Q*T*Q.conjugate().transpose() - B).zero_at(1.0e-11)
            [0.0 0.0]
            [0.0 0.0]

        A Hermitian matrix has real eigenvalues, so the similar matrix
        will be upper-triangular. Furthermore, a Hermitian matrix is
        diagonalizable with respect to an orthonormal basis, composed
        of eigenvectors of the matrix. Here that basis is the set of
        columns of the unitary matrix. ::

            sage: # needs scipy sage.symbolic
            sage: A = matrix(CDF, [[ 52, -9*I - 8, 6*I - 187, -188*I + 2],
            ....:     [ 9*I - 8, 12, -58*I + 59, 30*I + 42],
            ....:     [-6*I - 187, 58*I + 59, 2677, 2264*I + 65],
            ....:     [ 188*I + 2, -30*I + 42, -2264*I + 65, 2080]])
            sage: Q, T = A.schur()
            sage: T = T.zero_at(1.0e-12).change_ring(RDF)
            sage: T.round(6)
            [4680.13301 0.0 0.0 0.0]
            [ 0.0 102.715967 0.0 0.0]
            [ 0.0 0.0 35.039344 0.0]
            [ 0.0 0.0 0.0 3.11168]
            sage: (Q*Q.conjugate().transpose()).zero_at(1e-12)  # tol 1e-12
            [1.0000000000000004 0.0 0.0 0.0]
            [ 0.0 0.9999999999999989 0.0 0.0]
            [ 0.0 0.0 1.0000000000000002 0.0]
            [ 0.0 0.0 0.0 0.9999999999999992]
            sage: (Q*T*Q.conjugate().transpose() - A).zero_at(1.0e-11)
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]

        Similarly, a real symmetric matrix has only real eigenvalues,
        and there is an orthonormal basis composed of eigenvectors of
        the matrix. ::

            sage: # needs scipy
            sage: A = matrix(RDF, [[ 1, -2, 5, -3],
            ....:     [-2, 9, 1, 5],
            ....:     [ 5, 1, 3 , 7],
            ....:     [-3, 5, 7, -8]])
            sage: Q, T = A.schur()
            sage: Q.round(4)
            [-0.3027 -0.751 0.576 -0.1121]
            [ 0.139 -0.3892 -0.2648 0.8713]
            [ 0.4361 0.359 0.7599 0.3217]
            [ -0.836 0.3945 0.1438 0.3533]
            sage: T = T.zero_at(10^-12)
            sage: all(abs(e) < 10^-4
            ....:     for e in (T - diagonal_matrix(RDF, [-13.5698, -0.8508, 7.7664, 11.6542])).list())
            True
            sage: (Q*Q.transpose())  # tol 1e-12
            [0.9999999999999998 0.0 0.0 0.0]
            [ 0.0 1.0 0.0 0.0]
            [ 0.0 0.0 0.9999999999999998 0.0]
            [ 0.0 0.0 0.0 0.9999999999999996]
            sage: (Q*T*Q.transpose() - A).zero_at(1.0e-11)
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]
            [0.0 0.0 0.0 0.0]

        The results are cached, both as a real factorization and also as a
        complex factorization. This means the returned matrices are
        immutable. ::

            sage: # needs scipy
            sage: A = matrix(RDF, 2, 2, [[0, -1], [1, 0]])
            sage: Qr, Tr = A.schur(base_ring=RDF)
            sage: Qc, Tc = A.schur(base_ring=CDF)
            sage: all(M.is_immutable() for M in [Qr, Tr, Qc, Tc])
            True
            sage: Tr.round(6) != Tc.round(6)
            True

        TESTS:

        The Schur factorization is only defined for square matrices. ::

            sage: A = matrix(RDF, 4, 5, range(20))
            sage: A.schur()
            Traceback (most recent call last):
            ...
            ValueError: Schur decomposition requires a square matrix, not a 4 x 5 matrix

        A base ring request is checked. ::

            sage: A = matrix(RDF, 3, range(9))
            sage: A.schur(base_ring=QQ)
            Traceback (most recent call last):
            ...
            ValueError: base ring of Schur decomposition matrices must be RDF or CDF, not Rational Field

        AUTHOR:

        - Rob Beezer (2011-03-31)
        """
        global scipy
        from sage.rings.real_double import RDF
        from sage.rings.complex_double import CDF

        cdef Matrix_double_dense Q, T

        if not self.is_square():
            raise ValueError('Schur decomposition requires a square matrix, not a {0} x {1} matrix'.format(self.nrows(), self.ncols()))
        if base_ring is None:
            base_ring = self.base_ring()
        if base_ring not in [RDF, CDF]:
            raise ValueError('base ring of Schur decomposition matrices must be RDF or CDF, not {0}'.format(base_ring))

        if self.base_ring() != base_ring:
            try:
                self = self.change_ring(base_ring)
            except TypeError:
                raise TypeError('unable to convert input matrix over CDF to a matrix over RDF')
        if base_ring == CDF:
            format = 'complex'
        else:
            format = 'real'

        schur = self.fetch('schur_factors_' + format)
        if schur is not None:
            return schur
        if scipy is None:
            import scipy
            import scipy.linalg
        Q = self._new(self._nrows, self._nrows)
        T = self._new(self._nrows, self._nrows)
        T._matrix_numpy, Q._matrix_numpy = scipy.linalg.schur(self._matrix_numpy, output=format)
        Q.set_immutable()
        T.set_immutable()
        # Our return order is the reverse of NumPy's
        schur = (Q, T)
        self.cache('schur_factors_' + format, schur)
        return schur

    def cholesky(self):
        r"""
        Return the Cholesky factorization of a matrix that
        is real symmetric, or complex Hermitian.

        INPUT:

        Any square matrix with entries from ``RDF`` that is symmetric, or
        with entries from ``CDF`` that is Hermitian. The matrix must
        be positive definite for the Cholesky decomposition to exist.

        OUTPUT:

        For a matrix `A` the routine returns a lower triangular
        matrix `L` such that,

        .. MATH::

            A = LL^\ast

        where `L^\ast` is the conjugate-transpose in the complex case,
        and just the transpose in the real case. If the matrix fails
        to be positive definite (perhaps because it is not symmetric
        or Hermitian), then this function raises a :exc:`ValueError`.

        IMPLEMENTATION:

        The existence of a Cholesky decomposition and the
        positive definite property are equivalent. So this
        method and the :meth:`is_positive_definite` method compute and
        cache both the Cholesky decomposition and the
        positive-definiteness. So the :meth:`is_positive_definite`
        method or catching a :exc:`ValueError` from the :meth:`cholesky`
        method are equally expensive computationally and if the
        decomposition exists, it is cached as a side-effect of either
        routine.

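        For instance, assuming ``A`` is a matrix over ``RDF`` or ``CDF``, a
        small helper along the following lines (purely illustrative, not part
        of this class) behaves like :meth:`is_positive_definite`::

            sage: # needs scipy
            sage: def is_posdef_via_cholesky(A):
            ....:     try:
            ....:         A.cholesky()
            ....:         return True
            ....:     except ValueError:
            ....:         return False
            sage: is_posdef_via_cholesky(identity_matrix(RDF, 2))
            True
            sage: is_posdef_via_cholesky(matrix(RDF, [[0, 1], [1, 0]]))
            False
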
        EXAMPLES:

        A real matrix that is symmetric, Hermitian, and positive definite::

            sage: # needs scipy
            sage: M = matrix(RDF,[[ 1, 1, 1, 1, 1],
            ....:     [ 1, 5, 31, 121, 341],
            ....:     [ 1, 31, 341, 1555, 4681],
            ....:     [ 1,121, 1555, 7381, 22621],
            ....:     [ 1,341, 4681, 22621, 69905]])
            sage: M.is_symmetric()
            True
            sage: M.is_hermitian()
            True
            sage: L = M.cholesky()
            sage: L.round(6).zero_at(10^-10)
            [ 1.0 0.0 0.0 0.0 0.0]
            [ 1.0 2.0 0.0 0.0 0.0]
            [ 1.0 15.0 10.723805 0.0 0.0]
            [ 1.0 60.0 60.985814 7.792973 0.0]
            [ 1.0 170.0 198.623524 39.366567 1.7231]
            sage: (L*L.transpose()).round(6).zero_at(10^-10)
            [ 1.0 1.0 1.0 1.0 1.0]
            [ 1.0 5.0 31.0 121.0 341.0]
            [ 1.0 31.0 341.0 1555.0 4681.0]
            [ 1.0 121.0 1555.0 7381.0 22621.0]
            [ 1.0 341.0 4681.0 22621.0 69905.0]

        A complex matrix that is Hermitian and positive definite. ::

            sage: # needs scipy sage.symbolic
            sage: A = matrix(CDF, [[ 23, 17*I + 3, 24*I + 25, 21*I],
            ....:     [ -17*I + 3, 38, -69*I + 89, 7*I + 15],
            ....:     [-24*I + 25, 69*I + 89, 976, 24*I + 6],
            ....:     [ -21*I, -7*I + 15, -24*I + 6, 28]])
            sage: A.is_hermitian()
            True
            sage: L = A.cholesky()
            sage: L.round(6).zero_at(10^-10)
            [ 4.795832 0.0 0.0 0.0]
            [ 0.625543 - 3.544745*I 5.004346 0.0 0.0]
            [ 5.21286 - 5.004346*I 13.588189 + 10.721116*I 24.984023 0.0]
            [ -4.378803*I -0.104257 - 0.851434*I -0.21486 + 0.371348*I 2.811799]
            sage: (L*L.conjugate_transpose()).round(6).zero_at(10^-10)
            [ 23.0 3.0 + 17.0*I 25.0 + 24.0*I 21.0*I]
            [ 3.0 - 17.0*I 38.0 89.0 - 69.0*I 15.0 + 7.0*I]
            [25.0 - 24.0*I 89.0 + 69.0*I 976.0 6.0 + 24.0*I]
            [ -21.0*I 15.0 - 7.0*I 6.0 - 24.0*I 28.0]

        This routine will recognize when the input matrix is not
        positive definite. The negative eigenvalues are an
        equivalent indicator. (Eigenvalues of a Hermitian matrix
        must be real, so there is no loss in ignoring the imprecise
        imaginary parts). ::

            sage: # needs scipy
            sage: A = matrix(RDF, [[ 3, -6, 9, 6, -9],
            ....:     [-6, 11, -16, -11, 17],
            ....:     [ 9, -16, 28, 16, -40],
            ....:     [ 6, -11, 16, 9, -19],
            ....:     [-9, 17, -40, -19, 68]])
            sage: A.is_symmetric()
            True
            sage: A.eigenvalues()
            [108.07..., 13.02..., -0.02..., -0.70..., -1.37...]
            sage: A.cholesky()
            Traceback (most recent call last):
            ...
            ValueError: matrix is not positive definite

            sage: # needs scipy sage.symbolic
            sage: B = matrix(CDF, [[ 2, 4 - 2*I, 2 + 2*I],
            ....:     [4 + 2*I, 8, 10*I],
            ....:     [2 - 2*I, -10*I, -3]])
            sage: B.is_hermitian()
            True
            sage: [ev.real() for ev in B.eigenvalues()]
            [15.88..., 0.08..., -8.97...]
            sage: B.cholesky()
            Traceback (most recent call last):
            ...
            ValueError: matrix is not positive definite

        TESTS:

        A trivial case. ::

            sage: A = matrix(RDF, 0, [])
            sage: A.cholesky()
            []

        The Cholesky factorization is only defined for Hermitian (in
        particular, square) matrices::

            sage: A = matrix(RDF, 4, 5, range(20))
            sage: A.cholesky()
            Traceback (most recent call last):
            ...
            ValueError: matrix is not Hermitian

        ::

            sage: # needs sage.symbolic
            sage: A = matrix(CDF, [[1+I]])
            sage: A.cholesky()
            Traceback (most recent call last):
            ...
            ValueError: matrix is not Hermitian
        """
        from sage.rings.real_double import RDF
        from sage.rings.complex_double import CDF

        cdef Matrix_double_dense L
        cache_cholesky = 'cholesky'
        cache_posdef = 'positive_definite'
        L = self.fetch(cache_cholesky)
        if L is not None:
            return L

        if not self.is_hermitian():
            self.cache(cache_posdef, False)
            raise ValueError("matrix is not Hermitian")

        if self._nrows == 0:  # special case
            self.cache(cache_posdef, True)
            L = self.__copy__()
            L.set_immutable()
            return L

        L = self._new()
        from scipy.linalg import cholesky
        from numpy.linalg import LinAlgError
        try:
            L._matrix_numpy = cholesky(self._matrix_numpy, lower=1)
        except LinAlgError:
            self.cache(cache_posdef, False)
            raise ValueError("matrix is not positive definite")
        L.set_immutable()
        self.cache(cache_cholesky, L)
        self.cache(cache_posdef, True)

        return L

    def is_positive_definite(self):
        r"""
        Determines if a matrix is positive definite.

        A matrix `A` is positive definite if it is square,
        is Hermitian (which reduces to symmetric in the real case),
        and for every nonzero vector `\vec{x}`,

        .. MATH::

            \vec{x}^\ast A \vec{x} > 0

        where `\vec{x}^\ast` is the conjugate-transpose in the
        complex case and just the transpose in the real case.
        Equivalently, a positive definite matrix has only positive
        eigenvalues and only positive determinants of leading
        principal submatrices.

        Applies to any matrix over ``RDF`` or ``CDF``.

        OUTPUT:

        ``True`` if and only if the matrix is square, Hermitian,
        and meets the condition above on the quadratic form.
        The result is cached.

        IMPLEMENTATION:

        The existence of a Cholesky decomposition and the
        positive definite property are equivalent. So this
        method and the :meth:`cholesky` method compute and
        cache both the Cholesky decomposition and the
        positive-definiteness. So the :meth:`is_positive_definite`
        method or catching a :exc:`ValueError` from the :meth:`cholesky`
        method are equally expensive computationally and if the
        decomposition exists, it is cached as a side-effect of either
        routine.

        EXAMPLES:

        A matrix over ``RDF`` that is positive definite. ::

            sage: # needs scipy
            sage: M = matrix(RDF,[[ 1, 1, 1, 1, 1],
            ....:     [ 1, 5, 31, 121, 341],
            ....:     [ 1, 31, 341, 1555, 4681],
            ....:     [ 1,121, 1555, 7381, 22621],
            ....:     [ 1,341, 4681, 22621, 69905]])
            sage: M.is_symmetric()
            True
            sage: M.eigenvalues()
            [77547.66..., 82.44..., 2.41..., 0.46..., 0.011...]
            sage: [round(M[:i,:i].determinant()) for i in range(1, M.nrows()+1)]
            [1, 4, 460, 27936, 82944]
            sage: M.is_positive_definite()
            True

        A matrix over ``CDF`` that is positive definite. ::

            sage: # needs scipy sage.symbolic
            sage: C = matrix(CDF, [[ 23, 17*I + 3, 24*I + 25, 21*I],
            ....:     [ -17*I + 3, 38, -69*I + 89, 7*I + 15],
            ....:     [-24*I + 25, 69*I + 89, 976, 24*I + 6],
            ....:     [ -21*I, -7*I + 15, -24*I + 6, 28]])
            sage: C.is_hermitian()
            True
            sage: [x.real() for x in C.eigenvalues()]
            [991.46..., 55.96..., 3.69..., 13.87...]
            sage: [round(C[:i,:i].determinant().real()) for i in range(1, C.nrows()+1)]
            [23, 576, 359540, 2842600]
            sage: C.is_positive_definite()
            True

        A matrix over ``RDF`` that is not positive definite. ::

            sage: # needs scipy
            sage: A = matrix(RDF, [[ 3, -6, 9, 6, -9],
            ....:     [-6, 11, -16, -11, 17],
            ....:     [ 9, -16, 28, 16, -40],
            ....:     [ 6, -11, 16, 9, -19],
            ....:     [-9, 17, -40, -19, 68]])
            sage: A.is_symmetric()
            True
            sage: A.eigenvalues()
            [108.07..., 13.02..., -0.02..., -0.70..., -1.37...]
            sage: [round(A[:i,:i].determinant()) for i in range(1, A.nrows()+1)]
            [3, -3, -15, 30, -30]
            sage: A.is_positive_definite()
            False

        A matrix over ``CDF`` that is not positive definite. ::

            sage: # needs scipy sage.symbolic
            sage: B = matrix(CDF, [[ 2, 4 - 2*I, 2 + 2*I],
            ....:     [4 + 2*I, 8, 10*I],
            ....:     [2 - 2*I, -10*I, -3]])
            sage: B.is_hermitian()
            True
            sage: [ev.real() for ev in B.eigenvalues()]
            [15.88..., 0.08..., -8.97...]
            sage: [round(B[:i,:i].determinant().real()) for i in range(1, B.nrows()+1)]
            [2, -4, -12]
            sage: B.is_positive_definite()
            False

        A large random matrix that is guaranteed by theory to be
        positive definite. ::

            sage: # needs scipy
            sage: R = random_matrix(CDF, 200)
            sage: H = R.conjugate_transpose()*R
            sage: H.is_positive_definite()
            True

        TESTS:

        A trivially small case. ::

            sage: # needs scipy
            sage: S = matrix(CDF, [])
            sage: S.nrows(), S.ncols()
            (0, 0)
            sage: S.is_positive_definite()
            True

        A rectangular matrix will never be positive definite. ::

            sage: R = matrix(RDF, 2, 3, range(6))
            sage: R.is_positive_definite()  # needs scipy
            False

        A non-Hermitian matrix will never be positive definite::

            sage: T = matrix(CDF, 8, 8, range(64))
            sage: T.is_positive_definite()  # needs scipy
            False

        ::

            sage: # needs scipy sage.symbolic
            sage: A = matrix(CDF, [[1+I]])
            sage: A.is_positive_definite()
            False

        AUTHOR:

        - Rob Beezer (2012-05-28)
        """
        cache_str = 'positive_definite'
        posdef = self.fetch(cache_str)
        if posdef is None:
            try:
                self.cholesky()
            except ValueError:
                pass
            posdef = self.fetch(cache_str)
        return posdef

    cdef _vector_times_matrix_(self, Vector v):
        # Compute the row vector v*self by delegating to numpy.dot
        # on the underlying numpy arrays.
        if self._nrows == 0 or self._ncols == 0:
            return self.row_ambient_module().zero_vector()
        global numpy
        if numpy is None:
            import numpy

        v_numpy = numpy.array([self._python_dtype(i) for i in v])

        M = self.row_ambient_module()
        ans = numpy.dot(v_numpy, self._matrix_numpy)
        return M(ans)

    cdef _matrix_times_vector_(self, Vector v):
        # Compute the column vector self*v by delegating to numpy.dot
        # on the underlying numpy arrays.
        if self._nrows == 0 or self._ncols == 0:
            return self.column_ambient_module().zero_vector()

        global numpy
        if numpy is None:
            import numpy

        v_numpy = numpy.array([self._python_dtype(i) for i in v], dtype=self._numpy_dtype)

        M = self.column_ambient_module()
        ans = numpy.dot(self._matrix_numpy, v_numpy)
        return M(ans)

    def _replace_self_with_numpy32(self, numpy_matrix):
        """

        EXAMPLES::

            sage: import numpy
            sage: a = numpy.array([[1,2],[3,4]], 'float32')
            sage: m = matrix(RDF,2,2,0)
            sage: m._replace_self_with_numpy32(a)
            sage: m
            [1.0 2.0]
            [3.0 4.0]
        """
        # TODO find where this is used and change it
        self._replace_self_with_numpy(numpy_matrix)

    def _hadamard_row_bound(self):
        r"""
        Return an integer n such that the absolute value of the
        determinant of this matrix is at most `10^n`.

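        For reference, such an `n` is supplied by Hadamard's inequality
        applied row by row: for a real square matrix `A = (a_{ij})`,

        .. MATH::

            |\det A| \le \prod_i \Bigl(\sum_j a_{ij}^2\Bigr)^{1/2},

        so `n = \bigl\lceil \log_{10} \prod_i (\sum_j a_{ij}^2)^{1/2} \bigr\rceil`
        suffices, and this is what the implementation below computes.
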
        EXAMPLES::

            sage: a = matrix(RDF, 3, [1,2,5,7,-3,4,2,1,123])
            sage: a._hadamard_row_bound()
            4
            sage: a.det()  # needs scipy
            -2014.0
            sage: 10^4
            10000
        """
        cdef double d = 0, s
        cdef Py_ssize_t i, j
        for i from 0 <= i < self._nrows:
            s = 0
            for j from 0 <= j < self._ncols:
                s += self.get_unsafe(i, j)**2
            d += math.log(s)
        d /= 2
        return int(math.ceil(d / math.log(10)))

    def exp(self):
        r"""
        Calculate the exponential of this matrix X, which is the matrix

        .. MATH::

            e^X = \sum_{k=0}^{\infty} \frac{X^k}{k!}.

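        As a quick sanity check (the matrix below is arbitrary and chosen
        only for illustration), the result agrees with a truncated partial
        sum of this series::

            sage: # needs scipy
            sage: A = matrix(RDF, 2, [0, 1, -1, 0])
            sage: S = sum(A^k / ZZ(k).factorial() for k in range(20))
            sage: (A.exp() - S).norm() < 1e-12
            True
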
        EXAMPLES::

            sage: # needs scipy
            sage: A = matrix(RDF, 2, [1,2,3,4]); A
            [1.0 2.0]
            [3.0 4.0]
            sage: A.exp()  # tol 5e-14
            [51.968956198705044 74.73656456700327]
            [112.10484685050491 164.07380304920997]
            sage: A = matrix(CDF, 2, [1,2+I,3*I,4]); A  # needs sage.symbolic
            [ 1.0 2.0 + 1.0*I]
            [ 3.0*I 4.0]
            sage: A.exp()  # tol 3e-14  # needs sage.symbolic
            [-19.614602953804912 + 12.517743846762578*I 3.7949636449582176 + 28.88379930658099*I]
            [ -32.383580980922254 + 21.88423595789845*I 2.269633004093535 + 44.901324827684824*I]

        TESTS::

            sage: # needs scipy
            sage: A = matrix(RDF, 2, [1,2,3,4])
            sage: A.exp()  # tol 5e-14
            [51.968956198705044 74.73656456700327]
            [112.10484685050491 164.07380304920997]

            sage: A = matrix(CDF, 2, [1,2+I,3*I,4])  # needs sage.symbolic
            sage: A.exp()  # tol 3e-14  # needs scipy sage.symbolic
            [-19.614602953804923 + 12.51774384676257*I 3.7949636449582016 + 28.883799306580997*I]
            [-32.38358098092227 + 21.884235957898433*I 2.2696330040935084 + 44.90132482768484*I]
        """
        global scipy
        if scipy is None:
            import scipy
            import scipy.linalg

        cdef Matrix_double_dense M
        M = self._new()
        M._matrix_numpy = scipy.linalg.expm(self._matrix_numpy)
        return M

    def zero_at(self, eps):
        """
        Return a copy of the matrix where elements whose absolute value is
        smaller than or equal to ``eps`` are replaced with zeroes. For
        complex matrices, the real and imaginary parts are considered
        individually.

        This is useful for modifying output from algorithms which have large
        relative errors when producing zero elements, e.g. to create reliable
        doctests.

        INPUT:

        - ``eps`` -- cutoff value

        OUTPUT: a modified copy of the matrix

        EXAMPLES::

            sage: # needs sage.symbolic
            sage: a = matrix(CDF, [[1, 1e-4r, 1+1e-100jr], [1e-8+3j, 0, 1e-58r]])
            sage: a
            [ 1.0 0.0001 1.0 + 1e-100*I]
            [ 1e-08 + 3.0*I 0.0 1e-58]
            sage: a.zero_at(1e-50)
            [ 1.0 0.0001 1.0]
            [1e-08 + 3.0*I 0.0 0.0]
            sage: a.zero_at(1e-4)
            [ 1.0 0.0 1.0]
            [3.0*I 0.0 0.0]
        """
        global numpy
        cdef Matrix_double_dense M
        if numpy is None:
            import numpy
        eps = float(eps)
        out = self._matrix_numpy.copy()
        if self._sage_dtype is sage.rings.complex_double.CDF:
            out.real[numpy.abs(out.real) <= eps] = 0
            out.imag[numpy.abs(out.imag) <= eps] = 0
        else:
            out[numpy.abs(out) <= eps] = 0
        M = self._new()
        M._matrix_numpy = out
        return M

    def round(self, ndigits=0):
        """
        Return a copy of the matrix where all entries have been rounded
        to a given precision in decimal digits (default: 0 digits).

        INPUT:

        - ``ndigits`` -- the precision in number of decimal digits

        OUTPUT: a modified copy of the matrix

        EXAMPLES::

            sage: M = matrix(CDF, [[10.234r + 34.2343jr, 34e10r]])
            sage: M
            [10.234 + 34.2343*I 340000000000.0]
            sage: M.round(2)
            [10.23 + 34.23*I 340000000000.0]
            sage: M.round()
            [ 10.0 + 34.0*I 340000000000.0]
        """
        global numpy
        cdef Matrix_double_dense M
        if numpy is None:
            import numpy
        ndigits = int(ndigits)
        M = self._new()
        M._matrix_numpy = numpy.round(self._matrix_numpy, ndigits)
        return M

    def _normalize_columns(self):
        """
        Return a copy of the matrix where each column has been
        multiplied by plus or minus 1, to guarantee that the real
        part of the leading entry of each nonzero column is positive.

        This is useful for modifying output from algorithms which
        produce matrices which are only well-defined up to signs of
        the columns, for example an algorithm which should produce an
        orthogonal matrix.

        OUTPUT: a modified copy of the matrix

        EXAMPLES::

            sage: # needs sage.symbolic
            sage: a = matrix(CDF, [[1, -2+I, 0, -3*I], [2, 2, -2, 2], [-3, -3, -3, -2]])
            sage: a
            [ 1.0 -2.0 + 1.0*I 0.0 -3.0*I]
            [ 2.0 2.0 -2.0 2.0]
            [ -3.0 -3.0 -3.0 -2.0]
            sage: a._normalize_columns()
            [ 1.0 2.0 - 1.0*I 0.0 -3.0*I]
            [ 2.0 -2.0 2.0 2.0]
            [ -3.0 3.0 3.0 -2.0]
        """
        M = self.__copy__()
        cdef Py_ssize_t i, j
        for j from 0 <= j < M.ncols():
            for i from 0 <= i < M.column(j).degree():
                a = M.column(j)[i].real()
                if a != 0:
                    if a < 0:
                        M.rescale_col(j, -1)
                    break
        return M

    def _normalize_rows(self):
        """
        Return a copy of the matrix where each row has been
        multiplied by plus or minus 1, to guarantee that the real
        part of the leading entry of each nonzero row is positive.

        This is useful for modifying output from algorithms which
        produce matrices which are only well-defined up to signs of
        the rows, for example an algorithm which should produce an
        upper triangular matrix.

        OUTPUT: a modified copy of the matrix

        EXAMPLES::

            sage: # needs sage.symbolic
            sage: a = matrix(CDF, [[1, 2, -3], [-2+I, 2, -3], [0, -2, -3], [-3*I, 2, -2]])
            sage: a
            [ 1.0 2.0 -3.0]
            [-2.0 + 1.0*I 2.0 -3.0]
            [ 0.0 -2.0 -3.0]
            [ -3.0*I 2.0 -2.0]
            sage: a._normalize_rows()
            [ 1.0 2.0 -3.0]
            [2.0 - 1.0*I -2.0 3.0]
            [ 0.0 2.0 3.0]
            [ -3.0*I 2.0 -2.0]
        """
        return self.transpose()._normalize_columns().transpose()