passagemath-modules 10.6.31rc3__cp314-cp314-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of passagemath-modules might be problematic.
- passagemath_modules-10.6.31rc3.dist-info/METADATA +281 -0
- passagemath_modules-10.6.31rc3.dist-info/RECORD +807 -0
- passagemath_modules-10.6.31rc3.dist-info/WHEEL +5 -0
- passagemath_modules-10.6.31rc3.dist-info/top_level.txt +2 -0
- passagemath_modules.libs/libgcc_s-2d945d6c.so.1 +0 -0
- passagemath_modules.libs/libgfortran-67378ab2.so.5.0.0 +0 -0
- passagemath_modules.libs/libgmp-28992bcb.so.10.5.0 +0 -0
- passagemath_modules.libs/libgsl-23768756.so.28.0.0 +0 -0
- passagemath_modules.libs/libmpc-7897025b.so.3.3.1 +0 -0
- passagemath_modules.libs/libmpfr-e34bb864.so.6.2.1 +0 -0
- passagemath_modules.libs/libopenblasp-r0-503f0c35.3.29.so +0 -0
- sage/algebras/all__sagemath_modules.py +20 -0
- sage/algebras/catalog.py +148 -0
- sage/algebras/clifford_algebra.py +3107 -0
- sage/algebras/clifford_algebra_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/algebras/clifford_algebra_element.pxd +16 -0
- sage/algebras/clifford_algebra_element.pyx +997 -0
- sage/algebras/commutative_dga.py +4252 -0
- sage/algebras/exterior_algebra_groebner.cpython-314-aarch64-linux-musl.so +0 -0
- sage/algebras/exterior_algebra_groebner.pxd +55 -0
- sage/algebras/exterior_algebra_groebner.pyx +727 -0
- sage/algebras/finite_dimensional_algebras/all.py +2 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra.py +1029 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.pxd +12 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_element.pyx +706 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_ideal.py +196 -0
- sage/algebras/finite_dimensional_algebras/finite_dimensional_algebra_morphism.py +255 -0
- sage/algebras/finite_gca.py +528 -0
- sage/algebras/group_algebra.py +232 -0
- sage/algebras/lie_algebras/abelian.py +197 -0
- sage/algebras/lie_algebras/affine_lie_algebra.py +1213 -0
- sage/algebras/lie_algebras/all.py +25 -0
- sage/algebras/lie_algebras/all__sagemath_modules.py +1 -0
- sage/algebras/lie_algebras/bch.py +177 -0
- sage/algebras/lie_algebras/bgg_dual_module.py +1184 -0
- sage/algebras/lie_algebras/bgg_resolution.py +232 -0
- sage/algebras/lie_algebras/center_uea.py +767 -0
- sage/algebras/lie_algebras/classical_lie_algebra.py +2516 -0
- sage/algebras/lie_algebras/examples.py +683 -0
- sage/algebras/lie_algebras/free_lie_algebra.py +973 -0
- sage/algebras/lie_algebras/heisenberg.py +820 -0
- sage/algebras/lie_algebras/lie_algebra.py +1562 -0
- sage/algebras/lie_algebras/lie_algebra_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/algebras/lie_algebras/lie_algebra_element.pxd +68 -0
- sage/algebras/lie_algebras/lie_algebra_element.pyx +2122 -0
- sage/algebras/lie_algebras/morphism.py +661 -0
- sage/algebras/lie_algebras/nilpotent_lie_algebra.py +457 -0
- sage/algebras/lie_algebras/onsager.py +1324 -0
- sage/algebras/lie_algebras/poincare_birkhoff_witt.py +816 -0
- sage/algebras/lie_algebras/quotient.py +462 -0
- sage/algebras/lie_algebras/rank_two_heisenberg_virasoro.py +355 -0
- sage/algebras/lie_algebras/representation.py +1040 -0
- sage/algebras/lie_algebras/structure_coefficients.py +459 -0
- sage/algebras/lie_algebras/subalgebra.py +967 -0
- sage/algebras/lie_algebras/symplectic_derivation.py +289 -0
- sage/algebras/lie_algebras/verma_module.py +1630 -0
- sage/algebras/lie_algebras/virasoro.py +1186 -0
- sage/algebras/octonion_algebra.cpython-314-aarch64-linux-musl.so +0 -0
- sage/algebras/octonion_algebra.pxd +20 -0
- sage/algebras/octonion_algebra.pyx +987 -0
- sage/algebras/orlik_solomon.py +907 -0
- sage/algebras/orlik_terao.py +779 -0
- sage/algebras/steenrod/all.py +7 -0
- sage/algebras/steenrod/steenrod_algebra.py +4258 -0
- sage/algebras/steenrod/steenrod_algebra_bases.py +1179 -0
- sage/algebras/steenrod/steenrod_algebra_misc.py +1167 -0
- sage/algebras/steenrod/steenrod_algebra_mult.py +954 -0
- sage/algebras/weyl_algebra.py +1126 -0
- sage/all__sagemath_modules.py +62 -0
- sage/calculus/all__sagemath_modules.py +19 -0
- sage/calculus/expr.py +205 -0
- sage/calculus/integration.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/integration.pyx +698 -0
- sage/calculus/interpolation.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/interpolation.pxd +13 -0
- sage/calculus/interpolation.pyx +387 -0
- sage/calculus/interpolators.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/interpolators.pyx +326 -0
- sage/calculus/ode.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/ode.pxd +5 -0
- sage/calculus/ode.pyx +610 -0
- sage/calculus/riemann.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/riemann.pyx +1521 -0
- sage/calculus/test_sympy.py +201 -0
- sage/calculus/transforms/all.py +7 -0
- sage/calculus/transforms/dft.py +844 -0
- sage/calculus/transforms/dwt.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/transforms/dwt.pxd +7 -0
- sage/calculus/transforms/dwt.pyx +160 -0
- sage/calculus/transforms/fft.cpython-314-aarch64-linux-musl.so +0 -0
- sage/calculus/transforms/fft.pxd +12 -0
- sage/calculus/transforms/fft.pyx +487 -0
- sage/calculus/wester.py +662 -0
- sage/coding/abstract_code.py +1108 -0
- sage/coding/ag_code.py +868 -0
- sage/coding/ag_code_decoders.cpython-314-aarch64-linux-musl.so +0 -0
- sage/coding/ag_code_decoders.pyx +2639 -0
- sage/coding/all.py +15 -0
- sage/coding/bch_code.py +494 -0
- sage/coding/binary_code.cpython-314-aarch64-linux-musl.so +0 -0
- sage/coding/binary_code.pxd +124 -0
- sage/coding/binary_code.pyx +4139 -0
- sage/coding/bounds_catalog.py +43 -0
- sage/coding/channel.py +819 -0
- sage/coding/channels_catalog.py +29 -0
- sage/coding/code_bounds.py +755 -0
- sage/coding/code_constructions.py +804 -0
- sage/coding/codes_catalog.py +111 -0
- sage/coding/cyclic_code.py +1329 -0
- sage/coding/databases.py +316 -0
- sage/coding/decoder.py +373 -0
- sage/coding/decoders_catalog.py +88 -0
- sage/coding/delsarte_bounds.py +709 -0
- sage/coding/encoder.py +390 -0
- sage/coding/encoders_catalog.py +64 -0
- sage/coding/extended_code.py +468 -0
- sage/coding/gabidulin_code.py +1058 -0
- sage/coding/golay_code.py +404 -0
- sage/coding/goppa_code.py +441 -0
- sage/coding/grs_code.py +2371 -0
- sage/coding/guava.py +107 -0
- sage/coding/guruswami_sudan/all.py +1 -0
- sage/coding/guruswami_sudan/gs_decoder.py +897 -0
- sage/coding/guruswami_sudan/interpolation.py +409 -0
- sage/coding/guruswami_sudan/utils.py +176 -0
- sage/coding/hamming_code.py +176 -0
- sage/coding/information_set_decoder.py +1032 -0
- sage/coding/kasami_codes.cpython-314-aarch64-linux-musl.so +0 -0
- sage/coding/kasami_codes.pyx +351 -0
- sage/coding/linear_code.py +3067 -0
- sage/coding/linear_code_no_metric.py +1354 -0
- sage/coding/linear_rank_metric.py +961 -0
- sage/coding/parity_check_code.py +353 -0
- sage/coding/punctured_code.py +719 -0
- sage/coding/reed_muller_code.py +999 -0
- sage/coding/self_dual_codes.py +942 -0
- sage/coding/source_coding/all.py +2 -0
- sage/coding/source_coding/huffman.py +553 -0
- sage/coding/subfield_subcode.py +423 -0
- sage/coding/two_weight_db.py +399 -0
- sage/combinat/all__sagemath_modules.py +7 -0
- sage/combinat/cartesian_product.py +347 -0
- sage/combinat/family.py +11 -0
- sage/combinat/free_module.py +1977 -0
- sage/combinat/root_system/all.py +147 -0
- sage/combinat/root_system/ambient_space.py +527 -0
- sage/combinat/root_system/associahedron.py +471 -0
- sage/combinat/root_system/braid_move_calculator.py +143 -0
- sage/combinat/root_system/braid_orbit.cpython-314-aarch64-linux-musl.so +0 -0
- sage/combinat/root_system/braid_orbit.pyx +144 -0
- sage/combinat/root_system/branching_rules.py +2301 -0
- sage/combinat/root_system/cartan_matrix.py +1245 -0
- sage/combinat/root_system/cartan_type.py +3069 -0
- sage/combinat/root_system/coxeter_group.py +162 -0
- sage/combinat/root_system/coxeter_matrix.py +1261 -0
- sage/combinat/root_system/coxeter_type.py +681 -0
- sage/combinat/root_system/dynkin_diagram.py +900 -0
- sage/combinat/root_system/extended_affine_weyl_group.py +2993 -0
- sage/combinat/root_system/fundamental_group.py +795 -0
- sage/combinat/root_system/hecke_algebra_representation.py +1203 -0
- sage/combinat/root_system/integrable_representations.py +1227 -0
- sage/combinat/root_system/non_symmetric_macdonald_polynomials.py +1965 -0
- sage/combinat/root_system/pieri_factors.py +1147 -0
- sage/combinat/root_system/plot.py +1615 -0
- sage/combinat/root_system/root_lattice_realization_algebras.py +1214 -0
- sage/combinat/root_system/root_lattice_realizations.py +4628 -0
- sage/combinat/root_system/root_space.py +487 -0
- sage/combinat/root_system/root_system.py +882 -0
- sage/combinat/root_system/type_A.py +348 -0
- sage/combinat/root_system/type_A_affine.py +227 -0
- sage/combinat/root_system/type_A_infinity.py +241 -0
- sage/combinat/root_system/type_B.py +347 -0
- sage/combinat/root_system/type_BC_affine.py +287 -0
- sage/combinat/root_system/type_B_affine.py +216 -0
- sage/combinat/root_system/type_C.py +317 -0
- sage/combinat/root_system/type_C_affine.py +188 -0
- sage/combinat/root_system/type_D.py +357 -0
- sage/combinat/root_system/type_D_affine.py +208 -0
- sage/combinat/root_system/type_E.py +641 -0
- sage/combinat/root_system/type_E_affine.py +231 -0
- sage/combinat/root_system/type_F.py +387 -0
- sage/combinat/root_system/type_F_affine.py +137 -0
- sage/combinat/root_system/type_G.py +293 -0
- sage/combinat/root_system/type_G_affine.py +132 -0
- sage/combinat/root_system/type_H.py +105 -0
- sage/combinat/root_system/type_I.py +110 -0
- sage/combinat/root_system/type_Q.py +150 -0
- sage/combinat/root_system/type_affine.py +509 -0
- sage/combinat/root_system/type_dual.py +704 -0
- sage/combinat/root_system/type_folded.py +301 -0
- sage/combinat/root_system/type_marked.py +748 -0
- sage/combinat/root_system/type_reducible.py +601 -0
- sage/combinat/root_system/type_relabel.py +730 -0
- sage/combinat/root_system/type_super_A.py +837 -0
- sage/combinat/root_system/weight_lattice_realizations.py +1188 -0
- sage/combinat/root_system/weight_space.py +639 -0
- sage/combinat/root_system/weyl_characters.py +2238 -0
- sage/crypto/__init__.py +4 -0
- sage/crypto/all.py +28 -0
- sage/crypto/block_cipher/all.py +7 -0
- sage/crypto/block_cipher/des.py +1065 -0
- sage/crypto/block_cipher/miniaes.py +2171 -0
- sage/crypto/block_cipher/present.py +909 -0
- sage/crypto/block_cipher/sdes.py +1527 -0
- sage/crypto/boolean_function.cpython-314-aarch64-linux-musl.so +0 -0
- sage/crypto/boolean_function.pxd +10 -0
- sage/crypto/boolean_function.pyx +1487 -0
- sage/crypto/cipher.py +78 -0
- sage/crypto/classical.py +3668 -0
- sage/crypto/classical_cipher.py +569 -0
- sage/crypto/cryptosystem.py +387 -0
- sage/crypto/key_exchange/all.py +7 -0
- sage/crypto/key_exchange/catalog.py +24 -0
- sage/crypto/key_exchange/diffie_hellman.py +323 -0
- sage/crypto/key_exchange/key_exchange_scheme.py +107 -0
- sage/crypto/lattice.py +312 -0
- sage/crypto/lfsr.py +295 -0
- sage/crypto/lwe.py +840 -0
- sage/crypto/mq/__init__.py +4 -0
- sage/crypto/mq/mpolynomialsystemgenerator.py +204 -0
- sage/crypto/mq/rijndael_gf.py +2345 -0
- sage/crypto/mq/sbox.py +7 -0
- sage/crypto/mq/sr.py +3344 -0
- sage/crypto/public_key/all.py +5 -0
- sage/crypto/public_key/blum_goldwasser.py +776 -0
- sage/crypto/sbox.cpython-314-aarch64-linux-musl.so +0 -0
- sage/crypto/sbox.pyx +2090 -0
- sage/crypto/sboxes.py +2090 -0
- sage/crypto/stream.py +390 -0
- sage/crypto/stream_cipher.py +297 -0
- sage/crypto/util.py +519 -0
- sage/ext/all__sagemath_modules.py +1 -0
- sage/ext/interpreters/__init__.py +1 -0
- sage/ext/interpreters/all__sagemath_modules.py +2 -0
- sage/ext/interpreters/wrapper_cc.cpython-314-aarch64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_cc.pxd +30 -0
- sage/ext/interpreters/wrapper_cc.pyx +252 -0
- sage/ext/interpreters/wrapper_cdf.cpython-314-aarch64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_cdf.pxd +26 -0
- sage/ext/interpreters/wrapper_cdf.pyx +245 -0
- sage/ext/interpreters/wrapper_rdf.cpython-314-aarch64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_rdf.pxd +23 -0
- sage/ext/interpreters/wrapper_rdf.pyx +221 -0
- sage/ext/interpreters/wrapper_rr.cpython-314-aarch64-linux-musl.so +0 -0
- sage/ext/interpreters/wrapper_rr.pxd +28 -0
- sage/ext/interpreters/wrapper_rr.pyx +335 -0
- sage/geometry/all__sagemath_modules.py +5 -0
- sage/geometry/toric_lattice.py +1745 -0
- sage/geometry/toric_lattice_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/geometry/toric_lattice_element.pyx +432 -0
- sage/groups/abelian_gps/abelian_group.py +1925 -0
- sage/groups/abelian_gps/abelian_group_element.py +164 -0
- sage/groups/abelian_gps/all__sagemath_modules.py +5 -0
- sage/groups/abelian_gps/dual_abelian_group.py +421 -0
- sage/groups/abelian_gps/dual_abelian_group_element.py +179 -0
- sage/groups/abelian_gps/element_base.py +341 -0
- sage/groups/abelian_gps/values.py +488 -0
- sage/groups/additive_abelian/additive_abelian_group.py +476 -0
- sage/groups/additive_abelian/additive_abelian_wrapper.py +857 -0
- sage/groups/additive_abelian/all.py +4 -0
- sage/groups/additive_abelian/qmodnz.py +231 -0
- sage/groups/additive_abelian/qmodnz_element.py +349 -0
- sage/groups/affine_gps/affine_group.py +535 -0
- sage/groups/affine_gps/all.py +1 -0
- sage/groups/affine_gps/catalog.py +17 -0
- sage/groups/affine_gps/euclidean_group.py +246 -0
- sage/groups/affine_gps/group_element.py +562 -0
- sage/groups/all__sagemath_modules.py +12 -0
- sage/groups/galois_group.py +479 -0
- sage/groups/matrix_gps/all.py +4 -0
- sage/groups/matrix_gps/all__sagemath_modules.py +13 -0
- sage/groups/matrix_gps/catalog.py +26 -0
- sage/groups/matrix_gps/coxeter_group.py +927 -0
- sage/groups/matrix_gps/finitely_generated.py +487 -0
- sage/groups/matrix_gps/group_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/groups/matrix_gps/group_element.pxd +11 -0
- sage/groups/matrix_gps/group_element.pyx +431 -0
- sage/groups/matrix_gps/linear.py +440 -0
- sage/groups/matrix_gps/matrix_group.py +617 -0
- sage/groups/matrix_gps/named_group.py +296 -0
- sage/groups/matrix_gps/orthogonal.py +544 -0
- sage/groups/matrix_gps/symplectic.py +251 -0
- sage/groups/matrix_gps/unitary.py +436 -0
- sage/groups/misc_gps/all__sagemath_modules.py +1 -0
- sage/groups/misc_gps/argument_groups.py +1905 -0
- sage/groups/misc_gps/imaginary_groups.py +479 -0
- sage/groups/perm_gps/all__sagemath_modules.py +1 -0
- sage/groups/perm_gps/partn_ref/all__sagemath_modules.py +1 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.cpython-314-aarch64-linux-musl.so +0 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.pxd +41 -0
- sage/groups/perm_gps/partn_ref/refinement_binary.pyx +1167 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.cpython-314-aarch64-linux-musl.so +0 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.pxd +31 -0
- sage/groups/perm_gps/partn_ref/refinement_matrices.pyx +385 -0
- sage/homology/algebraic_topological_model.py +595 -0
- sage/homology/all.py +2 -0
- sage/homology/all__sagemath_modules.py +8 -0
- sage/homology/chain_complex.py +2148 -0
- sage/homology/chain_complex_homspace.py +165 -0
- sage/homology/chain_complex_morphism.py +629 -0
- sage/homology/chain_homotopy.py +604 -0
- sage/homology/chains.py +653 -0
- sage/homology/free_resolution.py +923 -0
- sage/homology/graded_resolution.py +567 -0
- sage/homology/hochschild_complex.py +756 -0
- sage/homology/homology_group.py +188 -0
- sage/homology/homology_morphism.py +422 -0
- sage/homology/homology_vector_space_with_basis.py +1454 -0
- sage/homology/koszul_complex.py +169 -0
- sage/homology/matrix_utils.py +205 -0
- sage/libs/all__sagemath_modules.py +1 -0
- sage/libs/gsl/__init__.py +1 -0
- sage/libs/gsl/airy.pxd +56 -0
- sage/libs/gsl/all.pxd +66 -0
- sage/libs/gsl/array.cpython-314-aarch64-linux-musl.so +0 -0
- sage/libs/gsl/array.pxd +5 -0
- sage/libs/gsl/array.pyx +102 -0
- sage/libs/gsl/bessel.pxd +208 -0
- sage/libs/gsl/blas.pxd +116 -0
- sage/libs/gsl/blas_types.pxd +34 -0
- sage/libs/gsl/block.pxd +52 -0
- sage/libs/gsl/chebyshev.pxd +37 -0
- sage/libs/gsl/clausen.pxd +12 -0
- sage/libs/gsl/combination.pxd +47 -0
- sage/libs/gsl/complex.pxd +151 -0
- sage/libs/gsl/coulomb.pxd +30 -0
- sage/libs/gsl/coupling.pxd +21 -0
- sage/libs/gsl/dawson.pxd +12 -0
- sage/libs/gsl/debye.pxd +24 -0
- sage/libs/gsl/dilog.pxd +14 -0
- sage/libs/gsl/eigen.pxd +46 -0
- sage/libs/gsl/elementary.pxd +12 -0
- sage/libs/gsl/ellint.pxd +48 -0
- sage/libs/gsl/elljac.pxd +8 -0
- sage/libs/gsl/erf.pxd +32 -0
- sage/libs/gsl/errno.pxd +26 -0
- sage/libs/gsl/exp.pxd +44 -0
- sage/libs/gsl/expint.pxd +44 -0
- sage/libs/gsl/fermi_dirac.pxd +44 -0
- sage/libs/gsl/fft.pxd +121 -0
- sage/libs/gsl/fit.pxd +50 -0
- sage/libs/gsl/gamma.pxd +94 -0
- sage/libs/gsl/gegenbauer.pxd +26 -0
- sage/libs/gsl/histogram.pxd +176 -0
- sage/libs/gsl/hyperg.pxd +52 -0
- sage/libs/gsl/integration.pxd +69 -0
- sage/libs/gsl/interp.pxd +109 -0
- sage/libs/gsl/laguerre.pxd +24 -0
- sage/libs/gsl/lambert.pxd +16 -0
- sage/libs/gsl/legendre.pxd +90 -0
- sage/libs/gsl/linalg.pxd +185 -0
- sage/libs/gsl/log.pxd +26 -0
- sage/libs/gsl/math.pxd +43 -0
- sage/libs/gsl/matrix.pxd +143 -0
- sage/libs/gsl/matrix_complex.pxd +130 -0
- sage/libs/gsl/min.pxd +67 -0
- sage/libs/gsl/monte.pxd +56 -0
- sage/libs/gsl/ntuple.pxd +32 -0
- sage/libs/gsl/odeiv.pxd +70 -0
- sage/libs/gsl/permutation.pxd +78 -0
- sage/libs/gsl/poly.pxd +40 -0
- sage/libs/gsl/pow_int.pxd +12 -0
- sage/libs/gsl/psi.pxd +28 -0
- sage/libs/gsl/qrng.pxd +29 -0
- sage/libs/gsl/random.pxd +257 -0
- sage/libs/gsl/rng.pxd +100 -0
- sage/libs/gsl/roots.pxd +72 -0
- sage/libs/gsl/sort.pxd +36 -0
- sage/libs/gsl/statistics.pxd +59 -0
- sage/libs/gsl/sum.pxd +55 -0
- sage/libs/gsl/synchrotron.pxd +16 -0
- sage/libs/gsl/transport.pxd +24 -0
- sage/libs/gsl/trig.pxd +58 -0
- sage/libs/gsl/types.pxd +137 -0
- sage/libs/gsl/vector.pxd +101 -0
- sage/libs/gsl/vector_complex.pxd +83 -0
- sage/libs/gsl/wavelet.pxd +49 -0
- sage/libs/gsl/zeta.pxd +28 -0
- sage/libs/mpc/__init__.pxd +114 -0
- sage/libs/mpc/types.pxd +28 -0
- sage/libs/mpfr/__init__.pxd +299 -0
- sage/libs/mpfr/types.pxd +26 -0
- sage/libs/mpmath/__init__.py +1 -0
- sage/libs/mpmath/all.py +27 -0
- sage/libs/mpmath/all__sagemath_modules.py +1 -0
- sage/libs/mpmath/utils.cpython-314-aarch64-linux-musl.so +0 -0
- sage/libs/mpmath/utils.pxd +4 -0
- sage/libs/mpmath/utils.pyx +319 -0
- sage/matrix/action.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/action.pxd +26 -0
- sage/matrix/action.pyx +596 -0
- sage/matrix/all.py +9 -0
- sage/matrix/args.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/args.pxd +144 -0
- sage/matrix/args.pyx +1668 -0
- sage/matrix/benchmark.py +1258 -0
- sage/matrix/berlekamp_massey.py +95 -0
- sage/matrix/compute_J_ideal.py +926 -0
- sage/matrix/constructor.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/constructor.pyx +750 -0
- sage/matrix/docs.py +430 -0
- sage/matrix/echelon_matrix.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/echelon_matrix.pyx +155 -0
- sage/matrix/matrix.pxd +2 -0
- sage/matrix/matrix0.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix0.pxd +68 -0
- sage/matrix/matrix0.pyx +6324 -0
- sage/matrix/matrix1.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix1.pxd +8 -0
- sage/matrix/matrix1.pyx +2851 -0
- sage/matrix/matrix2.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix2.pxd +25 -0
- sage/matrix/matrix2.pyx +20181 -0
- sage/matrix/matrix_cdv.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_cdv.pxd +4 -0
- sage/matrix/matrix_cdv.pyx +93 -0
- sage/matrix/matrix_complex_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_complex_double_dense.pxd +5 -0
- sage/matrix/matrix_complex_double_dense.pyx +98 -0
- sage/matrix/matrix_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_dense.pxd +5 -0
- sage/matrix/matrix_dense.pyx +343 -0
- sage/matrix/matrix_domain_dense.pxd +5 -0
- sage/matrix/matrix_domain_sparse.pxd +5 -0
- sage/matrix/matrix_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_double_dense.pxd +7 -0
- sage/matrix/matrix_double_dense.pyx +3906 -0
- sage/matrix/matrix_double_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_double_sparse.pxd +6 -0
- sage/matrix/matrix_double_sparse.pyx +248 -0
- sage/matrix/matrix_generic_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_generic_dense.pxd +7 -0
- sage/matrix/matrix_generic_dense.pyx +354 -0
- sage/matrix/matrix_generic_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_generic_sparse.pxd +7 -0
- sage/matrix/matrix_generic_sparse.pyx +461 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.pxd +5 -0
- sage/matrix/matrix_laurent_mpolynomial_dense.pyx +115 -0
- sage/matrix/matrix_misc.py +313 -0
- sage/matrix/matrix_numpy_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_numpy_dense.pxd +14 -0
- sage/matrix/matrix_numpy_dense.pyx +450 -0
- sage/matrix/matrix_numpy_integer_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_numpy_integer_dense.pxd +7 -0
- sage/matrix/matrix_numpy_integer_dense.pyx +59 -0
- sage/matrix/matrix_polynomial_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_polynomial_dense.pxd +5 -0
- sage/matrix/matrix_polynomial_dense.pyx +5341 -0
- sage/matrix/matrix_real_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_real_double_dense.pxd +7 -0
- sage/matrix/matrix_real_double_dense.pyx +122 -0
- sage/matrix/matrix_space.py +2848 -0
- sage/matrix/matrix_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_sparse.pxd +5 -0
- sage/matrix/matrix_sparse.pyx +1222 -0
- sage/matrix/matrix_window.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/matrix_window.pxd +37 -0
- sage/matrix/matrix_window.pyx +242 -0
- sage/matrix/misc_mpfr.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/misc_mpfr.pyx +80 -0
- sage/matrix/operation_table.py +1182 -0
- sage/matrix/special.py +3666 -0
- sage/matrix/strassen.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matrix/strassen.pyx +851 -0
- sage/matrix/symplectic_basis.py +541 -0
- sage/matrix/template.pxd +6 -0
- sage/matrix/tests.py +71 -0
- sage/matroids/advanced.py +77 -0
- sage/matroids/all.py +13 -0
- sage/matroids/basis_exchange_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/basis_exchange_matroid.pxd +96 -0
- sage/matroids/basis_exchange_matroid.pyx +2344 -0
- sage/matroids/basis_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/basis_matroid.pxd +45 -0
- sage/matroids/basis_matroid.pyx +1217 -0
- sage/matroids/catalog.py +44 -0
- sage/matroids/chow_ring.py +473 -0
- sage/matroids/chow_ring_ideal.py +849 -0
- sage/matroids/circuit_closures_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/circuit_closures_matroid.pxd +16 -0
- sage/matroids/circuit_closures_matroid.pyx +559 -0
- sage/matroids/circuits_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/circuits_matroid.pxd +38 -0
- sage/matroids/circuits_matroid.pyx +947 -0
- sage/matroids/constructor.py +1086 -0
- sage/matroids/database_collections.py +365 -0
- sage/matroids/database_matroids.py +5338 -0
- sage/matroids/dual_matroid.py +583 -0
- sage/matroids/extension.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/extension.pxd +34 -0
- sage/matroids/extension.pyx +519 -0
- sage/matroids/flats_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/flats_matroid.pxd +28 -0
- sage/matroids/flats_matroid.pyx +715 -0
- sage/matroids/gammoid.py +600 -0
- sage/matroids/graphic_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/graphic_matroid.pxd +39 -0
- sage/matroids/graphic_matroid.pyx +2024 -0
- sage/matroids/lean_matrix.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/lean_matrix.pxd +126 -0
- sage/matroids/lean_matrix.pyx +3667 -0
- sage/matroids/linear_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/linear_matroid.pxd +180 -0
- sage/matroids/linear_matroid.pyx +6649 -0
- sage/matroids/matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/matroid.pxd +243 -0
- sage/matroids/matroid.pyx +8759 -0
- sage/matroids/matroids_catalog.py +190 -0
- sage/matroids/matroids_plot_helpers.py +890 -0
- sage/matroids/minor_matroid.py +480 -0
- sage/matroids/minorfix.h +9 -0
- sage/matroids/named_matroids.py +5 -0
- sage/matroids/rank_matroid.py +268 -0
- sage/matroids/set_system.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/set_system.pxd +38 -0
- sage/matroids/set_system.pyx +800 -0
- sage/matroids/transversal_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/transversal_matroid.pxd +14 -0
- sage/matroids/transversal_matroid.pyx +893 -0
- sage/matroids/union_matroid.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/union_matroid.pxd +20 -0
- sage/matroids/union_matroid.pyx +331 -0
- sage/matroids/unpickling.cpython-314-aarch64-linux-musl.so +0 -0
- sage/matroids/unpickling.pyx +843 -0
- sage/matroids/utilities.py +809 -0
- sage/misc/all__sagemath_modules.py +20 -0
- sage/misc/c3.cpython-314-aarch64-linux-musl.so +0 -0
- sage/misc/c3.pyx +238 -0
- sage/misc/compat.py +87 -0
- sage/misc/element_with_label.py +173 -0
- sage/misc/func_persist.py +79 -0
- sage/misc/pickle_old.cpython-314-aarch64-linux-musl.so +0 -0
- sage/misc/pickle_old.pyx +19 -0
- sage/misc/proof.py +7 -0
- sage/misc/replace_dot_all.py +472 -0
- sage/misc/sagedoc_conf.py +168 -0
- sage/misc/sphinxify.py +167 -0
- sage/misc/test_class_pickling.py +85 -0
- sage/modules/all.py +42 -0
- sage/modules/complex_double_vector.py +25 -0
- sage/modules/diamond_cutting.py +380 -0
- sage/modules/fg_pid/all.py +1 -0
- sage/modules/fg_pid/fgp_element.py +456 -0
- sage/modules/fg_pid/fgp_module.py +2091 -0
- sage/modules/fg_pid/fgp_morphism.py +550 -0
- sage/modules/filtered_vector_space.py +1271 -0
- sage/modules/finite_submodule_iter.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/finite_submodule_iter.pxd +27 -0
- sage/modules/finite_submodule_iter.pyx +452 -0
- sage/modules/fp_graded/all.py +1 -0
- sage/modules/fp_graded/element.py +346 -0
- sage/modules/fp_graded/free_element.py +298 -0
- sage/modules/fp_graded/free_homspace.py +53 -0
- sage/modules/fp_graded/free_module.py +1060 -0
- sage/modules/fp_graded/free_morphism.py +217 -0
- sage/modules/fp_graded/homspace.py +563 -0
- sage/modules/fp_graded/module.py +1340 -0
- sage/modules/fp_graded/morphism.py +1990 -0
- sage/modules/fp_graded/steenrod/all.py +1 -0
- sage/modules/fp_graded/steenrod/homspace.py +65 -0
- sage/modules/fp_graded/steenrod/module.py +477 -0
- sage/modules/fp_graded/steenrod/morphism.py +404 -0
- sage/modules/fp_graded/steenrod/profile.py +241 -0
- sage/modules/free_module.py +8447 -0
- sage/modules/free_module_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/free_module_element.pxd +22 -0
- sage/modules/free_module_element.pyx +5445 -0
- sage/modules/free_module_homspace.py +369 -0
- sage/modules/free_module_integer.py +896 -0
- sage/modules/free_module_morphism.py +823 -0
- sage/modules/free_module_pseudohomspace.py +352 -0
- sage/modules/free_module_pseudomorphism.py +578 -0
- sage/modules/free_quadratic_module.py +1706 -0
- sage/modules/free_quadratic_module_integer_symmetric.py +1790 -0
- sage/modules/matrix_morphism.py +1745 -0
- sage/modules/misc.py +103 -0
- sage/modules/module_functors.py +192 -0
- sage/modules/multi_filtered_vector_space.py +719 -0
- sage/modules/ore_module.py +2208 -0
- sage/modules/ore_module_element.py +178 -0
- sage/modules/ore_module_homspace.py +147 -0
- sage/modules/ore_module_morphism.py +968 -0
- sage/modules/quotient_module.py +699 -0
- sage/modules/real_double_vector.py +22 -0
- sage/modules/submodule.py +255 -0
- sage/modules/tensor_operations.py +567 -0
- sage/modules/torsion_quadratic_module.py +1352 -0
- sage/modules/tutorial_free_modules.py +248 -0
- sage/modules/vector_complex_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_complex_double_dense.pxd +6 -0
- sage/modules/vector_complex_double_dense.pyx +117 -0
- sage/modules/vector_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_double_dense.pxd +6 -0
- sage/modules/vector_double_dense.pyx +604 -0
- sage/modules/vector_integer_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_integer_dense.pxd +15 -0
- sage/modules/vector_integer_dense.pyx +361 -0
- sage/modules/vector_integer_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_integer_sparse.pxd +29 -0
- sage/modules/vector_integer_sparse.pyx +406 -0
- sage/modules/vector_modn_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_modn_dense.pxd +12 -0
- sage/modules/vector_modn_dense.pyx +394 -0
- sage/modules/vector_modn_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_modn_sparse.pxd +21 -0
- sage/modules/vector_modn_sparse.pyx +298 -0
- sage/modules/vector_numpy_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_numpy_dense.pxd +15 -0
- sage/modules/vector_numpy_dense.pyx +304 -0
- sage/modules/vector_numpy_integer_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_numpy_integer_dense.pxd +7 -0
- sage/modules/vector_numpy_integer_dense.pyx +54 -0
- sage/modules/vector_rational_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_rational_dense.pxd +15 -0
- sage/modules/vector_rational_dense.pyx +387 -0
- sage/modules/vector_rational_sparse.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_rational_sparse.pxd +30 -0
- sage/modules/vector_rational_sparse.pyx +413 -0
- sage/modules/vector_real_double_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/vector_real_double_dense.pxd +6 -0
- sage/modules/vector_real_double_dense.pyx +126 -0
- sage/modules/vector_space_homspace.py +430 -0
- sage/modules/vector_space_morphism.py +989 -0
- sage/modules/with_basis/all.py +15 -0
- sage/modules/with_basis/cell_module.py +494 -0
- sage/modules/with_basis/indexed_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/modules/with_basis/indexed_element.pxd +13 -0
- sage/modules/with_basis/indexed_element.pyx +1058 -0
- sage/modules/with_basis/invariant.py +1075 -0
- sage/modules/with_basis/morphism.py +1636 -0
- sage/modules/with_basis/representation.py +2939 -0
- sage/modules/with_basis/subquotient.py +685 -0
- sage/numerical/all__sagemath_modules.py +6 -0
- sage/numerical/gauss_legendre.cpython-314-aarch64-linux-musl.so +0 -0
- sage/numerical/gauss_legendre.pyx +381 -0
- sage/numerical/optimize.py +910 -0
- sage/probability/all.py +10 -0
- sage/probability/probability_distribution.cpython-314-aarch64-linux-musl.so +0 -0
- sage/probability/probability_distribution.pyx +1242 -0
- sage/probability/random_variable.py +411 -0
- sage/quadratic_forms/all.py +4 -0
- sage/quadratic_forms/all__sagemath_modules.py +15 -0
- sage/quadratic_forms/binary_qf.py +2042 -0
- sage/quadratic_forms/bqf_class_group.py +748 -0
- sage/quadratic_forms/constructions.py +93 -0
- sage/quadratic_forms/count_local_2.cpython-314-aarch64-linux-musl.so +0 -0
- sage/quadratic_forms/count_local_2.pyx +365 -0
- sage/quadratic_forms/extras.py +195 -0
- sage/quadratic_forms/quadratic_form.py +1753 -0
- sage/quadratic_forms/quadratic_form__count_local_2.py +221 -0
- sage/quadratic_forms/quadratic_form__equivalence_testing.py +708 -0
- sage/quadratic_forms/quadratic_form__evaluate.cpython-314-aarch64-linux-musl.so +0 -0
- sage/quadratic_forms/quadratic_form__evaluate.pyx +139 -0
- sage/quadratic_forms/quadratic_form__local_density_congruence.py +977 -0
- sage/quadratic_forms/quadratic_form__local_field_invariants.py +1072 -0
- sage/quadratic_forms/quadratic_form__neighbors.py +424 -0
- sage/quadratic_forms/quadratic_form__reduction_theory.py +488 -0
- sage/quadratic_forms/quadratic_form__split_local_covering.py +416 -0
- sage/quadratic_forms/quadratic_form__ternary_Tornaria.py +657 -0
- sage/quadratic_forms/quadratic_form__theta.py +352 -0
- sage/quadratic_forms/quadratic_form__variable_substitutions.py +370 -0
- sage/quadratic_forms/random_quadraticform.py +209 -0
- sage/quadratic_forms/ternary.cpython-314-aarch64-linux-musl.so +0 -0
- sage/quadratic_forms/ternary.pyx +1154 -0
- sage/quadratic_forms/ternary_qf.py +2027 -0
- sage/rings/all__sagemath_modules.py +28 -0
- sage/rings/asymptotic/all__sagemath_modules.py +1 -0
- sage/rings/asymptotic/misc.py +1252 -0
- sage/rings/cc.py +4 -0
- sage/rings/cfinite_sequence.py +1306 -0
- sage/rings/complex_conversion.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/complex_conversion.pxd +8 -0
- sage/rings/complex_conversion.pyx +23 -0
- sage/rings/complex_double.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/complex_double.pxd +21 -0
- sage/rings/complex_double.pyx +2654 -0
- sage/rings/complex_mpc.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/complex_mpc.pxd +21 -0
- sage/rings/complex_mpc.pyx +2576 -0
- sage/rings/complex_mpfr.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/complex_mpfr.pxd +18 -0
- sage/rings/complex_mpfr.pyx +3602 -0
- sage/rings/derivation.py +2334 -0
- sage/rings/finite_rings/all__sagemath_modules.py +1 -0
- sage/rings/finite_rings/maps_finite_field.py +191 -0
- sage/rings/function_field/all__sagemath_modules.py +8 -0
- sage/rings/function_field/derivations.py +102 -0
- sage/rings/function_field/derivations_rational.py +132 -0
- sage/rings/function_field/differential.py +853 -0
- sage/rings/function_field/divisor.py +1107 -0
- sage/rings/function_field/drinfeld_modules/action.py +199 -0
- sage/rings/function_field/drinfeld_modules/all.py +1 -0
- sage/rings/function_field/drinfeld_modules/charzero_drinfeld_module.py +673 -0
- sage/rings/function_field/drinfeld_modules/drinfeld_module.py +2087 -0
- sage/rings/function_field/drinfeld_modules/finite_drinfeld_module.py +1131 -0
- sage/rings/function_field/drinfeld_modules/homset.py +420 -0
- sage/rings/function_field/drinfeld_modules/morphism.py +820 -0
- sage/rings/function_field/hermite_form_polynomial.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/function_field/hermite_form_polynomial.pyx +188 -0
- sage/rings/function_field/khuri_makdisi.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/function_field/khuri_makdisi.pyx +935 -0
- sage/rings/invariants/all.py +4 -0
- sage/rings/invariants/invariant_theory.py +4597 -0
- sage/rings/invariants/reconstruction.py +395 -0
- sage/rings/polynomial/all__sagemath_modules.py +17 -0
- sage/rings/polynomial/integer_valued_polynomials.py +1230 -0
- sage/rings/polynomial/laurent_polynomial_mpair.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/laurent_polynomial_mpair.pxd +15 -0
- sage/rings/polynomial/laurent_polynomial_mpair.pyx +2023 -0
- sage/rings/polynomial/ore_function_element.py +952 -0
- sage/rings/polynomial/ore_function_field.py +1028 -0
- sage/rings/polynomial/ore_polynomial_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/ore_polynomial_element.pxd +48 -0
- sage/rings/polynomial/ore_polynomial_element.pyx +3145 -0
- sage/rings/polynomial/ore_polynomial_ring.py +1334 -0
- sage/rings/polynomial/polynomial_real_mpfr_dense.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/polynomial_real_mpfr_dense.pyx +788 -0
- sage/rings/polynomial/q_integer_valued_polynomials.py +1264 -0
- sage/rings/polynomial/skew_polynomial_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_element.pxd +9 -0
- sage/rings/polynomial/skew_polynomial_element.pyx +684 -0
- sage/rings/polynomial/skew_polynomial_finite_field.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_finite_field.pxd +19 -0
- sage/rings/polynomial/skew_polynomial_finite_field.pyx +1093 -0
- sage/rings/polynomial/skew_polynomial_finite_order.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/polynomial/skew_polynomial_finite_order.pxd +10 -0
- sage/rings/polynomial/skew_polynomial_finite_order.pyx +567 -0
- sage/rings/polynomial/skew_polynomial_ring.py +908 -0
- sage/rings/real_double_element_gsl.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/real_double_element_gsl.pxd +8 -0
- sage/rings/real_double_element_gsl.pyx +794 -0
- sage/rings/real_field.py +58 -0
- sage/rings/real_mpfr.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/real_mpfr.pxd +29 -0
- sage/rings/real_mpfr.pyx +6122 -0
- sage/rings/ring_extension.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/ring_extension.pxd +42 -0
- sage/rings/ring_extension.pyx +2779 -0
- sage/rings/ring_extension_conversion.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/ring_extension_conversion.pxd +16 -0
- sage/rings/ring_extension_conversion.pyx +462 -0
- sage/rings/ring_extension_element.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/ring_extension_element.pxd +21 -0
- sage/rings/ring_extension_element.pyx +1635 -0
- sage/rings/ring_extension_homset.py +64 -0
- sage/rings/ring_extension_morphism.cpython-314-aarch64-linux-musl.so +0 -0
- sage/rings/ring_extension_morphism.pxd +35 -0
- sage/rings/ring_extension_morphism.pyx +920 -0
- sage/schemes/all__sagemath_modules.py +1 -0
- sage/schemes/projective/all__sagemath_modules.py +1 -0
- sage/schemes/projective/coherent_sheaf.py +300 -0
- sage/schemes/projective/cohomology.py +510 -0
- sage/stats/all.py +15 -0
- sage/stats/basic_stats.py +489 -0
- sage/stats/distributions/all.py +7 -0
- sage/stats/distributions/catalog.py +34 -0
- sage/stats/distributions/dgs.h +50 -0
- sage/stats/distributions/dgs.pxd +111 -0
- sage/stats/distributions/dgs_bern.h +400 -0
- sage/stats/distributions/dgs_gauss.h +614 -0
- sage/stats/distributions/dgs_misc.h +104 -0
- sage/stats/distributions/discrete_gaussian_integer.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/distributions/discrete_gaussian_integer.pxd +14 -0
- sage/stats/distributions/discrete_gaussian_integer.pyx +498 -0
- sage/stats/distributions/discrete_gaussian_lattice.py +908 -0
- sage/stats/distributions/discrete_gaussian_polynomial.py +141 -0
- sage/stats/hmm/all.py +15 -0
- sage/stats/hmm/chmm.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/hmm/chmm.pyx +1595 -0
- sage/stats/hmm/distributions.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/hmm/distributions.pxd +29 -0
- sage/stats/hmm/distributions.pyx +531 -0
- sage/stats/hmm/hmm.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/hmm/hmm.pxd +17 -0
- sage/stats/hmm/hmm.pyx +1388 -0
- sage/stats/hmm/util.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/hmm/util.pxd +7 -0
- sage/stats/hmm/util.pyx +165 -0
- sage/stats/intlist.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/intlist.pxd +14 -0
- sage/stats/intlist.pyx +588 -0
- sage/stats/r.py +49 -0
- sage/stats/time_series.cpython-314-aarch64-linux-musl.so +0 -0
- sage/stats/time_series.pxd +6 -0
- sage/stats/time_series.pyx +2546 -0
- sage/tensor/all.py +2 -0
- sage/tensor/modules/all.py +8 -0
- sage/tensor/modules/alternating_contr_tensor.py +761 -0
- sage/tensor/modules/comp.py +5598 -0
- sage/tensor/modules/ext_pow_free_module.py +824 -0
- sage/tensor/modules/finite_rank_free_module.py +3589 -0
- sage/tensor/modules/format_utilities.py +333 -0
- sage/tensor/modules/free_module_alt_form.py +858 -0
- sage/tensor/modules/free_module_automorphism.py +1207 -0
- sage/tensor/modules/free_module_basis.py +1074 -0
- sage/tensor/modules/free_module_element.py +284 -0
- sage/tensor/modules/free_module_homset.py +652 -0
- sage/tensor/modules/free_module_linear_group.py +564 -0
- sage/tensor/modules/free_module_morphism.py +1581 -0
- sage/tensor/modules/free_module_tensor.py +3289 -0
- sage/tensor/modules/reflexive_module.py +386 -0
- sage/tensor/modules/tensor_free_module.py +780 -0
- sage/tensor/modules/tensor_free_submodule.py +538 -0
- sage/tensor/modules/tensor_free_submodule_basis.py +140 -0
- sage/tensor/modules/tensor_with_indices.py +1043 -0
sage/stats/hmm/chmm.pyx
ADDED
@@ -0,0 +1,1595 @@
# sage_setup: distribution = sagemath-modules
# sage.doctest: needs numpy
r"""
Continuous Emission Hidden Markov Models

AUTHOR:

- William Stein, 2010-03
"""

# ***************************************************************************
#       Copyright (C) 2010 William Stein <wstein@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#                  https://www.gnu.org/licenses/
# ***************************************************************************

from cpython.object cimport PyObject_RichCompare
from libc.math cimport log, sqrt, exp, isnormal, isfinite, M_PI
cdef double sqrt2pi = sqrt(2*M_PI)
from cysignals.signals cimport sig_on, sig_off

from sage.misc.flatten import flatten
from sage.structure.element import Matrix

from sage.stats.time_series cimport TimeSeries
from sage.stats.intlist cimport IntList

from sage.stats.hmm.hmm cimport HiddenMarkovModel
from sage.stats.hmm.util cimport HMM_Util
from sage.stats.hmm.distributions cimport GaussianMixtureDistribution

cdef HMM_Util util = HMM_Util()

from sage.misc.randstate cimport current_randstate, randstate


# TODO: DELETE THIS FUNCTION WHEN MOVE Gaussian stuff to distributions.pyx!!! (next version)
cdef double random_normal(double mean, double std, randstate rstate) noexcept:
    r"""
    Return a number chosen randomly with given mean and standard deviation.

    INPUT:

    - ``mean`` -- double
    - ``std`` -- double, standard deviation
    - ``rstate`` -- a randstate object

    OUTPUT:

    a double
    """
    # Ported from http://users.tkk.fi/~nbeijar/soft/terrain/source_o2/boxmuller.c
    # This is the Box-Muller algorithm.
    # Client code can get the current random state from:
    #     cdef randstate rstate = current_randstate()

    cdef double x1, x2, w, y1, y2
    while True:
        x1 = 2*rstate.c_rand_double() - 1
        x2 = 2*rstate.c_rand_double() - 1
        w = x1*x1 + x2*x2
        if w < 1: break
    w = sqrt( (-2*log(w))/w )
    y1 = x1 * w
    return mean + y1*std

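The helper above is the Marsaglia polar variant of the Box-Muller transform: it rejection-samples a point inside the unit disc and rescales its coordinates into a normal draw, using Sage's current randstate so that ``set_random_seed()`` makes the results reproducible. As an aside (not part of chmm.pyx), here is a minimal plain-Python sketch of the same method, built on the standard random and math modules; ``polar_normal`` is a hypothetical name introduced only for illustration:

    import math
    import random

    def polar_normal(mean, std, rng=random):
        """One N(mean, std**2) draw via the polar Box-Muller transform."""
        while True:
            x1 = 2*rng.random() - 1      # uniform on (-1, 1)
            x2 = 2*rng.random() - 1
            w = x1*x1 + x2*x2            # squared radius of the point (x1, x2)
            if 0 < w < 1:                # keep only points strictly inside the unit disc
                break
        scale = math.sqrt(-2*math.log(w) / w)
        return mean + x1*scale*std       # x2*scale would give a second independent draw
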
cdef class GaussianHiddenMarkovModel(HiddenMarkovModel):
    r"""
    Gaussian emissions Hidden Markov Model.

    INPUT:

    - ``A`` -- matrix; the `N \times N` transition matrix
    - ``B`` -- list of pairs ``(mu, sigma)`` that define the distributions
    - ``pi`` -- initial state probabilities
    - ``normalize`` -- boolean (default: ``True``)

    EXAMPLES:

    We illustrate the primary functions with an example 2-state Gaussian HMM::

        sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
        ....:                                    [(1,1), (-1,1)],
        ....:                                    [.5,.5]); m
        Gaussian Hidden Markov Model with 2 States
        Transition matrix:
        [0.1 0.9]
        [0.5 0.5]
        Emission parameters:
        [(1.0, 1.0), (-1.0, 1.0)]
        Initial probabilities: [0.5000, 0.5000]

    We query the defining transition matrix, emission parameters, and
    initial state probabilities::

        sage: m.transition_matrix()
        [0.1 0.9]
        [0.5 0.5]
        sage: m.emission_parameters()
        [(1.0, 1.0), (-1.0, 1.0)]
        sage: m.initial_probabilities()
        [0.5000, 0.5000]

    We obtain a sample sequence with 10 entries in it, and compute the
    logarithm of the probability of obtaining this sequence, given the
    model::

        sage: obs = m.sample(5); obs  # random
        [-1.6835, 0.0635, -2.1688, 0.3043, -0.3188]
        sage: log_likelihood = m.log_likelihood(obs)
        sage: counter = 0
        sage: n = 0
        sage: def add_samples(i):
        ....:     global counter, n
        ....:     for _ in range(i):
        ....:         n += 1
        ....:         obs2 = m.sample(5)
        ....:         if all(abs(obs2[i] - obs[i]) < 0.25 for i in range(5)):
        ....:             counter += 1

        sage: add_samples(10000)
        sage: while abs(log_likelihood - log(counter*1.0/n/0.5^5)) < 0.1:
        ....:     add_samples(10000)

    We compute the Viterbi path, and probability that the given path
    of states produced obs::

        sage: m.viterbi(obs)  # random
        ([1, 0, 1, 0, 1], -8.714092684611794)

    We use the Baum-Welch iterative algorithm to find another model
    for which our observation sequence is more likely::

        sage: try:
        ....:     p, s = m.baum_welch(obs)
        ....:     assert p > log_likelihood
        ....:     assert (1 <= s <= 500)
        ....: except RuntimeError:
        ....:     pass

    Notice that running Baum-Welch changed our model::

        sage: m  # random
        Gaussian Hidden Markov Model with 2 States
        Transition matrix:
        [ 0.4154981366185841   0.584501863381416]
        [ 0.9999993174253741 6.825746258991804e-07]
        Emission parameters:
        [(0.4178882427119503, 0.5173109664360919),
        (-1.5025208631331122, 0.5085512836055119)]
        Initial probabilities: [0.0000, 1.0000]
    """
    cdef TimeSeries B, prob
    cdef int n_out

    def __init__(self, A, B, pi, bint normalize=True):
        r"""
        Create a Gaussian emissions HMM with transition probability
        matrix `A`, normal emissions given by `B`, and initial state
        probability distribution ``pi``.

        INPUT:

        - ``A`` -- list of lists or a square `N \times N` matrix, whose
          `(i,j)` entry gives the probability of transitioning from
          state `i` to state `j`.

        - ``B`` -- list of `N` pairs ``(mu, std)``, where if ``B[i]=(mu,std)``,
          then the probability distribution associated with state `i`
          is normal with mean ``mu`` and standard deviation ``std``.

        - ``pi`` -- the probabilities of starting in each initial
          state, i.e., ``pi[i]`` is the probability of starting in
          state `i`.

        - ``normalize`` -- boolean (default: ``True``); if given, input is
          normalized to define valid probability distributions,
          e.g., the entries of `A` are made nonnegative and the rows
          sum to 1.

        EXAMPLES::

            sage: hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]], [(1,1), (-1,1)], [.5,.5])
            Gaussian Hidden Markov Model with 2 States
            Transition matrix:
            [0.1 0.9]
            [0.5 0.5]
            Emission parameters:
            [(1.0, 1.0), (-1.0, 1.0)]
            Initial probabilities: [0.5000, 0.5000]

        We input a model in which both `A` and ``pi`` have to be
        renormalized to define valid probability distributions::

            sage: hmm.GaussianHiddenMarkovModel([[-1,.7],[.3,.4]], [(1,1), (-1,1)], [-1,.3])  # rel tol 3e-14
            Gaussian Hidden Markov Model with 2 States
            Transition matrix:
            [ 0.0 1.0]
            [0.42857142857142855 0.5714285714285714]
            Emission parameters:
            [(1.0, 1.0), (-1.0, 1.0)]
            Initial probabilities: [0.0000, 1.0000]

        Bad things can happen::

            sage: hmm.GaussianHiddenMarkovModel([[-1,.7],[.3,.4]], [(1,1), (-1,1)], [-1,.3],
            ....:                               normalize=False)
            Gaussian Hidden Markov Model with 2 States
            Transition matrix:
            [-1.0  0.7]
            [ 0.3  0.4]
            ...
        """
        self.pi = util.initial_probs_to_TimeSeries(pi, normalize)
        self.N = len(self.pi)
        self.A = util.state_matrix_to_TimeSeries(A, self.N, normalize)

        # B should be a matrix of N rows, with column 0 the mean and 1
        # the standard deviation.
        if isinstance(B, Matrix):
            B = B.list()
        else:
            B = flatten(B)
        self.B = TimeSeries(B)
        self.probability_init()

    def __richcmp__(self, other, op):
        r"""
        Compare ``self`` and ``other``, which must both be
        ``GaussianHiddenMarkovModel`` instances.

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[1]], [(0,1)], [1])
            sage: n = hmm.GaussianHiddenMarkovModel([[1]], [(1,1)], [1])
            sage: m < n
            True
            sage: m == m
            True
            sage: n > m
            True
            sage: n < m
            False
        """
        if not isinstance(other, GaussianHiddenMarkovModel):
            return NotImplemented
        return PyObject_RichCompare(self.__reduce__()[1],
                                    other.__reduce__()[1], op)
    def __getitem__(self, Py_ssize_t i):
        r"""
        Return the mean and standard deviation for the `i`-th state.

        INPUT:

        - ``i`` -- integer

        OUTPUT: 2 floats

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]], [(1,.5), (-2,.3)], [.5,.5])
            sage: m[0]
            (1.0, 0.5)
            sage: m[1]
            (-2.0, 0.3)
            sage: m[-1]
            (-2.0, 0.3)
            sage: m[3]
            Traceback (most recent call last):
            ...
            IndexError: index out of range
            sage: m[-3]
            Traceback (most recent call last):
            ...
            IndexError: index out of range
        """
        if i < 0:
            i += self.N
        if i < 0 or i >= self.N:
            raise IndexError('index out of range')

        # TODO: change to be a normal distribution class (next version)
        return self.B[2*i], self.B[2*i+1]
    def __reduce__(self):
        r"""
        Used in pickling.

        EXAMPLES::

            sage: G = hmm.GaussianHiddenMarkovModel([[1]], [(0,1)], [1])
            sage: loads(dumps(G)) == G
            True
        """
        return unpickle_gaussian_hmm_v1, \
            (self.A, self.B, self.pi, self.prob, self.n_out)

    def emission_parameters(self):
        r"""
        Return the parameters that define the normal distributions
        associated to all of the states.

        OUTPUT:

        a list ``B`` of pairs ``B[i] = (mu, std)``, such that the
        distribution associated to state `i` is normal with mean
        ``mu`` and standard deviation ``std``.

        EXAMPLES::

            sage: M = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: M.emission_parameters()
            [(1.0, 0.5), (-1.0, 3.0)]
        """
        cdef Py_ssize_t i
        from sage.rings.real_double import RDF
        return [(RDF(self.B[2*i]), RDF(self.B[2*i+1])) for i in range(self.N)]

    def __repr__(self):
        r"""
        Return string representation.

        EXAMPLES::

            sage: hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]], [(1,.5), (-1,3)], [.1,.9]).__repr__()
            'Gaussian Hidden Markov Model with 2 States\nTransition matrix:\n[0.1 0.9]\n[0.5 0.5]\nEmission parameters:\n[(1.0, 0.5), (-1.0, 3.0)]\nInitial probabilities: [0.1000, 0.9000]'
        """
        s = "Gaussian Hidden Markov Model with %s States" % self.N
        s += '\nTransition matrix:\n%s' % self.transition_matrix()
        s += '\nEmission parameters:\n%s' % self.emission_parameters()
        s += '\nInitial probabilities: %s' % self.initial_probabilities()
        return s
    def generate_sequence(self, Py_ssize_t length, starting_state=None):
        r"""
        Return a sample of the given length from this HMM.

        INPUT:

        - ``length`` -- positive integer
        - ``starting_state`` -- integer (or ``None``); if specified then
          generate a sequence using this model starting with the given state
          instead of the initial probabilities to determine the
          starting state.

        OUTPUT:

        - an :class:`IntList` or list of emission symbols
        - :class:`TimeSeries` of emissions

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: m.generate_sequence(5)  # random
            ([-3.0505, 0.5317, -4.5065, 0.6521, 1.0435], [1, 0, 1, 0, 1])
            sage: m.generate_sequence(0)
            ([], [])
            sage: m.generate_sequence(-1)
            Traceback (most recent call last):
            ...
            ValueError: length must be nonnegative

        Verify numerically that the starting state is 0 with probability about 0.1::

            sage: counter = 0
            sage: n = 0
            sage: def add_samples(i):
            ....:     global counter, n
            ....:     for i in range(i):
            ....:         n += 1
            ....:         if m.generate_sequence(1)[1][0] == 0:
            ....:             counter += 1

            sage: add_samples(10^5)
            sage: while abs(counter*1.0 / n - 0.1) > 0.01: add_samples(10^5)

        Example in which the starting state is 0 (see :issue:`11452`)::

            sage: set_random_seed(23); m.generate_sequence(2)
            ([0.6501, -2.0151], [0, 1])

        Force a starting state of 1 even though as we saw above it would be 0::

            sage: set_random_seed(23); m.generate_sequence(2, starting_state=1)
            ([-3.1491, -1.0244], [1, 1])
        """
        if length < 0:
            raise ValueError("length must be nonnegative")

        # Create Integer lists for states and observations
        cdef IntList states = IntList(length)
        cdef TimeSeries obs = TimeSeries(length)
        if length == 0:
            return states, obs

        # Setup variables, including random state.
        cdef Py_ssize_t i, j
        cdef randstate rstate = current_randstate()
        cdef int q = 0
        cdef double r, accum

        # Choose the starting state.
        # See the remark in hmm.pyx about how this should get
        # replaced by some general fast discrete distribution code.
        if starting_state is None:
            r = rstate.c_rand_double()
            accum = 0
            for i in range(self.N):
                if r < self.pi._values[i] + accum:
                    q = i
                    break
                else:
                    accum += self.pi._values[i]
        else:
            q = starting_state
            if q < 0 or q >= self.N:
                raise ValueError("starting state must be between 0 and %s" % (self.N-1))

        states._values[0] = q
        obs._values[0] = self.random_sample(q, rstate)

        cdef double* row
        cdef int O
        sig_on()
        for i in range(1, length):
            accum = 0
            row = self.A._values + q*self.N
            r = rstate.c_rand_double()
            for j in range(self.N):
                if r < row[j] + accum:
                    q = j
                    break
                else:
                    accum += row[j]
            states._values[i] = q
            obs._values[i] = self.random_sample(q, rstate)
        sig_off()

        return obs, states
|
|
448
|
+
|
|
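    # Illustration (editorial sketch, not part of this class): the state
    # updates in ``generate_sequence`` sample from a categorical distribution
    # by inverse CDF -- draw r in [0,1) and take the first index whose
    # cumulative row probability exceeds r. In plain Python this is roughly:
    #
    #     import random
    #     def sample_next_state(row, r=None):
    #         # row: transition probabilities out of the current state, summing to 1
    #         r = random.random() if r is None else r
    #         accum = 0.0
    #         for j, p in enumerate(row):
    #             if r < accum + p:
    #                 return j
    #             accum += p
    #         return len(row) - 1  # guard against floating-point rounding
    #
    # The Cython loop above inlines this against ``self.A._values`` for speed.
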
    cdef probability_init(self):
        r"""
        Used internally to compute caching information that makes
        certain computations in the Baum-Welch algorithm faster. This
        function has no input or output.
        """
        self.prob = TimeSeries(2*self.N)
        cdef int i
        for i in range(self.N):
            self.prob[2*i] = 1.0/(sqrt2pi*self.B[2*i+1])
            self.prob[2*i+1] = -1.0/(2*self.B[2*i+1]*self.B[2*i+1])

    cdef double random_sample(self, int state, randstate rstate) noexcept:
        r"""
        Return a random sample from the normal distribution associated
        to the given state.

        This is only used internally, and no bounds or other error
        checking is done, so calling this improperly can lead to seg
        faults.

        INPUT:

        - ``state`` -- integer
        - ``rstate`` -- randstate instance

        OUTPUT:

        double
        """
        return random_normal(self.B._values[state*2], self.B._values[state*2+1], rstate)

    cdef double probability_of(self, int state, double observation) noexcept:
        r"""
        Return a useful continuous analogue of "the probability b_j(o)"
        of seeing the given observation given that we're in the given
        state j (=state).

        The distribution is a normal distribution, and we're asking
        about the probability of a particular point being observed;
        the probability of a particular point is 0, which is not
        useful. Thus we instead consider the limit p = prob([o,o+d])/d
        as d goes to 0. There is a simple closed form formula for p,
        derived in the source code. Note that p can be bigger than 1;
        for example, if we set observation=mean in the closed formula
        we get p=1/(sqrt(2*pi)*std), so p>1 when std<1/sqrt(2*pi).

        INPUT:

        - ``state`` -- integer
        - ``observation`` -- double

        OUTPUT:

        double
        """
        # The code below is an optimized version of the following code:
        #     cdef double mean = self.B._values[2*state], \
        #                 std = self.B._values[2*state+1]
        #     return 1/(sqrt2pi*std) * \
        #            exp(-(observation-mean)*(observation-mean)/(2*std*std))
        #
        # Here is how to use Sage to verify that the above formula computes
        # the limit claimed above:
        #
        #     var('x,d,obs,mean,std')
        #     n = 1/sqrt(2*pi*std^2) * exp(-(x-mean)^2/(2*std^2))
        #     assume(std>0); assume(d>0)
        #     m = n.integrate(x,obs,obs+d)/d
        #     p = SR(m.limit(d=0).simplify_full())
        #     q = 1/(sqrt(2*pi)*std) * exp(-(obs-mean)*(obs-mean)/(2*std*std))
        #     bool(p==q) # outputs True

        cdef double x = observation - self.B._values[2*state] # observation - mean
        return self.prob._values[2*state] * exp(x*x*self.prob._values[2*state+1])

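    # Illustration (editorial sketch, standard library only): the density
    # evaluated by ``probability_of``, written with the same cached constants
    # prob[2*i] = 1/(sqrt(2*pi)*std) and prob[2*i+1] = -1/(2*std^2):
    #
    #     from math import sqrt, pi, exp
    #     def gaussian_density(x, mean, std):
    #         c0 = 1.0 / (sqrt(2*pi) * std)   # normalising constant
    #         c1 = -1.0 / (2 * std * std)     # exponent coefficient
    #         d = x - mean
    #         return c0 * exp(c1 * d * d)
    #
    # For instance gaussian_density(0.0, 0.0, 0.1) is about 3.99, illustrating
    # that this "probability" is a density and may exceed 1.
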
    def log_likelihood(self, obs):
        r"""
        Return the logarithm of a continuous analogue of the
        probability that this model produced the given observation
        sequence.

        Note that the "continuous analogue of the probability" above can
        be bigger than 1, hence the logarithm can be positive.

        INPUT:

        - ``obs`` -- sequence of observations

        OUTPUT: float

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: m.log_likelihood([1,1,1])
            -4.297880766072486
            sage: s = m.sample(20)
            sage: -80 < m.log_likelihood(s) < -20
            True
        """
        if len(obs) == 0:
            return 1.0
        if not isinstance(obs, TimeSeries):
            obs = TimeSeries(obs)
        return self._forward_scale(obs)

    def _forward_scale(self, TimeSeries obs):
        r"""
        Memory-efficient implementation of the forward algorithm (with scaling).

        INPUT:

        - ``obs`` -- :class:`TimeSeries` of observations

        OUTPUT:

        float -- the log of the probability that the model
        produced this sequence

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]], [(1,.5), (-1,3)], [.1,.9])
            sage: m._forward_scale(stats.TimeSeries([1,-1,-1,1]))
            -7.641988207069133
        """
        cdef Py_ssize_t i, j, t, T = len(obs)

        # The running sum of the log probabilities
        cdef double log_probability = 0, sum, a

        cdef TimeSeries alpha = TimeSeries(self.N), \
                        alpha2 = TimeSeries(self.N)

        # Initialization
        sum = 0
        for i in range(self.N):
            a = self.pi[i] * self.probability_of(i, obs._values[0])
            alpha[i] = a
            sum += a

        log_probability = log(sum)
        alpha.rescale(1/sum)

        # Induction
        cdef double s
        for t in range(1, T):
            sum = 0
            for j in range(self.N):
                s = 0
                for i in range(self.N):
                    s += alpha._values[i] * self.A._values[i*self.N + j]
                a = s * self.probability_of(j, obs._values[t])
                alpha2._values[j] = a
                sum += a

            log_probability += log(sum)
            for j in range(self.N):
                alpha._values[j] = alpha2._values[j] / sum

        # Termination
        return log_probability

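    # Illustration (editorial sketch, not part of this class): the scaling
    # above keeps the forward vector normalised and accumulates log(sum) at
    # each step, so the result stays exact in log space even when the raw
    # probabilities underflow. A rough pure-Python equivalent, where
    # ``density(j, o)`` stands in for ``probability_of`` and ``obs`` is
    # assumed nonempty:
    #
    #     from math import log
    #     def forward_log_likelihood(A, pi, density, obs):
    #         N = len(pi)
    #         alpha = [pi[i] * density(i, obs[0]) for i in range(N)]
    #         total = sum(alpha)
    #         logprob = log(total)
    #         alpha = [a / total for a in alpha]
    #         for o in obs[1:]:
    #             alpha = [density(j, o) * sum(alpha[i] * A[i][j] for i in range(N))
    #                      for j in range(N)]
    #             total = sum(alpha)
    #             logprob += log(total)
    #             alpha = [a / total for a in alpha]
    #         return logprob
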
    def viterbi(self, obs):
        r"""
        Determine "the" hidden sequence of states that is most likely
        to produce the given sequence ``obs`` of observations, along with
        the probability that this hidden sequence actually produced
        the observation.

        INPUT:

        - ``obs`` -- sequence of emitted ints or symbols

        OUTPUT:

        - ``list`` -- "the" most probable sequence of hidden states, i.e.,
          the Viterbi path

        - ``float`` -- log of probability that the observed sequence
          was produced by the Viterbi sequence of states

        EXAMPLES:

        We find the optimal state sequence for a given model::

            sage: m = hmm.GaussianHiddenMarkovModel([[0.5,0.5],[0.5,0.5]],
            ....:                                   [(0,1),(10,1)],
            ....:                                   [0.5,0.5])
            sage: m.viterbi([0,1,10,10,1])
            ([0, 0, 1, 1, 0], -9.0604285688230...)

        Another example in which the most likely states change based
        on the last observation::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: m.viterbi([-2,-1,.1,0.1])
            ([1, 1, 0, 1], -9.61823698847639...)
            sage: m.viterbi([-2,-1,.1,0.3])
            ([1, 1, 1, 0], -9.566023653378513)
        """
        cdef TimeSeries _obs
        if not isinstance(obs, TimeSeries):
            _obs = TimeSeries(obs)
        else:
            _obs = obs

        # The algorithm is the same as _viterbi above, except
        # we take the logarithms of everything first, and add
        # instead of multiply.
        cdef Py_ssize_t t, T = _obs._length
        cdef IntList state_sequence = IntList(T)
        if T == 0:
            return state_sequence, 0.0

        cdef int i, j, N = self.N

        # delta[i] is the maximum of the probabilities over all
        # paths ending in state i.
        cdef TimeSeries delta = TimeSeries(N), delta_prev = TimeSeries(N)

        # We view psi as an N x T matrix of ints. The quantity
        #          psi[N*t + j]
        # is a most probable hidden state at time t-1, given
        # that j is a most probable state at time t.
        cdef IntList psi = IntList(N * T) # initialized to 0 by default

        # Log Preprocessing
        cdef TimeSeries A = self.A.log()
        cdef TimeSeries pi = self.pi.log()

        # Initialization:
        for i in range(N):
            delta._values[i] = pi._values[i] + log(self.probability_of(i, _obs._values[0]))

        # Recursion:
        cdef double mx, tmp, minus_inf = float('-inf')
        cdef int index

        for t in range(1, T):
            delta_prev, delta = delta, delta_prev
            for j in range(N):
                # Compute delta_t[j] = max_i(delta_{t-1}(i) a_{i,j}) * b_j(_obs[t])
                mx = minus_inf
                index = -1
                for i in range(N):
                    tmp = delta_prev._values[i] + A._values[i*N+j]
                    if tmp > mx:
                        mx = tmp
                        index = i
                delta._values[j] = mx + log(self.probability_of(j, _obs._values[t]))
                psi._values[N*t + j] = index

        # Termination:
        mx, index = delta.max(index=True)

        # Path (state sequence) backtracking:
        state_sequence._values[T-1] = index
        t = T-2
        while t >= 0:
            state_sequence._values[t] = psi._values[N*(t+1) + state_sequence._values[t+1]]
            t -= 1

        return state_sequence, mx

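    # Illustration (editorial sketch, not part of this class): a log-space
    # Viterbi on plain Python lists, mirroring the recursion and psi-based
    # backtracking above; ``logdensity(i, o)`` stands in for
    # log(probability_of(i, o)):
    #
    #     from math import log, inf
    #     def viterbi_path(A, pi, logdensity, obs):
    #         N, T = len(pi), len(obs)
    #         logA = [[log(p) if p > 0 else -inf for p in row] for row in A]
    #         delta = [(log(pi[i]) if pi[i] > 0 else -inf) + logdensity(i, obs[0])
    #                  for i in range(N)]
    #         psi = [[0] * N for _ in range(T)]
    #         for t in range(1, T):
    #             new = []
    #             for j in range(N):
    #                 best = max(range(N), key=lambda i: delta[i] + logA[i][j])
    #                 psi[t][j] = best
    #                 new.append(delta[best] + logA[best][j] + logdensity(j, obs[t]))
    #             delta = new
    #         path = [max(range(N), key=lambda i: delta[i])]
    #         for t in range(T - 1, 0, -1):
    #             path.append(psi[t][path[-1]])
    #         return list(reversed(path)), max(delta)
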
    cdef TimeSeries _backward_scale_all(self, TimeSeries obs, TimeSeries scale):
        r"""
        This function returns the matrix beta_t(i), and is used
        internally as part of the Baum-Welch algorithm.

        The quantity beta_t(i) is the probability of observing the
        sequence obs[t+1:] assuming that the model is in state i at
        time t.

        INPUT:

        - ``obs`` -- TimeSeries
        - ``scale`` -- TimeSeries

        OUTPUT:

        - TimeSeries beta such that beta_t(i) = beta[t*N + i]
        - scale is also changed by this function
        """
        cdef Py_ssize_t t, T = obs._length
        cdef int N = self.N, i, j
        cdef double s
        cdef TimeSeries beta = TimeSeries(N*T, initialize=False)

        # 1. Initialization
        for i in range(N):
            beta._values[(T-1)*N + i] = 1 / scale._values[T-1]

        # 2. Induction
        t = T-2
        while t >= 0:
            for i in range(N):
                s = 0
                for j in range(N):
                    s += self.A._values[i*N+j] * \
                         self.probability_of(j, obs._values[t+1]) * beta._values[(t+1)*N+j]
                beta._values[t*N + i] = s/scale._values[t]
            t -= 1
        return beta

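    # Editorial note: the recursion above is the standard scaled backward pass,
    #
    #     beta_{T-1}(i) = 1 / scale[T-1]
    #     beta_t(i)     = ( sum_j A[i,j] * b_j(obs[t+1]) * beta_{t+1}(j) ) / scale[t]
    #
    # reusing the per-step scale factors produced by the forward pass so that
    # products like alpha_t(i)*beta_t(i) stay well conditioned numerically.
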
    cdef _forward_scale_all(self, TimeSeries obs):
        r"""
        Return scaled values alpha_t(i), the sequence of scalings, and
        the log probability.

        The quantity alpha_t(i) is the probability of observing the
        sequence obs[:t+1] assuming that the model is in state i at
        time t.

        INPUT:

        - ``obs`` -- TimeSeries

        OUTPUT:

        - TimeSeries alpha with alpha_t(i) = alpha[t*N + i]
        - TimeSeries scale with scale[t] the scaling at step t
        - ``float`` -- log_probability of the observation sequence
          being produced by the model
        """
        cdef Py_ssize_t i, j, t, T = len(obs)
        cdef int N = self.N

        # The running sum of the log probabilities
        cdef double log_probability = 0, sum, a

        cdef TimeSeries alpha = TimeSeries(N*T, initialize=False)
        cdef TimeSeries scale = TimeSeries(T, initialize=False)

        # Initialization
        sum = 0
        for i in range(self.N):
            a = self.pi._values[i] * self.probability_of(i, obs._values[0])
            alpha._values[0*N + i] = a
            sum += a

        scale._values[0] = sum
        log_probability = log(sum)
        for i in range(self.N):
            alpha._values[0*N + i] /= sum

        # Induction
        # The code below is just an optimized version of:
        #     alpha = (alpha * A).pairwise_product(B[O[t+1]])
        #     alpha = alpha.scale(1/alpha.sum())
        # along with keeping track of the log of the scaling factor.
        cdef double s
        for t in range(1, T):
            sum = 0
            for j in range(N):
                s = 0
                for i in range(N):
                    s += alpha._values[(t-1)*N + i] * self.A._values[i*N + j]
                a = s * self.probability_of(j, obs._values[t])
                alpha._values[t*N + j] = a
                sum += a

            log_probability += log(sum)
            scale._values[t] = sum
            for j in range(self.N):
                alpha._values[t*N + j] /= sum

        # Termination
        return alpha, scale, log_probability

    cdef TimeSeries _baum_welch_xi(self, TimeSeries alpha, TimeSeries beta, TimeSeries obs):
        r"""
        Used internally to compute the scaled quantity xi_t(i,j)
        appearing in the Baum-Welch reestimation algorithm.

        INPUT:

        - ``alpha`` -- TimeSeries as output by the scaled forward algorithm
        - ``beta`` -- TimeSeries as output by the scaled backward algorithm
        - ``obs`` -- TimeSeries of observations

        OUTPUT:

        TimeSeries xi such that xi[t*N*N + i*N + j] = xi_t(i,j).
        """
        cdef int i, j, N = self.N
        cdef double sum
        cdef Py_ssize_t t, T = alpha._length//N
        cdef TimeSeries xi = TimeSeries(T*N*N, initialize=False)
        for t in range(T-1):
            sum = 0.0
            for i in range(N):
                for j in range(N):
                    xi._values[t*N*N+i*N+j] = alpha._values[t*N+i]*beta._values[(t+1)*N+j]*\
                        self.A._values[i*N+j] * self.probability_of(j, obs._values[t+1])
                    sum += xi._values[t*N*N+i*N+j]
            for i in range(N):
                for j in range(N):
                    xi._values[t*N*N+i*N+j] /= sum
        return xi

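    # Editorial note: before the per-t normalisation, the quantity filled in
    # above is
    #
    #     xi_t(i,j) = alpha_t(i) * A[i,j] * b_j(obs[t+1]) * beta_{t+1}(j),
    #
    # the (unnormalised) posterior probability of being in state i at time t
    # and state j at time t+1; dividing by the sum makes
    # sum_{i,j} xi_t(i,j) = 1, which is the form the re-estimation step in
    # ``baum_welch`` expects.
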
    def baum_welch(self, obs, int max_iter=500, double log_likelihood_cutoff=1e-4,
                   double min_sd=0.01, bint fix_emissions=False, bint v=False):
        r"""
        Given an observation sequence ``obs``, improve this HMM using the
        Baum-Welch algorithm to increase the probability of observing ``obs``.

        INPUT:

        - ``obs`` -- a time series of emissions

        - ``max_iter`` -- integer (default: 500); maximum number
          of Baum-Welch steps to take

        - ``log_likelihood_cutoff`` -- positive float (default: 1e-4);
          the minimal improvement in likelihood with respect to
          the last iteration required to continue. Relative value
          to log likelihood.

        - ``min_sd`` -- positive float (default: 0.01); when
          reestimating, the standard deviation of emissions is not
          allowed to be less than ``min_sd``.

        - ``fix_emissions`` -- boolean (default: ``False``); if ``True``, do not
          change emissions when updating

        OUTPUT:

        changes the model in place, and returns the log
        likelihood and number of iterations.

        EXAMPLES::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: m.log_likelihood([-2,-1,.1,0.1])
            -8.858282215986275
            sage: m.baum_welch([-2,-1,.1,0.1])
            (4.534646052182..., 7)
            sage: m.log_likelihood([-2,-1,.1,0.1])
            4.534646052182...
            sage: m # rel tol 3e-14
            Gaussian Hidden Markov Model with 2 States
            Transition matrix:
            [ 0.9999999992430161 7.569839394440382e-10]
            [ 0.49998462791192644 0.5000153720880736]
            Emission parameters:
            [(0.09999999999999999, 0.01), (-1.4999508147591902, 0.5000710504895474)]
            Initial probabilities: [0.0000, 1.0000]

        We illustrate bounding the standard deviation below. Note that above we had
        different emission parameters when the ``min_sd`` was the default of 0.01::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: m.baum_welch([-2,-1,.1,0.1], min_sd=1)
            (-4.07939572755..., 32)
            sage: m.emission_parameters()
            [(-0.2663018798..., 1.0), (-1.99850979..., 1.0)]

        We watch the log likelihoods of the model converge, step by step::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.5,.5]],
            ....:                                   [(1,.5), (-1,3)],
            ....:                                   [.1,.9])
            sage: v = m.sample(10)
            sage: l = stats.TimeSeries([m.baum_welch(v, max_iter=1)[0]
            ....:                       for _ in range(len(v))])
            sage: all(l[i] <= l[i+1] + 0.0001 for i in range(9))
            True
            sage: l # random
            [-20.1167, -17.7611, -16.9814, -16.9364, -16.9314,
            -16.9309, -16.9309, -16.9309, -16.9309, -16.9309]

        We illustrate fixing emissions::

            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.9,.1]],
            ....:                                   [(1,2),(-1,.5)],
            ....:                                   [.3,.7])
            sage: set_random_seed(0); v = m.sample(100)
            sage: m.baum_welch(v,fix_emissions=True)
            (-164.72944548204..., 23)
            sage: m.emission_parameters()
            [(1.0, 2.0), (-1.0, 0.5)]
            sage: m = hmm.GaussianHiddenMarkovModel([[.1,.9],[.9,.1]],
            ....:                                   [(1,2),(-1,.5)],
            ....:                                   [.3,.7])
            sage: m.baum_welch(v)
            (-162.854370397998..., 49)
            sage: m.emission_parameters() # rel tol 3e-14
            [(1.2722419172602375, 2.371368751761901),
            (-0.9486174675179113, 0.5762360385123765)]
        """
        if not isinstance(obs, TimeSeries):
            obs = TimeSeries(obs)
        cdef TimeSeries _obs = obs
        cdef TimeSeries alpha, beta, scale, gamma, xi
        cdef double log_probability, log_probability0, log_probability_prev, delta
        cdef int i, j, k, N, n_iterations
        cdef Py_ssize_t t, T
        cdef double denominator_A, numerator_A, denominator_B, numerator_mean, numerator_std

        # Initialization
        alpha, scale, log_probability0 = self._forward_scale_all(_obs)
        if not isfinite(log_probability0):
            return (0.0, 0)
        log_probability = log_probability0
        beta = self._backward_scale_all(_obs, scale)
        gamma = self._baum_welch_gamma(alpha, beta)
        xi = self._baum_welch_xi(alpha, beta, _obs)
        log_probability_prev = log_probability
        N = self.N
        n_iterations = 0
        T = len(_obs)

        # Re-estimation
        while True:
            # Reestimate
            for i in range(N):
                if not isfinite(gamma._values[0*N+i]):
                    # Before raising an error, leave self in a valid state.
                    util.normalize_probability_TimeSeries(self.pi, 0, self.pi._length)
                    raise RuntimeError("impossible to compute gamma during reestimation")
                self.pi._values[i] = gamma._values[0*N+i]

            # Update the probabilities pi to define a valid discrete distribution
            util.normalize_probability_TimeSeries(self.pi, 0, self.pi._length)

            # Reestimate transition matrix and emission probabilities in
            # each state.
            for i in range(N):
                # Compute the updated transition matrix
                denominator_A = 0.0
                for t in range(T-1):
                    denominator_A += gamma._values[t*N+i]
                if not isnormal(denominator_A):
                    raise RuntimeError("unable to re-estimate transition matrix")
                for j in range(N):
                    numerator_A = 0.0
                    for t in range(T-1):
                        numerator_A += xi._values[t*N*N+i*N+j]
                    self.A._values[i*N+j] = numerator_A / denominator_A

                # Rescale the i-th row of the transition matrix to be
                # a valid stochastic matrix:
                util.normalize_probability_TimeSeries(self.A, i*N, (i+1)*N)

                if not fix_emissions:
                    denominator_B = denominator_A + gamma._values[(T-1)*N + i]
                    if not isnormal(denominator_B):
                        raise RuntimeError("unable to re-estimate emission probabilities")

                    numerator_mean = 0.0
                    numerator_std = 0.0
                    for t in range(T):
                        numerator_mean += gamma._values[t*N + i] * _obs._values[t]
                        numerator_std += gamma._values[t*N + i] * \
                            (_obs._values[t] - self.B._values[2*i])*(_obs._values[t] - self.B._values[2*i])
                    # re-estimated mean
                    self.B._values[2*i] = numerator_mean / denominator_B
                    # re-estimated standard deviation
                    self.B._values[2*i+1] = sqrt(numerator_std / denominator_B)
                    if self.B._values[2*i+1] < min_sd:
                        self.B._values[2*i+1] = min_sd
                    self.probability_init()

            n_iterations += 1
            if n_iterations >= max_iter: break

            # Initialization for next iteration
            alpha, scale, log_probability0 = self._forward_scale_all(_obs)

            if not isfinite(log_probability0): break
            log_probability = log_probability0
            beta = self._backward_scale_all(_obs, scale)
            gamma = self._baum_welch_gamma(alpha, beta)
            xi = self._baum_welch_xi(alpha, beta, _obs)

            # Compute the difference between the log probability of
            # two iterations.
            delta = log_probability - log_probability_prev
            log_probability_prev = log_probability

            # If the log probability does not improve by more than the
            # cutoff, then terminate.
            if delta >= 0 and delta <= log_likelihood_cutoff:
                break

        return log_probability, n_iterations

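    # Editorial note: in the notation of the docstrings, the update step above
    # implements the standard Baum-Welch re-estimation formulas
    #
    #     pi_i    <-  gamma_0(i)
    #     A[i,j]  <-  sum_{t<T-1} xi_t(i,j) / sum_{t<T-1} gamma_t(i)
    #     mean_i  <-  sum_t gamma_t(i)*obs[t] / sum_t gamma_t(i)
    #     std_i   <-  sqrt( sum_t gamma_t(i)*(obs[t]-mean_i)^2 / sum_t gamma_t(i) )
    #
    # (the std update uses the previous mean, as in the loop above), with
    # std_i clamped below by ``min_sd`` and each row of A renormalised so the
    # model remains a valid probability distribution despite rounding error.
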

cdef class GaussianMixtureHiddenMarkovModel(GaussianHiddenMarkovModel):
    r"""
    Gaussian mixture Hidden Markov Model.

    INPUT:

    - ``A`` -- matrix; the `N \times N` transition matrix

    - ``B`` -- list of mixture definitions for each state. Each
      state may have a varying number of gaussians with selection
      probabilities that sum to 1 and are encoded as ``(p, (mu,sigma))``.

    - ``pi`` -- initial state probabilities

    - ``normalize`` -- boolean (default: ``True``); if given, input is
      normalized to define valid probability distributions,
      e.g., the entries of `A` are made nonnegative and the rows
      sum to 1, and the probabilities in ``pi`` are normalized.

    EXAMPLES::

        sage: A = [[0.5,0.5],[0.5,0.5]]
        sage: B = [[(0.9,(0.0,1.0)), (0.1,(1,10000))],[(1,(1,1)), (0,(0,0.1))]]
        sage: hmm.GaussianMixtureHiddenMarkovModel(A, B, [1,0])
        Gaussian Mixture Hidden Markov Model with 2 States
        Transition matrix:
        [0.5 0.5]
        [0.5 0.5]
        Emission parameters:
        [0.9*N(0.0,1.0) + 0.1*N(1.0,10000.0), 1.0*N(1.0,1.0) + 0.0*N(0.0,0.1)]
        Initial probabilities: [1.0000, 0.0000]

    TESTS:

    If a standard deviation is 0, it is normalized to be slightly bigger than 0::

        sage: hmm.GaussianMixtureHiddenMarkovModel([[1]], [[(1,(0,0))]], [1])
        Gaussian Mixture Hidden Markov Model with 1 States
        Transition matrix:
        [1.0]
        Emission parameters:
        [1.0*N(0.0,1e-08)]
        Initial probabilities: [1.0000]

    We test that the number of emission distributions must be the same as the number of states::

        sage: hmm.GaussianMixtureHiddenMarkovModel([[1]], [], [1])
        Traceback (most recent call last):
        ...
        ValueError: number of GaussianMixtures must be the same as number of entries of pi

        sage: hmm.GaussianMixtureHiddenMarkovModel([[1]], [[]], [1])
        Traceback (most recent call last):
        ...
        ValueError: must specify at least one component of the mixture model

    We test that the number of initial probabilities must equal the number of states::

        sage: hmm.GaussianMixtureHiddenMarkovModel([[1]], [[]], [1,2])
        Traceback (most recent call last):
        ...
        ValueError: number of entries of transition matrix A must be the square of the number of entries of pi
    """

    cdef object mixture  # mixture

    def __init__(self, A, B, pi=None, bint normalize=True):
        r"""
        Initialize a Gaussian mixture hidden Markov model.

        EXAMPLES::

            sage: hmm.GaussianMixtureHiddenMarkovModel([[.9,.1],[.4,.6]], [[(.4,(0,1)), (.6,(1,0.1))],[(1,(0,1))]], [.7,.3])
            Gaussian Mixture Hidden Markov Model with 2 States
            Transition matrix:
            [0.9 0.1]
            [0.4 0.6]
            Emission parameters:
            [0.4*N(0.0,1.0) + 0.6*N(1.0,0.1), 1.0*N(0.0,1.0)]
            Initial probabilities: [0.7000, 0.3000]
        """
        self.pi = util.initial_probs_to_TimeSeries(pi, normalize)
        self.N = len(self.pi)
        self.A = util.state_matrix_to_TimeSeries(A, self.N, normalize)
        if self.N*self.N != len(self.A):
            raise ValueError("number of entries of transition matrix A must be the square of the number of entries of pi")

        self.mixture = [b if isinstance(b, GaussianMixtureDistribution) else
                        GaussianMixtureDistribution([flatten(x) for x in b])
                        for b in B]
        if len(self.mixture) != self.N:
            raise ValueError("number of GaussianMixtures must be the same as number of entries of pi")

    def __repr__(self):
        r"""
        Return string representation.

        EXAMPLES::

            sage: hmm.GaussianMixtureHiddenMarkovModel([[.9,.1],[.4,.6]], [[(.4,(0,1)), (.6,(1,0.1))],[(1,(0,1))]], [.7,.3]).__repr__()
            'Gaussian Mixture Hidden Markov Model with 2 States\nTransition matrix:\n[0.9 0.1]\n[0.4 0.6]\nEmission parameters:\n[0.4*N(0.0,1.0) + 0.6*N(1.0,0.1), 1.0*N(0.0,1.0)]\nInitial probabilities: [0.7000, 0.3000]'
        """
        s = "Gaussian Mixture Hidden Markov Model with %s States" % self.N
        s += '\nTransition matrix:\n%s' % self.transition_matrix()
        s += '\nEmission parameters:\n%s' % self.emission_parameters()
        s += '\nInitial probabilities: %s' % self.initial_probabilities()
        return s

    def __reduce__(self):
        r"""
        Used in pickling.

        EXAMPLES::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel([[1]], [[(.4,(0,1)), (.6,(1,0.1))]], [1])
            sage: loads(dumps(m)) == m
            True
        """
        return unpickle_gaussian_mixture_hmm_v1, \
               (self.A, self.B, self.pi, self.mixture)

    def __richcmp__(self, other, op):
        r"""
        Compare ``self`` and ``other``, which must both be
        ``GaussianMixtureHiddenMarkovModel``s.

        EXAMPLES::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel([[1]], [[(.4,(0,1)), (.6,(1,0.1))]], [1])
            sage: n = hmm.GaussianMixtureHiddenMarkovModel([[1]], [[(.5,(0,1)), (.5,(1,0.1))]], [1])
            sage: m < n
            True
            sage: m == m
            True
            sage: n > m
            True
            sage: n < m
            False
        """
        if not isinstance(other, GaussianMixtureHiddenMarkovModel):
            return NotImplemented
        return PyObject_RichCompare(self.__reduce__()[1],
                                    other.__reduce__()[1], op)

    def __getitem__(self, Py_ssize_t i):
        r"""
        Return the Gaussian mixture distribution associated to the
        i-th state.

        INPUT:

        - ``i`` -- integer

        OUTPUT: a Gaussian mixture distribution object

        EXAMPLES::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel([[.9,.1],[.4,.6]], [[(.4,(0,1)), (.6,(1,0.1))],[(1,(0,1))]], [.7,.3])
            sage: m[0]
            0.4*N(0.0,1.0) + 0.6*N(1.0,0.1)
            sage: m[1]
            1.0*N(0.0,1.0)

        Negative indexing works::

            sage: m[-1]
            1.0*N(0.0,1.0)

        Bounds are checked::

            sage: m[2]
            Traceback (most recent call last):
            ...
            IndexError: index out of range
            sage: m[-3]
            Traceback (most recent call last):
            ...
            IndexError: index out of range
        """
        if i < 0:
            i += self.N
        if i < 0 or i >= self.N:
            raise IndexError('index out of range')
        return self.mixture[i]

    def emission_parameters(self):
        r"""
        Return a list of all the emission distributions.

        OUTPUT: list of Gaussian mixtures

        EXAMPLES::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel([[.9,.1],[.4,.6]],
            ....:                                          [[(.4,(0,1)), (.6,(1,0.1))], [(1,(0,1))]],
            ....:                                          [.7,.3])
            sage: m.emission_parameters()
            [0.4*N(0.0,1.0) + 0.6*N(1.0,0.1), 1.0*N(0.0,1.0)]
        """
        return list(self.mixture)

    cdef double random_sample(self, int state, randstate rstate) noexcept:
        r"""
        Return a random sample from the normal distribution associated
        to the given state.

        This is only used internally, and no bounds or other error
        checking is done, so calling this improperly can lead to seg
        faults.

        INPUT:

        - ``state`` -- integer
        - ``rstate`` -- randstate instance

        OUTPUT:

        double
        """
        cdef GaussianMixtureDistribution G = self.mixture[state]
        return G._sample(rstate)

    cdef double probability_of(self, int state, double observation) noexcept:
        r"""
        Return the probability b_j(o) of seeing the given observation o
        (=observation) given that we're in the given state j (=state).

        This is a continuous probability, so this really returns a
        number p such that the probability of a value in the interval
        [o,o+d] is p*d.

        INPUT:

        - ``state`` -- integer
        - ``observation`` -- double

        OUTPUT:

        double
        """
        cdef GaussianMixtureDistribution G = self.mixture[state]
        return G.prob(observation)

    cdef TimeSeries _baum_welch_mixed_gamma(self, TimeSeries alpha, TimeSeries beta,
                                            TimeSeries obs, int j):
        r"""
        Let gamma_t(j,m) be the m-component (in the mixture) of the
        probability of being in state j at time t, given the
        observation sequence. This function outputs a TimeSeries v
        such that v[m*T + t] gives gamma_t(j, m) where T is the number
        of time steps.

        INPUT:

        - ``alpha`` -- TimeSeries
        - ``beta`` -- TimeSeries
        - ``obs`` -- TimeSeries
        - ``j`` -- integer

        OUTPUT:

        TimeSeries
        """
        cdef int i, k, m, N = self.N
        cdef Py_ssize_t t, T = alpha._length//N

        cdef double numer, alpha_minus, P, s, prob
        cdef GaussianMixtureDistribution G = self.mixture[j]
        cdef int M = len(G)
        cdef TimeSeries mixed_gamma = TimeSeries(T*M)

        for t in range(T):
            prob = self.probability_of(j, obs._values[t])
            if prob == 0:
                # If the probability of observing obs[t] in state j is 0, then
                # all of the m-mixture components *have* to automatically be 0,
                # since prob is the sum of those and they are all nonnegative.
                for m in range(M):
                    mixed_gamma._values[m*T + t] = 0
            else:
                # Compute the denominator we used when scaling gamma.
                # The key thing is that this is consistent between
                # gamma and mixed_gamma.
                P = 0
                for k in range(N):
                    P += alpha._values[t*N+k]*beta._values[t*N+k]

                # Divide out the total probability, so we can multiply back in
                # the m-components of the probability.
                alpha_minus = alpha._values[t*N + j] / prob
                for m in range(M):
                    numer = alpha_minus * G.prob_m(obs._values[t], m) * beta._values[t*N + j]
                    mixed_gamma._values[m*T + t] = numer / P

        return mixed_gamma

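    # Editorial note: for each time t the loop above computes the
    # per-component responsibility
    #
    #     gamma_t(j, m) = gamma_t(j) * prob_m(obs[t], m) / prob(obs[t]),
    #
    # where gamma_t(j) = alpha_t(j)*beta_t(j) / sum_k alpha_t(k)*beta_t(k) is
    # the usual state posterior, ``prob`` is the full mixture density and
    # ``prob_m`` the contribution of the m-th component; the values
    # gamma_t(j, m) therefore sum over m to gamma_t(j).
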
    def baum_welch(self, obs, int max_iter=1000, double log_likelihood_cutoff=1e-12,
                   double min_sd=0.01, bint fix_emissions=False):
        r"""
        Given an observation sequence ``obs``, improve this HMM using the
        Baum-Welch algorithm to increase the probability of observing ``obs``.

        INPUT:

        - ``obs`` -- a time series of emissions
        - ``max_iter`` -- integer (default: 1000); maximum number
          of Baum-Welch steps to take
        - ``log_likelihood_cutoff`` -- positive float (default: 1e-12);
          the minimal improvement in likelihood with respect to
          the last iteration required to continue. Relative value
          to log likelihood.
        - ``min_sd`` -- positive float (default: 0.01); when
          reestimating, the standard deviation of emissions is not
          allowed to be less than ``min_sd``.
        - ``fix_emissions`` -- boolean (default: ``False``); if ``True``, do not
          change emissions when updating

        OUTPUT:

        changes the model in place, and returns the log
        likelihood and number of iterations.

        EXAMPLES::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel(
            ....:     [[.9,.1],[.4,.6]],
            ....:     [[(.4,(0,1)), (.6,(1,0.1))], [(1,(0,1))]],
            ....:     [.7,.3])
            sage: set_random_seed(0); v = m.sample(10); v
            [0.3576, -0.9365, 0.9449, -0.6957, 1.0217,
            0.9644, 0.9987, -0.5950, -1.0219, 0.6477]
            sage: m.log_likelihood(v)
            -8.31408655939536...
            sage: m.baum_welch(v)
            (2.18905068682..., 15)
            sage: m.log_likelihood(v)
            2.18905068682...
            sage: m # rel tol 6e-12
            Gaussian Mixture Hidden Markov Model with 2 States
            Transition matrix:
            [ 0.8746363339773399 0.12536366602266016]
            [ 1.0 1.451685202290174e-40]
            Emission parameters:
            [0.500161629343*N(-0.812298726239,0.173329026744)
            + 0.499838370657*N(0.982433690378,0.029719932009),
            1.0*N(0.503260056832,0.145881515324)]
            Initial probabilities: [0.0000, 1.0000]

        We illustrate bounding the standard deviation below. Note that above we had
        different emission parameters when the min_sd was the default of 0.01::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel(
            ....:     [[.9,.1],[.4,.6]],
            ....:     [[(.4,(0,1)), (.6,(1,0.1))], [(1,(0,1))]],
            ....:     [.7,.3])
            sage: m.baum_welch(v, min_sd=1)
            (-12.617885761692..., 1000)
            sage: m.emission_parameters() # rel tol 6e-12
            [0.503545634447*N(0.200166509595,1.0) + 0.496454365553*N(0.200166509595,1.0),
            1.0*N(0.0543433426535,1.0)]

        We illustrate fixing all emissions::

            sage: m = hmm.GaussianMixtureHiddenMarkovModel(
            ....:     [[.9,.1],[.4,.6]],
            ....:     [[(.4,(0,1)), (.6,(1,0.1))], [(1,(0,1))]],
            ....:     [.7,.3])
            sage: set_random_seed(0); v = m.sample(10)
            sage: m.baum_welch(v, fix_emissions=True)
            (-7.58656858997..., 36)
            sage: m.emission_parameters()
            [0.4*N(0.0,1.0) + 0.6*N(1.0,0.1),
            1.0*N(0.0,1.0)]
        """
        if not isinstance(obs, TimeSeries):
            obs = TimeSeries(obs)
        cdef TimeSeries _obs = obs
        cdef TimeSeries alpha, beta, scale, gamma, mixed_gamma, mixed_gamma_m, xi
        cdef double log_probability, log_probability0, log_probability_prev, delta
        cdef int i, j, k, m, N, n_iterations
        cdef Py_ssize_t t, T
        cdef double denominator_A, numerator_A, denominator_B, numerator_mean, numerator_std, \
            numerator_c, c, mu, std, numer, denom, new_mu, new_std, new_c, s
        cdef GaussianMixtureDistribution G

        # Initialization
        alpha, scale, log_probability0 = self._forward_scale_all(_obs)
        if not isfinite(log_probability0):
            return (0.0, 0)
        log_probability = log_probability0
        beta = self._backward_scale_all(_obs, scale)
        gamma = self._baum_welch_gamma(alpha, beta)
        xi = self._baum_welch_xi(alpha, beta, _obs)
        log_probability_prev = log_probability
        N = self.N
        n_iterations = 0
        T = len(_obs)

        # Re-estimation
        while True:

            # Reestimate frequency of state i in time t=0
            for i in range(N):
                if not isfinite(gamma._values[0*N+i]):
                    # Before raising an error, leave self in a valid state.
                    util.normalize_probability_TimeSeries(self.pi, 0, self.pi._length)
                    raise RuntimeError("impossible to compute gamma during reestimation")
                self.pi._values[i] = gamma._values[0*N+i]

            # Update the probabilities pi to define a valid discrete distribution
            util.normalize_probability_TimeSeries(self.pi, 0, self.pi._length)

            # Reestimate transition matrix and emission probabilities in
            # each state.
            for i in range(N):
                # Reestimate the state transition matrix
                denominator_A = 0.0
                for t in range(T-1):
                    denominator_A += gamma._values[t*N+i]
                if not isnormal(denominator_A):
                    raise RuntimeError("unable to re-estimate pi (1)")
                for j in range(N):
                    numerator_A = 0.0
                    for t in range(T-1):
                        numerator_A += xi._values[t*N*N+i*N+j]
                    self.A._values[i*N+j] = numerator_A / denominator_A

                # Rescale the i-th row of the transition matrix to be
                # a valid stochastic matrix:
                util.normalize_probability_TimeSeries(self.A, i*N, (i+1)*N)

                ########################################################################
                # Re-estimate the emission probabilities
                ########################################################################
                G = self.mixture[i]
                if not fix_emissions and not G.is_fixed():
                    mixed_gamma = self._baum_welch_mixed_gamma(alpha, beta, _obs, i)
                    new_G = []
                    for m in range(len(G)):
                        if G.fixed._values[m]:
                            new_G.append(G[m])
                            continue

                        # Compute re-estimated mu_{j,m}
                        numer = 0
                        denom = 0
                        for t in range(T):
                            numer += mixed_gamma._values[m*T + t] * _obs._values[t]
                            denom += mixed_gamma._values[m*T + t]
                        new_mu = numer / denom

                        # Compute re-estimated standard deviation
                        numer = 0
                        mu = G[m][1]
                        for t in range(T):
                            numer += mixed_gamma._values[m*T + t] * \
                                (_obs._values[t] - mu)*(_obs._values[t] - mu)

                        new_std = sqrt(numer / denom)
                        if new_std < min_sd:
                            new_std = min_sd

                        # Compute re-estimated weighting coefficient
                        new_c = denom
                        s = 0
                        for t in range(T):
                            s += gamma._values[t*N + i]
                        new_c /= s

                        new_G.append((new_c,new_mu,new_std))

                    self.mixture[i] = GaussianMixtureDistribution(new_G)

            n_iterations += 1
            if n_iterations >= max_iter: break

            ########################################################################
            # Initialization for next iteration
            ########################################################################
            alpha, scale, log_probability0 = self._forward_scale_all(_obs)
            if not isfinite(log_probability0): break
            log_probability = log_probability0
            beta = self._backward_scale_all(_obs, scale)
            gamma = self._baum_welch_gamma(alpha, beta)
            xi = self._baum_welch_xi(alpha, beta, _obs)

            # Compute the difference between the log probability of
            # two iterations.
            delta = log_probability - log_probability_prev
            log_probability_prev = log_probability

            # If the log probability does not improve by more than the
            # cutoff, then terminate.
            if delta >= 0 and delta <= log_likelihood_cutoff:
                break

        return log_probability, n_iterations

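    # Editorial note: for each non-fixed mixture component m of state j, the
    # loop above applies the mixture form of the Baum-Welch updates,
    #
    #     c_{j,m}     <-  sum_t gamma_t(j,m) / sum_t gamma_t(j)
    #     mu_{j,m}    <-  sum_t gamma_t(j,m)*obs[t] / sum_t gamma_t(j,m)
    #     sigma_{j,m} <-  sqrt( sum_t gamma_t(j,m)*(obs[t]-mu_{j,m})^2 / sum_t gamma_t(j,m) )
    #
    # (the sigma update uses the component's previous mean, and the result is
    # clamped below by ``min_sd``), where gamma_t(j,m) is the per-component
    # responsibility produced by ``_baum_welch_mixed_gamma``.
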

##################################################
# For Unpickling
##################################################

# We keep the _v0 function for backwards compatibility.
def unpickle_gaussian_hmm_v0(A, B, pi, name):
    r"""
    EXAMPLES::

        sage: m = hmm.GaussianHiddenMarkovModel([[1]], [(0,1)], [1])
        sage: sage.stats.hmm.chmm.unpickle_gaussian_hmm_v0(m.transition_matrix(), m.emission_parameters(), m.initial_probabilities(), 'test')
        Gaussian Hidden Markov Model with 1 States
        Transition matrix:
        [1.0]
        Emission parameters:
        [(0.0, 1.0)]
        Initial probabilities: [1.0000]
    """
    return GaussianHiddenMarkovModel(A,B,pi)


def unpickle_gaussian_hmm_v1(A, B, pi, prob, n_out):
    r"""
    EXAMPLES::

        sage: m = hmm.GaussianHiddenMarkovModel([[1]], [(0,1)], [1])
        sage: loads(dumps(m)) == m # indirect test
        True
    """
    cdef GaussianHiddenMarkovModel m = GaussianHiddenMarkovModel.__new__(GaussianHiddenMarkovModel)
    m.A = A
    m.B = B
    m.pi = pi
    m.prob = prob
    m.n_out = n_out
    return m


def unpickle_gaussian_mixture_hmm_v1(A, B, pi, mixture):
    r"""
    EXAMPLES::

        sage: m = hmm.GaussianMixtureHiddenMarkovModel([[1]], [[(.4,(0,1)), (.6,(1,0.1))]], [1])
        sage: loads(dumps(m)) == m # indirect test
        True
    """
    cdef GaussianMixtureHiddenMarkovModel m = GaussianMixtureHiddenMarkovModel.__new__(GaussianMixtureHiddenMarkovModel)
    m.A = A
    m.B = B
    m.pi = pi
    m.mixture = mixture
    return m