numpy 2.4.2__cp313-cp313t-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- numpy/__config__.py +170 -0
- numpy/__config__.pyi +108 -0
- numpy/__init__.cython-30.pxd +1242 -0
- numpy/__init__.pxd +1155 -0
- numpy/__init__.py +942 -0
- numpy/__init__.pyi +6202 -0
- numpy/_array_api_info.py +346 -0
- numpy/_array_api_info.pyi +206 -0
- numpy/_configtool.py +39 -0
- numpy/_configtool.pyi +1 -0
- numpy/_core/__init__.py +203 -0
- numpy/_core/__init__.pyi +666 -0
- numpy/_core/_add_newdocs.py +7151 -0
- numpy/_core/_add_newdocs.pyi +2 -0
- numpy/_core/_add_newdocs_scalars.py +381 -0
- numpy/_core/_add_newdocs_scalars.pyi +16 -0
- numpy/_core/_asarray.py +130 -0
- numpy/_core/_asarray.pyi +43 -0
- numpy/_core/_dtype.py +366 -0
- numpy/_core/_dtype.pyi +56 -0
- numpy/_core/_dtype_ctypes.py +120 -0
- numpy/_core/_dtype_ctypes.pyi +83 -0
- numpy/_core/_exceptions.py +162 -0
- numpy/_core/_exceptions.pyi +54 -0
- numpy/_core/_internal.py +968 -0
- numpy/_core/_internal.pyi +61 -0
- numpy/_core/_methods.py +252 -0
- numpy/_core/_methods.pyi +22 -0
- numpy/_core/_multiarray_tests.cp313t-win32.lib +0 -0
- numpy/_core/_multiarray_tests.cp313t-win32.pyd +0 -0
- numpy/_core/_multiarray_umath.cp313t-win32.lib +0 -0
- numpy/_core/_multiarray_umath.cp313t-win32.pyd +0 -0
- numpy/_core/_operand_flag_tests.cp313t-win32.lib +0 -0
- numpy/_core/_operand_flag_tests.cp313t-win32.pyd +0 -0
- numpy/_core/_rational_tests.cp313t-win32.lib +0 -0
- numpy/_core/_rational_tests.cp313t-win32.pyd +0 -0
- numpy/_core/_simd.cp313t-win32.lib +0 -0
- numpy/_core/_simd.cp313t-win32.pyd +0 -0
- numpy/_core/_simd.pyi +35 -0
- numpy/_core/_string_helpers.py +100 -0
- numpy/_core/_string_helpers.pyi +12 -0
- numpy/_core/_struct_ufunc_tests.cp313t-win32.lib +0 -0
- numpy/_core/_struct_ufunc_tests.cp313t-win32.pyd +0 -0
- numpy/_core/_type_aliases.py +131 -0
- numpy/_core/_type_aliases.pyi +86 -0
- numpy/_core/_ufunc_config.py +515 -0
- numpy/_core/_ufunc_config.pyi +69 -0
- numpy/_core/_umath_tests.cp313t-win32.lib +0 -0
- numpy/_core/_umath_tests.cp313t-win32.pyd +0 -0
- numpy/_core/_umath_tests.pyi +47 -0
- numpy/_core/arrayprint.py +1779 -0
- numpy/_core/arrayprint.pyi +158 -0
- numpy/_core/cversions.py +13 -0
- numpy/_core/defchararray.py +1414 -0
- numpy/_core/defchararray.pyi +1150 -0
- numpy/_core/einsumfunc.py +1650 -0
- numpy/_core/einsumfunc.pyi +184 -0
- numpy/_core/fromnumeric.py +4233 -0
- numpy/_core/fromnumeric.pyi +1735 -0
- numpy/_core/function_base.py +547 -0
- numpy/_core/function_base.pyi +276 -0
- numpy/_core/getlimits.py +462 -0
- numpy/_core/getlimits.pyi +124 -0
- numpy/_core/include/numpy/__multiarray_api.c +376 -0
- numpy/_core/include/numpy/__multiarray_api.h +1628 -0
- numpy/_core/include/numpy/__ufunc_api.c +55 -0
- numpy/_core/include/numpy/__ufunc_api.h +349 -0
- numpy/_core/include/numpy/_neighborhood_iterator_imp.h +90 -0
- numpy/_core/include/numpy/_numpyconfig.h +33 -0
- numpy/_core/include/numpy/_public_dtype_api_table.h +86 -0
- numpy/_core/include/numpy/arrayobject.h +7 -0
- numpy/_core/include/numpy/arrayscalars.h +198 -0
- numpy/_core/include/numpy/dtype_api.h +547 -0
- numpy/_core/include/numpy/halffloat.h +70 -0
- numpy/_core/include/numpy/ndarrayobject.h +304 -0
- numpy/_core/include/numpy/ndarraytypes.h +1982 -0
- numpy/_core/include/numpy/npy_2_compat.h +249 -0
- numpy/_core/include/numpy/npy_2_complexcompat.h +28 -0
- numpy/_core/include/numpy/npy_3kcompat.h +374 -0
- numpy/_core/include/numpy/npy_common.h +989 -0
- numpy/_core/include/numpy/npy_cpu.h +126 -0
- numpy/_core/include/numpy/npy_endian.h +79 -0
- numpy/_core/include/numpy/npy_math.h +602 -0
- numpy/_core/include/numpy/npy_no_deprecated_api.h +20 -0
- numpy/_core/include/numpy/npy_os.h +42 -0
- numpy/_core/include/numpy/numpyconfig.h +185 -0
- numpy/_core/include/numpy/random/LICENSE.txt +21 -0
- numpy/_core/include/numpy/random/bitgen.h +20 -0
- numpy/_core/include/numpy/random/distributions.h +209 -0
- numpy/_core/include/numpy/random/libdivide.h +2079 -0
- numpy/_core/include/numpy/ufuncobject.h +343 -0
- numpy/_core/include/numpy/utils.h +37 -0
- numpy/_core/lib/npy-pkg-config/mlib.ini +12 -0
- numpy/_core/lib/npy-pkg-config/npymath.ini +20 -0
- numpy/_core/lib/npymath.lib +0 -0
- numpy/_core/lib/pkgconfig/numpy.pc +7 -0
- numpy/_core/memmap.py +363 -0
- numpy/_core/memmap.pyi +3 -0
- numpy/_core/multiarray.py +1740 -0
- numpy/_core/multiarray.pyi +1328 -0
- numpy/_core/numeric.py +2771 -0
- numpy/_core/numeric.pyi +1276 -0
- numpy/_core/numerictypes.py +633 -0
- numpy/_core/numerictypes.pyi +196 -0
- numpy/_core/overrides.py +188 -0
- numpy/_core/overrides.pyi +47 -0
- numpy/_core/printoptions.py +32 -0
- numpy/_core/printoptions.pyi +28 -0
- numpy/_core/records.py +1088 -0
- numpy/_core/records.pyi +340 -0
- numpy/_core/shape_base.py +996 -0
- numpy/_core/shape_base.pyi +182 -0
- numpy/_core/strings.py +1813 -0
- numpy/_core/strings.pyi +536 -0
- numpy/_core/tests/_locales.py +72 -0
- numpy/_core/tests/_natype.py +144 -0
- numpy/_core/tests/data/astype_copy.pkl +0 -0
- numpy/_core/tests/data/generate_umath_validation_data.cpp +170 -0
- numpy/_core/tests/data/recarray_from_file.fits +0 -0
- numpy/_core/tests/data/umath-validation-set-README.txt +15 -0
- numpy/_core/tests/data/umath-validation-set-arccos.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-arccosh.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-arcsin.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-arcsinh.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-arctan.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-arctanh.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-cbrt.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-cos.csv +1375 -0
- numpy/_core/tests/data/umath-validation-set-cosh.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-exp.csv +412 -0
- numpy/_core/tests/data/umath-validation-set-exp2.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-expm1.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-log.csv +271 -0
- numpy/_core/tests/data/umath-validation-set-log10.csv +1629 -0
- numpy/_core/tests/data/umath-validation-set-log1p.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-log2.csv +1629 -0
- numpy/_core/tests/data/umath-validation-set-sin.csv +1370 -0
- numpy/_core/tests/data/umath-validation-set-sinh.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-tan.csv +1429 -0
- numpy/_core/tests/data/umath-validation-set-tanh.csv +1429 -0
- numpy/_core/tests/examples/cython/checks.pyx +374 -0
- numpy/_core/tests/examples/cython/meson.build +43 -0
- numpy/_core/tests/examples/cython/setup.py +39 -0
- numpy/_core/tests/examples/limited_api/limited_api1.c +15 -0
- numpy/_core/tests/examples/limited_api/limited_api2.pyx +11 -0
- numpy/_core/tests/examples/limited_api/limited_api_latest.c +19 -0
- numpy/_core/tests/examples/limited_api/meson.build +63 -0
- numpy/_core/tests/examples/limited_api/setup.py +24 -0
- numpy/_core/tests/test__exceptions.py +90 -0
- numpy/_core/tests/test_abc.py +54 -0
- numpy/_core/tests/test_api.py +655 -0
- numpy/_core/tests/test_argparse.py +90 -0
- numpy/_core/tests/test_array_api_info.py +113 -0
- numpy/_core/tests/test_array_coercion.py +928 -0
- numpy/_core/tests/test_array_interface.py +222 -0
- numpy/_core/tests/test_arraymethod.py +84 -0
- numpy/_core/tests/test_arrayobject.py +95 -0
- numpy/_core/tests/test_arrayprint.py +1324 -0
- numpy/_core/tests/test_casting_floatingpoint_errors.py +154 -0
- numpy/_core/tests/test_casting_unittests.py +955 -0
- numpy/_core/tests/test_conversion_utils.py +209 -0
- numpy/_core/tests/test_cpu_dispatcher.py +48 -0
- numpy/_core/tests/test_cpu_features.py +450 -0
- numpy/_core/tests/test_custom_dtypes.py +393 -0
- numpy/_core/tests/test_cython.py +352 -0
- numpy/_core/tests/test_datetime.py +2792 -0
- numpy/_core/tests/test_defchararray.py +858 -0
- numpy/_core/tests/test_deprecations.py +460 -0
- numpy/_core/tests/test_dlpack.py +190 -0
- numpy/_core/tests/test_dtype.py +2110 -0
- numpy/_core/tests/test_einsum.py +1351 -0
- numpy/_core/tests/test_errstate.py +131 -0
- numpy/_core/tests/test_extint128.py +217 -0
- numpy/_core/tests/test_finfo.py +86 -0
- numpy/_core/tests/test_function_base.py +504 -0
- numpy/_core/tests/test_getlimits.py +171 -0
- numpy/_core/tests/test_half.py +593 -0
- numpy/_core/tests/test_hashtable.py +36 -0
- numpy/_core/tests/test_indexerrors.py +122 -0
- numpy/_core/tests/test_indexing.py +1692 -0
- numpy/_core/tests/test_item_selection.py +167 -0
- numpy/_core/tests/test_limited_api.py +102 -0
- numpy/_core/tests/test_longdouble.py +370 -0
- numpy/_core/tests/test_mem_overlap.py +933 -0
- numpy/_core/tests/test_mem_policy.py +453 -0
- numpy/_core/tests/test_memmap.py +248 -0
- numpy/_core/tests/test_multiarray.py +11008 -0
- numpy/_core/tests/test_multiprocessing.py +55 -0
- numpy/_core/tests/test_multithreading.py +406 -0
- numpy/_core/tests/test_nditer.py +3533 -0
- numpy/_core/tests/test_nep50_promotions.py +287 -0
- numpy/_core/tests/test_numeric.py +4301 -0
- numpy/_core/tests/test_numerictypes.py +650 -0
- numpy/_core/tests/test_overrides.py +800 -0
- numpy/_core/tests/test_print.py +202 -0
- numpy/_core/tests/test_protocols.py +46 -0
- numpy/_core/tests/test_records.py +544 -0
- numpy/_core/tests/test_regression.py +2677 -0
- numpy/_core/tests/test_scalar_ctors.py +203 -0
- numpy/_core/tests/test_scalar_methods.py +328 -0
- numpy/_core/tests/test_scalarbuffer.py +153 -0
- numpy/_core/tests/test_scalarinherit.py +105 -0
- numpy/_core/tests/test_scalarmath.py +1168 -0
- numpy/_core/tests/test_scalarprint.py +403 -0
- numpy/_core/tests/test_shape_base.py +904 -0
- numpy/_core/tests/test_simd.py +1345 -0
- numpy/_core/tests/test_simd_module.py +105 -0
- numpy/_core/tests/test_stringdtype.py +1855 -0
- numpy/_core/tests/test_strings.py +1523 -0
- numpy/_core/tests/test_ufunc.py +3405 -0
- numpy/_core/tests/test_umath.py +4962 -0
- numpy/_core/tests/test_umath_accuracy.py +132 -0
- numpy/_core/tests/test_umath_complex.py +631 -0
- numpy/_core/tests/test_unicode.py +369 -0
- numpy/_core/umath.py +60 -0
- numpy/_core/umath.pyi +232 -0
- numpy/_distributor_init.py +15 -0
- numpy/_distributor_init.pyi +1 -0
- numpy/_expired_attrs_2_0.py +78 -0
- numpy/_expired_attrs_2_0.pyi +61 -0
- numpy/_globals.py +121 -0
- numpy/_globals.pyi +17 -0
- numpy/_pyinstaller/__init__.py +0 -0
- numpy/_pyinstaller/__init__.pyi +0 -0
- numpy/_pyinstaller/hook-numpy.py +36 -0
- numpy/_pyinstaller/hook-numpy.pyi +6 -0
- numpy/_pyinstaller/tests/__init__.py +16 -0
- numpy/_pyinstaller/tests/pyinstaller-smoke.py +32 -0
- numpy/_pyinstaller/tests/test_pyinstaller.py +35 -0
- numpy/_pytesttester.py +201 -0
- numpy/_pytesttester.pyi +18 -0
- numpy/_typing/__init__.py +173 -0
- numpy/_typing/_add_docstring.py +153 -0
- numpy/_typing/_array_like.py +106 -0
- numpy/_typing/_char_codes.py +213 -0
- numpy/_typing/_dtype_like.py +114 -0
- numpy/_typing/_extended_precision.py +15 -0
- numpy/_typing/_nbit.py +19 -0
- numpy/_typing/_nbit_base.py +94 -0
- numpy/_typing/_nbit_base.pyi +39 -0
- numpy/_typing/_nested_sequence.py +79 -0
- numpy/_typing/_scalars.py +20 -0
- numpy/_typing/_shape.py +8 -0
- numpy/_typing/_ufunc.py +7 -0
- numpy/_typing/_ufunc.pyi +975 -0
- numpy/_utils/__init__.py +95 -0
- numpy/_utils/__init__.pyi +28 -0
- numpy/_utils/_convertions.py +18 -0
- numpy/_utils/_convertions.pyi +4 -0
- numpy/_utils/_inspect.py +192 -0
- numpy/_utils/_inspect.pyi +70 -0
- numpy/_utils/_pep440.py +486 -0
- numpy/_utils/_pep440.pyi +118 -0
- numpy/char/__init__.py +2 -0
- numpy/char/__init__.pyi +111 -0
- numpy/conftest.py +248 -0
- numpy/core/__init__.py +33 -0
- numpy/core/__init__.pyi +0 -0
- numpy/core/_dtype.py +10 -0
- numpy/core/_dtype.pyi +0 -0
- numpy/core/_dtype_ctypes.py +10 -0
- numpy/core/_dtype_ctypes.pyi +0 -0
- numpy/core/_internal.py +27 -0
- numpy/core/_multiarray_umath.py +57 -0
- numpy/core/_utils.py +21 -0
- numpy/core/arrayprint.py +10 -0
- numpy/core/defchararray.py +10 -0
- numpy/core/einsumfunc.py +10 -0
- numpy/core/fromnumeric.py +10 -0
- numpy/core/function_base.py +10 -0
- numpy/core/getlimits.py +10 -0
- numpy/core/multiarray.py +25 -0
- numpy/core/numeric.py +12 -0
- numpy/core/numerictypes.py +10 -0
- numpy/core/overrides.py +10 -0
- numpy/core/overrides.pyi +7 -0
- numpy/core/records.py +10 -0
- numpy/core/shape_base.py +10 -0
- numpy/core/umath.py +10 -0
- numpy/ctypeslib/__init__.py +13 -0
- numpy/ctypeslib/__init__.pyi +15 -0
- numpy/ctypeslib/_ctypeslib.py +603 -0
- numpy/ctypeslib/_ctypeslib.pyi +236 -0
- numpy/doc/ufuncs.py +138 -0
- numpy/dtypes.py +41 -0
- numpy/dtypes.pyi +630 -0
- numpy/exceptions.py +246 -0
- numpy/exceptions.pyi +27 -0
- numpy/f2py/__init__.py +86 -0
- numpy/f2py/__init__.pyi +5 -0
- numpy/f2py/__main__.py +5 -0
- numpy/f2py/__version__.py +1 -0
- numpy/f2py/__version__.pyi +1 -0
- numpy/f2py/_backends/__init__.py +9 -0
- numpy/f2py/_backends/__init__.pyi +5 -0
- numpy/f2py/_backends/_backend.py +44 -0
- numpy/f2py/_backends/_backend.pyi +46 -0
- numpy/f2py/_backends/_distutils.py +76 -0
- numpy/f2py/_backends/_distutils.pyi +13 -0
- numpy/f2py/_backends/_meson.py +244 -0
- numpy/f2py/_backends/_meson.pyi +62 -0
- numpy/f2py/_backends/meson.build.template +58 -0
- numpy/f2py/_isocbind.py +62 -0
- numpy/f2py/_isocbind.pyi +13 -0
- numpy/f2py/_src_pyf.py +247 -0
- numpy/f2py/_src_pyf.pyi +28 -0
- numpy/f2py/auxfuncs.py +1004 -0
- numpy/f2py/auxfuncs.pyi +262 -0
- numpy/f2py/capi_maps.py +811 -0
- numpy/f2py/capi_maps.pyi +33 -0
- numpy/f2py/cb_rules.py +665 -0
- numpy/f2py/cb_rules.pyi +17 -0
- numpy/f2py/cfuncs.py +1563 -0
- numpy/f2py/cfuncs.pyi +31 -0
- numpy/f2py/common_rules.py +143 -0
- numpy/f2py/common_rules.pyi +9 -0
- numpy/f2py/crackfortran.py +3725 -0
- numpy/f2py/crackfortran.pyi +266 -0
- numpy/f2py/diagnose.py +149 -0
- numpy/f2py/diagnose.pyi +1 -0
- numpy/f2py/f2py2e.py +788 -0
- numpy/f2py/f2py2e.pyi +74 -0
- numpy/f2py/f90mod_rules.py +269 -0
- numpy/f2py/f90mod_rules.pyi +16 -0
- numpy/f2py/func2subr.py +329 -0
- numpy/f2py/func2subr.pyi +7 -0
- numpy/f2py/rules.py +1629 -0
- numpy/f2py/rules.pyi +41 -0
- numpy/f2py/setup.cfg +3 -0
- numpy/f2py/src/fortranobject.c +1436 -0
- numpy/f2py/src/fortranobject.h +173 -0
- numpy/f2py/symbolic.py +1518 -0
- numpy/f2py/symbolic.pyi +219 -0
- numpy/f2py/tests/__init__.py +16 -0
- numpy/f2py/tests/src/abstract_interface/foo.f90 +34 -0
- numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 +6 -0
- numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +235 -0
- numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +1 -0
- numpy/f2py/tests/src/assumed_shape/foo_free.f90 +34 -0
- numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +41 -0
- numpy/f2py/tests/src/assumed_shape/foo_use.f90 +19 -0
- numpy/f2py/tests/src/assumed_shape/precision.f90 +4 -0
- numpy/f2py/tests/src/block_docstring/foo.f +6 -0
- numpy/f2py/tests/src/callback/foo.f +62 -0
- numpy/f2py/tests/src/callback/gh17797.f90 +7 -0
- numpy/f2py/tests/src/callback/gh18335.f90 +17 -0
- numpy/f2py/tests/src/callback/gh25211.f +10 -0
- numpy/f2py/tests/src/callback/gh25211.pyf +18 -0
- numpy/f2py/tests/src/callback/gh26681.f90 +18 -0
- numpy/f2py/tests/src/cli/gh_22819.pyf +6 -0
- numpy/f2py/tests/src/cli/hi77.f +3 -0
- numpy/f2py/tests/src/cli/hiworld.f90 +3 -0
- numpy/f2py/tests/src/common/block.f +11 -0
- numpy/f2py/tests/src/common/gh19161.f90 +10 -0
- numpy/f2py/tests/src/crackfortran/accesstype.f90 +13 -0
- numpy/f2py/tests/src/crackfortran/common_with_division.f +17 -0
- numpy/f2py/tests/src/crackfortran/data_common.f +8 -0
- numpy/f2py/tests/src/crackfortran/data_multiplier.f +5 -0
- numpy/f2py/tests/src/crackfortran/data_stmts.f90 +20 -0
- numpy/f2py/tests/src/crackfortran/data_with_comments.f +8 -0
- numpy/f2py/tests/src/crackfortran/foo_deps.f90 +6 -0
- numpy/f2py/tests/src/crackfortran/gh15035.f +16 -0
- numpy/f2py/tests/src/crackfortran/gh17859.f +12 -0
- numpy/f2py/tests/src/crackfortran/gh22648.pyf +7 -0
- numpy/f2py/tests/src/crackfortran/gh23533.f +5 -0
- numpy/f2py/tests/src/crackfortran/gh23598.f90 +4 -0
- numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 +11 -0
- numpy/f2py/tests/src/crackfortran/gh23879.f90 +20 -0
- numpy/f2py/tests/src/crackfortran/gh27697.f90 +12 -0
- numpy/f2py/tests/src/crackfortran/gh2848.f90 +13 -0
- numpy/f2py/tests/src/crackfortran/operators.f90 +49 -0
- numpy/f2py/tests/src/crackfortran/privatemod.f90 +11 -0
- numpy/f2py/tests/src/crackfortran/publicmod.f90 +10 -0
- numpy/f2py/tests/src/crackfortran/pubprivmod.f90 +10 -0
- numpy/f2py/tests/src/crackfortran/unicode_comment.f90 +4 -0
- numpy/f2py/tests/src/f2cmap/.f2py_f2cmap +1 -0
- numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 +9 -0
- numpy/f2py/tests/src/isocintrin/isoCtests.f90 +34 -0
- numpy/f2py/tests/src/kind/foo.f90 +20 -0
- numpy/f2py/tests/src/mixed/foo.f +5 -0
- numpy/f2py/tests/src/mixed/foo_fixed.f90 +8 -0
- numpy/f2py/tests/src/mixed/foo_free.f90 +8 -0
- numpy/f2py/tests/src/modules/gh25337/data.f90 +8 -0
- numpy/f2py/tests/src/modules/gh25337/use_data.f90 +6 -0
- numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 +21 -0
- numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 +21 -0
- numpy/f2py/tests/src/modules/module_data_docstring.f90 +12 -0
- numpy/f2py/tests/src/modules/use_modules.f90 +20 -0
- numpy/f2py/tests/src/negative_bounds/issue_20853.f90 +7 -0
- numpy/f2py/tests/src/parameter/constant_array.f90 +45 -0
- numpy/f2py/tests/src/parameter/constant_both.f90 +57 -0
- numpy/f2py/tests/src/parameter/constant_compound.f90 +15 -0
- numpy/f2py/tests/src/parameter/constant_integer.f90 +22 -0
- numpy/f2py/tests/src/parameter/constant_non_compound.f90 +23 -0
- numpy/f2py/tests/src/parameter/constant_real.f90 +23 -0
- numpy/f2py/tests/src/quoted_character/foo.f +14 -0
- numpy/f2py/tests/src/regression/AB.inc +1 -0
- numpy/f2py/tests/src/regression/assignOnlyModule.f90 +25 -0
- numpy/f2py/tests/src/regression/datonly.f90 +17 -0
- numpy/f2py/tests/src/regression/f77comments.f +26 -0
- numpy/f2py/tests/src/regression/f77fixedform.f95 +5 -0
- numpy/f2py/tests/src/regression/f90continuation.f90 +9 -0
- numpy/f2py/tests/src/regression/incfile.f90 +5 -0
- numpy/f2py/tests/src/regression/inout.f90 +9 -0
- numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 +5 -0
- numpy/f2py/tests/src/regression/mod_derived_types.f90 +23 -0
- numpy/f2py/tests/src/return_character/foo77.f +45 -0
- numpy/f2py/tests/src/return_character/foo90.f90 +48 -0
- numpy/f2py/tests/src/return_complex/foo77.f +45 -0
- numpy/f2py/tests/src/return_complex/foo90.f90 +48 -0
- numpy/f2py/tests/src/return_integer/foo77.f +56 -0
- numpy/f2py/tests/src/return_integer/foo90.f90 +59 -0
- numpy/f2py/tests/src/return_logical/foo77.f +56 -0
- numpy/f2py/tests/src/return_logical/foo90.f90 +59 -0
- numpy/f2py/tests/src/return_real/foo77.f +45 -0
- numpy/f2py/tests/src/return_real/foo90.f90 +48 -0
- numpy/f2py/tests/src/routines/funcfortranname.f +5 -0
- numpy/f2py/tests/src/routines/funcfortranname.pyf +11 -0
- numpy/f2py/tests/src/routines/subrout.f +4 -0
- numpy/f2py/tests/src/routines/subrout.pyf +10 -0
- numpy/f2py/tests/src/size/foo.f90 +44 -0
- numpy/f2py/tests/src/string/char.f90 +29 -0
- numpy/f2py/tests/src/string/fixed_string.f90 +34 -0
- numpy/f2py/tests/src/string/gh24008.f +8 -0
- numpy/f2py/tests/src/string/gh24662.f90 +7 -0
- numpy/f2py/tests/src/string/gh25286.f90 +14 -0
- numpy/f2py/tests/src/string/gh25286.pyf +12 -0
- numpy/f2py/tests/src/string/gh25286_bc.pyf +12 -0
- numpy/f2py/tests/src/string/scalar_string.f90 +9 -0
- numpy/f2py/tests/src/string/string.f +12 -0
- numpy/f2py/tests/src/value_attrspec/gh21665.f90 +9 -0
- numpy/f2py/tests/test_abstract_interface.py +26 -0
- numpy/f2py/tests/test_array_from_pyobj.py +678 -0
- numpy/f2py/tests/test_assumed_shape.py +50 -0
- numpy/f2py/tests/test_block_docstring.py +20 -0
- numpy/f2py/tests/test_callback.py +263 -0
- numpy/f2py/tests/test_character.py +641 -0
- numpy/f2py/tests/test_common.py +23 -0
- numpy/f2py/tests/test_crackfortran.py +421 -0
- numpy/f2py/tests/test_data.py +71 -0
- numpy/f2py/tests/test_docs.py +66 -0
- numpy/f2py/tests/test_f2cmap.py +17 -0
- numpy/f2py/tests/test_f2py2e.py +983 -0
- numpy/f2py/tests/test_isoc.py +56 -0
- numpy/f2py/tests/test_kind.py +52 -0
- numpy/f2py/tests/test_mixed.py +35 -0
- numpy/f2py/tests/test_modules.py +83 -0
- numpy/f2py/tests/test_parameter.py +129 -0
- numpy/f2py/tests/test_pyf_src.py +43 -0
- numpy/f2py/tests/test_quoted_character.py +18 -0
- numpy/f2py/tests/test_regression.py +187 -0
- numpy/f2py/tests/test_return_character.py +48 -0
- numpy/f2py/tests/test_return_complex.py +67 -0
- numpy/f2py/tests/test_return_integer.py +55 -0
- numpy/f2py/tests/test_return_logical.py +65 -0
- numpy/f2py/tests/test_return_real.py +109 -0
- numpy/f2py/tests/test_routines.py +29 -0
- numpy/f2py/tests/test_semicolon_split.py +75 -0
- numpy/f2py/tests/test_size.py +45 -0
- numpy/f2py/tests/test_string.py +100 -0
- numpy/f2py/tests/test_symbolic.py +500 -0
- numpy/f2py/tests/test_value_attrspec.py +15 -0
- numpy/f2py/tests/util.py +442 -0
- numpy/f2py/use_rules.py +99 -0
- numpy/f2py/use_rules.pyi +9 -0
- numpy/fft/__init__.py +213 -0
- numpy/fft/__init__.pyi +38 -0
- numpy/fft/_helper.py +235 -0
- numpy/fft/_helper.pyi +44 -0
- numpy/fft/_pocketfft.py +1693 -0
- numpy/fft/_pocketfft.pyi +137 -0
- numpy/fft/_pocketfft_umath.cp313t-win32.lib +0 -0
- numpy/fft/_pocketfft_umath.cp313t-win32.pyd +0 -0
- numpy/fft/tests/__init__.py +0 -0
- numpy/fft/tests/test_helper.py +167 -0
- numpy/fft/tests/test_pocketfft.py +589 -0
- numpy/lib/__init__.py +97 -0
- numpy/lib/__init__.pyi +52 -0
- numpy/lib/_array_utils_impl.py +62 -0
- numpy/lib/_array_utils_impl.pyi +10 -0
- numpy/lib/_arraypad_impl.py +926 -0
- numpy/lib/_arraypad_impl.pyi +88 -0
- numpy/lib/_arraysetops_impl.py +1158 -0
- numpy/lib/_arraysetops_impl.pyi +462 -0
- numpy/lib/_arrayterator_impl.py +224 -0
- numpy/lib/_arrayterator_impl.pyi +45 -0
- numpy/lib/_datasource.py +700 -0
- numpy/lib/_datasource.pyi +30 -0
- numpy/lib/_format_impl.py +1036 -0
- numpy/lib/_format_impl.pyi +56 -0
- numpy/lib/_function_base_impl.py +5760 -0
- numpy/lib/_function_base_impl.pyi +2324 -0
- numpy/lib/_histograms_impl.py +1085 -0
- numpy/lib/_histograms_impl.pyi +40 -0
- numpy/lib/_index_tricks_impl.py +1048 -0
- numpy/lib/_index_tricks_impl.pyi +267 -0
- numpy/lib/_iotools.py +900 -0
- numpy/lib/_iotools.pyi +116 -0
- numpy/lib/_nanfunctions_impl.py +2006 -0
- numpy/lib/_nanfunctions_impl.pyi +48 -0
- numpy/lib/_npyio_impl.py +2583 -0
- numpy/lib/_npyio_impl.pyi +299 -0
- numpy/lib/_polynomial_impl.py +1465 -0
- numpy/lib/_polynomial_impl.pyi +338 -0
- numpy/lib/_scimath_impl.py +642 -0
- numpy/lib/_scimath_impl.pyi +93 -0
- numpy/lib/_shape_base_impl.py +1289 -0
- numpy/lib/_shape_base_impl.pyi +236 -0
- numpy/lib/_stride_tricks_impl.py +582 -0
- numpy/lib/_stride_tricks_impl.pyi +73 -0
- numpy/lib/_twodim_base_impl.py +1201 -0
- numpy/lib/_twodim_base_impl.pyi +408 -0
- numpy/lib/_type_check_impl.py +710 -0
- numpy/lib/_type_check_impl.pyi +348 -0
- numpy/lib/_ufunclike_impl.py +199 -0
- numpy/lib/_ufunclike_impl.pyi +60 -0
- numpy/lib/_user_array_impl.py +310 -0
- numpy/lib/_user_array_impl.pyi +226 -0
- numpy/lib/_utils_impl.py +784 -0
- numpy/lib/_utils_impl.pyi +22 -0
- numpy/lib/_version.py +153 -0
- numpy/lib/_version.pyi +17 -0
- numpy/lib/array_utils.py +7 -0
- numpy/lib/array_utils.pyi +6 -0
- numpy/lib/format.py +24 -0
- numpy/lib/format.pyi +24 -0
- numpy/lib/introspect.py +94 -0
- numpy/lib/introspect.pyi +3 -0
- numpy/lib/mixins.py +180 -0
- numpy/lib/mixins.pyi +78 -0
- numpy/lib/npyio.py +1 -0
- numpy/lib/npyio.pyi +5 -0
- numpy/lib/recfunctions.py +1681 -0
- numpy/lib/recfunctions.pyi +444 -0
- numpy/lib/scimath.py +13 -0
- numpy/lib/scimath.pyi +12 -0
- numpy/lib/stride_tricks.py +1 -0
- numpy/lib/stride_tricks.pyi +4 -0
- numpy/lib/tests/__init__.py +0 -0
- numpy/lib/tests/data/py2-np0-objarr.npy +0 -0
- numpy/lib/tests/data/py2-objarr.npy +0 -0
- numpy/lib/tests/data/py2-objarr.npz +0 -0
- numpy/lib/tests/data/py3-objarr.npy +0 -0
- numpy/lib/tests/data/py3-objarr.npz +0 -0
- numpy/lib/tests/data/python3.npy +0 -0
- numpy/lib/tests/data/win64python2.npy +0 -0
- numpy/lib/tests/test__datasource.py +328 -0
- numpy/lib/tests/test__iotools.py +358 -0
- numpy/lib/tests/test__version.py +64 -0
- numpy/lib/tests/test_array_utils.py +32 -0
- numpy/lib/tests/test_arraypad.py +1427 -0
- numpy/lib/tests/test_arraysetops.py +1302 -0
- numpy/lib/tests/test_arrayterator.py +45 -0
- numpy/lib/tests/test_format.py +1054 -0
- numpy/lib/tests/test_function_base.py +4756 -0
- numpy/lib/tests/test_histograms.py +855 -0
- numpy/lib/tests/test_index_tricks.py +693 -0
- numpy/lib/tests/test_io.py +2857 -0
- numpy/lib/tests/test_loadtxt.py +1099 -0
- numpy/lib/tests/test_mixins.py +215 -0
- numpy/lib/tests/test_nanfunctions.py +1438 -0
- numpy/lib/tests/test_packbits.py +376 -0
- numpy/lib/tests/test_polynomial.py +325 -0
- numpy/lib/tests/test_recfunctions.py +1042 -0
- numpy/lib/tests/test_regression.py +231 -0
- numpy/lib/tests/test_shape_base.py +813 -0
- numpy/lib/tests/test_stride_tricks.py +655 -0
- numpy/lib/tests/test_twodim_base.py +559 -0
- numpy/lib/tests/test_type_check.py +473 -0
- numpy/lib/tests/test_ufunclike.py +97 -0
- numpy/lib/tests/test_utils.py +80 -0
- numpy/lib/user_array.py +1 -0
- numpy/lib/user_array.pyi +1 -0
- numpy/linalg/__init__.py +95 -0
- numpy/linalg/__init__.pyi +71 -0
- numpy/linalg/_linalg.py +3657 -0
- numpy/linalg/_linalg.pyi +548 -0
- numpy/linalg/_umath_linalg.cp313t-win32.lib +0 -0
- numpy/linalg/_umath_linalg.cp313t-win32.pyd +0 -0
- numpy/linalg/_umath_linalg.pyi +60 -0
- numpy/linalg/lapack_lite.cp313t-win32.lib +0 -0
- numpy/linalg/lapack_lite.cp313t-win32.pyd +0 -0
- numpy/linalg/lapack_lite.pyi +143 -0
- numpy/linalg/tests/__init__.py +0 -0
- numpy/linalg/tests/test_deprecations.py +21 -0
- numpy/linalg/tests/test_linalg.py +2442 -0
- numpy/linalg/tests/test_regression.py +182 -0
- numpy/ma/API_CHANGES.txt +135 -0
- numpy/ma/LICENSE +24 -0
- numpy/ma/README.rst +236 -0
- numpy/ma/__init__.py +53 -0
- numpy/ma/__init__.pyi +458 -0
- numpy/ma/core.py +8929 -0
- numpy/ma/core.pyi +3733 -0
- numpy/ma/extras.py +2266 -0
- numpy/ma/extras.pyi +297 -0
- numpy/ma/mrecords.py +762 -0
- numpy/ma/mrecords.pyi +96 -0
- numpy/ma/tests/__init__.py +0 -0
- numpy/ma/tests/test_arrayobject.py +40 -0
- numpy/ma/tests/test_core.py +6008 -0
- numpy/ma/tests/test_deprecations.py +65 -0
- numpy/ma/tests/test_extras.py +1945 -0
- numpy/ma/tests/test_mrecords.py +495 -0
- numpy/ma/tests/test_old_ma.py +939 -0
- numpy/ma/tests/test_regression.py +83 -0
- numpy/ma/tests/test_subclassing.py +469 -0
- numpy/ma/testutils.py +294 -0
- numpy/ma/testutils.pyi +69 -0
- numpy/matlib.py +380 -0
- numpy/matlib.pyi +580 -0
- numpy/matrixlib/__init__.py +12 -0
- numpy/matrixlib/__init__.pyi +3 -0
- numpy/matrixlib/defmatrix.py +1119 -0
- numpy/matrixlib/defmatrix.pyi +218 -0
- numpy/matrixlib/tests/__init__.py +0 -0
- numpy/matrixlib/tests/test_defmatrix.py +455 -0
- numpy/matrixlib/tests/test_interaction.py +360 -0
- numpy/matrixlib/tests/test_masked_matrix.py +240 -0
- numpy/matrixlib/tests/test_matrix_linalg.py +110 -0
- numpy/matrixlib/tests/test_multiarray.py +17 -0
- numpy/matrixlib/tests/test_numeric.py +18 -0
- numpy/matrixlib/tests/test_regression.py +31 -0
- numpy/polynomial/__init__.py +187 -0
- numpy/polynomial/__init__.pyi +31 -0
- numpy/polynomial/_polybase.py +1191 -0
- numpy/polynomial/_polybase.pyi +262 -0
- numpy/polynomial/_polytypes.pyi +501 -0
- numpy/polynomial/chebyshev.py +2001 -0
- numpy/polynomial/chebyshev.pyi +180 -0
- numpy/polynomial/hermite.py +1738 -0
- numpy/polynomial/hermite.pyi +106 -0
- numpy/polynomial/hermite_e.py +1640 -0
- numpy/polynomial/hermite_e.pyi +106 -0
- numpy/polynomial/laguerre.py +1673 -0
- numpy/polynomial/laguerre.pyi +100 -0
- numpy/polynomial/legendre.py +1603 -0
- numpy/polynomial/legendre.pyi +100 -0
- numpy/polynomial/polynomial.py +1625 -0
- numpy/polynomial/polynomial.pyi +109 -0
- numpy/polynomial/polyutils.py +759 -0
- numpy/polynomial/polyutils.pyi +307 -0
- numpy/polynomial/tests/__init__.py +0 -0
- numpy/polynomial/tests/test_chebyshev.py +618 -0
- numpy/polynomial/tests/test_classes.py +613 -0
- numpy/polynomial/tests/test_hermite.py +553 -0
- numpy/polynomial/tests/test_hermite_e.py +554 -0
- numpy/polynomial/tests/test_laguerre.py +535 -0
- numpy/polynomial/tests/test_legendre.py +566 -0
- numpy/polynomial/tests/test_polynomial.py +691 -0
- numpy/polynomial/tests/test_polyutils.py +123 -0
- numpy/polynomial/tests/test_printing.py +557 -0
- numpy/polynomial/tests/test_symbol.py +217 -0
- numpy/py.typed +0 -0
- numpy/random/LICENSE.md +71 -0
- numpy/random/__init__.pxd +14 -0
- numpy/random/__init__.py +213 -0
- numpy/random/__init__.pyi +124 -0
- numpy/random/_bounded_integers.cp313t-win32.lib +0 -0
- numpy/random/_bounded_integers.cp313t-win32.pyd +0 -0
- numpy/random/_bounded_integers.pxd +38 -0
- numpy/random/_bounded_integers.pyi +1 -0
- numpy/random/_common.cp313t-win32.lib +0 -0
- numpy/random/_common.cp313t-win32.pyd +0 -0
- numpy/random/_common.pxd +110 -0
- numpy/random/_common.pyi +16 -0
- numpy/random/_examples/cffi/extending.py +44 -0
- numpy/random/_examples/cffi/parse.py +53 -0
- numpy/random/_examples/cython/extending.pyx +77 -0
- numpy/random/_examples/cython/extending_distributions.pyx +117 -0
- numpy/random/_examples/cython/meson.build +53 -0
- numpy/random/_examples/numba/extending.py +86 -0
- numpy/random/_examples/numba/extending_distributions.py +67 -0
- numpy/random/_generator.cp313t-win32.lib +0 -0
- numpy/random/_generator.cp313t-win32.pyd +0 -0
- numpy/random/_generator.pyi +862 -0
- numpy/random/_mt19937.cp313t-win32.lib +0 -0
- numpy/random/_mt19937.cp313t-win32.pyd +0 -0
- numpy/random/_mt19937.pyi +27 -0
- numpy/random/_pcg64.cp313t-win32.lib +0 -0
- numpy/random/_pcg64.cp313t-win32.pyd +0 -0
- numpy/random/_pcg64.pyi +41 -0
- numpy/random/_philox.cp313t-win32.lib +0 -0
- numpy/random/_philox.cp313t-win32.pyd +0 -0
- numpy/random/_philox.pyi +36 -0
- numpy/random/_pickle.py +88 -0
- numpy/random/_pickle.pyi +43 -0
- numpy/random/_sfc64.cp313t-win32.lib +0 -0
- numpy/random/_sfc64.cp313t-win32.pyd +0 -0
- numpy/random/_sfc64.pyi +25 -0
- numpy/random/bit_generator.cp313t-win32.lib +0 -0
- numpy/random/bit_generator.cp313t-win32.pyd +0 -0
- numpy/random/bit_generator.pxd +40 -0
- numpy/random/bit_generator.pyi +123 -0
- numpy/random/c_distributions.pxd +119 -0
- numpy/random/lib/npyrandom.lib +0 -0
- numpy/random/mtrand.cp313t-win32.lib +0 -0
- numpy/random/mtrand.cp313t-win32.pyd +0 -0
- numpy/random/mtrand.pyi +759 -0
- numpy/random/tests/__init__.py +0 -0
- numpy/random/tests/data/__init__.py +0 -0
- numpy/random/tests/data/generator_pcg64_np121.pkl.gz +0 -0
- numpy/random/tests/data/generator_pcg64_np126.pkl.gz +0 -0
- numpy/random/tests/data/mt19937-testset-1.csv +1001 -0
- numpy/random/tests/data/mt19937-testset-2.csv +1001 -0
- numpy/random/tests/data/pcg64-testset-1.csv +1001 -0
- numpy/random/tests/data/pcg64-testset-2.csv +1001 -0
- numpy/random/tests/data/pcg64dxsm-testset-1.csv +1001 -0
- numpy/random/tests/data/pcg64dxsm-testset-2.csv +1001 -0
- numpy/random/tests/data/philox-testset-1.csv +1001 -0
- numpy/random/tests/data/philox-testset-2.csv +1001 -0
- numpy/random/tests/data/sfc64-testset-1.csv +1001 -0
- numpy/random/tests/data/sfc64-testset-2.csv +1001 -0
- numpy/random/tests/data/sfc64_np126.pkl.gz +0 -0
- numpy/random/tests/test_direct.py +595 -0
- numpy/random/tests/test_extending.py +131 -0
- numpy/random/tests/test_generator_mt19937.py +2825 -0
- numpy/random/tests/test_generator_mt19937_regressions.py +221 -0
- numpy/random/tests/test_random.py +1724 -0
- numpy/random/tests/test_randomstate.py +2099 -0
- numpy/random/tests/test_randomstate_regression.py +213 -0
- numpy/random/tests/test_regression.py +175 -0
- numpy/random/tests/test_seed_sequence.py +79 -0
- numpy/random/tests/test_smoke.py +882 -0
- numpy/rec/__init__.py +2 -0
- numpy/rec/__init__.pyi +23 -0
- numpy/strings/__init__.py +2 -0
- numpy/strings/__init__.pyi +97 -0
- numpy/testing/__init__.py +22 -0
- numpy/testing/__init__.pyi +107 -0
- numpy/testing/_private/__init__.py +0 -0
- numpy/testing/_private/__init__.pyi +0 -0
- numpy/testing/_private/extbuild.py +250 -0
- numpy/testing/_private/extbuild.pyi +25 -0
- numpy/testing/_private/utils.py +2830 -0
- numpy/testing/_private/utils.pyi +505 -0
- numpy/testing/overrides.py +84 -0
- numpy/testing/overrides.pyi +10 -0
- numpy/testing/print_coercion_tables.py +207 -0
- numpy/testing/print_coercion_tables.pyi +26 -0
- numpy/testing/tests/__init__.py +0 -0
- numpy/testing/tests/test_utils.py +2123 -0
- numpy/tests/__init__.py +0 -0
- numpy/tests/test__all__.py +10 -0
- numpy/tests/test_configtool.py +51 -0
- numpy/tests/test_ctypeslib.py +383 -0
- numpy/tests/test_lazyloading.py +42 -0
- numpy/tests/test_matlib.py +59 -0
- numpy/tests/test_numpy_config.py +47 -0
- numpy/tests/test_numpy_version.py +54 -0
- numpy/tests/test_public_api.py +807 -0
- numpy/tests/test_reloading.py +76 -0
- numpy/tests/test_scripts.py +48 -0
- numpy/tests/test_warnings.py +79 -0
- numpy/typing/__init__.py +233 -0
- numpy/typing/__init__.pyi +3 -0
- numpy/typing/mypy_plugin.py +200 -0
- numpy/typing/tests/__init__.py +0 -0
- numpy/typing/tests/data/fail/arithmetic.pyi +126 -0
- numpy/typing/tests/data/fail/array_constructors.pyi +34 -0
- numpy/typing/tests/data/fail/array_like.pyi +15 -0
- numpy/typing/tests/data/fail/array_pad.pyi +6 -0
- numpy/typing/tests/data/fail/arrayprint.pyi +15 -0
- numpy/typing/tests/data/fail/arrayterator.pyi +14 -0
- numpy/typing/tests/data/fail/bitwise_ops.pyi +17 -0
- numpy/typing/tests/data/fail/char.pyi +63 -0
- numpy/typing/tests/data/fail/chararray.pyi +61 -0
- numpy/typing/tests/data/fail/comparisons.pyi +27 -0
- numpy/typing/tests/data/fail/constants.pyi +3 -0
- numpy/typing/tests/data/fail/datasource.pyi +16 -0
- numpy/typing/tests/data/fail/dtype.pyi +17 -0
- numpy/typing/tests/data/fail/einsumfunc.pyi +12 -0
- numpy/typing/tests/data/fail/flatiter.pyi +38 -0
- numpy/typing/tests/data/fail/fromnumeric.pyi +148 -0
- numpy/typing/tests/data/fail/histograms.pyi +12 -0
- numpy/typing/tests/data/fail/index_tricks.pyi +14 -0
- numpy/typing/tests/data/fail/lib_function_base.pyi +60 -0
- numpy/typing/tests/data/fail/lib_polynomial.pyi +29 -0
- numpy/typing/tests/data/fail/lib_utils.pyi +3 -0
- numpy/typing/tests/data/fail/lib_version.pyi +6 -0
- numpy/typing/tests/data/fail/linalg.pyi +52 -0
- numpy/typing/tests/data/fail/ma.pyi +155 -0
- numpy/typing/tests/data/fail/memmap.pyi +5 -0
- numpy/typing/tests/data/fail/modules.pyi +17 -0
- numpy/typing/tests/data/fail/multiarray.pyi +52 -0
- numpy/typing/tests/data/fail/ndarray.pyi +11 -0
- numpy/typing/tests/data/fail/ndarray_misc.pyi +49 -0
- numpy/typing/tests/data/fail/nditer.pyi +8 -0
- numpy/typing/tests/data/fail/nested_sequence.pyi +17 -0
- numpy/typing/tests/data/fail/npyio.pyi +24 -0
- numpy/typing/tests/data/fail/numerictypes.pyi +5 -0
- numpy/typing/tests/data/fail/random.pyi +62 -0
- numpy/typing/tests/data/fail/rec.pyi +17 -0
- numpy/typing/tests/data/fail/scalars.pyi +86 -0
- numpy/typing/tests/data/fail/shape.pyi +7 -0
- numpy/typing/tests/data/fail/shape_base.pyi +8 -0
- numpy/typing/tests/data/fail/stride_tricks.pyi +9 -0
- numpy/typing/tests/data/fail/strings.pyi +52 -0
- numpy/typing/tests/data/fail/testing.pyi +28 -0
- numpy/typing/tests/data/fail/twodim_base.pyi +39 -0
- numpy/typing/tests/data/fail/type_check.pyi +12 -0
- numpy/typing/tests/data/fail/ufunc_config.pyi +21 -0
- numpy/typing/tests/data/fail/ufunclike.pyi +21 -0
- numpy/typing/tests/data/fail/ufuncs.pyi +17 -0
- numpy/typing/tests/data/fail/warnings_and_errors.pyi +5 -0
- numpy/typing/tests/data/misc/extended_precision.pyi +9 -0
- numpy/typing/tests/data/mypy.ini +8 -0
- numpy/typing/tests/data/pass/arithmetic.py +614 -0
- numpy/typing/tests/data/pass/array_constructors.py +138 -0
- numpy/typing/tests/data/pass/array_like.py +43 -0
- numpy/typing/tests/data/pass/arrayprint.py +37 -0
- numpy/typing/tests/data/pass/arrayterator.py +28 -0
- numpy/typing/tests/data/pass/bitwise_ops.py +131 -0
- numpy/typing/tests/data/pass/comparisons.py +316 -0
- numpy/typing/tests/data/pass/dtype.py +57 -0
- numpy/typing/tests/data/pass/einsumfunc.py +36 -0
- numpy/typing/tests/data/pass/flatiter.py +26 -0
- numpy/typing/tests/data/pass/fromnumeric.py +272 -0
- numpy/typing/tests/data/pass/index_tricks.py +62 -0
- numpy/typing/tests/data/pass/lib_user_array.py +22 -0
- numpy/typing/tests/data/pass/lib_utils.py +19 -0
- numpy/typing/tests/data/pass/lib_version.py +18 -0
- numpy/typing/tests/data/pass/literal.py +52 -0
- numpy/typing/tests/data/pass/ma.py +199 -0
- numpy/typing/tests/data/pass/mod.py +149 -0
- numpy/typing/tests/data/pass/modules.py +45 -0
- numpy/typing/tests/data/pass/multiarray.py +77 -0
- numpy/typing/tests/data/pass/ndarray_conversion.py +81 -0
- numpy/typing/tests/data/pass/ndarray_misc.py +199 -0
- numpy/typing/tests/data/pass/ndarray_shape_manipulation.py +47 -0
- numpy/typing/tests/data/pass/nditer.py +4 -0
- numpy/typing/tests/data/pass/numeric.py +90 -0
- numpy/typing/tests/data/pass/numerictypes.py +17 -0
- numpy/typing/tests/data/pass/random.py +1498 -0
- numpy/typing/tests/data/pass/recfunctions.py +164 -0
- numpy/typing/tests/data/pass/scalars.py +249 -0
- numpy/typing/tests/data/pass/shape.py +19 -0
- numpy/typing/tests/data/pass/simple.py +170 -0
- numpy/typing/tests/data/pass/ufunc_config.py +64 -0
- numpy/typing/tests/data/pass/ufunclike.py +52 -0
- numpy/typing/tests/data/pass/ufuncs.py +16 -0
- numpy/typing/tests/data/pass/warnings_and_errors.py +6 -0
- numpy/typing/tests/data/reveal/arithmetic.pyi +719 -0
- numpy/typing/tests/data/reveal/array_api_info.pyi +70 -0
- numpy/typing/tests/data/reveal/array_constructors.pyi +279 -0
- numpy/typing/tests/data/reveal/arraypad.pyi +27 -0
- numpy/typing/tests/data/reveal/arrayprint.pyi +25 -0
- numpy/typing/tests/data/reveal/arraysetops.pyi +74 -0
- numpy/typing/tests/data/reveal/arrayterator.pyi +27 -0
- numpy/typing/tests/data/reveal/bitwise_ops.pyi +166 -0
- numpy/typing/tests/data/reveal/char.pyi +225 -0
- numpy/typing/tests/data/reveal/chararray.pyi +138 -0
- numpy/typing/tests/data/reveal/comparisons.pyi +264 -0
- numpy/typing/tests/data/reveal/constants.pyi +14 -0
- numpy/typing/tests/data/reveal/ctypeslib.pyi +81 -0
- numpy/typing/tests/data/reveal/datasource.pyi +23 -0
- numpy/typing/tests/data/reveal/dtype.pyi +132 -0
- numpy/typing/tests/data/reveal/einsumfunc.pyi +39 -0
- numpy/typing/tests/data/reveal/emath.pyi +54 -0
- numpy/typing/tests/data/reveal/fft.pyi +37 -0
- numpy/typing/tests/data/reveal/flatiter.pyi +86 -0
- numpy/typing/tests/data/reveal/fromnumeric.pyi +347 -0
- numpy/typing/tests/data/reveal/getlimits.pyi +53 -0
- numpy/typing/tests/data/reveal/histograms.pyi +25 -0
- numpy/typing/tests/data/reveal/index_tricks.pyi +70 -0
- numpy/typing/tests/data/reveal/lib_function_base.pyi +409 -0
- numpy/typing/tests/data/reveal/lib_polynomial.pyi +147 -0
- numpy/typing/tests/data/reveal/lib_utils.pyi +17 -0
- numpy/typing/tests/data/reveal/lib_version.pyi +20 -0
- numpy/typing/tests/data/reveal/linalg.pyi +154 -0
- numpy/typing/tests/data/reveal/ma.pyi +1098 -0
- numpy/typing/tests/data/reveal/matrix.pyi +73 -0
- numpy/typing/tests/data/reveal/memmap.pyi +19 -0
- numpy/typing/tests/data/reveal/mod.pyi +178 -0
- numpy/typing/tests/data/reveal/modules.pyi +51 -0
- numpy/typing/tests/data/reveal/multiarray.pyi +197 -0
- numpy/typing/tests/data/reveal/nbit_base_example.pyi +20 -0
- numpy/typing/tests/data/reveal/ndarray_assignability.pyi +82 -0
- numpy/typing/tests/data/reveal/ndarray_conversion.pyi +83 -0
- numpy/typing/tests/data/reveal/ndarray_misc.pyi +246 -0
- numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +47 -0
- numpy/typing/tests/data/reveal/nditer.pyi +49 -0
- numpy/typing/tests/data/reveal/nested_sequence.pyi +25 -0
- numpy/typing/tests/data/reveal/npyio.pyi +83 -0
- numpy/typing/tests/data/reveal/numeric.pyi +170 -0
- numpy/typing/tests/data/reveal/numerictypes.pyi +16 -0
- numpy/typing/tests/data/reveal/polynomial_polybase.pyi +217 -0
- numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +218 -0
- numpy/typing/tests/data/reveal/polynomial_series.pyi +138 -0
- numpy/typing/tests/data/reveal/random.pyi +1546 -0
- numpy/typing/tests/data/reveal/rec.pyi +171 -0
- numpy/typing/tests/data/reveal/scalars.pyi +191 -0
- numpy/typing/tests/data/reveal/shape.pyi +13 -0
- numpy/typing/tests/data/reveal/shape_base.pyi +52 -0
- numpy/typing/tests/data/reveal/stride_tricks.pyi +27 -0
- numpy/typing/tests/data/reveal/strings.pyi +196 -0
- numpy/typing/tests/data/reveal/testing.pyi +198 -0
- numpy/typing/tests/data/reveal/twodim_base.pyi +225 -0
- numpy/typing/tests/data/reveal/type_check.pyi +67 -0
- numpy/typing/tests/data/reveal/ufunc_config.pyi +29 -0
- numpy/typing/tests/data/reveal/ufunclike.pyi +31 -0
- numpy/typing/tests/data/reveal/ufuncs.pyi +142 -0
- numpy/typing/tests/data/reveal/warnings_and_errors.pyi +11 -0
- numpy/typing/tests/test_isfile.py +38 -0
- numpy/typing/tests/test_runtime.py +110 -0
- numpy/typing/tests/test_typing.py +205 -0
- numpy/version.py +11 -0
- numpy/version.pyi +9 -0
- numpy-2.4.2.dist-info/METADATA +139 -0
- numpy-2.4.2.dist-info/RECORD +929 -0
- numpy-2.4.2.dist-info/WHEEL +4 -0
- numpy-2.4.2.dist-info/entry_points.txt +13 -0
- numpy-2.4.2.dist-info/licenses/LICENSE.txt +914 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt +21 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING +14 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/src/highway/LICENSE +371 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt +27 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md +28 -0
- numpy-2.4.2.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE +30 -0
- numpy-2.4.2.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md +25 -0
- numpy-2.4.2.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt +48 -0
- numpy-2.4.2.dist-info/licenses/numpy/ma/LICENSE +24 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/LICENSE.md +71 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/distributions/LICENSE.md +61 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md +61 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md +22 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/philox/LICENSE.md +31 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md +27 -0
- numpy-2.4.2.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md +9 -0
numpy/lib/_npyio_impl.py
ADDED
|
@@ -0,0 +1,2583 @@
|
|
|
1
|
+
"""
|
|
2
|
+
IO related functions.
|
|
3
|
+
"""
|
|
4
|
+
import contextlib
|
|
5
|
+
import functools
|
|
6
|
+
import itertools
|
|
7
|
+
import operator
|
|
8
|
+
import os
|
|
9
|
+
import pickle
|
|
10
|
+
import re
|
|
11
|
+
import warnings
|
|
12
|
+
import weakref
|
|
13
|
+
from collections.abc import Mapping
|
|
14
|
+
from operator import itemgetter
|
|
15
|
+
|
|
16
|
+
import numpy as np
|
|
17
|
+
from numpy._core import overrides
|
|
18
|
+
from numpy._core._multiarray_umath import _load_from_filelike
|
|
19
|
+
from numpy._core.multiarray import packbits, unpackbits
|
|
20
|
+
from numpy._core.overrides import finalize_array_function_like, set_module
|
|
21
|
+
from numpy._utils import asbytes, asunicode
|
|
22
|
+
|
|
23
|
+
from . import format
|
|
24
|
+
from ._datasource import DataSource # noqa: F401
|
|
25
|
+
from ._format_impl import _MAX_HEADER_SIZE
|
|
26
|
+
from ._iotools import (
|
|
27
|
+
ConversionWarning,
|
|
28
|
+
ConverterError,
|
|
29
|
+
ConverterLockError,
|
|
30
|
+
LineSplitter,
|
|
31
|
+
NameValidator,
|
|
32
|
+
StringConverter,
|
|
33
|
+
_decode_line,
|
|
34
|
+
_is_string_like,
|
|
35
|
+
easy_dtype,
|
|
36
|
+
flatten_dtype,
|
|
37
|
+
has_nested_fields,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
# Public names re-exported through ``numpy`` and ``numpy.lib.npyio``.
__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex'
]


# Dispatch decorator used throughout this module; pre-binds ``module`` so
# decorated functions report ``numpy`` as their public home.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class BagObj:
    """
    BagObj(obj)

    Redirect attribute access on this wrapper to item access on ``obj``.

    Parameters
    ----------
    obj : class instance
        Object whose ``__getitem__`` serves every attribute look-up made
        on the wrapper.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.lib._npyio_impl import BagObj
    >>> class Demo:
    ...     def __getitem__(self, key):
    ...         # Called for *any* attribute looked up on the wrapper.
    ...         return "Doesn't matter what you want, you're gonna get this"
    ...
    >>> bag = BagObj(Demo())
    >>> bag.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bag.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Keep only a weak proxy: wrapping an NpzFile this way does not
        # create a reference cycle, so the NpzFile stays collectable by
        # plain reference counting.
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        # Translate every attribute read into an item look-up on the
        # wrapped object.  ``object.__getattribute__`` is used to reach
        # ``_obj`` without re-entering this method's redirection.
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            # Surface missing keys as AttributeError so the wrapper
            # behaves like an ordinary object (hasattr, getattr, ...).
            raise AttributeError(key) from None

    def __dir__(self):
        """
        List the keys of the wrapped object.

        Enables ``dir(bagobj)`` and therefore tab-completion in an
        interpreter or IPython when wrapping an NpzFile.
        """
        wrapped = object.__getattribute__(self, '_obj')
        return list(wrapped.keys())
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def zipfile_factory(file, *args, **kwargs):
    """
    Open a ``zipfile.ZipFile`` with Zip64 support forced on.

    ``file`` may be a file-like object (anything with a ``read`` method),
    a str, or a path-like object such as ``pathlib.Path``.  Remaining
    positional and keyword arguments are forwarded to the
    ``zipfile.ZipFile`` constructor.
    """
    # zipfile is imported lazily: it depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    # File-like objects pass straight through; anything else is treated
    # as a path.
    target = file if hasattr(file, 'read') else os.fspath(file)
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(target, *args, **kwargs)
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
@set_module('numpy.lib.npyio')
class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2.
    max_header_size : int, optional
        Maximum allowed size of the header. Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed. In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file, str, or pathlib.Path
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> import numpy as np
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> npz
    NpzFile 'object' with keys: x, y
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception.
    # (These class-level defaults also let close() run even when __init__
    # failed before assigning the instance attributes.)
    zip = None
    fid = None
    # How many array names __repr__ shows before truncating with "...".
    _MAX_REPR_ARRAY_COUNT = 5

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=_MAX_HEADER_SIZE):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        _files = _zip.namelist()
        # Public names drop the ".npy" suffix of the archive members.
        self.files = [name.removesuffix(".npy") for name in _files]
        # _files accepts both forms as keys: first map the stripped name
        # ('x' -> 'x.npy'), then the full member name ('x.npy' -> 'x.npy').
        self._files = dict(zip(self.files, _files))
        self._files.update(zip(_files, _files))
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        self.zip = _zip
        # Attribute-style access (npz.f.key) is served by this wrapper;
        # BagObj holds only a weak proxy, so no reference cycle is created.
        self.f = BagObj(self)
        if own_fid:
            # Only when ownership is transferred does close() also close
            # the underlying handle.
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        # Safe to call more than once: both handles are reset to None
        # after closing (also invoked from __del__ and __exit__).
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        try:
            # Resolve either the stripped or the full member name to the
            # actual archive member.
            key = self._files[key]
        except KeyError:
            raise KeyError(f"{key} is not a file in the archive") from None
        else:
            with self.zip.open(key) as bytes:
                # Peek at the magic bytes: members that look like .npy
                # files are parsed, everything else is returned raw.
                magic = bytes.read(len(format.MAGIC_PREFIX))
                bytes.seek(0)
                if magic == format.MAGIC_PREFIX:
                    # FIXME: This seems like it will copy strings around
                    #   more than is strictly necessary.  The zipfile
                    #   will read the string and then
                    #   the format.read_array will copy the string
                    #   to another place in memory.
                    #   It would be better if the zipfile could read
                    #   (or at least uncompress) the data
                    #   directly into the array memory.
                    return format.read_array(
                        bytes,
                        allow_pickle=self.allow_pickle,
                        pickle_kwargs=self.pickle_kwargs,
                        max_header_size=self.max_header_size
                    )
                else:
                    return bytes.read()

    def __contains__(self, key):
        # Membership matches both 'x' and 'x.npy' forms (see __init__).
        return (key in self._files)

    def __repr__(self):
        # Get filename or default to `object`
        if isinstance(self.fid, str):
            filename = self.fid
        else:
            filename = getattr(self.fid, "name", "object")

        # Get the name of arrays
        array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
        if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
            array_names += "..."
        return f"NpzFile {filename!r} with keys: {array_names}"

    # Work around problems with the docstrings in the Mapping methods
    # They contain a `->`, which confuses the type annotation interpretations
    # of sphinx-docs. See gh-25964

    def get(self, key, default=None, /):
        """
        D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None.
        """
        return Mapping.get(self, key, default)

    def items(self):
        """
        D.items() returns a set-like object providing a view on the items
        """
        return Mapping.items(self)

    def keys(self):
        """
        D.keys() returns a set-like object providing a view on the keys
        """
        return Mapping.keys(self)

    def values(self):
        """
        D.values() returns a set-like object providing a view on the values
        """
        return Mapping.values(self)
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    .. warning:: Loading files that contain object arrays uses the ``pickle``
                 module, which is not secure against erroneous or maliciously
                 constructed data. Consider passing ``allow_pickle=False`` to
                 load data that is known not to contain object arrays for the
                 safer handling of untrusted sources.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods and must always
        be opened in binary mode. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'
    max_header_size : int, optional
        Maximum allowed size of the header. Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed. In that case
        the file is by definition trusted and the limit is unnecessary.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.
    UnpicklingError
        If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` given.
    EOFError
        When calling ``np.load`` multiple times on the same file handle,
        if all data has already been read

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    >>> import numpy as np

    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy._core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # result can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports}

    # ExitStack owns the file handle only when we opened it ourselves;
    # user-supplied file objects are never closed here.
    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os.fspath(file), "rb"))
            own_fid = True

        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        # Sniff the leading bytes, then rewind so the chosen reader sees
        # the stream from its start.
        magic = fid.read(N)
        if not magic:
            raise EOFError("No data left in file")
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            # (pop_all() detaches the handle from this ExitStack so it
            # survives past the `with` block; NpzFile closes it instead).
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs,
                          max_header_size=max_header_size)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                if allow_pickle:
                    # A trusted (pickle-enabled) file needs no header size
                    # guard, so lift the limit entirely.
                    max_header_size = 2**64
                return format.open_memmap(file, mode=mmap_mode,
                                          max_header_size=max_header_size)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs,
                                         max_header_size=max_header_size)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError(
                    "This file contains pickled (object) data. If you trust "
                    "the file you can load it unsafely using the "
                    "`allow_pickle=` keyword argument or `pickle.load()`.")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise pickle.UnpicklingError(
                    f"Failed to interpret file {file!r} as a pickle") from e
|
|
498
|
+
|
|
499
|
+
|
|
500
|
+
def _save_dispatcher(file, arr, allow_pickle=None):
|
|
501
|
+
return (arr,)
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  An open file object
        is used as given; a string or Path filename gets a ``.npy``
        extension appended if it does not already have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Whether object arrays may be serialized with Python pickles.
        Reasons to disallow pickles include security (loading pickled
        data can execute arbitrary code) and portability (pickled objects
        may not be loadable on different Python installations).
        Default: True

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Any data saved to the file is appended to the end of the file.

    Examples
    --------
    >>> import numpy as np

    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])


    >>> with open('test.npy', 'wb') as f:
    ...     np.save(f, np.array([1, 2]))
    ...     np.save(f, np.array([1, 3]))
    >>> with open('test.npy', 'rb') as f:
    ...     a = np.load(f)
    ...     b = np.load(f)
    >>> print(a, b)
    # [1 2] [1 3]
    """
    if hasattr(file, 'write'):
        # Caller supplied an open stream: write to it, do not close it.
        ctx = contextlib.nullcontext(file)
    else:
        path = os.fspath(file)
        if not path.endswith('.npy'):
            path += '.npy'
        ctx = open(path, "wb")

    with ctx as fh:
        format.write_array(fh, np.asanyarray(arr),
                           allow_pickle=allow_pickle)
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
def _savez_dispatcher(file, *args, allow_pickle=True, **kwds):
|
|
576
|
+
yield from args
|
|
577
|
+
yield from kwds.values()
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, allow_pickle=True, **kwds):
    """Save several arrays into a single file in uncompressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can execute
        arbitrary code) and portability (pickled objects may not be loadable
        on different Python installations, for example if the stored objects
        require libraries that are not available, and not all pickled data is
        compatible between different versions of Python).
        Default: True
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`
    object is returned. This is a dictionary-like object which can be queried
    for its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Keys passed in `kwds` are used as filenames inside the ZIP archive.
    Therefore, keys should be valid filenames; e.g., avoid keys that begin with
    ``/`` or contain ``.``.

    When naming variables with keyword arguments, it is not possible to name a
    variable ``file``, as this would cause the ``file`` argument to be defined
    twice in the call to ``savez``.

    Examples
    --------
    >>> import numpy as np
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_0', 'arr_1']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # All the work (naming positional arrays arr_0, arr_1, ... and writing
    # the zip archive) is delegated to _savez; compress=False here.
    _savez(file, args, kwds, False, allow_pickle=allow_pickle)
|
|
674
|
+
|
|
675
|
+
|
|
676
|
+
def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds):
|
|
677
|
+
yield from args
|
|
678
|
+
yield from kwds.values()
|
|
679
|
+
|
|
680
|
+
|
|
681
|
+
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, allow_pickle=True, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e.,
    ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can execute
        arbitrary code) and portability (pickled objects may not be loadable
        on different Python installations, for example if the stored objects
        require libraries that are not available, and not all pickled data is
        compatible between different versions of Python).
        Default: True
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    :py:mod:`numpy.lib.format`.


    When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`
    object is returned. This is a dictionary-like object which can be queried
    for its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> import numpy as np
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    # Same machinery as `savez`, but with zip compression enabled.
    _savez(file, args, kwds, True, allow_pickle=allow_pickle)
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    """
    Shared implementation of `savez` and `savez_compressed`.

    Each array in `args` (named ``arr_0``, ``arr_1``, ...) and in `kwds`
    (named by its keyword) is written as one ``.npy`` member of a zip
    archive; the archive uses ZIP_DEFLATED when `compress` is true,
    otherwise ZIP_STORED.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os.fspath(file)
        if not file.endswith('.npz'):
            file += '.npz'

    # Positional arrays receive generated names; a clash with an explicit
    # keyword name is an error rather than a silent overwrite.
    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict:
            raise ValueError(
                f"Cannot use un-named variables and keyword {key}")
        namedict[key] = val

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)
    try:
        for name, value in namedict.items():
            arr = np.asanyarray(value)
            # always force zip64, gh-10776
            with zipf.open(name + '.npy', 'w', force_zip64=True) as member:
                format.write_array(member, arr,
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
    finally:
        zipf.close()
|
|
791
|
+
|
|
792
|
+
|
|
793
|
+
def _ensure_ndmin_ndarray_check_param(ndmin):
|
|
794
|
+
"""Just checks if the param ndmin is supported on
|
|
795
|
+
_ensure_ndmin_ndarray. It is intended to be used as
|
|
796
|
+
verification before running anything expensive.
|
|
797
|
+
e.g. loadtxt, genfromtxt
|
|
798
|
+
"""
|
|
799
|
+
# Check correctness of the values of `ndmin`
|
|
800
|
+
if ndmin not in [0, 1, 2]:
|
|
801
|
+
raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
|
|
802
|
+
|
|
803
|
+
def _ensure_ndmin_ndarray(a, *, ndmin: int):
|
|
804
|
+
"""This is a helper function of loadtxt and genfromtxt to ensure
|
|
805
|
+
proper minimum dimension as requested
|
|
806
|
+
|
|
807
|
+
ndim : int. Supported values 1, 2, 3
|
|
808
|
+
^^ whenever this changes, keep in sync with
|
|
809
|
+
_ensure_ndmin_ndarray_check_param
|
|
810
|
+
"""
|
|
811
|
+
# Verify that the array has at least dimensions `ndmin`.
|
|
812
|
+
# Tweak the size and shape of the arrays - remove extraneous dimensions
|
|
813
|
+
if a.ndim > ndmin:
|
|
814
|
+
a = np.squeeze(a)
|
|
815
|
+
# and ensure we have the minimum number of dimensions asked for
|
|
816
|
+
# - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
|
|
817
|
+
if a.ndim < ndmin:
|
|
818
|
+
if ndmin == 1:
|
|
819
|
+
a = np.atleast_1d(a)
|
|
820
|
+
elif ndmin == 2:
|
|
821
|
+
a = np.atleast_2d(a).T
|
|
822
|
+
|
|
823
|
+
return a
|
|
824
|
+
|
|
825
|
+
|
|
826
|
+
# Number of lines loadtxt reads in one chunk; module-level so tests can
# override it to exercise the chunking path with small inputs.
_loadtxt_chunksize = 50000
|
|
828
|
+
|
|
829
|
+
|
|
830
|
+
def _check_nonneg_int(value, name="argument"):
|
|
831
|
+
try:
|
|
832
|
+
operator.index(value)
|
|
833
|
+
except TypeError:
|
|
834
|
+
raise TypeError(f"{name} must be an integer") from None
|
|
835
|
+
if value < 0:
|
|
836
|
+
raise ValueError(f"{name} must be nonnegative")
|
|
837
|
+
|
|
838
|
+
|
|
839
|
+
def _preprocess_comments(iterable, comments, encoding):
|
|
840
|
+
"""
|
|
841
|
+
Generator that consumes a line iterated iterable and strips out the
|
|
842
|
+
multiple (or multi-character) comments from lines.
|
|
843
|
+
This is a pre-processing step to achieve feature parity with loadtxt
|
|
844
|
+
(we assume that this feature is a nieche feature).
|
|
845
|
+
"""
|
|
846
|
+
for line in iterable:
|
|
847
|
+
if isinstance(line, bytes):
|
|
848
|
+
# Need to handle conversion here, or the splitting would fail
|
|
849
|
+
line = line.decode(encoding)
|
|
850
|
+
|
|
851
|
+
for c in comments:
|
|
852
|
+
line = line.split(c, 1)[0]
|
|
853
|
+
|
|
854
|
+
yield line
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
# The number of rows we read in one go if confronted with a parametric dtype
# NOTE(review): this re-assigns the same value as the earlier
# `_loadtxt_chunksize` definition in this module; one of the two
# assignments is redundant (harmless, since the values agree).
_loadtxt_chunksize = 50000
|
|
859
|
+
|
|
860
|
+
|
|
861
|
+
def _read(fname, *, delimiter=',', comment='#', quote='"',
          imaginary_unit='j', usecols=None, skiplines=0,
          max_rows=None, converters=None, ndmin=None, unpack=False,
          dtype=np.float64, encoding=None):
    r"""
    Read a NumPy array from a text file.
    This is a helper function for loadtxt.

    Parameters
    ----------
    fname : file, str, or pathlib.Path
        The filename or the file to be read.
    delimiter : str, optional
        Field delimiter of the fields in line of the file.
        Default is a comma, ','. If None any sequence of whitespace is
        considered a delimiter.
    comment : str or sequence of str or None, optional
        Character that begins a comment. All text from the comment
        character to the end of the line is ignored.
        Multiple comments or multiple-character comment strings are supported,
        but may be slower and `quote` must be empty if used.
        Use None to disable all use of comments.
    quote : str or None, optional
        Character that is used to quote string fields. Default is '"'
        (a double quote). Use None to disable quote support.
    imaginary_unit : str, optional
        Character that represent the imaginary unit `sqrt(-1)`.
        Default is 'j'.
    usecols : array_like, optional
        A one-dimensional array of integer column numbers. These are the
        columns from the file to be included in the array. If this value
        is not given, all the columns are used.
    skiplines : int, optional
        Number of lines to skip before interpreting the data in the file.
    max_rows : int, optional
        Maximum number of rows of data to read. Default is to read the
        entire file.
    converters : dict or callable, optional
        A function to parse all columns strings into the desired value, or
        a dictionary mapping column number to a parser function.
        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
        Converters can also be used to provide a default value for missing
        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
        convert empty fields to 0.
        Default: None
    ndmin : int, optional
        Minimum dimension of the array returned.
        Allowed values are 0, 1 or 2. Default is 0.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = read(...)``. When used with a structured
        data-type, arrays are returned for each field. Default is False.
    dtype : numpy data type
        A NumPy dtype instance, can be a structured dtype to map to the
        columns of the file.
    encoding : str, optional
        Encoding used to decode the inputfile. The special value 'bytes'
        (the default) enables backwards-compatible behavior for `converters`,
        ensuring that inputs to the converter functions are encoded
        bytes objects. The special value 'bytes' has no additional effect if
        ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
        default system encoding is used.

    Returns
    -------
    ndarray
        NumPy array.
    """
    # Handle special 'bytes' keyword for encoding: it means "no explicit
    # encoding, but feed converters bytes" (legacy loadtxt behavior).
    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if dtype is None:
        raise TypeError("a dtype must be provided.")
    dtype = np.dtype(dtype)

    read_dtype_via_object_chunks = None
    if dtype.kind in 'SUM' and dtype in {
            np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}:
        # This is a legacy "flexible" dtype. We do not truly support
        # parametric dtypes currently (no dtype discovery step in the core),
        # but have to support these for backward compatibility.
        # The chunked object-array branch below handles this case.
        read_dtype_via_object_chunks = dtype
        dtype = np.dtype(object)

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints, the C-code
        # handles the rest
        try:
            usecols = list(usecols)
        except TypeError:
            usecols = [usecols]

    _ensure_ndmin_ndarray_check_param(ndmin)

    # Normalize `comment` (any str/sequence) into either a single 0/1-char
    # `comment` string handled by the C parser, or a `comments` tuple that
    # requires the slower Python-level pre-processing.
    if comment is None:
        comments = None
    else:
        # assume comments are a sequence of strings
        if "" in comment:
            raise ValueError(
                "comments cannot be an empty string. Use comments=None to "
                "disable comments."
            )
        comments = tuple(comment)
        comment = None
        if len(comments) == 0:
            comments = None  # No comments at all
        elif len(comments) == 1:
            # If there is only one comment, and that comment has one character,
            # the normal parsing can deal with it just fine.
            if isinstance(comments[0], str) and len(comments[0]) == 1:
                comment = comments[0]
                comments = None
        # Input validation if there are multiple comment characters
        elif delimiter in comments:
            raise TypeError(
                f"Comment characters '{comments}' cannot include the "
                f"delimiter '{delimiter}'"
            )

    # comment is now either a 1 or 0 character string or a tuple:
    if comments is not None:
        # Note: An earlier version support two character comments (and could
        # have been extended to multiple characters, we assume this is
        # rare enough to not optimize for.
        if quote is not None:
            raise ValueError(
                "when multiple comments or a multi-character comment is "
                "given, quotes are not supported. In this case quotechar "
                "must be set to None.")

    if len(imaginary_unit) != 1:
        raise ValueError('len(imaginary_unit) must be 1.')

    _check_nonneg_int(skiplines)
    if max_rows is not None:
        _check_nonneg_int(max_rows)
    else:
        # Passing -1 to the C code means "read the entire file".
        max_rows = -1

    # Resolve `fname` to an iterable `data` source; `filelike` tells the C
    # reader whether it may read from the object directly.
    fh_closing_ctx = contextlib.nullcontext()
    filelike = False
    try:
        if isinstance(fname, os.PathLike):
            fname = os.fspath(fname)
        if isinstance(fname, str):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            if encoding is None:
                encoding = getattr(fh, 'encoding', 'latin1')

            fh_closing_ctx = contextlib.closing(fh)
            data = fh
            filelike = True
        else:
            if encoding is None:
                encoding = getattr(fname, 'encoding', 'latin1')
            data = iter(fname)
    except TypeError as e:
        raise ValueError(
            f"fname must be a string, filehandle, list of strings,\n"
            f"or generator. Got {type(fname)} instead.") from e

    with fh_closing_ctx:
        if comments is not None:
            if filelike:
                data = iter(data)
                filelike = False
            data = _preprocess_comments(data, comments, encoding)

        if read_dtype_via_object_chunks is None:
            arr = _load_from_filelike(
                data, delimiter=delimiter, comment=comment, quote=quote,
                imaginary_unit=imaginary_unit,
                usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                converters=converters, dtype=dtype,
                encoding=encoding, filelike=filelike,
                byte_converters=byte_converters)

        else:
            # This branch reads the file into chunks of object arrays and then
            # casts them to the desired actual dtype. This ensures correct
            # string-length and datetime-unit discovery (like `arr.astype()`).
            # Due to chunking, certain error reports are less clear, currently.
            if filelike:
                data = iter(data)  # cannot chunk when reading from file
                filelike = False

            c_byte_converters = False
            if read_dtype_via_object_chunks == "S":
                c_byte_converters = True  # Use latin1 rather than ascii

            chunks = []
            while max_rows != 0:
                if max_rows < 0:
                    chunk_size = _loadtxt_chunksize
                else:
                    chunk_size = min(_loadtxt_chunksize, max_rows)

                next_arr = _load_from_filelike(
                    data, delimiter=delimiter, comment=comment, quote=quote,
                    imaginary_unit=imaginary_unit,
                    usecols=usecols, skiplines=skiplines, max_rows=chunk_size,
                    converters=converters, dtype=dtype,
                    encoding=encoding, filelike=filelike,
                    byte_converters=byte_converters,
                    c_byte_converters=c_byte_converters)
                # Cast here already. We hope that this is better even for
                # large files because the storage is more compact. It could
                # be adapted (in principle the concatenate could cast).
                chunks.append(next_arr.astype(read_dtype_via_object_chunks))

                skiplines = 0  # Only have to skip for first chunk
                if max_rows >= 0:
                    max_rows -= chunk_size
                if len(next_arr) < chunk_size:
                    # There was less data than requested, so we are done.
                    break

            # Need at least one chunk, but if empty, the last one may have
            # the wrong shape.
            if len(chunks) > 1 and len(chunks[-1]) == 0:
                del chunks[-1]
            if len(chunks) == 1:
                arr = chunks[0]
            else:
                arr = np.concatenate(chunks, axis=0)

    # NOTE: ndmin works as advertised for structured dtypes, but normally
    # these would return a 1D result plus the structured dimension,
    # so ndmin=2 adds a third dimension even when no squeezing occurs.
    # A `squeeze=False` could be a better solution (pandas uses squeeze).
    arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)

    if arr.shape:
        if arr.shape[0] == 0:
            warnings.warn(
                f'loadtxt: input contained no data: "{fname}"',
                category=UserWarning,
                stacklevel=3
            )

    if unpack:
        # Unpack structured dtypes if requested:
        dt = arr.dtype
        if dt.names is not None:
            # For structured arrays, return an array for each field.
            return [arr[field] for field in dt.names]
        else:
            return arr.T
    else:
        return arr
|
|
1116
|
+
|
|
1117
|
+
|
|
1118
|
+
@finalize_array_function_like
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding=None, max_rows=None, *, quotechar=None,
            like=None):
    r"""
    Load data from a text file.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read. If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
        that generators must return bytes or strings. The strings
        in a list or produced by a generator are treated as lines.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str or None, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The character used to separate the values. For backwards compatibility,
        byte strings will be decoded as 'latin1'. The default is whitespace.

        .. versionchanged:: 1.23.0
           Only single character delimiters are supported. Newline characters
           cannot be used as the delimiter.

    converters : dict or callable, optional
        Converter functions to customize value parsing. If `converters` is
        callable, the function is applied to all columns, else it must be a
        dict that maps column number to a parser function.
        See examples for further details.
        Default: None.

        .. versionchanged:: 1.23.0
           The ability to pass a single callable to be applied to all columns
           was added.

    skiprows : int, optional
        Skip the first `skiprows` lines, including comments; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input streams.
        The special value 'bytes' enables backward compatibility workarounds
        that ensures you receive byte arrays as results if possible and passes
        'latin1' encoded strings to converters. Override this value to receive
        unicode arrays and pass strings as input to converters. If set to None
        the system default is used. The default value is None.

        .. versionchanged:: 2.0
            Before NumPy 2, the default was ``'bytes'`` for Python 2
            compatibility. The default is now ``None``.

    max_rows : int, optional
        Read `max_rows` rows of content after `skiprows` lines. The default is
        to read all the rows. Note that empty rows containing no data such as
        empty lines and comment lines are not counted towards `max_rows`,
        while such lines are counted in `skiprows`.

        .. versionchanged:: 1.23.0
            Lines containing no data, including comment lines (e.g., lines
            starting with '#' or as specified via `comments`) are not counted
            towards `max_rows`.
    quotechar : unicode character or None, optional
        The character used to denote the start and end of a quoted item.
        Occurrences of the delimiter or comment characters are ignored within
        a quoted item. The default value is ``quotechar=None``, which means
        quoting support is disabled.

        If two consecutive instances of `quotechar` are found within a quoted
        field, the first is treated as an escape character. See examples.

        .. versionadded:: 1.23.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    Each row in the input text file must have the same number of values to be
    able to read all values. If all rows do not have same number of values, a
    subset of up to n columns (where n is the least number of values present
    in all rows) can be read by specifying the columns via `usecols`.

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> import numpy as np
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\n2 3")
    >>> np.loadtxt(c)
    array([[0., 1.],
           [2., 3.]])

    >>> d = StringIO("M 21 72\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([(b'M', 21, 72.), (b'F', 35, 58.)],
          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([1., 3.])
    >>> y
    array([2., 4.])

    The `converters` argument is used to specify functions to preprocess the
    text prior to parsing. `converters` can be a dictionary that maps
    preprocessing functions to each column:

    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
    >>> conv = {
    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
    ...     1: lambda x: np.ceil(float(x)),  # conversion fn for column 1
    ... }
    >>> np.loadtxt(s, delimiter=",", converters=conv)
    array([[1., 3.],
           [3., 5.]])

    `converters` can be a callable instead of a dictionary, in which case it
    is applied to all columns:

    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
    >>> import functools
    >>> conv = functools.partial(int, base=16)
    >>> np.loadtxt(s, converters=conv)
    array([[222., 173.],
           [192., 222.]])

    This example shows how `converters` can be used to convert a field
    with a trailing minus sign into a negative number.

    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
    >>> def conv(fld):
    ...     return -float(fld[:-1]) if fld.endswith("-") else float(fld)
    ...
    >>> np.loadtxt(s, converters=conv)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Using a callable as the converter can be particularly useful for handling
    values with different formatting, e.g. floats with underscores:

    >>> s = StringIO("1 2.7 100_000")
    >>> np.loadtxt(s, converters=float)
    array([1.e+00, 2.7e+00, 1.e+05])

    This idea can be extended to automatically handle values specified in
    many different formats, such as hex values:

    >>> def conv(val):
    ...     try:
    ...         return float(val)
    ...     except ValueError:
    ...         return float.fromhex(val)
    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
    >>> np.loadtxt(s, delimiter=",", converters=conv)
    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])

    Or a format where the ``-`` sign comes after the number:

    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
    >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
    >>> np.loadtxt(s, converters=conv)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Support for quoted fields is enabled with the `quotechar` parameter.
    Comment and delimiter characters are ignored when they appear within a
    quoted item delineated by `quotechar`:

    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
    array([('alpha, #42', 10.), ('beta, #64', 2.)],
          dtype=[('label', '<U12'), ('value', '<f8')])

    Quoted fields can be separated by multiple whitespace characters:

    >>> s = StringIO('"alpha, #42"       10.0\n"beta, #64" 2.0\n')
    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
    array([('alpha, #42', 10.), ('beta, #64', 2.)],
          dtype=[('label', '<U12'), ('value', '<f8')])

    Two consecutive quote characters within a quoted field are treated as a
    single escaped character:

    >>> s = StringIO('"Hello, my name is ""Monty""!"')
    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
    array('Hello, my name is "Monty"!', dtype='<U26')

    Read subset of columns when all rows do not contain equal number of values:

    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
    >>> np.loadtxt(d, usecols=(0, 1))
    array([[ 1.,  2.],
           [ 2.,  4.],
           [ 3.,  9.],
           [ 4., 16.]])

    """

    # Defer to an array-like implementation through the
    # __array_function__ protocol when a `like=` reference is given.
    if like is not None:
        return _loadtxt_with_like(
            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows
        )

    if dtype is None:
        dtype = np.float64

    comment = comments
    # Control character type conversions for Py3 convenience.  Bytes inputs
    # for `comments` and `delimiter` are decoded as latin1 (a total
    # single-byte codec, so decoding cannot fail) for backward compatibility.
    if comment is not None:
        if isinstance(comment, (str, bytes)):
            comment = [comment]
        comment = [
            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode('latin1')

    # `_read` does the heavy lifting; note the slight renames between the
    # public API (`skiprows`, `quotechar`) and `_read`'s parameters
    # (`skiplines`, `quote`).
    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
                converters=converters, skiplines=skiprows, usecols=usecols,
                unpack=unpack, ndmin=ndmin, encoding=encoding,
                max_rows=max_rows, quote=quotechar)

    return arr
|
|
1390
|
+
|
|
1391
|
+
|
|
1392
|
+
# `loadtxt` wrapped with the __array_function__ dispatch machinery; `loadtxt`
# itself calls this variant when a `like=` reference array is supplied.
_loadtxt_with_like = array_function_dispatch()(loadtxt)
|
|
1393
|
+
|
|
1394
|
+
|
|
1395
|
+
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
|
|
1396
|
+
header=None, footer=None, comments=None,
|
|
1397
|
+
encoding=None):
|
|
1398
|
+
return (X,)
|
|
1399
|
+
|
|
1400
|
+
|
|
1401
|
+
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename, file handle or pathlib.Path
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : 1D or 2D array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:

        * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
          like ``' (%s+%sj)' % (fmt, fmt)``
        * a full string specifying every real and imaginary part, e.g.
          ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
        * a list of specifiers, one per column - in this case, the real
          and imaginary part must have separate specifiers,
          e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.
    header : str, optional
        String that will be written at the beginning of the file.
    footer : str, optional
        String that will be written at the end of the file.
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.
    encoding : {None, str}, optional
        Encoding used to encode the outputfile. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14. Default
        is 'latin1'.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
           Python Documentation.

    Examples
    --------
    >>> import numpy as np
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',') # X is an array
    >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation

    """

    class WriteWrap:
        """Adapter that converts str to bytes when the underlying stream
        only accepts bytes.

        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            # The first write probes whether the stream accepts str; after
            # that, `write` is rebound directly to the resolved method.
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    # Resolve `fname` into a writable file handle; `own_fh` tracks whether
    # we opened it (and therefore must close it) ourselves.
    own_fh = False
    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                # NOTE: AttributeError is historical here; callers may
                # depend on the exception type, so it is kept.
                raise AttributeError(f'fmt has wrong shape. {str(fmt)}')
            format = delimiter.join(fmt)
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError(f'fmt has wrong number of % formats: {fmt}')
            if n_fmt_chars == 1:
                # A single specifier is replicated for every column; for
                # complex data it is expanded to a '(real+imagj)' pair.
                if iscomplex_X:
                    fmt = [f' ({fmt}+{fmt}j)', ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError(f'invalid fmt: {fmt!r}')

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            # Interleave real and imaginary parts so each complex value
            # fills two insertion points of `format`.
            for row in X:
                row2 = []
                for number in row:
                    row2.extend((number.real, number.imag))
                s = format % tuple(row2) + newline
                # The '+{fmt}j' template renders negative imaginary parts
                # as '+-x'; collapse that to '-x'.
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError as e:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format)) from e
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()
|
|
1631
|
+
|
|
1632
|
+
|
|
1633
|
+
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
    r"""
    Construct an array from a text file, using regular expression parsing.

    Every match of `regexp` in the file becomes one element of the returned
    structured array; each regex group becomes one field.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        Filename or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.  Groups in the regular
        expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array; must be a structured datatype.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input
        streams.

    Returns
    -------
    output : ndarray
        Structured array holding the part of the content of `file` that
        was matched by `regexp`.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Examples
    --------
    >>> import numpy as np
    >>> from io import StringIO
    >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex(text, regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output['num']
    array([1312, 1534,  444])

    """
    opened_here = False
    if not hasattr(file, "read"):
        # Got a path rather than a stream: open it ourselves and remember
        # to close it on the way out.
        file = np.lib._datasource.open(os.fspath(file), 'rt',
                                       encoding=encoding)
        opened_here = True

    try:
        dtype = dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)
        if dtype.names is None:
            raise TypeError('dtype must be a structured datatype.')

        text = file.read()
        # If the file yielded bytes, the pattern must be bytes as well.
        if isinstance(text, bytes) and isinstance(regexp, str):
            regexp = asbytes(regexp)
        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)

        matches = regexp.findall(text)
        if matches and not isinstance(matches[0], tuple):
            # findall returned plain strings, meaning the pattern has a
            # single group.  Build an array of the field's base type first,
            # then reinterpret it as the one-field structured dtype.
            base = np.dtype(dtype[dtype.names[0]])
            result = np.array(matches, dtype=base).view(dtype)
        else:
            result = np.array(matches, dtype=dtype)
        return result
    finally:
        if opened_here:
            file.close()
|
|
1728
|
+
|
|
1729
|
+
|
|
1730
|
+
#####--------------------------------------------------------------------------
|
|
1731
|
+
#---- --- ASCII functions ---
|
|
1732
|
+
#####--------------------------------------------------------------------------
|
|
1733
|
+
|
|
1734
|
+
|
|
1735
|
+
@finalize_array_function_like
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None,
               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),  # noqa: B008
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None, encoding=None,
               *, ndmin=0, like=None):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
        that generators must return bytes or strings. The strings
        in a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing : variable, optional
        `missing` was removed in numpy 1.10. Please use `missing_values`
        instead.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
        by a comment delimiter. Any content before the comment delimiter is
        discarded. If `names` is a sequence or a single-string of
        comma-separated names, the names will be used to define the field
        names in a structured dtype. If `names` is None, the names of the
        dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended with an
        underscore: for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable
        names. By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``.  When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    max_rows : int, optional
        The maximum number of rows to read. Must not be used with skip_footer
        at the same time.  If given, the value must be at least 1. Default is
        to read the entire file.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply when `fname`
        is a file object.  The special value 'bytes' enables backward
        compatibility workarounds that ensure that you receive byte arrays
        when possible and passes latin1 encoded strings to converters.
        Override this value to receive unicode arrays and pass strings
        as input to converters.  If set to None the system default is used.
        The default value is 'bytes'.

        .. versionchanged:: 2.0
            Before NumPy 2, the default was ``'bytes'`` for Python 2
            compatibility. The default is now ``None``.

    ndmin : int, optional
        Same parameter as `loadtxt`

        .. versionadded:: 1.23.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When variables are named (either by a flexible dtype or with a `names`
      sequence), there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.
    * Custom converters may receive unexpected values due to dtype
      discovery.

    References
    ----------
    .. [1] NumPy User Guide, section `I/O with NumPy
           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.

    Examples
    --------
    >>> from io import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, b'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])

    Using dtype = None

    >>> _ = s.seek(0)  # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])

    Specifying dtype and names

    >>> _ = s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, b'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])

    An example with fixed-width columns

    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])

    An example to show comments

    >>> f = StringIO('''
    ... text,# of chars
    ... hello world,11
    ... numpy,5''')
    >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
    array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
          dtype=[('f0', 'S12'), ('f1', 'S12')])

    """

    # Dispatch on `like` so array-like overrides get a chance to handle the
    # call before the reference implementation runs.
    if like is not None:
        return _genfromtxt_with_like(
            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
            skip_header=skip_header, skip_footer=skip_footer,
            converters=converters, missing_values=missing_values,
            filling_values=filling_values, usecols=usecols, names=names,
            excludelist=excludelist, deletechars=deletechars,
            replace_space=replace_space, autostrip=autostrip,
            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
            unpack=unpack, usemask=usemask, loose=loose,
            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
            ndmin=ndmin,
        )

    _ensure_ndmin_ndarray_check_param(ndmin)

    # `max_rows` and `skip_footer` are mutually exclusive (see docstring).
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                "The keywords 'skip_footer' and 'max_rows' can not be "
                "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    if usemask:
        # Imported only when needed, so the common non-masked path does not
        # pull in numpy.ma.
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converter' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    # 'bytes' is the backward-compatibility mode described in the docstring:
    # decode with the system default but hand latin1-encoded bytes to
    # converters and return byte arrays where possible.
    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False

    # Initialize the filehandle, the LineSplitter and the NameValidator
    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if isinstance(fname, str):
        fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
        # We opened the file, so we are responsible for closing it ...
        fid_ctx = contextlib.closing(fid)
    else:
        fid = fname
        # ... but a caller-supplied handle is left open (nullcontext).
        fid_ctx = contextlib.nullcontext(fid)
    try:
        fhd = iter(fid)
    except TypeError as e:
        raise TypeError(
            "fname must be a string, a filehandle, a sequence of strings,\n"
            f"or an iterator of strings. Got {type(fname)} instead."
        ) from e
    with fid_ctx:
        split_line = LineSplitter(delimiter=delimiter, comments=comments,
                                  autostrip=autostrip, encoding=encoding)
        validate_names = NameValidator(excludelist=excludelist,
                                       deletechars=deletechars,
                                       case_sensitive=case_sensitive,
                                       replace_space=replace_space)

        # Skip the first `skip_header` rows
        try:
            for i in range(skip_header):
                next(fhd)

            # Keep on until we find the first valid values
            first_values = None

            while not first_values:
                first_line = _decode_line(next(fhd), encoding)
                if (names is True) and (comments is not None):
                    if comments in first_line:
                        first_line = (
                            ''.join(first_line.split(comments)[1:]))
                first_values = split_line(first_line)
        except StopIteration:
            # return an empty array if the datafile is empty
            first_line = ''
            first_values = []
            warnings.warn(
                f'genfromtxt: Empty input file: "{fname}"', stacklevel=2
            )

        # Should we take the first values as names ?
        if names is True:
            fval = first_values[0].strip()
            if comments is not None:
                if fval in comments:
                    del first_values[0]

        # Check the columns to use: make sure `usecols` is a list
        if usecols is not None:
            try:
                # A comma-separated string of column names
                usecols = [_.strip() for _ in usecols.split(",")]
            except AttributeError:
                try:
                    usecols = list(usecols)
                except TypeError:
                    # A single scalar column index
                    usecols = [usecols, ]
        # Number of output columns: limited by `usecols` when given.
        nbcols = len(usecols or first_values)

        # Check the names and overwrite the dtype.names if needed
        if names is True:
            names = validate_names([str(_.strip()) for _ in first_values])
            # The header line was consumed as names; clear it so it is not
            # re-parsed as data by the main loop below.
            first_line = ''
        elif _is_string_like(names):
            names = validate_names([_.strip() for _ in names.split(',')])
        elif names:
            names = validate_names(names)
        # Get the dtype
        if dtype is not None:
            dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                               excludelist=excludelist,
                               deletechars=deletechars,
                               case_sensitive=case_sensitive,
                               replace_space=replace_space)
        # Make sure the names is a list (for 2.5)
        if names is not None:
            names = list(names)

        if usecols:
            for (i, current) in enumerate(usecols):
                # if usecols is a list of names, convert to a list of indices
                if _is_string_like(current):
                    usecols[i] = names.index(current)
                elif current < 0:
                    # Negative indices count from the end of the row
                    usecols[i] = current + len(first_values)
            # If the dtype is not None, make sure we update it
            if (dtype is not None) and (len(dtype) > nbcols):
                descr = dtype.descr
                dtype = np.dtype([descr[_] for _ in usecols])
                names = list(dtype.names)
            # If `names` is not None, update the names
            elif (names is not None) and (len(names) > nbcols):
                names = [names[_] for _ in usecols]
        elif (names is not None) and (dtype is not None):
            names = list(dtype.names)

        # Process the missing values ...............................
        # Rename missing_values for convenience
        user_missing_values = missing_values or ()
        if isinstance(user_missing_values, bytes):
            user_missing_values = user_missing_values.decode('latin1')

        # Define the list of missing_values (one column: one list)
        missing_values = [[''] for _ in range(nbcols)]

        # We have a dictionary: process it field by field
        if isinstance(user_missing_values, dict):
            # Loop on the items
            for (key, val) in user_missing_values.items():
                # Is the key a string ?
                if _is_string_like(key):
                    try:
                        # Transform it into an integer
                        key = names.index(key)
                    except ValueError:
                        # We couldn't find it: the name must have been dropped
                        continue
                # Redefine the key as needed if it's a column number
                if usecols:
                    try:
                        key = usecols.index(key)
                    except ValueError:
                        pass
                # Transform the value as a list of string
                if isinstance(val, (list, tuple)):
                    val = [str(_) for _ in val]
                else:
                    val = [str(val), ]
                # Add the value(s) to the current list of missing
                if key is None:
                    # None acts as default
                    for miss in missing_values:
                        miss.extend(val)
                else:
                    missing_values[key].extend(val)
        # We have a sequence : each item matches a column
        elif isinstance(user_missing_values, (list, tuple)):
            for (value, entry) in zip(user_missing_values, missing_values):
                value = str(value)
                if value not in entry:
                    entry.append(value)
        # We have a string : apply it to all entries
        elif isinstance(user_missing_values, str):
            user_value = user_missing_values.split(",")
            for entry in missing_values:
                entry.extend(user_value)
        # We have something else: apply it to all entries
        else:
            for entry in missing_values:
                entry.extend([str(user_missing_values)])

        # Process the filling_values ...............................
        # Rename the input for convenience
        user_filling_values = filling_values
        if user_filling_values is None:
            user_filling_values = []
        # Define the default
        filling_values = [None] * nbcols
        # We have a dictionary : update each entry individually
        if isinstance(user_filling_values, dict):
            for (key, val) in user_filling_values.items():
                if _is_string_like(key):
                    try:
                        # Transform it into an integer
                        key = names.index(key)
                    except ValueError:
                        # We couldn't find it: the name must have been dropped
                        continue
                # Redefine the key if it's a column number
                # and usecols is defined
                if usecols:
                    try:
                        key = usecols.index(key)
                    except ValueError:
                        pass
                # Add the value to the list
                filling_values[key] = val
        # We have a sequence : update on a one-to-one basis
        elif isinstance(user_filling_values, (list, tuple)):
            n = len(user_filling_values)
            if (n <= nbcols):
                filling_values[:n] = user_filling_values
            else:
                filling_values = user_filling_values[:nbcols]
        # We have something else : use it for all entries
        else:
            filling_values = [user_filling_values] * nbcols

        # Initialize the converters ................................
        if dtype is None:
            # Note: we can't use a [...]*nbcols, as we would have 3 times
            # the same converter, instead of 3 different converters.
            converters = [
                StringConverter(None, missing_values=miss, default=fill)
                for (miss, fill) in zip(missing_values, filling_values)
            ]
        else:
            dtype_flat = flatten_dtype(dtype, flatten_base=True)
            # Initialize the converters
            if len(dtype_flat) > 1:
                # Flexible type : get a converter from each dtype
                zipit = zip(dtype_flat, missing_values, filling_values)
                converters = [StringConverter(dt,
                                              locked=True,
                                              missing_values=miss,
                                              default=fill)
                              for (dt, miss, fill) in zipit]
            else:
                # Set to a default converter (but w/ different missing values)
                zipit = zip(missing_values, filling_values)
                converters = [StringConverter(dtype,
                                              locked=True,
                                              missing_values=miss,
                                              default=fill)
                              for (miss, fill) in zipit]
        # Update the converters to use the user-defined ones
        uc_update = []
        for (j, conv) in user_converters.items():
            # If the converter is specified by column names,
            # use the index instead
            if _is_string_like(j):
                try:
                    j = names.index(j)
                    i = j
                except ValueError:
                    continue
            elif usecols:
                try:
                    i = usecols.index(j)
                except ValueError:
                    # Unused converter specified
                    continue
            else:
                i = j
            # Find the value to test - first_line is not filtered by usecols:
            if len(first_line):
                testing_value = first_values[j]
            else:
                testing_value = None
            if conv is bytes:
                user_conv = asbytes
            elif byte_converters:
                # Converters may use decode to workaround numpy's old
                # behavior, so encode the string again before passing
                # to the user converter.
                def tobytes_first(x, conv):
                    if type(x) is bytes:
                        return conv(x)
                    return conv(x.encode("latin1"))
                user_conv = functools.partial(tobytes_first, conv=conv)
            else:
                user_conv = conv
            converters[i].update(user_conv, locked=True,
                                 testing_value=testing_value,
                                 default=filling_values[i],
                                 missing_values=missing_values[i],)
            uc_update.append((i, user_conv))
        # Make sure we have the corrected keys in user_converters...
        user_converters.update(uc_update)

        # Fixme: possible error as following variable never used.
        # miss_chars = [_.missing_values for _ in converters]

        # Initialize the output lists ...
        # ... rows
        rows = []
        append_to_rows = rows.append
        # ... masks
        if usemask:
            masks = []
            append_to_masks = masks.append
        # ... invalid
        invalid = []
        append_to_invalid = invalid.append

        # Parse each line
        for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
            values = split_line(line)
            nbvalues = len(values)
            # Skip an empty line
            if nbvalues == 0:
                continue
            if usecols:
                # Select only the columns we need
                try:
                    values = [values[_] for _ in usecols]
                except IndexError:
                    append_to_invalid((i + skip_header + 1, nbvalues))
                    continue
            elif nbvalues != nbcols:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
            # Store the values
            append_to_rows(tuple(values))
            if usemask:
                append_to_masks(tuple(v.strip() in m
                                      for (v, m) in zip(values,
                                                        missing_values)))
            # Stop once `max_rows` rows have been stored (never triggers
            # when max_rows is None, since len(rows) is an int).
            if len(rows) == max_rows:
                break

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = f"Converter #{i} is locked and cannot be upgraded: "
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        line_number = j + 1 + skip_header
                        errmsg += f"(occurred line #{line_number} for value '{value}')"
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = f"    Line #%i (got %i columns instead of {nbcols})"
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v == np.str_]

        if byte_converters and strcolidx:
            # convert strings back to bytes for backward compatibility
            warnings.warn(
                "Reading unicode strings without specifying the encoding "
                "argument is deprecated. Set the encoding, use None for the "
                "system default.",
                np.exceptions.VisibleDeprecationWarning, stacklevel=2)

            def encode_unicode_cols(row_tup):
                row = list(row_tup)
                for i in strcolidx:
                    row[i] = row[i].encode('latin1')
                return tuple(row)

            try:
                data = [encode_unicode_cols(r) for r in data]
            except UnicodeEncodeError:
                # Keep the unicode values if they cannot be round-tripped
                pass
            else:
                for i in strcolidx:
                    column_types[i] = np.bytes_

        # Update string types to be the right length
        sized_column_types = column_types.copy()
        for i, col_type in enumerate(column_types):
            if np.issubdtype(col_type, np.character):
                n_chars = max(len(row[i]) for row in data)
                sized_column_types[i] = (col_type, n_chars)

        if names is None:
            # If the dtype is uniform (before sizing strings)
            base = {
                c_type
                for c, c_type in zip(converters, column_types)
                if c._checked}
            if len(base) == 1:
                uniform_type, = base
                (ddtype, mdtype) = (uniform_type, bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(sized_column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, bool)
                              for (i, dt) in enumerate(sized_column_types)]
        else:
            ddtype = list(zip(names, sized_column_types))
            mdtype = list(zip(names, [bool] * len(sized_column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names is not None:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if np.issubdtype(ttype, np.character):
                            ttype = (ttype, max(len(row[i]) for row in data))
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names is not None:
                    mdtype = [(_, bool) for _ in dtype.names]
                else:
                    mdtype = bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names, converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask

    output = _ensure_ndmin_ndarray(output, ndmin=ndmin)

    if unpack:
        if names is None:
            return output.T
        elif len(names) == 1:
            # squeeze single-name dtypes too
            return output[names[0]]
        else:
            # For structured arrays with multiple fields,
            # return an array for each field.
            return [output[field] for field in names]
    return output
|
|
2485
|
+
|
|
2486
|
+
|
|
2487
|
+
# Dispatched variant of `genfromtxt`; it is the target of the
# `if like is not None:` branch inside `genfromtxt`, which forwards all
# keyword arguments to it together with the `like` reference object.
_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
|
|
2488
|
+
|
|
2489
|
+
|
|
2490
|
+
def recfromtxt(fname, **kwargs):
|
|
2491
|
+
"""
|
|
2492
|
+
Load ASCII data from a file and return it in a record array.
|
|
2493
|
+
|
|
2494
|
+
If ``usemask=False`` a standard `recarray` is returned,
|
|
2495
|
+
if ``usemask=True`` a MaskedRecords array is returned.
|
|
2496
|
+
|
|
2497
|
+
.. deprecated:: 2.0
|
|
2498
|
+
Use `numpy.genfromtxt` instead.
|
|
2499
|
+
|
|
2500
|
+
Parameters
|
|
2501
|
+
----------
|
|
2502
|
+
fname, kwargs : For a description of input parameters, see `genfromtxt`.
|
|
2503
|
+
|
|
2504
|
+
See Also
|
|
2505
|
+
--------
|
|
2506
|
+
numpy.genfromtxt : generic function
|
|
2507
|
+
|
|
2508
|
+
Notes
|
|
2509
|
+
-----
|
|
2510
|
+
By default, `dtype` is None, which means that the data-type of the output
|
|
2511
|
+
array will be determined from the data.
|
|
2512
|
+
|
|
2513
|
+
"""
|
|
2514
|
+
|
|
2515
|
+
# Deprecated in NumPy 2.0, 2023-07-11
|
|
2516
|
+
warnings.warn(
|
|
2517
|
+
"`recfromtxt` is deprecated, "
|
|
2518
|
+
"use `numpy.genfromtxt` instead."
|
|
2519
|
+
"(deprecated in NumPy 2.0)",
|
|
2520
|
+
DeprecationWarning,
|
|
2521
|
+
stacklevel=2
|
|
2522
|
+
)
|
|
2523
|
+
|
|
2524
|
+
kwargs.setdefault("dtype", None)
|
|
2525
|
+
usemask = kwargs.get('usemask', False)
|
|
2526
|
+
output = genfromtxt(fname, **kwargs)
|
|
2527
|
+
if usemask:
|
|
2528
|
+
from numpy.ma.mrecords import MaskedRecords
|
|
2529
|
+
output = output.view(MaskedRecords)
|
|
2530
|
+
else:
|
|
2531
|
+
output = output.view(np.recarray)
|
|
2532
|
+
return output
|
|
2533
|
+
|
|
2534
|
+
|
|
2535
|
+
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    .. deprecated:: 2.0
        Use `numpy.genfromtxt` with comma as `delimiter` instead.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """

    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`recfromcsv` is deprecated, "
        "use `numpy.genfromtxt` with comma as `delimiter` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    # Fill in the defaults that make genfromtxt behave as a CSV reader,
    # without overriding anything the caller passed explicitly.
    csv_defaults = {
        "case_sensitive": "lower",
        "names": True,
        "delimiter": ",",
        "dtype": None,
    }
    for option, default in csv_defaults.items():
        kwargs.setdefault(option, default)

    result = genfromtxt(fname, **kwargs)

    if kwargs.get("usemask", False):
        # Imported lazily so numpy.ma.mrecords is only loaded when needed.
        from numpy.ma.mrecords import MaskedRecords
        return result.view(MaskedRecords)
    return result.view(np.recarray)
|