basedpyright 1.36.2 → 1.37.0-961d59bacff1958014fe4e9ed2ef3116fe2266e2
This diff shows the content of publicly available package versions as they were released to their respective public registries, and is provided for informational purposes only.
- package/dist/pyright-langserver.js +1 -1
- package/dist/pyright-langserver.js.map +1 -1
- package/dist/pyright.js +1 -1
- package/dist/pyright.js.map +1 -1
- package/dist/typeshed-fallback/README.md +6 -6
- package/dist/typeshed-fallback/commit.txt +1 -1
- package/dist/typeshed-fallback/stdlib/_compression.pyi +13 -2
- package/dist/typeshed-fallback/stdlib/_ctypes.pyi +49 -16
- package/dist/typeshed-fallback/stdlib/_winapi.pyi +9 -0
- package/dist/typeshed-fallback/stdlib/_zstd.pyi +7 -2
- package/dist/typeshed-fallback/stdlib/argparse.pyi +39 -10
- package/dist/typeshed-fallback/stdlib/asyncio/coroutines.pyi +27 -8
- package/dist/typeshed-fallback/stdlib/asyncio/protocols.pyi +2 -2
- package/dist/typeshed-fallback/stdlib/asyncio/runners.pyi +5 -2
- package/dist/typeshed-fallback/stdlib/asyncio/trsock.pyi +1 -1
- package/dist/typeshed-fallback/stdlib/builtins.pyi +54 -31
- package/dist/typeshed-fallback/stdlib/calendar.pyi +3 -1
- package/dist/typeshed-fallback/stdlib/codecs.pyi +2 -1
- package/dist/typeshed-fallback/stdlib/compression/_common/_streams.pyi +13 -2
- package/dist/typeshed-fallback/stdlib/compression/zstd/__init__.pyi +7 -2
- package/dist/typeshed-fallback/stdlib/compression/zstd/_zstdfile.pyi +6 -6
- package/dist/typeshed-fallback/stdlib/contextlib.pyi +15 -6
- package/dist/typeshed-fallback/stdlib/ctypes/__init__.pyi +3 -0
- package/dist/typeshed-fallback/stdlib/enum.pyi +2 -0
- package/dist/typeshed-fallback/stdlib/heapq.pyi +5 -0
- package/dist/typeshed-fallback/stdlib/html/parser.pyi +2 -1
- package/dist/typeshed-fallback/stdlib/http/client.pyi +1 -0
- package/dist/typeshed-fallback/stdlib/imaplib.pyi +2 -1
- package/dist/typeshed-fallback/stdlib/importlib/readers.pyi +2 -2
- package/dist/typeshed-fallback/stdlib/importlib/resources/abc.pyi +3 -2
- package/dist/typeshed-fallback/stdlib/importlib/resources/simple.pyi +2 -1
- package/dist/typeshed-fallback/stdlib/importlib/util.pyi +15 -1
- package/dist/typeshed-fallback/stdlib/ipaddress.pyi +1 -1
- package/dist/typeshed-fallback/stdlib/locale.pyi +4 -0
- package/dist/typeshed-fallback/stdlib/mimetypes.pyi +6 -6
- package/dist/typeshed-fallback/stdlib/mmap.pyi +12 -12
- package/dist/typeshed-fallback/stdlib/multiprocessing/dummy/__init__.pyi +20 -8
- package/dist/typeshed-fallback/stdlib/multiprocessing/managers.pyi +4 -1
- package/dist/typeshed-fallback/stdlib/multiprocessing/process.pyi +4 -0
- package/dist/typeshed-fallback/stdlib/multiprocessing/synchronize.pyi +3 -0
- package/dist/typeshed-fallback/stdlib/optparse.pyi +1 -1
- package/dist/typeshed-fallback/stdlib/os/__init__.pyi +217 -127
- package/dist/typeshed-fallback/stdlib/parser.pyi +4 -4
- package/dist/typeshed-fallback/stdlib/pathlib/__init__.pyi +11 -0
- package/dist/typeshed-fallback/stdlib/pdb.pyi +14 -4
- package/dist/typeshed-fallback/stdlib/pyexpat/__init__.pyi +6 -0
- package/dist/typeshed-fallback/stdlib/select.pyi +8 -4
- package/dist/typeshed-fallback/stdlib/socket.pyi +15 -0
- package/dist/typeshed-fallback/stdlib/sqlite3/__init__.pyi +3 -2
- package/dist/typeshed-fallback/stdlib/ssl.pyi +3 -0
- package/dist/typeshed-fallback/stdlib/stat.pyi +108 -1
- package/dist/typeshed-fallback/stdlib/subprocess.pyi +60 -61
- package/dist/typeshed-fallback/stdlib/sys/__init__.pyi +41 -4
- package/dist/typeshed-fallback/stdlib/sys/_monitoring.pyi +8 -4
- package/dist/typeshed-fallback/stdlib/sysconfig.pyi +13 -4
- package/dist/typeshed-fallback/stdlib/tarfile.pyi +6 -6
- package/dist/typeshed-fallback/stdlib/threading.pyi +9 -4
- package/dist/typeshed-fallback/stdlib/tkinter/constants.pyi +6 -6
- package/dist/typeshed-fallback/stdlib/tkinter/ttk.pyi +14 -6
- package/dist/typeshed-fallback/stdlib/types.pyi +8 -11
- package/dist/typeshed-fallback/stdlib/typing.pyi +18 -12
- package/dist/typeshed-fallback/stdlib/typing_extensions.pyi +1 -0
- package/dist/typeshed-fallback/stdlib/unittest/mock.pyi +1 -0
- package/dist/typeshed-fallback/stdlib/unittest/util.pyi +19 -2
- package/dist/typeshed-fallback/stdlib/urllib/request.pyi +7 -0
- package/dist/typeshed-fallback/stdlib/uuid.pyi +6 -5
- package/dist/typeshed-fallback/stdlib/winreg.pyi +3 -3
- package/dist/typeshed-fallback/stdlib/xml/etree/ElementTree.pyi +1 -0
- package/dist/typeshed-fallback/stdlib/zipfile/__init__.pyi +2 -0
- package/dist/typeshed-fallback/stdlib/zlib.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/METADATA.toml +1 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/common/urls.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/async_app.pyi +4 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/async_openid.pyi +4 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/framework_integration.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/registry.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/sync_app.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/base_client/sync_openid.pyi +4 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_client/__init__.pyi +10 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_client/apps.pyi +14 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_client/integration.pyi +11 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth1/__init__.pyi +4 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth1/authorization_server.pyi +26 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth1/nonce.pyi +1 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth1/resource_protector.pyi +14 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/__init__.pyi +8 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/authorization_server.pyi +24 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/endpoints.pyi +5 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/requests.pyi +20 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/resource_protector.pyi +15 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/django_oauth2/signals.pyi +6 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_client/__init__.pyi +20 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_client/apps.pyi +18 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_client/integration.pyi +10 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth1/__init__.pyi +7 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth1/authorization_server.pyi +29 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth1/cache.pyi +8 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth1/resource_protector.pyi +18 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/__init__.pyi +7 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/authorization_server.pyi +23 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/errors.pyi +14 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/requests.pyi +27 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/resource_protector.pyi +15 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/flask_oauth2/signals.pyi +5 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/httpx_client/__init__.pyi +37 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/httpx_client/assertion_client.pyi +50 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/httpx_client/oauth1_client.pyi +56 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/httpx_client/oauth2_client.pyi +72 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/httpx_client/utils.pyi +7 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/requests_client/__init__.pyi +28 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/requests_client/assertion_session.pyi +31 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/requests_client/oauth1_session.pyi +29 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/requests_client/oauth2_session.pyi +43 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/requests_client/utils.pyi +6 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/sqla_oauth2/__init__.pyi +20 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/sqla_oauth2/client_mixin.pyi +55 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/sqla_oauth2/functions.pyi +21 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/sqla_oauth2/tokens_mixins.pyi +39 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/starlette_client/__init__.pyi +14 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/starlette_client/apps.pyi +16 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/integrations/starlette_client/integration.pyi +13 -0
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/drafts/_jwe_algorithms.pyi +12 -10
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/drafts/_jwe_enc_cryptography.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/jwk.pyi +6 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7515/jws.pyi +6 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7515/models.pyi +3 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7516/jwe.pyi +9 -8
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7516/models.pyi +12 -11
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7517/_cryptography_key.pyi +35 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7517/asymmetric_key.pyi +6 -5
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7517/base_key.pyi +8 -8
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7517/jwk.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7517/key_set.pyi +3 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/jwe_algs.pyi +28 -23
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/jwe_encs.pyi +6 -5
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/jws_algs.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/oct_key.pyi +5 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/rsa_key.pyi +2 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7518/util.pyi +4 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc7519/jwt.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc8037/jws_eddsa.pyi +4 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/jose/rfc8037/okp_key.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/client.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/authorization_server.pyi +7 -7
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/base_server.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/client_auth.pyi +6 -6
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/errors.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/models.pyi +8 -8
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/parameters.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/resource_protector.pyi +4 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/rsa.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/signature.pyi +20 -18
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth1/rfc5849/util.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/base.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/client.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/authorization_server.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/grants/authorization_code.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/grants/base.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/grants/resource_owner_password_credentials.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/parameters.pyi +5 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/resource_protector.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/token_endpoint.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6749/wrappers.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6750/errors.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6750/parameters.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc6750/validator.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7009/revocation.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7523/assertion.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7523/auth.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7523/client.pyi +5 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7523/jwt_bearer.pyi +6 -5
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7591/endpoint.pyi +4 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7592/endpoint.pyi +8 -8
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7662/introspection.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc7662/token_validator.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc8414/well_known.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc8628/device_code.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc8628/endpoint.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc8628/models.pyi +4 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc9068/introspection.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc9068/revocation.pyi +3 -2
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc9068/token.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oauth2/rfc9068/token_validator.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/claims.pyi +6 -8
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/grants/code.pyi +2 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/grants/hybrid.pyi +4 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/grants/implicit.pyi +6 -4
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/userinfo.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/core/util.pyi +7 -1
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/discovery/models.pyi +1 -3
- package/dist/typeshed-fallback/stubs/Authlib/authlib/oidc/discovery/well_known.pyi +1 -1
- package/dist/typeshed-fallback/stubs/Deprecated/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/Deprecated/deprecated/__init__.pyi +7 -2
- package/dist/typeshed-fallback/stubs/Deprecated/deprecated/params.pyi +20 -0
- package/dist/typeshed-fallback/stubs/Flask-SocketIO/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/Markdown/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/Markdown/markdown/htmlparser.pyi +2 -0
- package/dist/typeshed-fallback/stubs/PyMySQL/pymysql/cursors.pyi +1 -0
- package/dist/typeshed-fallback/stubs/Pygments/pygments/console.pyi +4 -4
- package/dist/typeshed-fallback/stubs/Pygments/pygments/filter.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Pygments/pygments/filters/__init__.pyi +14 -14
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatter.pyi +10 -9
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/__init__.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/_mapping.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/bbcode.pyi +5 -4
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/html.pyi +27 -26
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/img.pyi +33 -32
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/irc.pyi +7 -6
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/latex.pyi +21 -20
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/other.pyi +9 -8
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/pangomarkup.pyi +5 -4
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/rtf.pyi +6 -5
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/svg.pyi +15 -14
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/terminal.pyi +7 -6
- package/dist/typeshed-fallback/stubs/Pygments/pygments/formatters/terminal256.pyi +18 -17
- package/dist/typeshed-fallback/stubs/Pygments/pygments/lexers/__init__.pyi +6 -7
- package/dist/typeshed-fallback/stubs/Pygments/pygments/plugin.pyi +5 -5
- package/dist/typeshed-fallback/stubs/Pygments/pygments/regexopt.pyi +3 -3
- package/dist/typeshed-fallback/stubs/Pygments/pygments/scanner.pyi +6 -6
- package/dist/typeshed-fallback/stubs/Pygments/pygments/unistring.pyi +2 -2
- package/dist/typeshed-fallback/stubs/Pygments/pygments/util.pyi +5 -5
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/BufferedTokenStream.pyi +14 -15
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/CommonTokenFactory.pyi +15 -7
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/CommonTokenStream.pyi +1 -3
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/FileStream.pyi +2 -4
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/InputStream.pyi +13 -11
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/Lexer.pyi +43 -29
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/Parser.pyi +47 -34
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/ParserInterpreter.pyi +13 -13
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/ParserRuleContext.pyi +30 -23
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/Recognizer.pyi +26 -17
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/RuleContext.pyi +22 -20
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/Token.pyi +26 -25
- package/dist/typeshed-fallback/stubs/antlr4-python3-runtime/antlr4/tree/Tree.pyi +30 -26
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/assertpy.pyi +13 -10
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/base.pyi +4 -5
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/collection.pyi +12 -4
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/contains.pyi +10 -8
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/dict.pyi +8 -6
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/dynamic.pyi +1 -1
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/extracting.pyi +4 -2
- package/dist/typeshed-fallback/stubs/assertpy/assertpy/file.pyi +2 -3
- package/dist/typeshed-fallback/stubs/atheris/METADATA.toml +6 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/__init__.pyi +9 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/function_hooks.pyi +14 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/import_hook.pyi +36 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/instrument_bytecode.pyi +7 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/utils.pyi +18 -0
- package/dist/typeshed-fallback/stubs/atheris/atheris/version_dependent.pyi +27 -0
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/__init__.pyi +2 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/async_context.pyi +15 -5
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/async_recorder.pyi +28 -9
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/context.pyi +7 -7
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/daemon_config.pyi +9 -7
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/emitters/udp_emitter.pyi +2 -2
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/lambda_launcher.pyi +8 -12
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/entity.pyi +1 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/facade_segment.pyi +1 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/http.pyi +9 -9
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/segment.pyi +1 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/subsegment.pyi +3 -2
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/models/throwable.pyi +11 -3
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/patcher.pyi +3 -2
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/plugins/ec2_plugin.pyi +3 -3
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/plugins/ecs_plugin.pyi +2 -2
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/plugins/elasticbeanstalk_plugin.pyi +3 -3
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/recorder.pyi +19 -15
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/rule_cache.pyi +1 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/sampler.pyi +3 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/sampling_rule.pyi +23 -13
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/target_poller.pyi +5 -1
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/ext/psycopg/__init__.pyi +3 -0
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/ext/psycopg/patch.pyi +1 -0
- package/dist/typeshed-fallback/stubs/aws-xray-sdk/aws_xray_sdk/ext/util.pyi +1 -1
- package/dist/typeshed-fallback/stubs/bleach/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/bleach/bleach/html5lib_shim.pyi +1 -1
- package/dist/typeshed-fallback/stubs/bleach/bleach/linkifier.pyi +1 -1
- package/dist/typeshed-fallback/stubs/bleach/bleach/sanitizer.pyi +2 -2
- package/dist/typeshed-fallback/stubs/braintree/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/braintree/braintree/address.pyi +8 -4
- package/dist/typeshed-fallback/stubs/braintree/braintree/address_gateway.pyi +3 -2
- package/dist/typeshed-fallback/stubs/braintree/braintree/client_token.pyi +5 -1
- package/dist/typeshed-fallback/stubs/braintree/braintree/client_token_gateway.pyi +5 -3
- package/dist/typeshed-fallback/stubs/braintree/braintree/error_codes.pyi +11 -2
- package/dist/typeshed-fallback/stubs/braintree/braintree/payment_method.pyi +30 -1
- package/dist/typeshed-fallback/stubs/braintree/braintree/payment_method_gateway.pyi +30 -2
- package/dist/typeshed-fallback/stubs/braintree/braintree/subscription.pyi +10 -5
- package/dist/typeshed-fallback/stubs/braintree/braintree/subscription_details.pyi +5 -1
- package/dist/typeshed-fallback/stubs/braintree/braintree/subscription_gateway.pyi +12 -5
- package/dist/typeshed-fallback/stubs/braintree/braintree/transaction.pyi +6 -2
- package/dist/typeshed-fallback/stubs/braintree/braintree/transaction_gateway.pyi +5 -2
- package/dist/typeshed-fallback/stubs/colorful/METADATA.toml +2 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/ansi.pyi +14 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/colors.pyi +6 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/core.pyi +102 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/styles.pyi +4 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/terminal.pyi +16 -0
- package/dist/typeshed-fallback/stubs/colorful/colorful/utils.pyi +2 -0
- package/dist/typeshed-fallback/stubs/decorator/decorator.pyi +2 -2
- package/dist/typeshed-fallback/stubs/django-filter/django_filters/filterset.pyi +3 -1
- package/dist/typeshed-fallback/stubs/docker/docker/api/container.pyi +6 -1
- package/dist/typeshed-fallback/stubs/docker/docker/api/daemon.pyi +12 -4
- package/dist/typeshed-fallback/stubs/docker/docker/api/exec_api.pyi +123 -7
- package/dist/typeshed-fallback/stubs/docker/docker/client.pyi +12 -8
- package/dist/typeshed-fallback/stubs/docker/docker/models/containers.pyi +27 -17
- package/dist/typeshed-fallback/stubs/docker/docker/models/images.pyi +1 -1
- package/dist/typeshed-fallback/stubs/docker/docker/models/resource.pyi +5 -5
- package/dist/typeshed-fallback/stubs/docutils/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/docutils/docutils/parsers/rst/directives/body.pyi +10 -0
- package/dist/typeshed-fallback/stubs/et_xmlfile/METADATA.toml +2 -0
- package/dist/typeshed-fallback/stubs/et_xmlfile/et_xmlfile/__init__.pyi +9 -0
- package/dist/typeshed-fallback/stubs/et_xmlfile/et_xmlfile/incremental_tree.pyi +170 -0
- package/dist/typeshed-fallback/stubs/et_xmlfile/et_xmlfile/xmlfile.pyi +37 -0
- package/dist/typeshed-fallback/stubs/fanstatic/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/flake8-bugbear/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/flake8-bugbear/bugbear.pyi +37 -65
- package/dist/typeshed-fallback/stubs/flake8-builtins/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/flake8-simplify/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/flake8-simplify/flake8_simplify/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/flake8-simplify/flake8_simplify/constants.pyi +6 -3
- package/dist/typeshed-fallback/stubs/flake8-simplify/flake8_simplify/utils.pyi +0 -1
- package/dist/typeshed-fallback/stubs/fpdf2/fpdf/drawing.pyi +145 -81
- package/dist/typeshed-fallback/stubs/fpdf2/fpdf/text_region.pyi +1 -1
- package/dist/typeshed-fallback/stubs/geopandas/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/geopandas/geopandas/array.pyi +7 -14
- package/dist/typeshed-fallback/stubs/geopandas/geopandas/geodataframe.pyi +1 -0
- package/dist/typeshed-fallback/stubs/gevent/METADATA.toml +2 -2
- package/dist/typeshed-fallback/stubs/gevent/gevent/events.pyi +1 -12
- package/dist/typeshed-fallback/stubs/gevent/gevent/libuv/watcher.pyi +1 -0
- package/dist/typeshed-fallback/stubs/gevent/gevent/os.pyi +1 -0
- package/dist/typeshed-fallback/stubs/gevent/gevent/signal.pyi +2 -1
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/_cache.pyi +9 -10
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/_datastore_query.pyi +1 -2
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/_eventloop.pyi +11 -10
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/blobstore.pyi +6 -6
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/context.pyi +21 -21
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/exceptions.pyi +2 -2
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/global_cache.pyi +13 -14
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/key.pyi +2 -3
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/metadata.pyi +1 -2
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/polymodel.pyi +2 -2
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/query.pyi +26 -27
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/stats.pyi +37 -37
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/tasklets.pyi +7 -7
- package/dist/typeshed-fallback/stubs/google-cloud-ndb/google/cloud/ndb/utils.pyi +4 -4
- package/dist/typeshed-fallback/stubs/greenlet/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/gunicorn/gunicorn/arbiter.pyi +2 -2
- package/dist/typeshed-fallback/stubs/gunicorn/gunicorn/sock.pyi +2 -0
- package/dist/typeshed-fallback/stubs/html5lib/METADATA.toml +1 -0
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/_ihatexml.pyi +21 -23
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/_inputstream.pyi +79 -71
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/_tokenizer.pyi +87 -51
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/_utils.pyi +18 -11
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/constants.pyi +1 -3
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/alphabeticalattributes.pyi +3 -2
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/base.pyi +9 -6
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/inject_meta_charset.pyi +5 -3
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/lint.pyi +5 -3
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/optionaltags.pyi +1 -2
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/sanitizer.pyi +6 -6
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/filters/whitespace.pyi +2 -2
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/html5parser.pyi +2 -2
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/serializer.pyi +75 -19
- package/dist/typeshed-fallback/stubs/html5lib/html5lib/treeadapters/sax.pyi +0 -3
- package/dist/typeshed-fallback/stubs/hvac/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/__init__.pyi +3 -4
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/aws.pyi +2 -2
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/azure.pyi +3 -2
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/gcp.pyi +3 -2
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/kubernetes.pyi +1 -0
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/auth_methods/legacy_mfa.pyi +2 -4
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/secrets_engines/__init__.pyi +3 -4
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/secrets_engines/identity.pyi +2 -2
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/secrets_engines/kv.pyi +3 -3
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/system_backend/__init__.pyi +3 -4
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/system_backend/system_backend_mixin.pyi +2 -2
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/system_backend/wrapping.pyi +2 -3
- package/dist/typeshed-fallback/stubs/hvac/hvac/api/vault_api_category.pyi +2 -3
- package/dist/typeshed-fallback/stubs/icalendar/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/__init__.pyi +0 -1
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/alarms.pyi +2 -2
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/cal.pyi +16 -9
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/parser.pyi +2 -1
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/prop.pyi +3 -3
- package/dist/typeshed-fallback/stubs/icalendar/icalendar/timezone/zoneinfo.pyi +1 -5
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/__init__.pyi +5 -5
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/abstract/__init__.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/abstract/attrDef.pyi +13 -13
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/abstract/attribute.pyi +9 -9
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/abstract/cursor.pyi +28 -27
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/abstract/entry.pyi +11 -11
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/exceptions.pyi +8 -8
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/pooling.pyi +15 -15
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/rdns.pyi +7 -7
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/results.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/server.pyi +15 -14
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/timezone.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/tls.pyi +13 -13
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/core/usage.pyi +6 -6
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/extend/__init__.pyi +4 -4
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/extend/microsoft/dirSync.pyi +12 -12
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/extend/microsoft/persistentSearch.pyi +7 -7
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/extend/standard/PersistentSearch.pyi +13 -13
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/operation/modify.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/operation/search.pyi +5 -5
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/protocol/formatters/formatters.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/protocol/formatters/standard.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/protocol/oid.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/protocol/rfc2849.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/protocol/rfc4512.pyi +59 -59
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/asyncStream.pyi +7 -7
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/asynchronous.pyi +6 -6
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/base.pyi +7 -7
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/ldifProducer.pyi +4 -4
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/mockAsync.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/mockBase.pyi +9 -9
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/mockSync.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/restartable.pyi +4 -4
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/reusable.pyi +20 -20
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/strategy/sync.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/utils/config.pyi +2 -2
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/utils/hashed.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/utils/log.pyi +3 -3
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/utils/ntlm.pyi +41 -41
- package/dist/typeshed-fallback/stubs/ldap3/ldap3/utils/repr.pyi +2 -2
- package/dist/typeshed-fallback/stubs/lupa/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/netaddr/netaddr/ip/__init__.pyi +4 -1
- package/dist/typeshed-fallback/stubs/netaddr/netaddr/ip/iana.pyi +8 -8
- package/dist/typeshed-fallback/stubs/networkx/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/approximation/steinertree.pyi +4 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/approximation/treewidth.pyi +11 -7
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/assortativity/mixing.pyi +4 -4
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/bipartite/matching.pyi +3 -3
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/bipartite/matrix.pyi +8 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/closeness.pyi +2 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/current_flow_betweenness.pyi +2 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/eigenvector.pyi +3 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/katz.pyi +5 -4
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/percolation.pyi +3 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/reaching.pyi +3 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/centrality/subgraph_alg.pyi +2 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/cluster.pyi +13 -3
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/community/__init__.pyi +1 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/community/bipartitions.pyi +22 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/core.pyi +6 -5
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/dag.pyi +0 -3
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/distance_regular.pyi +2 -2
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/isomorphism/ismags.pyi +17 -4
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/non_randomness.pyi +1 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/perfect_graph.pyi +7 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/planarity.pyi +13 -4
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/similarity.pyi +19 -3
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/threshold.pyi +0 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/tree/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/tree/distance_measures.pyi +10 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/wiener.pyi +3 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/classes/digraph.pyi +13 -7
- package/dist/typeshed-fallback/stubs/networkx/networkx/classes/function.pyi +3 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/classes/graph.pyi +12 -6
- package/dist/typeshed-fallback/stubs/networkx/networkx/classes/multidigraph.pyi +10 -9
- package/dist/typeshed-fallback/stubs/networkx/networkx/classes/multigraph.pyi +23 -13
- package/dist/typeshed-fallback/stubs/networkx/networkx/drawing/layout.pyi +0 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/drawing/nx_pylab.pyi +20 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/generators/expanders.pyi +9 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/generators/random_graphs.pyi +21 -16
- package/dist/typeshed-fallback/stubs/networkx/networkx/generators/small.pyi +3 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/readwrite/json_graph/node_link.pyi +2 -24
- package/dist/typeshed-fallback/stubs/networkx/networkx/utils/misc.pyi +1 -1
- package/dist/typeshed-fallback/stubs/networkx/networkx/utils/random_sequence.pyi +5 -0
- package/dist/typeshed-fallback/stubs/networkx/networkx/utils/union_find.pyi +12 -10
- package/dist/typeshed-fallback/stubs/parsimonious/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/parsimonious/parsimonious/exceptions.pyi +5 -3
- package/dist/typeshed-fallback/stubs/peewee/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/protobuf/google/protobuf/internal/containers.pyi +8 -3
- package/dist/typeshed-fallback/stubs/protobuf/google/protobuf/internal/well_known_types.pyi +2 -2
- package/dist/typeshed-fallback/stubs/protobuf/google/protobuf/message.pyi +1 -1
- package/dist/typeshed-fallback/stubs/psutil/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/psutil/psutil/__init__.pyi +57 -77
- package/dist/typeshed-fallback/stubs/psutil/psutil/_common.pyi +135 -262
- package/dist/typeshed-fallback/stubs/psutil/psutil/_ntuples.pyi +384 -0
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psaix.pyi +100 -86
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psbsd.pyi +213 -168
- package/dist/typeshed-fallback/stubs/psutil/psutil/_pslinux.pyi +181 -237
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psosx.pyi +81 -100
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psposix.pyi +78 -4
- package/dist/typeshed-fallback/stubs/psutil/psutil/_pssunos.pyi +141 -119
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_aix.pyi +58 -0
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_bsd.pyi +141 -0
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_linux.pyi +47 -15
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_osx.pyi +77 -49
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_sunos.pyi +63 -0
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_windows.pyi +102 -91
- package/dist/typeshed-fallback/stubs/psutil/psutil/_pswindows.pyi +175 -200
- package/dist/typeshed-fallback/stubs/pycurl/pycurl.pyi +5 -3
- package/dist/typeshed-fallback/stubs/pyinstaller/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/python-crontab/crontab.pyi +4 -4
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/parser/_parser.pyi +6 -7
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/rrule.pyi +28 -25
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/tz/_common.pyi +8 -3
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/tz/tz.pyi +21 -19
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/tz/win.pyi +5 -4
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/zoneinfo/__init__.pyi +2 -1
- package/dist/typeshed-fallback/stubs/python-dateutil/dateutil/zoneinfo/rebuild.pyi +5 -1
- package/dist/typeshed-fallback/stubs/python-jenkins/jenkins/__init__.pyi +13 -6
- package/dist/typeshed-fallback/stubs/pytz/pytz/lazy.pyi +2 -2
- package/dist/typeshed-fallback/stubs/pyxdg/xdg/RecentFiles.pyi +5 -7
- package/dist/typeshed-fallback/stubs/qrbill/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/qrbill/qrbill/bill.pyi +10 -0
- package/dist/typeshed-fallback/stubs/regex/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/regex/regex/__init__.pyi +64 -1
- package/dist/typeshed-fallback/stubs/regex/regex/{regex.pyi → _main.pyi} +66 -0
- package/dist/typeshed-fallback/stubs/regex/regex/_regex.pyi +1 -1
- package/dist/typeshed-fallback/stubs/regex/regex/_regex_core.pyi +59 -19
- package/dist/typeshed-fallback/stubs/reportlab/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/reportlab/reportlab/lib/testutils.pyi +27 -25
- package/dist/typeshed-fallback/stubs/reportlab/reportlab/platypus/flowables.pyi +1 -1
- package/dist/typeshed-fallback/stubs/requests/requests/exceptions.pyi +2 -1
- package/dist/typeshed-fallback/stubs/seaborn/seaborn/_core/data.pyi +2 -3
- package/dist/typeshed-fallback/stubs/seaborn/seaborn/_core/typing.pyi +0 -2
- package/dist/typeshed-fallback/stubs/seaborn/seaborn/categorical.pyi +1 -1
- package/dist/typeshed-fallback/stubs/setuptools/setuptools/__init__.pyi +102 -45
- package/dist/typeshed-fallback/stubs/setuptools/setuptools/_distutils/cmd.pyi +6 -3
- package/dist/typeshed-fallback/stubs/setuptools/setuptools/_distutils/compilers/C/base.pyi +8 -2
- package/dist/typeshed-fallback/stubs/setuptools/setuptools/_distutils/compilers/C/msvc.pyi +3 -0
- package/dist/typeshed-fallback/stubs/setuptools/setuptools/_distutils/spawn.pyi +14 -4
- package/dist/typeshed-fallback/stubs/simple-websocket/METADATA.toml +3 -0
- package/dist/typeshed-fallback/stubs/simple-websocket/simple_websocket/__init__.pyi +3 -0
- package/dist/typeshed-fallback/stubs/simple-websocket/simple_websocket/aiows.pyi +130 -0
- package/dist/typeshed-fallback/stubs/simple-websocket/simple_websocket/asgi.pyi +44 -0
- package/dist/typeshed-fallback/stubs/simple-websocket/simple_websocket/errors.pyi +12 -0
- package/dist/typeshed-fallback/stubs/simple-websocket/simple_websocket/ws.pyi +136 -0
- package/dist/typeshed-fallback/stubs/tensorflow/METADATA.toml +16 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/__init__.pyi +437 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/_aliases.pyi +70 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/audio.pyi +7 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/autodiff.pyi +63 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/autograph/__init__.pyi +17 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/autograph/experimental.pyi +30 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/bitwise.pyi +37 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/service/hlo_pb2.pyi +2113 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/service/hlo_profile_printer_data_pb2.pyi +187 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/service/metrics_pb2.pyi +283 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/service/test_compilation_environment_pb2.pyi +59 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/service/xla_compile_result_pb2.pyi +167 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/tsl/protobuf/bfc_memory_map_pb2.pyi +218 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/tsl/protobuf/test_log_pb2.pyi +707 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/xla_data_pb2.pyi +2681 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/compiler/xla/xla_pb2.pyi +2558 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/config/__init__.pyi +12 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/config/experimental.pyi +16 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/example/example_parser_configuration_pb2.pyi +153 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/example/example_pb2.pyi +330 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/example/feature_pb2.pyi +222 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/allocation_description_pb2.pyi +64 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/api_def_pb2.pyi +312 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/attr_value_pb2.pyi +274 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/cost_graph_pb2.pyi +229 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/cpp_shape_inference_pb2.pyi +110 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/dataset_metadata_pb2.pyi +25 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/dataset_options_pb2.pyi +720 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/dataset_pb2.pyi +116 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/device_attributes_pb2.pyi +140 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/full_type_pb2.pyi +617 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/function_pb2.pyi +309 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/graph_debug_info_pb2.pyi +210 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/graph_pb2.pyi +100 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/graph_transfer_info_pb2.pyi +290 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/kernel_def_pb2.pyi +116 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/log_memory_pb2.pyi +218 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/model_pb2.pyi +368 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/node_def_pb2.pyi +192 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/op_def_pb2.pyi +391 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/optimized_function_graph_pb2.pyi +166 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/reader_base_pb2.pyi +52 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/resource_handle_pb2.pyi +104 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/step_stats_pb2.pyi +332 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/summary_pb2.pyi +368 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/tensor_description_pb2.pyi +49 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/tensor_pb2.pyi +235 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/tensor_shape_pb2.pyi +74 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/tensor_slice_pb2.pyi +56 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/types_pb2.pyi +220 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/variable_pb2.pyi +229 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/framework/versions_pb2.pyi +57 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/__init__.pyi +0 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/bfc_memory_map_pb2.pyi +15 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/cluster_pb2.pyi +126 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/composite_tensor_variant_pb2.pyi +33 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/config_pb2.pyi +1867 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/control_flow_pb2.pyi +243 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/core_platform_payloads_pb2.pyi +66 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/data_service_pb2.pyi +243 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/debug_event_pb2.pyi +746 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/debug_pb2.pyi +199 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/device_filters_pb2.pyi +124 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/device_properties_pb2.pyi +154 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/error_codes_pb2.pyi +33 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/fingerprint_pb2.pyi +76 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/meta_graph_pb2.pyi +735 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/named_tensor_pb2.pyi +41 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/queue_runner_pb2.pyi +73 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/remote_tensor_handle_pb2.pyi +96 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/rewriter_config_pb2.pyi +588 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/rpc_options_pb2.pyi +9 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/saved_model_pb2.pyi +51 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/saved_object_graph_pb2.pyi +715 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/saver_pb2.pyi +113 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/service_config_pb2.pyi +275 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/snapshot_pb2.pyi +164 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/status_pb2.pyi +13 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/struct_pb2.pyi +536 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tensor_bundle_pb2.pyi +160 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tensorflow_server_pb2.pyi +125 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tpu/compilation_result_pb2.pyi +85 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tpu/dynamic_padding_pb2.pyi +47 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tpu/optimization_parameters_pb2.pyi +1326 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tpu/topology_pb2.pyi +149 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/tpu/tpu_embedding_configuration_pb2.pyi +326 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/trackable_object_graph_pb2.pyi +213 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/transport_options_pb2.pyi +28 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/protobuf/verifier_config_pb2.pyi +65 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/util/event_pb2.pyi +419 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/util/memmapped_file_system_pb2.pyi +65 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/util/saved_tensor_slice_pb2.pyi +171 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/core/util/test_log_pb2.pyi +24 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/data/__init__.pyi +272 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/data/experimental.pyi +32 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/distribute/__init__.pyi +3 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/distribute/coordinator.pyi +3 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/distribute/experimental/coordinator.pyi +11 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/dtypes.pyi +57 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/experimental/__init__.pyi +10 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/experimental/dtensor.pyi +19 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/feature_column/__init__.pyi +95 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/initializers.pyi +1 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/io/__init__.pyi +104 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/io/gfile.pyi +11 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/__init__.pyi +15 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/activations.pyi +35 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/callbacks.pyi +170 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/constraints.pyi +16 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/initializers.pyi +50 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/layers/__init__.pyi +446 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/losses.pyi +177 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/metrics.pyi +117 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/models.pyi +167 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/optimizers/__init__.pyi +7 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/optimizers/legacy/__init__.pyi +61 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/optimizers/schedules.pyi +103 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/keras/regularizers.pyi +21 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/linalg.pyi +55 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/math.pyi +298 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/nn.pyi +194 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/distribute/distribute_lib.pyi +5 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/feature_column/__init__.pyi +0 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/feature_column/feature_column_v2.pyi +273 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/feature_column/sequence_feature_column.pyi +30 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/framework/dtypes.pyi +7 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/keras/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/keras/protobuf/projector_config_pb2.pyi +129 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/keras/protobuf/saved_metadata_pb2.pyi +89 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/keras/protobuf/versions_pb2.pyi +63 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/trackable/__init__.pyi +0 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/trackable/autotrackable.pyi +3 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/trackable/base.pyi +5 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/trackable/resource.pyi +9 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/trackable/ressource.pyi +7 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/python/training/tracking/autotrackable.pyi +3 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/random.pyi +231 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/raw_ops.pyi +44 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/saved_model/__init__.pyi +125 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/saved_model/experimental.pyi +39 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/signal.pyi +6 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/sparse.pyi +31 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/strings.pyi +241 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/summary.pyi +58 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/train/__init__.pyi +74 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/train/experimental.pyi +12 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/coordination_config_pb2.pyi +156 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/coordination_service_pb2.pyi +666 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/distributed_runtime_payloads_pb2.pyi +69 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/dnn_pb2.pyi +619 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/error_codes_pb2.pyi +291 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/histogram_pb2.pyi +78 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/rpc_options_pb2.pyi +85 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/tsl/protobuf/status_pb2.pyi +34 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/types/__init__.pyi +1 -0
- package/dist/typeshed-fallback/stubs/tensorflow/tensorflow/types/experimental.pyi +30 -0
- package/dist/typeshed-fallback/stubs/ttkthemes/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/ttkthemes/ttkthemes/themed_style.pyi +1 -1
- package/dist/typeshed-fallback/stubs/ttkthemes/ttkthemes/themed_tk.pyi +11 -12
- package/dist/typeshed-fallback/stubs/tzdata/METADATA.toml +2 -0
- package/dist/typeshed-fallback/stubs/tzdata/tzdata/__init__.pyi +4 -0
- package/dist/typeshed-fallback/stubs/webencodings/METADATA.toml +2 -0
- package/dist/typeshed-fallback/stubs/webencodings/webencodings/__init__.pyi +33 -0
- package/dist/typeshed-fallback/stubs/webencodings/webencodings/labels.pyi +3 -0
- package/dist/typeshed-fallback/stubs/webencodings/webencodings/mklabels.pyi +5 -0
- package/dist/typeshed-fallback/stubs/webencodings/webencodings/x_user_defined.pyi +21 -0
- package/dist/typeshed-fallback/stubs/yt-dlp/METADATA.toml +1 -1
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/__init__.pyi +2 -0
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/extractor/common.pyi +1 -0
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/globals.pyi +12 -1
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/utils/_jsruntime.pyi +38 -0
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/utils/_legacy.pyi +2 -68
- package/dist/typeshed-fallback/stubs/yt-dlp/yt_dlp/utils/_utils.pyi +1 -1
- package/package.json +7 -7
- package/dist/typeshed-fallback/stdlib/_typeshed/README.md +0 -34
- package/dist/typeshed-fallback/stubs/ExifRead/METADATA.toml +0 -3
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/__init__.pyi +0 -17
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/_types.pyi +0 -15
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/classes.pyi +0 -47
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/exceptions.pyi +0 -2
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/exif_log.pyi +0 -24
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/heic.pyi +0 -56
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/jpeg.pyi +0 -7
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/__init__.pyi +0 -15
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/exif.pyi +0 -7
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/apple.pyi +0 -3
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/canon.pyi +0 -26
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/casio.pyi +0 -3
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/fujifilm.pyi +0 -3
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/nikon.pyi +0 -6
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/tags/makernote/olympus.pyi +0 -6
- package/dist/typeshed-fallback/stubs/ExifRead/exifread/utils.pyi +0 -22
- package/dist/typeshed-fallback/stubs/networkx/networkx/algorithms/community/kernighan_lin.pyi +0 -16
- package/dist/typeshed-fallback/stubs/psutil/psutil/_psutil_posix.pyi +0 -34
- package/dist/typeshed-fallback/stubs/vobject/@tests/stubtest_allowlist.txt +0 -22
- /package/dist/typeshed-fallback/stubs/{ExifRead/exifread/tags/makernote → colorful/colorful}/__init__.pyi +0 -0
@@ -0,0 +1,1867 @@
+"""
+@generated by mypy-protobuf. Do not edit manually!
+isort:skip_file
+"""
+
+import builtins
+import collections.abc
+import sys
+import typing
+
+import google.protobuf.descriptor
+import google.protobuf.internal.containers
+import google.protobuf.internal.enum_type_wrapper
+import google.protobuf.message
+import tensorflow.core.framework.cost_graph_pb2
+import tensorflow.core.framework.graph_pb2
+import tensorflow.core.framework.step_stats_pb2
+import tensorflow.core.protobuf.cluster_pb2
+import tensorflow.core.protobuf.debug_pb2
+import tensorflow.core.protobuf.rewriter_config_pb2
+import tensorflow.tsl.protobuf.coordination_config_pb2
+import tensorflow.tsl.protobuf.rpc_options_pb2
+
+if sys.version_info >= (3, 10):
+    import typing as typing_extensions
+else:
+    import typing_extensions
+
+DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
[The hunk continues with the GPUOptions, OptimizerOptions, GraphOptions, ThreadPoolOptionProto, SessionMetadata, and ConfigProto message stubs: GPUOptions nests Experimental, Experimental.VirtualDevices, and Experimental.StreamMergeOptions; OptimizerOptions defines the Level and GlobalJitLevel enums; ConfigProto nests DeviceCountEntry and Experimental with the MlirBridgeRollout enum. Each message exposes DESCRIPTOR, field-number constants, typed field properties documented with the upstream proto comments, and __init__/HasField/ClearField signatures, with a global___ alias for every top-level message.]
|
|
1030
|
+
"""
|
|
1031
|
+
disable_optimize_for_static_graph: builtins.bool
|
|
1032
|
+
"""If true, the session will treat the graph as being non-static for
|
|
1033
|
+
optimization purposes.
|
|
1034
|
+
|
|
1035
|
+
If this option is set to true when a session is created, the full
|
|
1036
|
+
GraphDef will be retained to enable calls to Session::Extend().
|
|
1037
|
+
Calling Extend() without setting this flag will result in errors.
|
|
1038
|
+
|
|
1039
|
+
This option is meant to replace `optimize_for_static_graph` and it
|
|
1040
|
+
aims to negate its value.
|
|
1041
|
+
"""
|
|
1042
|
+
disable_eager_executor_streaming_enqueue: builtins.bool
|
|
1043
|
+
"""Whether eager remote execution will stream all the function calls or
|
|
1044
|
+
allow them to happen in parallel. When true, streaming execution is
|
|
1045
|
+
disabled, and parallel execution is allowed.
|
|
1046
|
+
"""
|
|
1047
|
+
@property
|
|
1048
|
+
def session_metadata(self) -> global___SessionMetadata:
|
|
1049
|
+
"""Metadata about the session.
|
|
1050
|
+
|
|
1051
|
+
If set, this can be used by the runtime and the Ops for debugging,
|
|
1052
|
+
monitoring, etc.
|
|
1053
|
+
|
|
1054
|
+
NOTE: This is currently used and propagated only by the direct session
|
|
1055
|
+
and EagerContext.
|
|
1056
|
+
"""
|
|
1057
|
+
|
|
1058
|
+
@property
|
|
1059
|
+
def coordination_config(self) -> tensorflow.tsl.protobuf.coordination_config_pb2.CoordinationServiceConfig:
|
|
1060
|
+
"""Distributed coordination service configurations."""
|
|
1061
|
+
|
|
1062
|
+
def __init__(
|
|
1063
|
+
self,
|
|
1064
|
+
*,
|
|
1065
|
+
collective_group_leader: builtins.str | None = ...,
|
|
1066
|
+
executor_type: builtins.str | None = ...,
|
|
1067
|
+
recv_buf_max_chunk: builtins.int | None = ...,
|
|
1068
|
+
use_numa_affinity: builtins.bool | None = ...,
|
|
1069
|
+
collective_deterministic_sequential_execution: builtins.bool | None = ...,
|
|
1070
|
+
collective_nccl: builtins.bool | None = ...,
|
|
1071
|
+
share_session_state_in_clusterspec_propagation: builtins.bool | None = ...,
|
|
1072
|
+
disable_thread_spinning: builtins.bool | None = ...,
|
|
1073
|
+
share_cluster_devices_in_session: builtins.bool | None = ...,
|
|
1074
|
+
session_metadata: global___SessionMetadata | None = ...,
|
|
1075
|
+
optimize_for_static_graph: builtins.bool | None = ...,
|
|
1076
|
+
enable_mlir_bridge: builtins.bool | None = ...,
|
|
1077
|
+
mlir_bridge_rollout: global___ConfigProto.Experimental.MlirBridgeRollout.ValueType | None = ...,
|
|
1078
|
+
enable_mlir_graph_optimization: builtins.bool | None = ...,
|
|
1079
|
+
disable_output_partition_graphs: builtins.bool | None = ...,
|
|
1080
|
+
xla_fusion_autotuner_thresh: builtins.int | None = ...,
|
|
1081
|
+
use_tfrt: builtins.bool | None = ...,
|
|
1082
|
+
enable_multi_host: builtins.bool | None = ...,
|
|
1083
|
+
tfrt_use_ifrt: builtins.bool | None = ...,
|
|
1084
|
+
backend_server_port: builtins.int | None = ...,
|
|
1085
|
+
target_tpu: builtins.bool | None = ...,
|
|
1086
|
+
target_gpu: builtins.bool | None = ...,
|
|
1087
|
+
stream_merge_threshold: builtins.int | None = ...,
|
|
1088
|
+
disable_functional_ops_lowering: builtins.bool | None = ...,
|
|
1089
|
+
xla_prefer_single_graph_cluster: builtins.bool | None = ...,
|
|
1090
|
+
coordination_config: tensorflow.tsl.protobuf.coordination_config_pb2.CoordinationServiceConfig | None = ...,
|
|
1091
|
+
disable_optimize_for_static_graph: builtins.bool | None = ...,
|
|
1092
|
+
disable_eager_executor_streaming_enqueue: builtins.bool | None = ...,
|
|
1093
|
+
) -> None: ...
|
|
1094
|
+
def HasField(
|
|
1095
|
+
self,
|
|
1096
|
+
field_name: typing.Literal["coordination_config", b"coordination_config", "session_metadata", b"session_metadata"],
|
|
1097
|
+
) -> builtins.bool: ...
|
|
1098
|
+
def ClearField(
|
|
1099
|
+
self,
|
|
1100
|
+
field_name: typing.Literal[
|
|
1101
|
+
"backend_server_port",
|
|
1102
|
+
b"backend_server_port",
|
|
1103
|
+
"collective_deterministic_sequential_execution",
|
|
1104
|
+
b"collective_deterministic_sequential_execution",
|
|
1105
|
+
"collective_group_leader",
|
|
1106
|
+
b"collective_group_leader",
|
|
1107
|
+
"collective_nccl",
|
|
1108
|
+
b"collective_nccl",
|
|
1109
|
+
"coordination_config",
|
|
1110
|
+
b"coordination_config",
|
|
1111
|
+
"disable_eager_executor_streaming_enqueue",
|
|
1112
|
+
b"disable_eager_executor_streaming_enqueue",
|
|
1113
|
+
"disable_functional_ops_lowering",
|
|
1114
|
+
b"disable_functional_ops_lowering",
|
|
1115
|
+
"disable_optimize_for_static_graph",
|
|
1116
|
+
b"disable_optimize_for_static_graph",
|
|
1117
|
+
"disable_output_partition_graphs",
|
|
1118
|
+
b"disable_output_partition_graphs",
|
|
1119
|
+
"disable_thread_spinning",
|
|
1120
|
+
b"disable_thread_spinning",
|
|
1121
|
+
"enable_mlir_bridge",
|
|
1122
|
+
b"enable_mlir_bridge",
|
|
1123
|
+
"enable_mlir_graph_optimization",
|
|
1124
|
+
b"enable_mlir_graph_optimization",
|
|
1125
|
+
"enable_multi_host",
|
|
1126
|
+
b"enable_multi_host",
|
|
1127
|
+
"executor_type",
|
|
1128
|
+
b"executor_type",
|
|
1129
|
+
"mlir_bridge_rollout",
|
|
1130
|
+
b"mlir_bridge_rollout",
|
|
1131
|
+
"optimize_for_static_graph",
|
|
1132
|
+
b"optimize_for_static_graph",
|
|
1133
|
+
"recv_buf_max_chunk",
|
|
1134
|
+
b"recv_buf_max_chunk",
|
|
1135
|
+
"session_metadata",
|
|
1136
|
+
b"session_metadata",
|
|
1137
|
+
"share_cluster_devices_in_session",
|
|
1138
|
+
b"share_cluster_devices_in_session",
|
|
1139
|
+
"share_session_state_in_clusterspec_propagation",
|
|
1140
|
+
b"share_session_state_in_clusterspec_propagation",
|
|
1141
|
+
"stream_merge_threshold",
|
|
1142
|
+
b"stream_merge_threshold",
|
|
1143
|
+
"target_gpu",
|
|
1144
|
+
b"target_gpu",
|
|
1145
|
+
"target_tpu",
|
|
1146
|
+
b"target_tpu",
|
|
1147
|
+
"tfrt_use_ifrt",
|
|
1148
|
+
b"tfrt_use_ifrt",
|
|
1149
|
+
"use_numa_affinity",
|
|
1150
|
+
b"use_numa_affinity",
|
|
1151
|
+
"use_tfrt",
|
|
1152
|
+
b"use_tfrt",
|
|
1153
|
+
"xla_fusion_autotuner_thresh",
|
|
1154
|
+
b"xla_fusion_autotuner_thresh",
|
|
1155
|
+
"xla_prefer_single_graph_cluster",
|
|
1156
|
+
b"xla_prefer_single_graph_cluster",
|
|
1157
|
+
],
|
|
1158
|
+
) -> None: ...
|
|
1159
|
+
|
|
1160
|
+
DEVICE_COUNT_FIELD_NUMBER: builtins.int
|
|
1161
|
+
INTRA_OP_PARALLELISM_THREADS_FIELD_NUMBER: builtins.int
|
|
1162
|
+
INTER_OP_PARALLELISM_THREADS_FIELD_NUMBER: builtins.int
|
|
1163
|
+
USE_PER_SESSION_THREADS_FIELD_NUMBER: builtins.int
|
|
1164
|
+
SESSION_INTER_OP_THREAD_POOL_FIELD_NUMBER: builtins.int
|
|
1165
|
+
PLACEMENT_PERIOD_FIELD_NUMBER: builtins.int
|
|
1166
|
+
DEVICE_FILTERS_FIELD_NUMBER: builtins.int
|
|
1167
|
+
GPU_OPTIONS_FIELD_NUMBER: builtins.int
|
|
1168
|
+
PLUGGABLE_DEVICE_OPTIONS_FIELD_NUMBER: builtins.int
|
|
1169
|
+
ALLOW_SOFT_PLACEMENT_FIELD_NUMBER: builtins.int
|
|
1170
|
+
LOG_DEVICE_PLACEMENT_FIELD_NUMBER: builtins.int
|
|
1171
|
+
GRAPH_OPTIONS_FIELD_NUMBER: builtins.int
|
|
1172
|
+
OPERATION_TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
|
|
1173
|
+
RPC_OPTIONS_FIELD_NUMBER: builtins.int
|
|
1174
|
+
CLUSTER_DEF_FIELD_NUMBER: builtins.int
|
|
1175
|
+
ISOLATE_SESSION_STATE_FIELD_NUMBER: builtins.int
|
|
1176
|
+
SHARE_CLUSTER_DEVICES_IN_SESSION_FIELD_NUMBER: builtins.int
|
|
1177
|
+
EXPERIMENTAL_FIELD_NUMBER: builtins.int
|
|
1178
|
+
intra_op_parallelism_threads: builtins.int
|
|
1179
|
+
"""The execution of an individual op (for some op types) can be
|
|
1180
|
+
parallelized on a pool of intra_op_parallelism_threads.
|
|
1181
|
+
0 means the system picks an appropriate number.
|
|
1182
|
+
|
|
1183
|
+
If you create an ordinary session, e.g., from Python or C++,
|
|
1184
|
+
then there is exactly one intra op thread pool per process.
|
|
1185
|
+
The first session created determines the number of threads in this pool.
|
|
1186
|
+
All subsequent sessions reuse/share this one global pool.
|
|
1187
|
+
|
|
1188
|
+
There are notable exceptions to the default behavior described above:
|
|
1189
|
+
1. There is an environment variable for overriding this thread pool,
|
|
1190
|
+
named TF_OVERRIDE_GLOBAL_THREADPOOL.
|
|
1191
|
+
2. When connecting to a server, such as a remote `tf.train.Server`
|
|
1192
|
+
instance, then this option will be ignored altogether.
|
|
1193
|
+
"""
|
|
1194
|
+
inter_op_parallelism_threads: builtins.int
|
|
1195
|
+
"""Nodes that perform blocking operations are enqueued on a pool of
|
|
1196
|
+
inter_op_parallelism_threads available in each process.
|
|
1197
|
+
|
|
1198
|
+
0 means the system picks an appropriate number.
|
|
1199
|
+
Negative means all operations are performed in caller's thread.
|
|
1200
|
+
|
|
1201
|
+
Note that the first Session created in the process sets the
|
|
1202
|
+
number of threads for all future sessions unless use_per_session_threads is
|
|
1203
|
+
true or session_inter_op_thread_pool is configured.
|
|
1204
|
+
"""
|
|
1205
|
+
use_per_session_threads: builtins.bool
|
|
1206
|
+
"""If true, use a new set of threads for this session rather than the global
|
|
1207
|
+
pool of threads. Only supported by direct sessions.
|
|
1208
|
+
|
|
1209
|
+
If false, use the global threads created by the first session, or the
|
|
1210
|
+
per-session thread pools configured by session_inter_op_thread_pool.
|
|
1211
|
+
|
|
1212
|
+
This option is deprecated. The same effect can be achieved by setting
|
|
1213
|
+
session_inter_op_thread_pool to have one element, whose num_threads equals
|
|
1214
|
+
inter_op_parallelism_threads.
|
|
1215
|
+
"""
|
|
1216
|
+
placement_period: builtins.int
|
|
1217
|
+
"""Assignment of Nodes to Devices is recomputed every placement_period
|
|
1218
|
+
steps until the system warms up (at which point the recomputation
|
|
1219
|
+
typically slows down automatically).
|
|
1220
|
+
"""
|
|
1221
|
+
allow_soft_placement: builtins.bool
|
|
1222
|
+
"""Whether soft placement is allowed. If allow_soft_placement is true,
|
|
1223
|
+
an op will be placed on CPU if
|
|
1224
|
+
1. there's no GPU implementation for the OP
|
|
1225
|
+
or
|
|
1226
|
+
2. no GPU devices are known or registered
|
|
1227
|
+
or
|
|
1228
|
+
3. need to co-locate with reftype input(s) which are from CPU.
|
|
1229
|
+
"""
|
|
1230
|
+
log_device_placement: builtins.bool
|
|
1231
|
+
"""Whether device placements should be logged."""
|
|
1232
|
+
operation_timeout_in_ms: builtins.int
|
|
1233
|
+
"""Global timeout for all blocking operations in this session. If non-zero,
|
|
1234
|
+
and not overridden on a per-operation basis, this value will be used as the
|
|
1235
|
+
deadline for all blocking operations.
|
|
1236
|
+
"""
|
|
1237
|
+
isolate_session_state: builtins.bool
|
|
1238
|
+
"""If true, any resources such as Variables used in the session will not be
|
|
1239
|
+
shared with other sessions. However, when clusterspec propagation is
|
|
1240
|
+
enabled, this field is ignored and sessions are always isolated.
|
|
1241
|
+
"""
|
|
1242
|
+
share_cluster_devices_in_session: builtins.bool
|
|
1243
|
+
"""When true, WorkerSessions are created with device attributes from the
|
|
1244
|
+
full cluster.
|
|
1245
|
+
This is helpful when a worker wants to partition a graph
|
|
1246
|
+
(for example during a PartitionedCallOp).
|
|
1247
|
+
"""
|
|
1248
|
+
@property
|
|
1249
|
+
def device_count(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.int]:
|
|
1250
|
+
"""Map from device type name (e.g., "CPU" or "GPU" ) to maximum
|
|
1251
|
+
number of devices of that type to use. If a particular device
|
|
1252
|
+
type is not found in the map, the system picks an appropriate
|
|
1253
|
+
number.
|
|
1254
|
+
"""
|
|
1255
|
+
|
|
1256
|
+
@property
|
|
1257
|
+
def session_inter_op_thread_pool(
|
|
1258
|
+
self,
|
|
1259
|
+
) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ThreadPoolOptionProto]:
|
|
1260
|
+
"""This option is experimental - it may be replaced with a different mechanism
|
|
1261
|
+
in the future.
|
|
1262
|
+
|
|
1263
|
+
Configures session thread pools. If this is configured, then RunOptions for
|
|
1264
|
+
a Run call can select the thread pool to use.
|
|
1265
|
+
|
|
1266
|
+
The intended use is for when some session invocations need to run in a
|
|
1267
|
+
background pool limited to a small number of threads:
|
|
1268
|
+
- For example, a session may be configured to have one large pool (for
|
|
1269
|
+
regular compute) and one small pool (for periodic, low priority work);
|
|
1270
|
+
using the small pool is currently the mechanism for limiting the inter-op
|
|
1271
|
+
parallelism of the low priority work. Note that it does not limit the
|
|
1272
|
+
parallelism of work spawned by a single op kernel implementation.
|
|
1273
|
+
- Using this setting is normally not needed in training, but may help some
|
|
1274
|
+
serving use cases.
|
|
1275
|
+
- It is also generally recommended to set the global_name field of this
|
|
1276
|
+
proto, to avoid creating multiple large pools. It is typically better to
|
|
1277
|
+
run the non-low-priority work, even across sessions, in a single large
|
|
1278
|
+
pool.
|
|
1279
|
+
"""
|
|
1280
|
+
|
|
1281
|
+
@property
|
|
1282
|
+
def device_filters(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
|
|
1283
|
+
"""When any filters are present sessions will ignore all devices which do not
|
|
1284
|
+
match the filters. Each filter can be partially specified, e.g. "/job:ps"
|
|
1285
|
+
"/job:worker/replica:3", etc.
|
|
1286
|
+
"""
|
|
1287
|
+
|
|
1288
|
+
@property
|
|
1289
|
+
def gpu_options(self) -> global___GPUOptions:
|
|
1290
|
+
"""Options that apply to all GPUs."""
|
|
1291
|
+
|
|
1292
|
+
@property
|
|
1293
|
+
def pluggable_device_options(self) -> global___GPUOptions:
|
|
1294
|
+
"""Options that apply to pluggable devices."""
|
|
1295
|
+
|
|
1296
|
+
@property
|
|
1297
|
+
def graph_options(self) -> global___GraphOptions:
|
|
1298
|
+
"""Options that apply to all graphs."""
|
|
1299
|
+
|
|
1300
|
+
@property
|
|
1301
|
+
def rpc_options(self) -> tensorflow.tsl.protobuf.rpc_options_pb2.RPCOptions:
|
|
1302
|
+
"""Options that apply when this session uses the distributed runtime."""
|
|
1303
|
+
|
|
1304
|
+
@property
|
|
1305
|
+
def cluster_def(self) -> tensorflow.core.protobuf.cluster_pb2.ClusterDef:
|
|
1306
|
+
"""Optional list of all workers to use in this session."""
|
|
1307
|
+
|
|
1308
|
+
@property
|
|
1309
|
+
def experimental(self) -> global___ConfigProto.Experimental: ...
|
|
1310
|
+
def __init__(
|
|
1311
|
+
self,
|
|
1312
|
+
*,
|
|
1313
|
+
device_count: collections.abc.Mapping[builtins.str, builtins.int] | None = ...,
|
|
1314
|
+
intra_op_parallelism_threads: builtins.int | None = ...,
|
|
1315
|
+
inter_op_parallelism_threads: builtins.int | None = ...,
|
|
1316
|
+
use_per_session_threads: builtins.bool | None = ...,
|
|
1317
|
+
session_inter_op_thread_pool: collections.abc.Iterable[global___ThreadPoolOptionProto] | None = ...,
|
|
1318
|
+
placement_period: builtins.int | None = ...,
|
|
1319
|
+
device_filters: collections.abc.Iterable[builtins.str] | None = ...,
|
|
1320
|
+
gpu_options: global___GPUOptions | None = ...,
|
|
1321
|
+
pluggable_device_options: global___GPUOptions | None = ...,
|
|
1322
|
+
allow_soft_placement: builtins.bool | None = ...,
|
|
1323
|
+
log_device_placement: builtins.bool | None = ...,
|
|
1324
|
+
graph_options: global___GraphOptions | None = ...,
|
|
1325
|
+
operation_timeout_in_ms: builtins.int | None = ...,
|
|
1326
|
+
rpc_options: tensorflow.tsl.protobuf.rpc_options_pb2.RPCOptions | None = ...,
|
|
1327
|
+
cluster_def: tensorflow.core.protobuf.cluster_pb2.ClusterDef | None = ...,
|
|
1328
|
+
isolate_session_state: builtins.bool | None = ...,
|
|
1329
|
+
share_cluster_devices_in_session: builtins.bool | None = ...,
|
|
1330
|
+
experimental: global___ConfigProto.Experimental | None = ...,
|
|
1331
|
+
) -> None: ...
|
|
1332
|
+
def HasField(
|
|
1333
|
+
self,
|
|
1334
|
+
field_name: typing.Literal[
|
|
1335
|
+
"cluster_def",
|
|
1336
|
+
b"cluster_def",
|
|
1337
|
+
"experimental",
|
|
1338
|
+
b"experimental",
|
|
1339
|
+
"gpu_options",
|
|
1340
|
+
b"gpu_options",
|
|
1341
|
+
"graph_options",
|
|
1342
|
+
b"graph_options",
|
|
1343
|
+
"pluggable_device_options",
|
|
1344
|
+
b"pluggable_device_options",
|
|
1345
|
+
"rpc_options",
|
|
1346
|
+
b"rpc_options",
|
|
1347
|
+
],
|
|
1348
|
+
) -> builtins.bool: ...
|
|
1349
|
+
def ClearField(
|
|
1350
|
+
self,
|
|
1351
|
+
field_name: typing.Literal[
|
|
1352
|
+
"allow_soft_placement",
|
|
1353
|
+
b"allow_soft_placement",
|
|
1354
|
+
"cluster_def",
|
|
1355
|
+
b"cluster_def",
|
|
1356
|
+
"device_count",
|
|
1357
|
+
b"device_count",
|
|
1358
|
+
"device_filters",
|
|
1359
|
+
b"device_filters",
|
|
1360
|
+
"experimental",
|
|
1361
|
+
b"experimental",
|
|
1362
|
+
"gpu_options",
|
|
1363
|
+
b"gpu_options",
|
|
1364
|
+
"graph_options",
|
|
1365
|
+
b"graph_options",
|
|
1366
|
+
"inter_op_parallelism_threads",
|
|
1367
|
+
b"inter_op_parallelism_threads",
|
|
1368
|
+
"intra_op_parallelism_threads",
|
|
1369
|
+
b"intra_op_parallelism_threads",
|
|
1370
|
+
"isolate_session_state",
|
|
1371
|
+
b"isolate_session_state",
|
|
1372
|
+
"log_device_placement",
|
|
1373
|
+
b"log_device_placement",
|
|
1374
|
+
"operation_timeout_in_ms",
|
|
1375
|
+
b"operation_timeout_in_ms",
|
|
1376
|
+
"placement_period",
|
|
1377
|
+
b"placement_period",
|
|
1378
|
+
"pluggable_device_options",
|
|
1379
|
+
b"pluggable_device_options",
|
|
1380
|
+
"rpc_options",
|
|
1381
|
+
b"rpc_options",
|
|
1382
|
+
"session_inter_op_thread_pool",
|
|
1383
|
+
b"session_inter_op_thread_pool",
|
|
1384
|
+
"share_cluster_devices_in_session",
|
|
1385
|
+
b"share_cluster_devices_in_session",
|
|
1386
|
+
"use_per_session_threads",
|
|
1387
|
+
b"use_per_session_threads",
|
|
1388
|
+
],
|
|
1389
|
+
) -> None: ...
|
|
1390
|
+
|
|
1391
|
+
global___ConfigProto = ConfigProto
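
The `ConfigProto` stub above only declares the message surface. The sketch below is not part of the package diff; it is a hedged illustration of how a message using a few of these fields could be built with the generated protobuf API, assuming the generated module is importable as `tensorflow.core.protobuf.config_pb2`.

```python
# Illustrative only; assumes the generated module path below.
from tensorflow.core.protobuf import config_pb2

config = config_pb2.ConfigProto(
    allow_soft_placement=True,        # fall back to CPU when no GPU kernel exists
    inter_op_parallelism_threads=0,   # 0 lets the runtime pick a value
    device_count={"GPU": 1},          # cap the number of GPU devices to use
    experimental=config_pb2.ConfigProto.Experimental(
        use_numa_affinity=True,
        disable_thread_spinning=True,
    ),
)

# The HasField/ClearField overloads typed above mirror the protobuf runtime API.
assert config.HasField("experimental")
config.ClearField("device_count")
```

The keyword-only `__init__` and the `Literal`-typed `HasField`/`ClearField` signatures in this stub are what let basedpyright check calls like these without falling back to `Any`.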

@typing.final
class RunOptions(google.protobuf.message.Message):
    """Options for a single Run() call."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _TraceLevel:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _TraceLevelEnumTypeWrapper(
        google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RunOptions._TraceLevel.ValueType], builtins.type
    ):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        NO_TRACE: RunOptions._TraceLevel.ValueType  # 0
        SOFTWARE_TRACE: RunOptions._TraceLevel.ValueType  # 1
        HARDWARE_TRACE: RunOptions._TraceLevel.ValueType  # 2
        FULL_TRACE: RunOptions._TraceLevel.ValueType  # 3

    class TraceLevel(_TraceLevel, metaclass=_TraceLevelEnumTypeWrapper):
        """TODO(pbar) Turn this into a TraceOptions proto which allows
        tracing to be controlled in a more orthogonal manner?
        """

    NO_TRACE: RunOptions.TraceLevel.ValueType  # 0
    SOFTWARE_TRACE: RunOptions.TraceLevel.ValueType  # 1
    HARDWARE_TRACE: RunOptions.TraceLevel.ValueType  # 2
    FULL_TRACE: RunOptions.TraceLevel.ValueType  # 3

    @typing.final
    class Experimental(google.protobuf.message.Message):
        """Everything inside Experimental is subject to change and is not subject
        to API stability guarantees in
        https://www.tensorflow.org/guide/version_compat.
        """

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        @typing.final
        class RunHandlerPoolOptions(google.protobuf.message.Message):
            """Options for run handler thread pool."""

            DESCRIPTOR: google.protobuf.descriptor.Descriptor

            PRIORITY_FIELD_NUMBER: builtins.int
            priority: builtins.int
            """Priority of the request. The run handler thread pool will schedule ops
            based on the priority number. The larger number means higher priority.
            """
            def __init__(self, *, priority: builtins.int | None = ...) -> None: ...
            def ClearField(self, field_name: typing.Literal["priority", b"priority"]) -> None: ...

        COLLECTIVE_GRAPH_KEY_FIELD_NUMBER: builtins.int
        USE_RUN_HANDLER_POOL_FIELD_NUMBER: builtins.int
        RUN_HANDLER_POOL_OPTIONS_FIELD_NUMBER: builtins.int
        collective_graph_key: builtins.int
        """If non-zero, declares that this graph is going to use collective
        ops and must synchronize step_ids with any other graph with this
        same group_key value (in a distributed computation where tasks
        run disjoint graphs).
        """
        use_run_handler_pool: builtins.bool
        """If true, then operations (using the inter-op pool) across all
        session::run() calls will be centrally scheduled, optimizing for (median
        and tail) latency.
        Consider using this option for CPU-bound workloads like inference.
        """
        @property
        def run_handler_pool_options(self) -> global___RunOptions.Experimental.RunHandlerPoolOptions: ...
        def __init__(
            self,
            *,
            collective_graph_key: builtins.int | None = ...,
            use_run_handler_pool: builtins.bool | None = ...,
            run_handler_pool_options: global___RunOptions.Experimental.RunHandlerPoolOptions | None = ...,
        ) -> None: ...
        def HasField(
            self, field_name: typing.Literal["run_handler_pool_options", b"run_handler_pool_options"]
        ) -> builtins.bool: ...
        def ClearField(
            self,
            field_name: typing.Literal[
                "collective_graph_key",
                b"collective_graph_key",
                "run_handler_pool_options",
                b"run_handler_pool_options",
                "use_run_handler_pool",
                b"use_run_handler_pool",
            ],
        ) -> None: ...

    TRACE_LEVEL_FIELD_NUMBER: builtins.int
    TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
    INTER_OP_THREAD_POOL_FIELD_NUMBER: builtins.int
    OUTPUT_PARTITION_GRAPHS_FIELD_NUMBER: builtins.int
    DEBUG_OPTIONS_FIELD_NUMBER: builtins.int
    REPORT_TENSOR_ALLOCATIONS_UPON_OOM_FIELD_NUMBER: builtins.int
    EXPERIMENTAL_FIELD_NUMBER: builtins.int
    trace_level: global___RunOptions.TraceLevel.ValueType
    timeout_in_ms: builtins.int
    """Time to wait for operation to complete in milliseconds."""
    inter_op_thread_pool: builtins.int
    """The thread pool to use, if session_inter_op_thread_pool is configured.
    To use the caller thread set this to -1 - this uses the caller thread
    to execute Session::Run() and thus avoids a context switch. Using the
    caller thread to execute Session::Run() should be done ONLY for simple
    graphs, where the overhead of an additional context switch is
    comparable with the overhead of Session::Run().
    """
    output_partition_graphs: builtins.bool
    """Whether the partition graph(s) executed by the executor(s) should be
    outputted via RunMetadata.
    """
    report_tensor_allocations_upon_oom: builtins.bool
    """When enabled, causes tensor allocation information to be included in
    the error message when the Run() call fails because the allocator ran
    out of memory (OOM).

    Enabling this option can slow down the Run() call.
    """
    @property
    def debug_options(self) -> tensorflow.core.protobuf.debug_pb2.DebugOptions:
        """EXPERIMENTAL. Options used to initialize DebuggerState, if enabled."""

    @property
    def experimental(self) -> global___RunOptions.Experimental: ...
    def __init__(
        self,
        *,
        trace_level: global___RunOptions.TraceLevel.ValueType | None = ...,
        timeout_in_ms: builtins.int | None = ...,
        inter_op_thread_pool: builtins.int | None = ...,
        output_partition_graphs: builtins.bool | None = ...,
        debug_options: tensorflow.core.protobuf.debug_pb2.DebugOptions | None = ...,
        report_tensor_allocations_upon_oom: builtins.bool | None = ...,
        experimental: global___RunOptions.Experimental | None = ...,
    ) -> None: ...
    def HasField(
        self, field_name: typing.Literal["debug_options", b"debug_options", "experimental", b"experimental"]
    ) -> builtins.bool: ...
    def ClearField(
        self,
        field_name: typing.Literal[
            "debug_options",
            b"debug_options",
            "experimental",
            b"experimental",
            "inter_op_thread_pool",
            b"inter_op_thread_pool",
            "output_partition_graphs",
            b"output_partition_graphs",
            "report_tensor_allocations_upon_oom",
            b"report_tensor_allocations_upon_oom",
            "timeout_in_ms",
            b"timeout_in_ms",
            "trace_level",
            b"trace_level",
        ],
    ) -> None: ...

global___RunOptions = RunOptions
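
As above, a hedged usage sketch, not part of the diff, for the `RunOptions` message; it exercises the `TraceLevel` enum and the experimental run handler pool typed in this stub, again assuming the `config_pb2` import path.

```python
# Illustrative only; assumes tensorflow.core.protobuf.config_pb2 is importable.
from tensorflow.core.protobuf import config_pb2

run_options = config_pb2.RunOptions(
    trace_level=config_pb2.RunOptions.FULL_TRACE,   # enum values are class attributes
    timeout_in_ms=60_000,                           # per-run deadline in milliseconds
    report_tensor_allocations_upon_oom=True,
    experimental=config_pb2.RunOptions.Experimental(
        use_run_handler_pool=True,
        run_handler_pool_options=config_pb2.RunOptions.Experimental.RunHandlerPoolOptions(priority=1),
    ),
)
```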

@typing.final
class RunMetadata(google.protobuf.message.Message):
    """Metadata output (i.e., non-Tensor) for a single Run() call."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing.final
    class FunctionGraphs(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        PARTITION_GRAPHS_FIELD_NUMBER: builtins.int
        PRE_OPTIMIZATION_GRAPH_FIELD_NUMBER: builtins.int
        POST_OPTIMIZATION_GRAPH_FIELD_NUMBER: builtins.int
        @property
        def partition_graphs(
            self,
        ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[tensorflow.core.framework.graph_pb2.GraphDef]:
            """TODO(nareshmodi): Include some sort of function/cache-key identifier?"""

        @property
        def pre_optimization_graph(self) -> tensorflow.core.framework.graph_pb2.GraphDef: ...
        @property
        def post_optimization_graph(self) -> tensorflow.core.framework.graph_pb2.GraphDef: ...
        def __init__(
            self,
            *,
            partition_graphs: collections.abc.Iterable[tensorflow.core.framework.graph_pb2.GraphDef] | None = ...,
            pre_optimization_graph: tensorflow.core.framework.graph_pb2.GraphDef | None = ...,
            post_optimization_graph: tensorflow.core.framework.graph_pb2.GraphDef | None = ...,
        ) -> None: ...
        def HasField(
            self,
            field_name: typing.Literal[
                "post_optimization_graph", b"post_optimization_graph", "pre_optimization_graph", b"pre_optimization_graph"
            ],
        ) -> builtins.bool: ...
        def ClearField(
            self,
            field_name: typing.Literal[
                "partition_graphs",
                b"partition_graphs",
                "post_optimization_graph",
                b"post_optimization_graph",
                "pre_optimization_graph",
                b"pre_optimization_graph",
            ],
        ) -> None: ...

    STEP_STATS_FIELD_NUMBER: builtins.int
    COST_GRAPH_FIELD_NUMBER: builtins.int
    PARTITION_GRAPHS_FIELD_NUMBER: builtins.int
    FUNCTION_GRAPHS_FIELD_NUMBER: builtins.int
    SESSION_METADATA_FIELD_NUMBER: builtins.int
    @property
    def step_stats(self) -> tensorflow.core.framework.step_stats_pb2.StepStats:
        """Statistics traced for this step. Populated if tracing is turned on via the
        "RunOptions" proto.
        EXPERIMENTAL: The format and set of events may change in future versions.
        """

    @property
    def cost_graph(self) -> tensorflow.core.framework.cost_graph_pb2.CostGraphDef:
        """The cost graph for the computation defined by the run call."""

    @property
    def partition_graphs(
        self,
    ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[tensorflow.core.framework.graph_pb2.GraphDef]:
        """Graphs of the partitions executed by executors."""

    @property
    def function_graphs(
        self,
    ) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___RunMetadata.FunctionGraphs]:
        """This is only populated for graphs that are run as functions in TensorFlow
        V2. There will be an entry below for each function that is traced.
        The main use cases of the post_optimization_graph and the partition_graphs
        is to give the caller insight into the graphs that were actually run by the
        runtime. Additional information (such as those in step_stats) will match
        these graphs.
        We also include the pre_optimization_graph since it is usually easier to
        read, and is helpful in situations where the caller wants to get a high
        level idea of what the built graph looks like (since the various graph
        optimization passes might change the structure of the graph significantly).
        """

    @property
    def session_metadata(self) -> global___SessionMetadata:
        """Metadata about the session."""

    def __init__(
        self,
        *,
        step_stats: tensorflow.core.framework.step_stats_pb2.StepStats | None = ...,
        cost_graph: tensorflow.core.framework.cost_graph_pb2.CostGraphDef | None = ...,
        partition_graphs: collections.abc.Iterable[tensorflow.core.framework.graph_pb2.GraphDef] | None = ...,
        function_graphs: collections.abc.Iterable[global___RunMetadata.FunctionGraphs] | None = ...,
        session_metadata: global___SessionMetadata | None = ...,
    ) -> None: ...
    def HasField(
        self,
        field_name: typing.Literal[
            "cost_graph", b"cost_graph", "session_metadata", b"session_metadata", "step_stats", b"step_stats"
        ],
    ) -> builtins.bool: ...
    def ClearField(
        self,
        field_name: typing.Literal[
            "cost_graph",
            b"cost_graph",
            "function_graphs",
            b"function_graphs",
            "partition_graphs",
            b"partition_graphs",
            "session_metadata",
            b"session_metadata",
            "step_stats",
            b"step_stats",
        ],
    ) -> None: ...

global___RunMetadata = RunMetadata

@typing.final
class TensorConnection(google.protobuf.message.Message):
    """Defines a connection between two tensors in a `GraphDef`."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    FROM_TENSOR_FIELD_NUMBER: builtins.int
    TO_TENSOR_FIELD_NUMBER: builtins.int
    from_tensor: builtins.str
    """A tensor name. The value of this tensor will be substituted for
    the tensor named in `to_tensor`.
    """
    to_tensor: builtins.str
    """A tensor name. The value of this tensor will be bound to the
    value of the tensor named in `from_tensor`.
    """
    def __init__(self, *, from_tensor: builtins.str | None = ..., to_tensor: builtins.str | None = ...) -> None: ...
    def ClearField(self, field_name: typing.Literal["from_tensor", b"from_tensor", "to_tensor", b"to_tensor"]) -> None: ...

global___TensorConnection = TensorConnection

@typing.final
class CallableOptions(google.protobuf.message.Message):
    """Defines a subgraph in another `GraphDef` as a set of feed points and nodes
    to be fetched or executed.

    Compare with the arguments to `Session::Run()`.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing.final
    class FeedDevicesEntry(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(self, *, key: builtins.str | None = ..., value: builtins.str | None = ...) -> None: ...
        def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ...

    @typing.final
    class FetchDevicesEntry(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(self, *, key: builtins.str | None = ..., value: builtins.str | None = ...) -> None: ...
        def ClearField(self, field_name: typing.Literal["key", b"key", "value", b"value"]) -> None: ...

    FEED_FIELD_NUMBER: builtins.int
    FETCH_FIELD_NUMBER: builtins.int
    TARGET_FIELD_NUMBER: builtins.int
    RUN_OPTIONS_FIELD_NUMBER: builtins.int
    TENSOR_CONNECTION_FIELD_NUMBER: builtins.int
    FEED_DEVICES_FIELD_NUMBER: builtins.int
    FETCH_DEVICES_FIELD_NUMBER: builtins.int
    FETCH_SKIP_SYNC_FIELD_NUMBER: builtins.int
    fetch_skip_sync: builtins.bool
    """By default, RunCallable() will synchronize the GPU stream before returning
    fetched tensors on a GPU device, to ensure that the values in those tensors
    have been produced. This simplifies interacting with the tensors, but
    potentially incurs a performance hit.

    If this options is set to true, the caller is responsible for ensuring
    that the values in the fetched tensors have been produced before they are
    used. The caller can do this by invoking `Device::Sync()` on the underlying
    device(s), or by feeding the tensors back to the same Session using
    `feed_devices` with the same corresponding device name.
    """
    @property
    def feed(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """Tensors to be fed in the callable. Each feed is the name of a tensor."""

    @property
    def fetch(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """Fetches. A list of tensor names. The caller of the callable expects a
        tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
        order of specified fetches does not change the execution order.
        """

    @property
    def target(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """Target Nodes. A list of node names. The named nodes will be run by the
        callable but their outputs will not be returned.
        """

    @property
    def run_options(self) -> global___RunOptions:
        """Options that will be applied to each run."""

    @property
    def tensor_connection(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___TensorConnection]:
        """Tensors to be connected in the callable. Each TensorConnection denotes
        a pair of tensors in the graph, between which an edge will be created
        in the callable.
        """

    @property
    def feed_devices(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
        """The Tensor objects fed in the callable and fetched from the callable
        are expected to be backed by host (CPU) memory by default.

        The options below allow changing that - feeding tensors backed by
        device memory, or returning tensors that are backed by device memory.

        The maps below map the name of a feed/fetch tensor (which appears in
        'feed' or 'fetch' fields above), to the fully qualified name of the device
        owning the memory backing the contents of the tensor.

        For example, creating a callable with the following options:

        CallableOptions {
          feed: "a:0"
          feed: "b:0"

          fetch: "x:0"
          fetch: "y:0"

          feed_devices: {
            "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
          }

          fetch_devices: {
            "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
          }
        }

        means that the Callable expects:
        - The first argument ("a:0") is a Tensor backed by GPU memory.
        - The second argument ("b:0") is a Tensor backed by host memory.
        and of its return values:
        - The first output ("x:0") will be backed by host memory.
        - The second output ("y:0") will be backed by GPU memory.

        FEEDS:
        It is the responsibility of the caller to ensure that the memory of the fed
        tensors will be correctly initialized and synchronized before it is
        accessed by operations executed during the call to Session::RunCallable().

        This is typically ensured by using the TensorFlow memory allocators
        (Device::GetAllocator()) to create the Tensor to be fed.

        Alternatively, for CUDA-enabled GPU devices, this typically means that the
        operation that produced the contents of the tensor has completed, i.e., the
        CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
        cuStreamSynchronize()).
        """

    @property
    def fetch_devices(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]: ...
    def __init__(
        self,
        *,
        feed: collections.abc.Iterable[builtins.str] | None = ...,
        fetch: collections.abc.Iterable[builtins.str] | None = ...,
        target: collections.abc.Iterable[builtins.str] | None = ...,
        run_options: global___RunOptions | None = ...,
        tensor_connection: collections.abc.Iterable[global___TensorConnection] | None = ...,
        feed_devices: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
        fetch_devices: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
        fetch_skip_sync: builtins.bool | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing.Literal["run_options", b"run_options"]) -> builtins.bool: ...
    def ClearField(
        self,
        field_name: typing.Literal[
            "feed",
            b"feed",
            "feed_devices",
            b"feed_devices",
            "fetch",
            b"fetch",
            "fetch_devices",
            b"fetch_devices",
            "fetch_skip_sync",
            b"fetch_skip_sync",
            "run_options",
            b"run_options",
            "target",
            b"target",
            "tensor_connection",
            b"tensor_connection",
        ],
    ) -> None: ...

global___CallableOptions = CallableOptions
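
Finally, a hedged sketch, not part of the diff, mirroring the `CallableOptions` example given in the `feed_devices` docstring above, expressed through the generated Python API and again assuming the `config_pb2` module path.

```python
# Illustrative only; mirrors the feed_devices docstring example above.
from tensorflow.core.protobuf import config_pb2

callable_opts = config_pb2.CallableOptions(
    feed=["a:0", "b:0"],
    fetch=["x:0", "y:0"],
    # "a:0" is fed from GPU memory; "y:0" is returned in GPU memory.
    feed_devices={"a:0": "/job:localhost/replica:0/task:0/device:GPU:0"},
    fetch_devices={"y:0": "/job:localhost/replica:0/task:0/device:GPU:0"},
)
```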