ruby_memprofiler_pprof 0.0.3 → 0.0.4
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/ext/ruby_memprofiler_pprof_ext/collector.c +803 -0
- data/ext/ruby_memprofiler_pprof_ext/compat.c +184 -0
- data/ext/ruby_memprofiler_pprof_ext/compile_commands.json +1 -0
- data/ext/ruby_memprofiler_pprof_ext/extconf.rb +152 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof.upb.c +199 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof.upb.h +924 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof_out.c +430 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_hacks.c +118 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_memprofiler_pprof.c +10 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_memprofiler_pprof.h +183 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby26/gc_private.h +324 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby27/gc_private.h +339 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby30/gc_private.h +361 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby31/gc_private.h +374 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private.h +31 -0
- data/ext/ruby_memprofiler_pprof_ext/sample.c +43 -0
- data/ext/ruby_memprofiler_pprof_ext/vendor/backtracie/backtracie.h +268 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/CONTRIBUTING.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/DESIGN.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/WORKSPACE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/amalgamate.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/lua.BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/protobuf.patch +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/py_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/python_downloads.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/system_python.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/upb_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/workspace_deps.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/BUILD.googleapis +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/benchmark.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/compare.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/descriptor.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/descriptor_sv.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/empty.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_protobuf_binary_cc.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_synthetic_protos.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_upb_binary_c.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/make_cmakelists.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/staleness_test.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/staleness_test_lib.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/render.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/style-guide.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/vs-cpp-protos.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/wrapping-upb.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/convert.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/convert.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_containers.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_containers.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_pool.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_pool.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/dist/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/dist/dist.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/extension_dict.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/extension_dict.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/map.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/map.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/message.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/message.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/minimal_test.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_database_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_pool_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/generator_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/json_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/keywords_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/message_factory_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/message_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/proto_builder_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/pyproto_test_wrapper.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/reflection_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/service_reflection_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/symbol_database_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/text_encoding_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/text_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/unknown_fields_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/well_known_types_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/wire_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/protobuf.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/protobuf.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/py_extension.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/python_api.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/repeated.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/repeated.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/unknown_fields.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/unknown_fields.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/version_script.lds +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/README.google +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/console.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/lunit.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/naive.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/range2-neon.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/range2-sse.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/utf8_range.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/def.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/lua_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/main.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/msg.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/test_upb.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upbc.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/collections.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/collections.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/conformance_upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/conformance_upb_failures.txt +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_fast.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_fast.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/empty.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/fuzz/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/fuzz/file_descriptor_parsenew_fuzzer.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_decode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_decode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/port_def.inc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/port_undef.inc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/table.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/table_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_cpp.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_cpp.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_generated_code.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_table.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/text_encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/text_encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_public_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_regular_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_weak_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_wweak_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/common.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/common.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/protoc-gen-upb.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/protoc-gen-upbdefs.cc +0 -0
- data/lib/ruby_memprofiler_pprof/atfork.rb +1 -1
- data/lib/ruby_memprofiler_pprof/block_flusher.rb +48 -4
- data/lib/ruby_memprofiler_pprof/file_flusher.rb +13 -6
- data/lib/ruby_memprofiler_pprof/profile_app.rb +8 -12
- data/lib/ruby_memprofiler_pprof/profile_data.rb +7 -8
- data/lib/ruby_memprofiler_pprof/version.rb +1 -1
- data/lib/ruby_memprofiler_pprof.rb +5 -4
- data/libexec/ruby_memprofiler_pprof_profile +6 -6
- metadata +207 -200
- data/ext/ruby_memprofiler_pprof/backtrace.c +0 -429
- data/ext/ruby_memprofiler_pprof/collector.c +0 -1055
- data/ext/ruby_memprofiler_pprof/compat.c +0 -182
- data/ext/ruby_memprofiler_pprof/extconf.rb +0 -72
- data/ext/ruby_memprofiler_pprof/pprof.upb.c +0 -170
- data/ext/ruby_memprofiler_pprof/pprof.upb.h +0 -848
- data/ext/ruby_memprofiler_pprof/pprof_out.c +0 -285
- data/ext/ruby_memprofiler_pprof/ruby_memprofiler_pprof.c +0 -11
- data/ext/ruby_memprofiler_pprof/ruby_memprofiler_pprof.h +0 -301
- data/ext/ruby_memprofiler_pprof/strtab.c +0 -391
@@ -1,1055 +0,0 @@
|
|
1
|
-
#include <pthread.h>
|
2
|
-
#include <stdbool.h>
|
3
|
-
#include <stdlib.h>
|
4
|
-
#include <string.h>
|
5
|
-
#include <time.h>
|
6
|
-
|
7
|
-
#include <ruby.h>
|
8
|
-
#include <ruby/debug.h>
|
9
|
-
#include <vm_core.h>
|
10
|
-
#include <iseq.h>
|
11
|
-
|
12
|
-
#include "ruby_memprofiler_pprof.h"
|
13
|
-
|
14
|
-
struct collector_cdata {
|
15
|
-
// Internal, cross-ractor lock for this data
|
16
|
-
pthread_mutex_t lock;
|
17
|
-
|
18
|
-
// Global variables we need to keep a hold of
|
19
|
-
VALUE cCollector;
|
20
|
-
VALUE cProfileData;
|
21
|
-
VALUE mMemprofilerPprof;
|
22
|
-
|
23
|
-
// Ruby Tracepoint objects for our hooks
|
24
|
-
VALUE newobj_trace;
|
25
|
-
VALUE freeobj_trace;
|
26
|
-
VALUE creturn_trace;
|
27
|
-
|
28
|
-
// How often (as a fraction of UINT32_MAX) we should sample allocations;
|
29
|
-
// Must be accessed through atomics
|
30
|
-
uint32_t u32_sample_rate;
|
31
|
-
// How often (as a fraction of UINT32_MAX) we should retain allocations, to profile allocations
|
32
|
-
// as well as just heap usage.
|
33
|
-
// Does _NOT_ need to be accessed through atomics.
|
34
|
-
uint32_t u32_allocation_retain_rate;
|
35
|
-
// This flag is used to make sure we detach our tracepoints as we're getting GC'd.
|
36
|
-
bool is_tracing;
|
37
|
-
|
38
|
-
// ======== Allocation samples ========
|
39
|
-
// A linked list of samples, added to each time memory is allocated and cleared when
|
40
|
-
// #flush is called.
|
41
|
-
struct mpp_sample *allocation_samples;
|
42
|
-
// Number of elements currently in the list
|
43
|
-
int64_t allocation_samples_count;
|
44
|
-
// How big the linked list can grow
|
45
|
-
int64_t max_allocation_samples;
|
46
|
-
// When objects are first allocated, we won't actually know their _real_ size; the object is not
|
47
|
-
// in a state where calling rb_obj_memsize_of() on it is well-defined. Among other things, if the
|
48
|
-
// object is T_CLASS, the ivar table won't be initialized yet, and trying to get its size will crash.
|
49
|
-
// Even if it _did_ work (which it did, in versions of Ruby before variable-sized RValues), calling
|
50
|
-
// rb_obj_memsize_of() will return sizeof(RVALUE). If e.g. a T_STRING is being allocated,
|
51
|
-
// the heap memory for that is actually only allocated _after_ the newobj tracepoint fires.
|
52
|
-
//
|
53
|
-
// To make sure we can see the "real" size of these, we add a tracepoint on CRETURN. When that hook
|
54
|
-
// fires, we check the size of all (still-live) objects recently allocated, and store _that_ as
|
55
|
-
// the allocation size. This works well for T_STRING, T_DATA, T_STRUCT's etc that are allocated
|
56
|
-
// inside C and then immediately filled; the correct memsize will be recorded on them before the
|
57
|
-
// Ruby backtrace even changes.
|
58
|
-
// This counter therefore keeps track of how many elements of *allocation_samples have yet to have
|
59
|
-
// this hook called on them.
|
60
|
-
int64_t pending_size_count;
|
61
|
-
|
62
|
-
// ======== Heap samples ========
|
63
|
-
// A hash-table keying live VALUEs to their allocation sample. This is _not_ cleared
|
64
|
-
// when #flush is called; instead, elements are deleted when they are free'd. This is
|
65
|
-
// used for building heap profiles.
|
66
|
-
st_table *heap_samples;
|
67
|
-
// Number of elements currently in the heap profile hash
|
68
|
-
int64_t heap_samples_count;
|
69
|
-
// How big the sample table can grow
|
70
|
-
int64_t max_heap_samples;
|
71
|
-
|
72
|
-
// ======== Sample drop counters ========
|
73
|
-
// These are all accessed via atomics; how else would we have a counter for how often we failed
|
74
|
-
// to acquire the lock?
|
75
|
-
//
|
76
|
-
// Number of samples dropped for want of obtaining the lock.
|
77
|
-
int64_t dropped_samples_nolock;
|
78
|
-
// Number of samples dropped for want of space in the allocation buffer.
|
79
|
-
int64_t dropped_samples_allocation_bufsize;
|
80
|
-
// Number of samples dropped for want of space in the heap allocation table.
|
81
|
-
int64_t dropped_samples_heap_bufsize;
|
82
|
-
|
83
|
-
// String interning table used to keep constant pointers to every string; this saves memory
|
84
|
-
// used in backtraces, and also helps us efficiently build up the pprof protobuf format (since that
|
85
|
-
// _requires_ that strings are interned in a string table).
|
86
|
-
struct mpp_strtab *string_tab;
|
87
|
-
// Same thing, but for backtrace locations.
|
88
|
-
struct mpp_rb_loctab *loctab;
|
89
|
-
|
90
|
-
// Which method to use for getting backtraces
|
91
|
-
int bt_method;
|
92
|
-
|
93
|
-
// This we need to know so we can at least give a non-zero size for new objects.
|
94
|
-
size_t rvalue_size;
|
95
|
-
};
|
96
|
-
|
97
|
-
// We need a global list of all collectors, so that, in our atfork handler, we can correctly lock/unlock
|
98
|
-
// all of their mutexes and guarantee correctness across forks.
|
99
|
-
static st_table *global_collectors;
|
100
|
-
static pthread_mutex_t global_collectors_lock;
|
101
|
-
|
102
|
-
static void internal_sample_decrement_refcount(struct collector_cdata *cd, struct mpp_sample *s) {
|
103
|
-
s->refcount--;
|
104
|
-
if (!s->refcount) {
|
105
|
-
mpp_rb_backtrace_destroy(cd->loctab, s->bt);
|
106
|
-
mpp_free(s);
|
107
|
-
}
|
108
|
-
}
|
109
|
-
|
110
|
-
static int collector_cdata_gc_decrement_live_object_refcounts(st_data_t key, st_data_t value, st_data_t arg) {
|
111
|
-
struct mpp_sample *s = (struct mpp_sample *)value;
|
112
|
-
struct collector_cdata *cd = (struct collector_cdata *)arg;
|
113
|
-
internal_sample_decrement_refcount(cd, s);
|
114
|
-
return ST_CONTINUE;
|
115
|
-
}
|
116
|
-
|
117
|
-
static void collector_cdata_gc_free_heap_samples(struct collector_cdata *cd) {
|
118
|
-
if (cd->heap_samples) {
|
119
|
-
st_foreach(cd->heap_samples, collector_cdata_gc_decrement_live_object_refcounts, (st_data_t)cd);
|
120
|
-
st_free_table(cd->heap_samples);
|
121
|
-
}
|
122
|
-
cd->heap_samples = NULL;
|
123
|
-
}
|
124
|
-
|
125
|
-
static void internal_sample_list_decrement_refcount(struct collector_cdata *cd, struct mpp_sample *s) {
|
126
|
-
while (s) {
|
127
|
-
struct mpp_sample *next_s = s->next_alloc;
|
128
|
-
internal_sample_decrement_refcount(cd, s);
|
129
|
-
s = next_s;
|
130
|
-
}
|
131
|
-
}
|
132
|
-
|
133
|
-
static void collector_cdata_gc_free_allocation_samples(struct collector_cdata *cd) {
|
134
|
-
internal_sample_list_decrement_refcount(cd, cd->allocation_samples);
|
135
|
-
cd->allocation_samples = NULL;
|
136
|
-
}
|
137
|
-
|
138
|
-
static int collector_cdata_gc_memsize_live_objects(st_data_t key, st_data_t value, st_data_t arg) {
|
139
|
-
size_t *acc_ptr = (size_t *)arg;
|
140
|
-
struct mpp_sample *s = (struct mpp_sample *)value;
|
141
|
-
|
142
|
-
// Only consider the live object list to be holding the backtrace, for accounting purposes, if it's
|
143
|
-
// not also in the allocation sample list.
|
144
|
-
if (s->refcount == 1) {
|
145
|
-
*acc_ptr += sizeof(*s);
|
146
|
-
*acc_ptr += mpp_rb_backtrace_memsize(s->bt);
|
147
|
-
}
|
148
|
-
return ST_CONTINUE;
|
149
|
-
}
|
150
|
-
|
151
|
-
static void collector_cdata_gc_free_loctab(struct collector_cdata *cd) {
|
152
|
-
if (cd->loctab) {
|
153
|
-
mpp_rb_loctab_destroy(cd->loctab);
|
154
|
-
}
|
155
|
-
}
|
156
|
-
|
157
|
-
static void collector_cdata_gc_free_strtab(struct collector_cdata *cd) {
|
158
|
-
if (cd->string_tab) {
|
159
|
-
mpp_strtab_destroy(cd->string_tab);
|
160
|
-
}
|
161
|
-
}
|
162
|
-
|
163
|
-
static void collector_cdata_gc_mark(void *ptr) {
|
164
|
-
struct collector_cdata *cd = (struct collector_cdata *)ptr;
|
165
|
-
rb_gc_mark_movable(cd->newobj_trace);
|
166
|
-
rb_gc_mark_movable(cd->freeobj_trace);
|
167
|
-
rb_gc_mark_movable(cd->creturn_trace);
|
168
|
-
rb_gc_mark_movable(cd->mMemprofilerPprof);
|
169
|
-
rb_gc_mark_movable(cd->cCollector);
|
170
|
-
rb_gc_mark_movable(cd->cProfileData);
|
171
|
-
}
|
172
|
-
|
173
|
-
static void collector_cdata_gc_free(void *ptr) {
|
174
|
-
struct collector_cdata *cd = (struct collector_cdata *)ptr;
|
175
|
-
if (cd->is_tracing) {
|
176
|
-
if (cd->newobj_trace) {
|
177
|
-
rb_tracepoint_disable(cd->newobj_trace);
|
178
|
-
}
|
179
|
-
if (cd->freeobj_trace) {
|
180
|
-
rb_tracepoint_disable(cd->freeobj_trace);
|
181
|
-
}
|
182
|
-
}
|
183
|
-
|
184
|
-
// Needed in case there are any in-flight tracepoints we just disabled above.
|
185
|
-
mpp_pthread_mutex_lock(&cd->lock);
|
186
|
-
|
187
|
-
collector_cdata_gc_free_heap_samples(cd);
|
188
|
-
collector_cdata_gc_free_allocation_samples(cd);
|
189
|
-
collector_cdata_gc_free_loctab(cd);
|
190
|
-
collector_cdata_gc_free_strtab(cd);
|
191
|
-
|
192
|
-
// Remove from global collectors list.
|
193
|
-
mpp_pthread_mutex_lock(&global_collectors_lock);
|
194
|
-
st_data_t cd_key = (st_data_t)cd;
|
195
|
-
st_delete(global_collectors, &cd_key, NULL);
|
196
|
-
mpp_pthread_mutex_unlock(&global_collectors_lock);
|
197
|
-
|
198
|
-
mpp_pthread_mutex_unlock(&cd->lock);
|
199
|
-
mpp_pthread_mutex_destroy(&cd->lock);
|
200
|
-
|
201
|
-
ruby_xfree(ptr);
|
202
|
-
}
|
203
|
-
|
204
|
-
static size_t collector_cdata_memsize(const void *ptr) {
|
205
|
-
struct collector_cdata *cd = (struct collector_cdata *)ptr;
|
206
|
-
size_t sz = sizeof(*cd);
|
207
|
-
if (cd->heap_samples) {
|
208
|
-
st_foreach(cd->heap_samples, collector_cdata_gc_memsize_live_objects, (st_data_t)&sz);
|
209
|
-
sz += st_memsize(cd->heap_samples);
|
210
|
-
}
|
211
|
-
if (cd->string_tab) {
|
212
|
-
sz += mpp_strtab_memsize(cd->string_tab);
|
213
|
-
}
|
214
|
-
if (cd->loctab) {
|
215
|
-
sz += mpp_rb_loctab_memsize(cd->loctab);
|
216
|
-
}
|
217
|
-
struct mpp_sample *s = cd->allocation_samples;
|
218
|
-
while (s) {
|
219
|
-
sz += sizeof(*s);
|
220
|
-
sz += mpp_rb_backtrace_memsize(s->bt);
|
221
|
-
s = s->next_alloc;
|
222
|
-
}
|
223
|
-
|
224
|
-
return sz;
|
225
|
-
}
|
226
|
-
|
227
|
-
#ifdef HAVE_RB_GC_MARK_MOVABLE
|
228
|
-
// Support VALUES we're tracking being moved away in Ruby 2.7+ with GC.compact
|
229
|
-
// st_foreach callback used during GC compaction: re-keys a heap sample whose
// tracked VALUE was moved. If the object did not move we keep the entry;
// otherwise we insert the sample under its new address and delete the stale
// key via ST_DELETE.
static int collector_move_each_live_object(st_data_t key, st_data_t value, st_data_t arg) {
    struct collector_cdata *cd = (struct collector_cdata *)arg;
    struct mpp_sample *sample = (struct mpp_sample *)value;

    VALUE new_location = rb_gc_location(sample->allocated_value_weak);
    if (new_location == sample->allocated_value_weak) {
        return ST_CONTINUE;
    }

    sample->allocated_value_weak = new_location;
    st_insert(cd->heap_samples, new_location, (st_data_t)sample);
    return ST_DELETE;
}
|
241
|
-
|
242
|
-
// dcompact callback (Ruby 2.7+, GC.compact): re-resolves every VALUE we hold,
// since compaction may have moved the underlying objects.
static void collector_cdata_gc_compact(void *ptr) {
    struct collector_cdata *cd = (struct collector_cdata *)ptr;
    cd->newobj_trace = rb_gc_location(cd->newobj_trace);
    cd->freeobj_trace = rb_gc_location(cd->freeobj_trace);
    cd->creturn_trace = rb_gc_location(cd->creturn_trace);
    cd->mMemprofilerPprof = rb_gc_location(cd->mMemprofilerPprof);
    cd->cCollector = rb_gc_location(cd->cCollector);
    cd->cProfileData = rb_gc_location(cd->cProfileData);
    // heap_samples is keyed by the tracked VALUEs themselves, so moved objects
    // must be re-keyed (handled by collector_move_each_live_object).
    st_foreach(cd->heap_samples, collector_move_each_live_object, (st_data_t)cd);
}
|
252
|
-
#endif
|
253
|
-
|
254
|
-
// TypedData descriptor wiring our GC mark/free/memsize (and, where supported,
// compact) callbacks into the Collector object.
static const rb_data_type_t collector_cdata_type = {
    "collector_cdata",
    {
        collector_cdata_gc_mark, collector_cdata_gc_free, collector_cdata_memsize,
#ifdef HAVE_RB_GC_MARK_MOVABLE
        // dcompact slot only exists on Rubies that support GC.compact.
        collector_cdata_gc_compact,
#endif
        { 0 }, /* reserved */
    },
    /* parent, data, [ flags ] */
    NULL, NULL, 0
};
|
266
|
-
|
267
|
-
// Extracts (with type checking) the collector_cdata struct wrapped by a
// Collector VALUE.
static struct collector_cdata *collector_cdata_get(VALUE self) {
    struct collector_cdata *cd;
    TypedData_Get_Struct(self, struct collector_cdata, &collector_cdata_type, cd);
    return cd;
}
|
272
|
-
|
273
|
-
// TypedData allocator for MemprofilerPprof::Collector: zeroes all fields,
// creates the collector's recursive mutex, and registers the collector in the
// global list consulted by the pthread_atfork handlers. Ruby-level setup
// (kwargs, tables) happens later in #initialize.
static VALUE collector_alloc(VALUE klass) {
    struct collector_cdata *cd;
    VALUE v = TypedData_Make_Struct(klass, struct collector_cdata, &collector_cdata_type, cd);

    // Tracepoints are created lazily on first start!.
    cd->newobj_trace = Qnil;
    cd->freeobj_trace = Qnil;
    cd->creturn_trace = Qnil;

    // Stored atomically so the newobj hook can read it without taking the lock.
    __atomic_store_n(&cd->u32_sample_rate, 0, __ATOMIC_SEQ_CST);
    cd->is_tracing = false;

    cd->allocation_samples = NULL;
    cd->allocation_samples_count = 0;
    cd->max_allocation_samples = 0;
    cd->pending_size_count = 0;

    cd->heap_samples = NULL;
    cd->heap_samples_count = 0;
    cd->max_heap_samples = 0;

    // Dropped-sample counters are atomics: the hooks bump them lock-free.
    __atomic_store_n(&cd->dropped_samples_allocation_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_heap_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_nolock, 0, __ATOMIC_SEQ_CST);

    cd->string_tab = NULL;
    cd->loctab = NULL;

    // Initialize the mutex.
    // It really does need to be recursive - if we call a rb_* function while holding
    // the lock, that could trigger the GC to run and call our freeobj tracepoint,
    // which _also_ needs the lock.
    pthread_mutexattr_t mutex_attr;
    mpp_pthread_mutexattr_init(&mutex_attr);
    mpp_pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    mpp_pthread_mutex_init(&cd->lock, &mutex_attr);
    mpp_pthread_mutexattr_destroy(&mutex_attr);

    // Add us to the global list of collectors, to handle pthread_atfork.
    mpp_pthread_mutex_lock(&global_collectors_lock);
    st_insert(global_collectors, (st_data_t)cd, (st_data_t)cd);
    mpp_pthread_mutex_unlock(&global_collectors_lock);
    return v;
}
|
316
|
-
|
317
|
-
// Argument bundle passed through rb_protect into collector_initialize_protected.
struct initialize_protected_args {
    int argc;                   // argument count forwarded from collector_initialize
    VALUE *argv;                // argument vector forwarded from collector_initialize
    VALUE self;                 // the Collector instance being initialized
    struct collector_cdata *cd; // its native data (caller already holds cd->lock)
};
|
323
|
-
|
324
|
-
// Body of Collector#initialize, run under rb_protect with cd->lock held.
// Caches the module/class constants we need later, parses keyword arguments
// (applying defaults), and allocates the string/location tables.
static VALUE collector_initialize_protected(VALUE vargs) {
    struct initialize_protected_args *args = (struct initialize_protected_args *)vargs;
    struct collector_cdata *cd = args->cd;

    // Save constants
    cd->mMemprofilerPprof = rb_const_get(rb_cObject, rb_intern("MemprofilerPprof"));
    cd->cCollector = rb_const_get(cd->mMemprofilerPprof, rb_intern("Collector"));
    cd->cProfileData = rb_const_get(cd->mMemprofilerPprof, rb_intern("ProfileData"));

    // Argument parsing
    VALUE kwargs_hash = Qnil;
    rb_scan_args_kw(RB_SCAN_ARGS_LAST_HASH_KEYWORDS, args->argc, args->argv, "00:", &kwargs_hash);
    VALUE kwarg_values[5];
    ID kwarg_ids[5];
    kwarg_ids[0] = rb_intern("sample_rate");
    kwarg_ids[1] = rb_intern("max_allocation_samples");
    kwarg_ids[2] = rb_intern("max_heap_samples");
    kwarg_ids[3] = rb_intern("bt_method");
    kwarg_ids[4] = rb_intern("allocation_retain_rate");
    rb_get_kwargs(kwargs_hash, kwarg_ids, 0, 5, kwarg_values);

    // Default values...
    if (kwarg_values[0] == Qundef) kwarg_values[0] = DBL2NUM(0.01);
    if (kwarg_values[1] == Qundef) kwarg_values[1] = LONG2NUM(10000);
    if (kwarg_values[2] == Qundef) kwarg_values[2] = LONG2NUM(50000);
    if (kwarg_values[3] == Qundef) kwarg_values[3] = rb_id2sym(rb_intern("cfp"));
    if (kwarg_values[4] == Qundef) kwarg_values[4] = DBL2NUM(1);

    // Route each kwarg through its Ruby writer so conversion/validation lives
    // in one place (the attr= methods defined on this class).
    rb_funcall(args->self, rb_intern("sample_rate="), 1, kwarg_values[0]);
    rb_funcall(args->self, rb_intern("max_allocation_samples="), 1, kwarg_values[1]);
    rb_funcall(args->self, rb_intern("max_heap_samples="), 1, kwarg_values[2]);
    rb_funcall(args->self, rb_intern("bt_method="), 1, kwarg_values[3]);
    rb_funcall(args->self, rb_intern("allocation_retain_rate="), 1, kwarg_values[4]);

    cd->string_tab = mpp_strtab_new();
    cd->loctab = mpp_rb_loctab_new(cd->string_tab);
    cd->allocation_samples = NULL;
    cd->allocation_samples_count = 0;
    cd->pending_size_count = 0;
    cd->heap_samples = st_init_numtable();
    cd->heap_samples_count = 0;

    // Cache the GC slot size so the newobj hook needn't look it up per allocation.
    VALUE internal_constants = rb_const_get(rb_mGC, rb_intern("INTERNAL_CONSTANTS"));
    cd->rvalue_size = NUM2LONG(rb_hash_aref(internal_constants, rb_id2sym(rb_intern("RVALUE_SIZE"))));

    return Qnil;
}
|
371
|
-
|
372
|
-
// Collector#initialize entry point. All access to collector_cdata must happen
// under the lock, but initialization runs Ruby code that can raise, so the
// body is wrapped in rb_protect and any exception is re-raised only after the
// lock has been released.
static VALUE collector_initialize(int argc, VALUE *argv, VALUE self) {
    // Need to do this rb_protect dance to ensure that all access to collector_cdata is through the mutex.
    struct initialize_protected_args args;
    args.argc = argc;
    args.argv = argv;
    args.self = self;
    args.cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&args.cd->lock);
    int jump_tag = 0;
    VALUE r = rb_protect(collector_initialize_protected, (VALUE)&args, &jump_tag);
    mpp_pthread_mutex_unlock(&args.cd->lock);
    // Re-raise (outside the lock) whatever the protected body threw.
    if (jump_tag) rb_jump_tag(jump_tag);
    return r;
}
|
387
|
-
|
388
|
-
// Collector#sample_rate: returns the sampling probability as a Float in
// [0, 1]. Internally the rate is kept as a uint32 fraction of UINT32_MAX so
// the newobj hook can read it atomically without the lock.
static VALUE collector_get_sample_rate(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    uint32_t rate_fixed = __atomic_load_n(&cd->u32_sample_rate, __ATOMIC_SEQ_CST);
    double rate = (double)rate_fixed / UINT32_MAX;
    return DBL2NUM(rate);
}
|
393
|
-
|
394
|
-
// Collector#sample_rate=: sets the sampling probability (a Float expected in
// [0, 1]). Stored atomically as a uint32 fraction of UINT32_MAX for lock-free
// reads in the newobj hook. Returns newval.
static VALUE collector_set_sample_rate(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    double dbl_sample_rate = NUM2DBL(newval);
    // Clamp to [0, 1]: converting an out-of-range (or negative) double to
    // uint32_t is undefined behavior in C, so an errant caller passing e.g.
    // 1.5 must not reach the conversion below.
    if (dbl_sample_rate < 0.0) dbl_sample_rate = 0.0;
    if (dbl_sample_rate > 1.0) dbl_sample_rate = 1.0;
    // Convert the double sample rate (between 0 and 1) to a value between 0 and UINT32_MAX
    uint32_t new_sample_rate_uint = (uint32_t)(UINT32_MAX * dbl_sample_rate);
    __atomic_store_n(&cd->u32_sample_rate, new_sample_rate_uint, __ATOMIC_SEQ_CST);
    return newval;
}
|
402
|
-
|
403
|
-
// Collector#allocation_retain_rate: returns the fraction of allocation
// samples retained at c-return time, as a Float in [0, 1]. Read under the
// lock (unlike sample_rate, this field is not atomic).
static VALUE collector_get_allocation_retain_rate(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    uint32_t fixed_point = cd->u32_allocation_retain_rate;
    mpp_pthread_mutex_unlock(&cd->lock);

    return DBL2NUM(fixed_point / (double)UINT32_MAX);
}
|
410
|
-
|
411
|
-
// Collector#allocation_retain_rate=: sets the retain probability (a Float
// expected in [0, 1]), stored as a uint32 fraction of UINT32_MAX. Returns
// newval.
static VALUE collector_set_allocation_retain_rate(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    double dbl_retain_rate = NUM2DBL(newval);
    // Clamp to [0, 1] before converting: double -> uint32_t conversion of an
    // out-of-range or negative value is undefined behavior in C.
    if (dbl_retain_rate < 0.0) dbl_retain_rate = 0.0;
    if (dbl_retain_rate > 1.0) dbl_retain_rate = 1.0;
    uint32_t retain_rate_u32 = (uint32_t)(UINT32_MAX * dbl_retain_rate);
    mpp_pthread_mutex_lock(&cd->lock);
    cd->u32_allocation_retain_rate = retain_rate_u32;
    mpp_pthread_mutex_unlock(&cd->lock);
    return newval;
}
|
419
|
-
|
420
|
-
// Collector#max_allocation_samples: upper bound on buffered allocation
// samples, read under the lock.
static VALUE collector_get_max_allocation_samples(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    int64_t current_max = cd->max_allocation_samples;
    mpp_pthread_mutex_unlock(&cd->lock);

    return LONG2NUM(current_max);
}
|
427
|
-
|
428
|
-
// Collector#max_allocation_samples=: sets the allocation-sample buffer cap.
// Conversion happens before the lock is taken so NUM2LONG cannot raise while
// the mutex is held.
static VALUE collector_set_max_allocation_samples(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    int64_t new_max = NUM2LONG(newval);

    mpp_pthread_mutex_lock(&cd->lock);
    cd->max_allocation_samples = new_max;
    mpp_pthread_mutex_unlock(&cd->lock);

    return newval;
}
|
436
|
-
|
437
|
-
// Collector#max_heap_samples: upper bound on tracked live-object samples,
// read under the lock.
static VALUE collector_get_max_heap_samples(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    int64_t current_max = cd->max_heap_samples;
    mpp_pthread_mutex_unlock(&cd->lock);

    return LONG2NUM(current_max);
}
|
444
|
-
|
445
|
-
// Collector#max_heap_samples=: sets the live-object sample cap. Conversion
// happens before locking so NUM2LONG cannot raise with the mutex held.
static VALUE collector_set_max_heap_samples(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    int64_t new_max = NUM2LONG(newval);

    mpp_pthread_mutex_lock(&cd->lock);
    cd->max_heap_samples = new_max;
    mpp_pthread_mutex_unlock(&cd->lock);

    return newval;
}
|
453
|
-
|
454
|
-
// Removes freed_obj from the heap-samples (live objects) table, if present,
// clearing the sample's weak reference and dropping one refcount (the sample
// is only destroyed once the allocation list also releases it).
// Caller must hold cd->lock.
static void collector_mark_sample_as_freed(struct collector_cdata *cd, VALUE freed_obj) {
    struct mpp_sample *sample;
    if (st_delete(cd->heap_samples, (st_data_t *)&freed_obj, (st_data_t *)&sample)) {
        // Clear out the reference to it
        sample->allocated_value_weak = Qundef;
        // We deleted it out of live objects; decrement its refcount.
        internal_sample_decrement_refcount(cd, sample);
        cd->heap_samples_count--;
    }
}
|
464
|
-
|
465
|
-
|
466
|
-
// In/out argument bundle for collector_tphook_newobj_protected (run under
// rb_protect from the newobj hook).
struct newobj_impl_args {
    struct collector_cdata *cd;  // collector this hook fires for
    struct mpp_rb_backtrace *bt; // out: captured backtrace (NULL until captured)
    VALUE tpval;                 // the tracepoint VALUE
    VALUE newobj;                // the freshly allocated object being sampled
    size_t allocation_size;      // out: initial size attributed to the allocation
};
|
473
|
-
|
474
|
-
// Collects all the parts of collector_tphook_newobj that could throw.
|
475
|
-
// Collects all the parts of collector_tphook_newobj that could throw:
// captures a backtrace via the configured method and records the initial
// allocation size (one GC slot).
static VALUE collector_tphook_newobj_protected(VALUE args_as_uintptr) {
    struct newobj_impl_args *args = (struct newobj_impl_args *)args_as_uintptr;
    struct collector_cdata *cd = args->cd;

    switch (cd->bt_method) {
    case MPP_BT_METHOD_CFP:
        mpp_rb_backtrace_capture(cd->loctab, &args->bt);
        break;
    case MPP_BT_METHOD_SLOWRB:
        mpp_rb_backtrace_capture_slowrb(cd->loctab, &args->bt);
        break;
    default:
        MPP_ASSERT_FAIL("unknown bt_method");
    }

    args->allocation_size = cd->rvalue_size;
    return Qnil;
}
|
488
|
-
|
489
|
-
// NEWOBJ tracepoint hook: fires on every Ruby allocation. Always reconciles
// slot reuse against the heap-sample table, then (with probability
// sample_rate) captures a backtrace and records the allocation in both the
// allocation list and the live-object table. All throwing work runs under
// rb_protect so this hook can never longjmp out with the lock held.
static void collector_tphook_newobj(VALUE tpval, void *data) {
    struct collector_cdata *cd = (struct collector_cdata *)data;
    struct newobj_impl_args args;
    args.cd = cd;
    args.tpval = tpval;
    args.bt = NULL;
    rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
    args.newobj = rb_tracearg_object(tparg);
    int jump_tag = 0;
    VALUE original_errinfo = Qundef;

    mpp_pthread_mutex_lock(&cd->lock);

    // For every new object that is created, we _MUST_ check if there is already another VALUE with the same,
    // well, value, in our heap profiling table of live objects. This is because Ruby reserves the right to
    // simply free some kinds of internal objects (such as T_IMEMOs) by simply setting the flags value on it
    // to zero, without invoking the GC and without calling any kind of hook. So, we need to detect when such
    // an object is freed and then the RVALUE is re-used for a new object to track it appropriately.
    collector_mark_sample_as_freed(cd, args.newobj);

    // Skip the rest of this method if we're not sampling.
    uint32_t sample_rate = __atomic_load_n(&cd->u32_sample_rate, __ATOMIC_SEQ_CST);
    if (mpp_rand() > sample_rate) {
        goto out;
    }

    // Make sure there's enough space in our buffers.
    if (cd->allocation_samples_count >= cd->max_allocation_samples) {
        __atomic_add_fetch(&cd->dropped_samples_allocation_bufsize, 1, __ATOMIC_SEQ_CST);
        goto out;
    }
    if (cd->heap_samples_count >= cd->max_heap_samples) {
        __atomic_add_fetch(&cd->dropped_samples_heap_bufsize, 1, __ATOMIC_SEQ_CST);
        goto out;
    }

    // OK - run our code in here under rb_protect now so that it cannot longjmp out
    original_errinfo = rb_errinfo();
    rb_protect(collector_tphook_newobj_protected, (VALUE)&args, &jump_tag);
    if (jump_tag) goto out;

    // This looks super redundant, _BUT_ there is a narrow possibility that some of the code we invoke
    // inside the rb_protect actually does RVALUE allocations itself, and so recursively runs this hook
    // (which will work, because the &cd->lock mutex is recursive). So, we need to actually check
    // our buffer sizes _again_.
    if (cd->allocation_samples_count >= cd->max_allocation_samples) {
        __atomic_add_fetch(&cd->dropped_samples_allocation_bufsize, 1, __ATOMIC_SEQ_CST);
        goto out;
    }
    if (cd->heap_samples_count >= cd->max_heap_samples) {
        __atomic_add_fetch(&cd->dropped_samples_heap_bufsize, 1, __ATOMIC_SEQ_CST);
        goto out;
    }

    // OK, now it's time to add to our sample buffers.
    struct mpp_sample *sample = mpp_xmalloc(sizeof(struct mpp_sample));
    // Set the sample refcount to two. Once because it's going in the allocation sampling buffer,
    // and once because it's going in the heap profiling set.
    sample->refcount = 2;
    sample->bt = args.bt;
    sample->allocation_size = args.allocation_size;
    sample->current_size = args.allocation_size;
    sample->allocated_value_weak = args.newobj;

    // Insert into allocation profiling list.
    sample->next_alloc = cd->allocation_samples;
    cd->allocation_samples = sample;
    cd->allocation_samples_count++;
    cd->pending_size_count++;

    // Also insert into live object list
    st_insert(cd->heap_samples, args.newobj, (st_data_t)sample);
    cd->heap_samples_count++;

    // Clear args.bt so it doesn't get free'd below.
    args.bt = NULL;

out:
    // If this wasn't cleared, we need to free it.
    if (args.bt) mpp_rb_backtrace_destroy(cd->loctab, args.bt);
    // If there was an exception, ignore it and restore the original errinfo.
    if (jump_tag && original_errinfo != Qundef) rb_set_errinfo(original_errinfo);

    mpp_pthread_mutex_unlock(&cd->lock);
}
|
574
|
-
|
575
|
-
// FREEOBJ tracepoint hook: when an object we sampled is freed, drop it from
// the live-objects table so it no longer counts as retained.
static void collector_tphook_freeobj(VALUE tpval, void *data) {
    struct collector_cdata *cd = (struct collector_cdata *)data;

    // The lock really is required here; without it, a freed VALUE could be
    // left dangling in the live-objects table.
    mpp_pthread_mutex_lock(&cd->lock);

    // Definitely do _NOT_ run any Ruby code in here - any allocation inside a
    // freeobj hook will crash the process.
    VALUE freed_obj = rb_tracearg_object(rb_tracearg_from_tracepoint(tpval));
    collector_mark_sample_as_freed(cd, freed_obj);

    mpp_pthread_mutex_unlock(&cd->lock);
}
|
590
|
-
|
591
|
-
// Runs under rb_protect at c-return time: walks the newest
// pending_size_count entries of the allocation list, replacing their
// slot-sized estimate with rb_obj_memsize_of (now safe to call), and
// probabilistically drops samples per u32_allocation_retain_rate.
// Caller must hold cd->lock; caller resets pending_size_count afterwards.
static VALUE collector_tphook_creturn_protected(VALUE cdataptr) {
    struct collector_cdata *cd = (struct collector_cdata *)cdataptr;

    struct mpp_sample *s = cd->allocation_samples;
    // prev_slot points at the link that refers to s, so s can be unlinked.
    struct mpp_sample **prev_slot = &cd->allocation_samples;
    for (int64_t i = 0; i < cd->pending_size_count; i++) {
        MPP_ASSERT_MSG(s, "More pending size samples than samples in linked list??");
        // Ruby apparently has the right to free stuff that's used internally (like T_IMEMOs)
        // _without_ invoking the garbage collector (and thus, _without_ invoking our hook). When
        // it does that, it will set flags of the RVALUE to zero, which indicates that the object
        // is now free.
        // Detect this and consider it the same as free'ing an object. Otherwise, we might try and
        // memsize() it, which will cause an rb_bug to trigger
        if (RB_TYPE_P(s->allocated_value_weak, T_NONE)) {
            collector_mark_sample_as_freed(cd, s->allocated_value_weak);
            s->allocated_value_weak = Qundef;
        }
        if (s->allocated_value_weak != Qundef) {
            s->allocation_size = rb_obj_memsize_of(s->allocated_value_weak);
            s->current_size = s->allocation_size;
        }

        if (mpp_rand() > cd->u32_allocation_retain_rate) {
            // Drop this sample out of the allocation sample list. We've been asked to drop a certain
            // percentage of things out of this list, so we don't OOM with piles of short-lived objects.
            *prev_slot = s->next_alloc;

            // Annoying little dance here so we don't read s->next_alloc after freeing s.
            struct mpp_sample *next_s = s->next_alloc;
            internal_sample_decrement_refcount(cd, s);
            s = next_s;

            cd->allocation_samples_count--;
        } else {
            prev_slot = &s->next_alloc;
            s = s->next_alloc;
        }
    }
    return Qnil;
}
|
631
|
-
|
632
|
-
// C_RETURN tracepoint hook: drains pending size measurements. Uses trylock
// so it never blocks; if the lock is contended, the work simply happens at a
// later c-return. Exceptions from the protected body are swallowed and the
// previous errinfo restored.
static void collector_tphook_creturn(VALUE tpval, void *data) {
    struct collector_cdata *cd = (struct collector_cdata *)data;
    int jump_tag = 0;
    VALUE original_errinfo;
    // If we can't get the lock this time round, we can just do it later.
    if (mpp_pthread_mutex_trylock(&cd->lock) != 0) {
        return;
    }
    if (cd->pending_size_count == 0) goto out;

    original_errinfo = rb_errinfo();
    rb_protect(collector_tphook_creturn_protected, (VALUE)cd, &jump_tag);
    // Processed (or abandoned via exception) - either way, nothing is pending now.
    cd->pending_size_count = 0;
    if (jump_tag) {
        rb_set_errinfo(original_errinfo);
    }

out:
    mpp_pthread_mutex_unlock(&cd->lock);
}
|
652
|
-
|
653
|
-
// Throwing portion of start!: lazily constructs the three tracepoints (only
// on the first start) and enables them. Runs under rb_protect from
// collector_start with cd->lock held.
static VALUE collector_start_protected(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    if (cd->newobj_trace == Qnil) {
        cd->newobj_trace =
            rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, collector_tphook_newobj, cd);
    }
    if (cd->freeobj_trace == Qnil) {
        cd->freeobj_trace =
            rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_FREEOBJ, collector_tphook_freeobj, cd);
    }
    if (cd->creturn_trace == Qnil) {
        cd->creturn_trace =
            rb_tracepoint_new(0, RUBY_EVENT_C_RETURN, collector_tphook_creturn, cd);
    }

    rb_tracepoint_enable(cd->newobj_trace);
    rb_tracepoint_enable(cd->freeobj_trace);
    rb_tracepoint_enable(cd->creturn_trace);
    return Qnil;
}
|
677
|
-
|
678
|
-
// Collector#start!: clears buffers left over from any previous run, marks the
// collector as tracing, resets dropped-sample counters, and enables the
// tracepoints (under rb_protect so the lock is always released before any
// re-raise). No-op if already tracing.
static VALUE collector_start(VALUE self) {
    int jump_tag = 0;
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    if (cd->is_tracing) goto out;

    // Don't needlessly double-initialize everything
    if (cd->heap_samples_count > 0) {
        collector_cdata_gc_free_heap_samples(cd);
        cd->heap_samples = st_init_numtable();
        cd->heap_samples_count = 0;
    }
    if (cd->allocation_samples_count > 0) {
        collector_cdata_gc_free_allocation_samples(cd);
        cd->allocation_samples = NULL;
        cd->allocation_samples_count = 0;
        cd->pending_size_count = 0;
    }
    cd->is_tracing = true;
    __atomic_store_n(&cd->dropped_samples_allocation_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_heap_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_nolock, 0, __ATOMIC_SEQ_CST);

    // Now do the things that might throw
    rb_protect(collector_start_protected, self, &jump_tag);

out:
    mpp_pthread_mutex_unlock(&cd->lock);
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    return Qnil;
}
|
711
|
-
|
712
|
-
// Throwing portion of stop!: disables all three tracepoints. Runs under
// rb_protect from collector_stop.
static VALUE collector_stop_protected(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    VALUE traces[] = { cd->newobj_trace, cd->freeobj_trace, cd->creturn_trace };
    for (size_t i = 0; i < sizeof(traces) / sizeof(traces[0]); i++) {
        rb_tracepoint_disable(traces[i]);
    }
    return Qnil;
}
|
719
|
-
|
720
|
-
// Collector#stop!: disables the tracepoints (under rb_protect) and clears the
// tracing flag. Sample buffers are deliberately preserved so flush still
// works after stopping. No-op if not tracing.
static VALUE collector_stop(VALUE self) {
    int jump_tag = 0;
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    if (!cd->is_tracing) goto out;

    rb_protect(collector_stop_protected, self, &jump_tag);
    if (jump_tag) goto out;

    cd->is_tracing = false;
    // Don't clear any of our buffers - it's OK to access the profiling info after calling stop!
out:
    mpp_pthread_mutex_unlock(&cd->lock);
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    return Qnil;
}
|
738
|
-
|
739
|
-
// Collector#running?: reports whether the tracepoints are currently enabled.
static VALUE collector_is_running(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    bool tracing_now = cd->is_tracing;
    mpp_pthread_mutex_unlock(&cd->lock);

    if (tracing_now) {
        return Qtrue;
    }
    return Qfalse;
}
|
746
|
-
|
747
|
-
// st_foreach callback run during flush: refreshes each live sample's
// current_size via rb_obj_memsize_of, deleting entries whose object Ruby
// freed internally without firing our freeobj hook.
static int collector_heap_samples_each_calc_size(st_data_t key, st_data_t val, st_data_t arg) {
    struct mpp_sample *sample = (struct mpp_sample *)val;
    struct collector_cdata *cd = (struct collector_cdata *)arg;
    MPP_ASSERT_MSG(sample->allocated_value_weak != Qundef, "undef was in heap sample map");

    // Check that the sample is, in fact, still live. This can happen if an object is freed internally
    // by Ruby without firing our freeobj hook (which Ruby is allowed to do for some kinds of objects).
    // In that case, flags will be zero and so type will be T_NONE.
    // Note that if an object is freed and then the slot is subsequently re-used for a different object,
    // our newobj hook will fire in that case and do this too. So this method captures the sequence
    // allocate -> free -> flush, but the newobj hook handles the allocate -> free -> reuse -> flush case.
    if (RB_TYPE_P(sample->allocated_value_weak, T_NONE)) {
        sample->allocated_value_weak = Qundef;
        internal_sample_decrement_refcount(cd, sample);
        cd->heap_samples_count--;
        return ST_DELETE;
    }

    sample->current_size = rb_obj_memsize_of(sample->allocated_value_weak);
    return ST_CONTINUE;
}
|
768
|
-
|
769
|
-
// Context threaded through collector_heap_samples_each_add via st_foreach.
struct collector_heap_samples_each_add_args {
    struct mpp_pprof_serctx *serctx; // serialization context samples are added to
    char *errbuf;                    // error-message buffer (borrowed from caller)
    size_t errbuf_len;
    int r;                           // out: first failing add_sample result, else 0
};
|
775
|
-
|
776
|
-
// st_foreach callback: adds one heap sample to the pprof serialization
// context, stopping iteration (and recording the error code in args->r) on
// the first failure.
static int collector_heap_samples_each_add(st_data_t key, st_data_t val, st_data_t arg) {
    struct collector_heap_samples_each_add_args *args =
        (struct collector_heap_samples_each_add_args *)arg;
    struct mpp_sample *sample = (struct mpp_sample *)val;

    int ret = mpp_pprof_serctx_add_sample(args->serctx, sample, MPP_SAMPLE_TYPE_HEAP, args->errbuf, args->errbuf_len);
    if (ret == 0) {
        return ST_CONTINUE;
    }
    args->r = ret;
    return ST_STOP;
}
|
787
|
-
|
788
|
-
// Everything collector_flush_prepresult needs to build the ProfileData
// return object under rb_protect.
struct collector_flush_prepresult_args {
    const char *pprofbuf;  // serialized pprof protobuf (owned by the serctx)
    size_t pprofbuf_len;
    VALUE cProfileData;    // MemprofilerPprof::ProfileData class

    // Extra stuff that needs to go onto the struct.
    int64_t allocation_samples_count;
    int64_t heap_samples_count;
    int64_t dropped_samples_nolock;
    int64_t dropped_samples_allocation_bufsize;
    int64_t dropped_samples_heap_bufsize;
};
|
800
|
-
|
801
|
-
// rb_protect-able wrapper: refreshes current_size for every live heap sample
// (rb_obj_memsize_of inside the callback can raise, hence the protection).
static VALUE collector_flush_protected_heap_sample_size(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    st_foreach(cd->heap_samples, collector_heap_samples_each_calc_size, (st_data_t)cd);
    return Qnil;
}
|
806
|
-
|
807
|
-
// Builds the MemprofilerPprof::ProfileData result: copies the serialized
// pprof bytes into a Ruby String and sets the counter attributes. Run under
// rb_protect because rb_str_new / rb_funcall can raise.
static VALUE collector_flush_prepresult(VALUE vargs) {
    struct collector_flush_prepresult_args *args =
        (struct collector_flush_prepresult_args *)vargs;

    VALUE pprof_data = rb_str_new(args->pprofbuf, args->pprofbuf_len);
    VALUE profile_data = rb_class_new_instance(0, NULL, args->cProfileData);
    rb_funcall(profile_data, rb_intern("pprof_data="), 1, pprof_data);
    rb_funcall(profile_data, rb_intern("allocation_samples_count="), 1, LONG2NUM(args->allocation_samples_count));
    rb_funcall(profile_data, rb_intern("heap_samples_count="), 1, LONG2NUM(args->heap_samples_count));
    rb_funcall(profile_data, rb_intern("dropped_samples_nolock="), 1, LONG2NUM(args->dropped_samples_nolock));
    rb_funcall(
        profile_data, rb_intern("dropped_samples_allocation_bufsize="),
        1, LONG2NUM(args->dropped_samples_allocation_bufsize)
    );
    rb_funcall(
        profile_data, rb_intern("dropped_samples_heap_bufsize="),
        1, LONG2NUM(args->dropped_samples_heap_bufsize)
    );
    return profile_data;
}
|
827
|
-
|
828
|
-
// Collector#flush: serializes everything sampled so far into a pprof
// protobuf and returns a ProfileData. Detaches the allocation-sample list
// under the lock, refreshes live-object sizes, then releases the lock while
// serializing. Fix applied: RB_GC_GUARD(self) originally appeared AFTER
// `return retval;` and was therefore dead code; it now executes before any
// return/raise path so `self` (and hence cd) stays alive for the whole call.
static VALUE collector_flush(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    struct mpp_pprof_serctx *serctx = NULL;
    char *buf_out;
    size_t buflen_out;
    char errbuf[256];
    int jump_tag = 0;
    int r = 0;
    VALUE retval = Qundef;
    struct mpp_sample *sample_list = NULL;
    struct collector_flush_prepresult_args prepresult_args;
    int lock_held = 0;

    // Whilst under the GVL, we need to get the collector lock
    mpp_pthread_mutex_lock(&cd->lock);
    lock_held = 1;

    // Detach the allocation sample list; counters are snapshotted for the result.
    sample_list = cd->allocation_samples;
    cd->allocation_samples = NULL;
    prepresult_args.allocation_samples_count = cd->allocation_samples_count;
    prepresult_args.heap_samples_count = cd->heap_samples_count;
    cd->allocation_samples_count = 0;
    cd->pending_size_count = 0;

    // Atomically read-and-reset the dropped-sample counters.
    prepresult_args.dropped_samples_nolock =
        __atomic_exchange_n(&cd->dropped_samples_nolock, 0, __ATOMIC_SEQ_CST);
    prepresult_args.dropped_samples_allocation_bufsize =
        __atomic_exchange_n(&cd->dropped_samples_allocation_bufsize, 0, __ATOMIC_SEQ_CST);
    prepresult_args.dropped_samples_heap_bufsize =
        __atomic_exchange_n(&cd->dropped_samples_heap_bufsize, 0, __ATOMIC_SEQ_CST);

    // Get the current size for everything in the live allocations table.
    rb_protect(collector_flush_protected_heap_sample_size, self, &jump_tag);
    if (jump_tag) goto out;

    serctx = mpp_pprof_serctx_new();
    MPP_ASSERT_MSG(serctx, "mpp_pprof_serctx_new failed??");
    r = mpp_pprof_serctx_set_loctab(serctx, cd->loctab, errbuf, sizeof(errbuf));
    if (r == -1) {
        goto out;
    }

    // Now that we have the samples (and have processed the stringtab) we can
    // yield the lock.
    mpp_pthread_mutex_unlock(&cd->lock);
    lock_held = 0;

    // Add the allocation samples
    struct mpp_sample *s = sample_list;
    while (s) {
        r = mpp_pprof_serctx_add_sample(serctx, s, MPP_SAMPLE_TYPE_ALLOCATION, errbuf, sizeof(errbuf));
        if (r == -1) {
            goto out;
        }
        s = s->next_alloc;
    }

    // Add the heap samples
    struct collector_heap_samples_each_add_args heap_add_args;
    heap_add_args.serctx = serctx;
    heap_add_args.errbuf = errbuf;
    heap_add_args.errbuf_len = sizeof(errbuf);
    heap_add_args.r = 0;
    st_foreach(cd->heap_samples, collector_heap_samples_each_add, (st_data_t)&heap_add_args);
    if (heap_add_args.r != 0) goto out;

    r = mpp_pprof_serctx_serialize(serctx, &buf_out, &buflen_out, errbuf, sizeof(errbuf));
    if (r == -1) {
        goto out;
    }
    // Annoyingly, since rb_str_new could (in theory) throw, we have to rb_protect the whole construction
    // of our return value to ensure we don't leak serctx.
    prepresult_args.pprofbuf = buf_out;
    prepresult_args.pprofbuf_len = buflen_out;
    prepresult_args.cProfileData = cd->cProfileData;
    retval = rb_protect(collector_flush_prepresult, (VALUE)&prepresult_args, &jump_tag);

    // Do cleanup here now.
out:
    if (serctx) mpp_pprof_serctx_destroy(serctx);
    if (lock_held) mpp_pthread_mutex_unlock(&cd->lock);
    if (sample_list) internal_sample_decrement_refcount(cd, sample_list);

    // Keep self (and therefore cd) alive until all native work is done. This
    // must run before return/raise: placed after `return` it is unreachable.
    RB_GC_GUARD(self);

    // Now return-or-raise back to ruby.
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    if (retval == Qundef) {
        // Means we have an error to construct and throw
        rb_raise(rb_eRuntimeError, "ruby_memprofiler_pprof failed serializing pprof protobuf: %s", errbuf);
    }
    return retval;
}
|
923
|
-
|
924
|
-
// Collector#profile: runs the given block with profiling enabled and returns
// the flushed ProfileData. Equivalent to start! / yield / flush / stop!.
static VALUE collector_profile(VALUE self) {
    rb_need_block();

    rb_funcall(self, rb_intern("start!"), 0);
    rb_yield_values(0);
    VALUE result = rb_funcall(self, rb_intern("flush"), 0);
    rb_funcall(self, rb_intern("stop!"), 0);

    return result;
}
|
934
|
-
|
935
|
-
// Collector#live_heap_samples_count: number of entries currently in the
// live-objects (heap sample) table.
static VALUE collector_live_heap_samples_count(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    int64_t live_count = cd->heap_samples_count;
    mpp_pthread_mutex_unlock(&cd->lock);

    return LONG2NUM(live_count);
}
|
943
|
-
|
944
|
-
// Collector#bt_method: returns the configured backtrace capture strategy as
// a Symbol (:cfp or :slowrb).
static VALUE collector_bt_method_get(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&cd->lock);
    int bt_method = cd->bt_method;
    mpp_pthread_mutex_unlock(&cd->lock);

    switch (bt_method) {
    case MPP_BT_METHOD_CFP:
        return rb_id2sym(rb_intern("cfp"));
    case MPP_BT_METHOD_SLOWRB:
        return rb_id2sym(rb_intern("slowrb"));
    default:
        MPP_ASSERT_FAIL("unknown bt_method");
        return Qundef;
    }
}
|
960
|
-
|
961
|
-
// Ruby method Collector#bt_method=.
// Accepts :cfp or :slowrb; anything else raises ArgumentError. The symbol
// is translated to its internal enum value before taking cd->lock, so the
// raise path never runs with the mutex held.
static VALUE collector_bt_method_set(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);

    ID requested_sym = rb_sym2id(newval);
    int new_method;
    if (requested_sym == rb_intern("cfp")) {
        new_method = MPP_BT_METHOD_CFP;
    } else if (requested_sym == rb_intern("slowrb")) {
        new_method = MPP_BT_METHOD_SLOWRB;
    } else {
        rb_raise(rb_eArgError, "passed value for bt_method was not recognised");
    }

    // Publish the new setting under the collector lock.
    mpp_pthread_mutex_lock(&cd->lock);
    cd->bt_method = new_method;
    mpp_pthread_mutex_unlock(&cd->lock);

    return newval;
}
|
980
|
-
|
981
|
-
// st_foreach callback used by the atfork prepare handler: lock one
// collector's mutex. The collector cdata pointer is stored as the table
// key; value and arg are unused (required by the st_foreach signature).
static int mpp_collector_atfork_lock_el(st_data_t key, st_data_t value, st_data_t arg) {
    (void)value; // unused: silence -Wunused-parameter
    (void)arg;   // unused: silence -Wunused-parameter
    struct collector_cdata *cd = (struct collector_cdata *)key;
    mpp_pthread_mutex_lock(&cd->lock);
    return ST_CONTINUE;
}
|
986
|
-
|
987
|
-
// st_foreach callback used by the parent-side atfork release handler:
// unlock one collector's mutex. The collector cdata pointer is the table
// key; value and arg are unused (required by the st_foreach signature).
static int mpp_collector_atfork_unlock_el(st_data_t key, st_data_t value, st_data_t arg) {
    (void)value; // unused: silence -Wunused-parameter
    (void)arg;   // unused: silence -Wunused-parameter
    struct collector_cdata *cd = (struct collector_cdata *)key;
    mpp_pthread_mutex_unlock(&cd->lock);
    return ST_CONTINUE;
}
|
992
|
-
|
993
|
-
// st_foreach callback used by the child-side atfork handler: destroy and
// re-create one collector's mutex. value and arg are unused (required by
// the st_foreach signature).
static int mpp_collector_atfork_replace_el(st_data_t key, st_data_t value, st_data_t arg) {
    (void)value; // unused: silence -Wunused-parameter
    (void)arg;   // unused: silence -Wunused-parameter
    struct collector_cdata *cd = (struct collector_cdata *)key;

    // In the parent process, we simply release the mutexes, but in the child process, we have
    // to _RECREATE_ them. This is because they're recursive mutexes, and must hold some kind of
    // thread ID in them somehow; unlocking them post-fork simply doesn't work it seems.
    // It's safe to re-create the mutex at this point, because no other thread can possibly be
    // holding it since we took it pre-fork
    mpp_pthread_mutex_destroy(&cd->lock);
    pthread_mutexattr_t mutex_attr;
    mpp_pthread_mutexattr_init(&mutex_attr);
    mpp_pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    memset(&cd->lock, 0, sizeof(cd->lock));
    mpp_pthread_mutex_init(&cd->lock, &mutex_attr);
    mpp_pthread_mutexattr_destroy(&mutex_attr);

    return ST_CONTINUE;
}
|
1011
|
-
|
1012
|
-
static void mpp_collector_atfork_prepare() {
|
1013
|
-
mpp_pthread_mutex_lock(&global_collectors_lock);
|
1014
|
-
st_foreach(global_collectors, mpp_collector_atfork_lock_el, 0);
|
1015
|
-
}
|
1016
|
-
|
1017
|
-
static void mpp_collector_atfork_release_parent() {
|
1018
|
-
st_foreach(global_collectors, mpp_collector_atfork_unlock_el, 0);
|
1019
|
-
mpp_pthread_mutex_unlock(&global_collectors_lock);
|
1020
|
-
}
|
1021
|
-
|
1022
|
-
static void mpp_collector_atfork_release_child() {
|
1023
|
-
st_foreach(global_collectors, mpp_collector_atfork_replace_el, 0);
|
1024
|
-
mpp_pthread_mutex_unlock(&global_collectors_lock);
|
1025
|
-
}
|
1026
|
-
|
1027
|
-
|
1028
|
-
void mpp_setup_collector_class() {
|
1029
|
-
VALUE mMemprofilerPprof = rb_const_get(rb_cObject, rb_intern("MemprofilerPprof"));
|
1030
|
-
VALUE cCollector = rb_define_class_under(mMemprofilerPprof, "Collector", rb_cObject);
|
1031
|
-
rb_define_alloc_func(cCollector, collector_alloc);
|
1032
|
-
|
1033
|
-
|
1034
|
-
rb_define_method(cCollector, "initialize", collector_initialize, -1);
|
1035
|
-
rb_define_method(cCollector, "sample_rate", collector_get_sample_rate, 0);
|
1036
|
-
rb_define_method(cCollector, "sample_rate=", collector_set_sample_rate, 1);
|
1037
|
-
rb_define_method(cCollector, "max_allocation_samples", collector_get_max_allocation_samples, 0);
|
1038
|
-
rb_define_method(cCollector, "max_allocation_samples=", collector_set_max_allocation_samples, 1);
|
1039
|
-
rb_define_method(cCollector, "max_heap_samples", collector_get_max_heap_samples, 0);
|
1040
|
-
rb_define_method(cCollector, "max_heap_samples=", collector_set_max_heap_samples, 1);
|
1041
|
-
rb_define_method(cCollector, "bt_method", collector_bt_method_get, 0);
|
1042
|
-
rb_define_method(cCollector, "bt_method=", collector_bt_method_set, 1);
|
1043
|
-
rb_define_method(cCollector, "allocation_retain_rate", collector_get_allocation_retain_rate, 0);
|
1044
|
-
rb_define_method(cCollector, "allocation_retain_rate=", collector_set_allocation_retain_rate, 1);
|
1045
|
-
rb_define_method(cCollector, "running?", collector_is_running, 0);
|
1046
|
-
rb_define_method(cCollector, "start!", collector_start, 0);
|
1047
|
-
rb_define_method(cCollector, "stop!", collector_stop, 0);
|
1048
|
-
rb_define_method(cCollector, "flush", collector_flush, 0);
|
1049
|
-
rb_define_method(cCollector, "profile", collector_profile, 0);
|
1050
|
-
rb_define_method(cCollector, "live_heap_samples_count", collector_live_heap_samples_count, 0);
|
1051
|
-
|
1052
|
-
global_collectors = st_init_numtable();
|
1053
|
-
mpp_pthread_mutex_init(&global_collectors_lock, NULL);
|
1054
|
-
mpp_pthread_atfork(mpp_collector_atfork_prepare, mpp_collector_atfork_release_parent, mpp_collector_atfork_release_child);
|
1055
|
-
}
|