ruby_memprofiler_pprof 0.0.3 → 0.0.4
- checksums.yaml +4 -4
- data/ext/ruby_memprofiler_pprof_ext/collector.c +803 -0
- data/ext/ruby_memprofiler_pprof_ext/compat.c +184 -0
- data/ext/ruby_memprofiler_pprof_ext/compile_commands.json +1 -0
- data/ext/ruby_memprofiler_pprof_ext/extconf.rb +152 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof.upb.c +199 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof.upb.h +924 -0
- data/ext/ruby_memprofiler_pprof_ext/pprof_out.c +430 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_hacks.c +118 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_memprofiler_pprof.c +10 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_memprofiler_pprof.h +183 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby26/gc_private.h +324 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby27/gc_private.h +339 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby30/gc_private.h +361 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private/ruby31/gc_private.h +374 -0
- data/ext/ruby_memprofiler_pprof_ext/ruby_private.h +31 -0
- data/ext/ruby_memprofiler_pprof_ext/sample.c +43 -0
- data/ext/ruby_memprofiler_pprof_ext/vendor/backtracie/backtracie.h +268 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/CONTRIBUTING.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/DESIGN.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/WORKSPACE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/amalgamate.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/lua.BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/protobuf.patch +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/py_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/python_downloads.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/system_python.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/upb_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/bazel/workspace_deps.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/BUILD.googleapis +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/benchmark.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/compare.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/descriptor.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/descriptor_sv.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/empty.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_protobuf_binary_cc.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_synthetic_protos.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/benchmarks/gen_upb_binary_c.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/build_defs.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/make_cmakelists.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/staleness_test.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/cmake/staleness_test_lib.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/render.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/style-guide.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/vs-cpp-protos.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/docs/wrapping-upb.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/convert.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/convert.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_containers.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_containers.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_pool.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/descriptor_pool.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/dist/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/dist/dist.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/extension_dict.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/extension_dict.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/map.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/map.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/message.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/message.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/minimal_test.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_database_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_pool_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/descriptor_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/generator_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/json_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/keywords_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/message_factory_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/message_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/proto_builder_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/pyproto_test_wrapper.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/reflection_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/service_reflection_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/symbol_database_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/text_encoding_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/text_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/unknown_fields_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/well_known_types_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/pb_unit_tests/wire_format_test_wrapper.py +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/protobuf.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/protobuf.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/py_extension.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/python_api.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/repeated.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/repeated.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/unknown_fields.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/unknown_fields.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/python/version_script.lds +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/README.google +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/console.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/lunit/lunit.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/LICENSE +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/naive.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/range2-neon.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/range2-sse.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/third_party/utf8_range/utf8_range.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/BUILD.bazel +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/def.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/lua_proto_library.bzl +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/main.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/msg.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/test_upb.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upb.lua +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/bindings/lua/upbc.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/collections.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/collections.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/conformance_upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/conformance_upb_failures.txt +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_fast.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_fast.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/decode_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/def.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/empty.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/fuzz/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/fuzz/file_descriptor_parsenew_fuzzer.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_decode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_decode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/json_encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_accessors_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/mini_table_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/msg_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/port_def.inc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/port_undef.inc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/reflection.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/table.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/table_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_cpp.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_cpp.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_generated_code.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/test_table.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/text_encode.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/text_encode.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb.hpp +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/upb_internal.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/README.md +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/compare_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_public_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_regular_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_weak_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/def_to_proto_wweak_import_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields.c +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields_test.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upb/util/required_fields_test.proto +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/BUILD +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/common.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/common.h +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/protoc-gen-upb.cc +0 -0
- data/ext/{ruby_memprofiler_pprof → ruby_memprofiler_pprof_ext}/vendor/upb/upbc/protoc-gen-upbdefs.cc +0 -0
- data/lib/ruby_memprofiler_pprof/atfork.rb +1 -1
- data/lib/ruby_memprofiler_pprof/block_flusher.rb +48 -4
- data/lib/ruby_memprofiler_pprof/file_flusher.rb +13 -6
- data/lib/ruby_memprofiler_pprof/profile_app.rb +8 -12
- data/lib/ruby_memprofiler_pprof/profile_data.rb +7 -8
- data/lib/ruby_memprofiler_pprof/version.rb +1 -1
- data/lib/ruby_memprofiler_pprof.rb +5 -4
- data/libexec/ruby_memprofiler_pprof_profile +6 -6
- metadata +207 -200
- data/ext/ruby_memprofiler_pprof/backtrace.c +0 -429
- data/ext/ruby_memprofiler_pprof/collector.c +0 -1055
- data/ext/ruby_memprofiler_pprof/compat.c +0 -182
- data/ext/ruby_memprofiler_pprof/extconf.rb +0 -72
- data/ext/ruby_memprofiler_pprof/pprof.upb.c +0 -170
- data/ext/ruby_memprofiler_pprof/pprof.upb.h +0 -848
- data/ext/ruby_memprofiler_pprof/pprof_out.c +0 -285
- data/ext/ruby_memprofiler_pprof/ruby_memprofiler_pprof.c +0 -11
- data/ext/ruby_memprofiler_pprof/ruby_memprofiler_pprof.h +0 -301
- data/ext/ruby_memprofiler_pprof/strtab.c +0 -391
@@ -0,0 +1,803 @@
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <ruby.h>
+#include <ruby/debug.h>
+#include <ruby/thread.h>
+
+#include <backtracie.h>
+
+#include "ruby/st.h"
+#include "ruby_memprofiler_pprof.h"
+
+struct collector_cdata {
+  // Global variables we need to keep a hold of
+  VALUE cCollector;
+  VALUE cProfileData;
+  VALUE mMemprofilerPprof;
+
+  // Ruby Tracepoint objects for our hooks
+  VALUE newobj_trace;
+  VALUE freeobj_trace;
+
+  // How often (as a fraction of UINT32_MAX) we should sample allocations
+  uint32_t u32_sample_rate;
+  // This flag is used to make sure we detach our tracepoints as we're getting GC'd.
+  bool is_tracing;
+  // If we're flushing, this contains the thread that's doing the flushing. This is used
+  // to exclude allocations from that thread from heap profiling.
+  VALUE flush_thread;
+  // Whether or not to use pretty backtraces (true) or fast ones (false)
+  bool pretty_backtraces;
+
+  // ======== Heap samples ========
+  // A hash-table keying live VALUEs to their struct mpp_sample. This is _not_ cleared
+  // when #flush is called; instead, elements are deleted when they are free'd. This is
+  // used for building heap profiles.
+  st_table *heap_samples;
+  // Number of elements currently in the heap profile hash
+  size_t heap_samples_count;
+  // How big the sample table can grow
+  size_t max_heap_samples;
+  // This number goes up by one every time #flush is called, and is used to keep _new_ samples from winding up in a
+  // profile we're in the process of flushing.
+  unsigned int current_flush_epoch;
+
+  // ======== Sample drop counters ========
+  // Number of samples dropped for want of space in the heap allocation table.
+  size_t dropped_samples_heap_bufsize;
+
+  // Table of (VALUE) -> (refcount) which is used to make sure we only mark the parts of our samples once, since many of
+  // the samples will hold references to the same iseq's etc.
+  st_table *mark_table;
+
+  // Debugging counters
+  int64_t last_gc_mark_ns;
+};
+
+static struct collector_cdata *collector_cdata_get(VALUE self);
+static VALUE collector_alloc(VALUE klass);
+static VALUE collector_initialize(int argc, VALUE *argv, VALUE self);
+static void collector_cdata_gc_mark(void *ptr);
+static int collector_gc_mark_each_table_entry(st_data_t key, st_data_t value, st_data_t ctxarg);
+static void collector_gc_free(void *ptr);
+static void collector_gc_free_heap_samples(struct collector_cdata *cd);
+static int collector_gc_free_each_heap_sample(st_data_t key, st_data_t value, st_data_t ctxarg);
+static size_t collector_gc_memsize(const void *ptr);
+static int collector_gc_memsize_each_heap_sample(st_data_t key, st_data_t value, st_data_t arg);
+#ifdef HAVE_RB_GC_MARK_MOVABLE
+static void collector_cdata_gc_compact(void *ptr);
+static int collector_compact_each_table_entry(st_data_t key, st_data_t value, st_data_t ctxarg);
+static int collector_compact_each_heap_sample(st_data_t key, st_data_t value, st_data_t ctxarg);
+#endif
+static void collector_mark_sample_value_as_freed(struct collector_cdata *cd, VALUE freed_obj);
+static void collector_tphook_newobj(VALUE tpval, void *data);
+static void collector_tphook_freeobj(VALUE tpval, void *data);
+static VALUE collector_start(VALUE self);
+static VALUE collector_stop(VALUE self);
+static VALUE collector_is_running(VALUE self);
+static VALUE collector_flush(int argc, VALUE *argv, VALUE self);
+struct flush_protected_ctx {
+  struct collector_cdata *cd;
+  struct mpp_pprof_serctx *serctx;
+  bool yield_gvl;
+  bool proactively_yield_gvl;
+};
+static VALUE flush_protected(VALUE ctxarg);
+struct flush_each_sample_ctx {
+  struct collector_cdata *cd;
+  struct mpp_pprof_serctx *serctx;
+  bool proactively_yield_gvl;
+  char *errbuf;
+  size_t sizeof_errbuf;
+  int r;
+  int i;
+  size_t actual_sample_count;
+  int64_t nogvl_duration;
+  int64_t gvl_yield_count;
+  int64_t gvl_check_yield_count;
+  unsigned int flush_epoch;
+};
+int flush_each_sample(st_data_t key, st_data_t value, st_data_t ctxarg);
+struct flush_nogvl_ctx {
+  struct collector_cdata *cd;
+  struct mpp_pprof_serctx *serctx;
+  char *pprof_outbuf;
+  size_t pprof_outbuf_len;
+  char *errbuf;
+  size_t sizeof_errbuf;
+  int r;
+};
+static void *flush_nogvl(void *ctx);
+static void flush_nogvl_unblock(void *ctx);
+static VALUE collector_profile(VALUE self);
+static VALUE collector_live_heap_samples_count(VALUE self);
+static VALUE collector_get_sample_rate(VALUE self);
+static VALUE collector_set_sample_rate(VALUE self, VALUE newval);
+static VALUE collector_get_max_heap_samples(VALUE self);
+static VALUE collector_set_max_heap_samples(VALUE self, VALUE newval);
+static VALUE collector_get_pretty_backtraces(VALUE self);
+static VALUE collector_set_pretty_backtraces(VALUE self, VALUE newval);
+static VALUE collector_get_last_mark_nsecs(VALUE self);
+static VALUE collector_get_mark_table_size(VALUE self);
+static void mark_table_refcount_inc(st_table *mark_table, VALUE key);
+static void mark_table_refcount_dec(st_table *mark_table, VALUE key);
+
+static const rb_data_type_t collector_cdata_type = {"collector_cdata",
+                                                    {
+                                                        collector_cdata_gc_mark,
+                                                        collector_gc_free,
+                                                        collector_gc_memsize,
+#ifdef HAVE_RB_GC_MARK_MOVABLE
+                                                        collector_cdata_gc_compact,
+#endif
+                                                        {0}, /* reserved */
+                                                    },
+                                                    /* parent, data, [ flags ] */
+                                                    NULL,
+                                                    NULL,
+                                                    0};
+
+void mpp_setup_collector_class() {
+  VALUE mMemprofilerPprof = rb_const_get(rb_cObject, rb_intern("MemprofilerPprof"));
+  VALUE cCollector = rb_define_class_under(mMemprofilerPprof, "Collector", rb_cObject);
+  rb_define_alloc_func(cCollector, collector_alloc);
+
+  rb_define_method(cCollector, "initialize", collector_initialize, -1);
+  rb_define_method(cCollector, "sample_rate", collector_get_sample_rate, 0);
+  rb_define_method(cCollector, "sample_rate=", collector_set_sample_rate, 1);
+  rb_define_method(cCollector, "max_heap_samples", collector_get_max_heap_samples, 0);
+  rb_define_method(cCollector, "max_heap_samples=", collector_set_max_heap_samples, 1);
+  rb_define_method(cCollector, "pretty_backtraces", collector_get_pretty_backtraces, 0);
+  rb_define_method(cCollector, "pretty_backtraces=", collector_set_pretty_backtraces, 1);
+  rb_define_method(cCollector, "running?", collector_is_running, 0);
+  rb_define_method(cCollector, "start!", collector_start, 0);
+  rb_define_method(cCollector, "stop!", collector_stop, 0);
+  rb_define_method(cCollector, "flush", collector_flush, -1);
+  rb_define_method(cCollector, "profile", collector_profile, 0);
+  rb_define_method(cCollector, "live_heap_samples_count", collector_live_heap_samples_count, 0);
+  rb_define_method(cCollector, "last_mark_nsecs", collector_get_last_mark_nsecs, 0);
+  rb_define_method(cCollector, "mark_table_size", collector_get_mark_table_size, 0);
+}
+
+static struct collector_cdata *collector_cdata_get(VALUE self) {
+  struct collector_cdata *a;
+  TypedData_Get_Struct(self, struct collector_cdata, &collector_cdata_type, a);
+  return a;
+}
+
+static VALUE collector_alloc(VALUE klass) {
+  struct collector_cdata *cd;
+  VALUE v = TypedData_Make_Struct(klass, struct collector_cdata, &collector_cdata_type, cd);
+
+  cd->newobj_trace = Qnil;
+  cd->freeobj_trace = Qnil;
+  cd->flush_thread = Qnil;
+
+  cd->u32_sample_rate = 0;
+  cd->is_tracing = false;
+  cd->heap_samples = NULL;
+  cd->heap_samples_count = 0;
+  cd->max_heap_samples = 0;
+  cd->dropped_samples_heap_bufsize = 0;
+  cd->current_flush_epoch = 0;
+  cd->mark_table = NULL;
+  cd->last_gc_mark_ns = 0;
+  return v;
+}
+
+static VALUE collector_initialize(int argc, VALUE *argv, VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+
+  // Save constants
+  cd->mMemprofilerPprof = rb_const_get(rb_cObject, rb_intern("MemprofilerPprof"));
+  cd->cCollector = rb_const_get(cd->mMemprofilerPprof, rb_intern("Collector"));
+  cd->cProfileData = rb_const_get(cd->mMemprofilerPprof, rb_intern("ProfileData"));
+
+  // Argument parsing
+  VALUE kwargs_hash = Qnil;
+  rb_scan_args_kw(RB_SCAN_ARGS_LAST_HASH_KEYWORDS, argc, argv, "00:", &kwargs_hash);
+  VALUE kwarg_values[3];
+  ID kwarg_ids[3];
+  kwarg_ids[0] = rb_intern("sample_rate");
+  kwarg_ids[1] = rb_intern("max_heap_samples");
+  kwarg_ids[2] = rb_intern("pretty_backtraces");
+  rb_get_kwargs(kwargs_hash, kwarg_ids, 0, 3, kwarg_values);
+
+  // Default values...
+  if (kwarg_values[0] == Qundef)
+    kwarg_values[0] = DBL2NUM(0.01);
+  if (kwarg_values[1] == Qundef)
+    kwarg_values[1] = LONG2NUM(50000);
+  if (kwarg_values[2] == Qundef)
+    kwarg_values[2] = Qtrue;
+
+  rb_funcall(self, rb_intern("sample_rate="), 1, kwarg_values[0]);
+  rb_funcall(self, rb_intern("max_heap_samples="), 1, kwarg_values[1]);
+  rb_funcall(self, rb_intern("pretty_backtraces="), 1, kwarg_values[2]);
+
+  cd->heap_samples = st_init_numtable();
+  cd->heap_samples_count = 0;
+
+  cd->mark_table = st_init_numtable();
+
+  return Qnil;
+}
+
+static void collector_cdata_gc_mark(void *ptr) {
+  struct timespec t1 = mpp_gettime_monotonic();
+
+  struct collector_cdata *cd = (struct collector_cdata *)ptr;
+  rb_gc_mark_movable(cd->newobj_trace);
+  rb_gc_mark_movable(cd->freeobj_trace);
+  rb_gc_mark_movable(cd->mMemprofilerPprof);
+  rb_gc_mark_movable(cd->cCollector);
+  rb_gc_mark_movable(cd->cProfileData);
+  rb_gc_mark_movable(cd->flush_thread);
+  st_foreach(cd->mark_table, collector_gc_mark_each_table_entry, 0);
+
+  struct timespec t2 = mpp_gettime_monotonic();
+  cd->last_gc_mark_ns = mpp_time_delta_nsec(t1, t2);
+}
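
The mark hook times itself so the collector can report GC-mark overhead through last_mark_nsecs. A standalone sketch (not the gem's code) of how monotonic-clock helpers like mpp_gettime_monotonic() and mpp_time_delta_nsec() are typically built on clock_gettime(CLOCK_MONOTONIC); the gem's real definitions live elsewhere in the extension and may differ in detail:

// Standalone sketch: monotonic timing helpers of the shape used above.
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static struct timespec gettime_monotonic(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts); // unaffected by wall-clock adjustments
  return ts;
}

static int64_t time_delta_nsec(struct timespec t1, struct timespec t2) {
  return (int64_t)(t2.tv_sec - t1.tv_sec) * 1000000000LL + (t2.tv_nsec - t1.tv_nsec);
}

int main(void) {
  struct timespec t1 = gettime_monotonic();
  // ... work being measured, e.g. the GC mark pass above ...
  struct timespec t2 = gettime_monotonic();
  printf("took %lld ns\n", (long long)time_delta_nsec(t1, t2));
  return 0;
}
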
+
+static int collector_gc_mark_each_table_entry(st_data_t key, st_data_t value, st_data_t ctxarg) {
+  rb_gc_mark_movable((VALUE)key);
+  return ST_CONTINUE;
+}
+
+static void collector_gc_free(void *ptr) {
+  struct collector_cdata *cd = (struct collector_cdata *)ptr;
+  if (cd->is_tracing) {
+    if (cd->newobj_trace) {
+      rb_tracepoint_disable(cd->newobj_trace);
+    }
+    if (cd->freeobj_trace) {
+      rb_tracepoint_disable(cd->freeobj_trace);
+    }
+  }
+
+  collector_gc_free_heap_samples(cd);
+  ruby_xfree(ptr);
+}
+
+static void collector_gc_free_heap_samples(struct collector_cdata *cd) {
+  if (cd->heap_samples) {
+    st_foreach(cd->heap_samples, collector_gc_free_each_heap_sample, (st_data_t)cd);
+    st_free_table(cd->heap_samples);
+  }
+  cd->heap_samples = NULL;
+}
+
+static int collector_gc_free_each_heap_sample(st_data_t key, st_data_t value, st_data_t ctxarg) {
+  struct mpp_sample *sample = (struct mpp_sample *)value;
+  mpp_sample_free(sample);
+  return ST_DELETE;
+}
+
+static size_t collector_gc_memsize(const void *ptr) {
+  struct collector_cdata *cd = (struct collector_cdata *)ptr;
+  size_t sz = sizeof(*cd);
+  if (cd->heap_samples) {
+    st_foreach(cd->heap_samples, collector_gc_memsize_each_heap_sample, (st_data_t)&sz);
+    sz += st_memsize(cd->heap_samples);
+  }
+
+  return sz;
+}
+
+static int collector_gc_memsize_each_heap_sample(st_data_t key, st_data_t value, st_data_t arg) {
+  size_t *acc_ptr = (size_t *)arg;
+  struct mpp_sample *sample = (struct mpp_sample *)value;
+  *acc_ptr += mpp_sample_memsize(sample);
+  return ST_CONTINUE;
+}
+
+#ifdef HAVE_RB_GC_MARK_MOVABLE
+// Support VALUES we're tracking being moved away in Ruby 2.7+ with GC.compact
+static void collector_cdata_gc_compact(void *ptr) {
+  struct collector_cdata *cd = (struct collector_cdata *)ptr;
+  cd->newobj_trace = rb_gc_location(cd->newobj_trace);
+  cd->freeobj_trace = rb_gc_location(cd->freeobj_trace);
+  cd->mMemprofilerPprof = rb_gc_location(cd->mMemprofilerPprof);
+  cd->cCollector = rb_gc_location(cd->cCollector);
+  cd->cProfileData = rb_gc_location(cd->cProfileData);
+  cd->flush_thread = rb_gc_location(cd->flush_thread);
+
+  // Keep track of allocated objects we sampled that might move.
+  st_foreach(cd->heap_samples, collector_compact_each_heap_sample, (st_data_t)cd);
+  st_foreach(cd->mark_table, collector_compact_each_table_entry, (st_data_t)cd);
+}
+
+static int mark_table_refcount_update(st_data_t *key, st_data_t *value, st_data_t ctxarg, int existing) {
+  if (existing) {
+    *value += ((int)ctxarg);
+  } else {
+    *value = ((int)ctxarg);
+  }
+  return *value == 0 ? ST_DELETE : ST_CONTINUE;
+}
+
+static int collector_compact_each_table_entry(st_data_t key, st_data_t value, st_data_t ctxarg) {
+  struct collector_cdata *cd = (struct collector_cdata *)ctxarg;
+  VALUE key_value = (VALUE)key;
+  VALUE new_value = rb_gc_location(key_value);
+  if (new_value == key_value) {
+    return ST_CONTINUE;
+  } else {
+    // Insert a new entry for the moved value, or add this item's refcount to the existing entry.
+    st_update(cd->mark_table, new_value, mark_table_refcount_update, value);
+    return ST_DELETE;
+  }
+}
+
+static int collector_compact_each_heap_sample(st_data_t key, st_data_t value, st_data_t ctxarg) {
+  struct collector_cdata *cd = (struct collector_cdata *)ctxarg;
+  struct mpp_sample *sample = (struct mpp_sample *)value;
+
+  for (size_t i = 0; i < sample->frames_count; i++) {
+    minimal_location_t *frame = &sample->frames[i];
+    if (frame->method_name_contents == BACKTRACIE_METHOD_NAME_CONTENTS_BASE_LABEL) {
+      frame->method_name.base_label = rb_gc_location(frame->method_name.base_label);
+    }
+    switch (frame->method_qualifier_contents) {
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF:
+      frame->method_qualifier.self = rb_gc_location(frame->method_qualifier.self);
+      break;
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF_CLASS:
+      frame->method_qualifier.self_class = rb_gc_location(frame->method_qualifier.self_class);
+      break;
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_CME_CLASS:
+      frame->method_qualifier.cme_defined_class = rb_gc_location(frame->method_qualifier.cme_defined_class);
+      break;
+    }
+    frame->filename = rb_gc_location(frame->filename);
+  }
+
+  // Handle compaction of our weak reference to the heap sample.
+  if (rb_gc_location(sample->allocated_value_weak) == sample->allocated_value_weak) {
+    return ST_CONTINUE;
+  } else {
+    sample->allocated_value_weak = rb_gc_location(sample->allocated_value_weak);
+    st_insert(cd->heap_samples, sample->allocated_value_weak, (st_data_t)sample);
+    return ST_DELETE;
+  }
+}
+
+#endif
+
+static void collector_mark_sample_value_as_freed(struct collector_cdata *cd, VALUE freed_obj) {
+  struct mpp_sample *sample;
+  if (st_delete(cd->heap_samples, (st_data_t *)&freed_obj, (st_data_t *)&sample)) {
+    for (size_t i = 0; i < sample->frames_count; i++) {
+      minimal_location_t *frame = &sample->frames[i];
+      if (frame->method_name_contents == BACKTRACIE_METHOD_NAME_CONTENTS_BASE_LABEL) {
+        mark_table_refcount_dec(cd->mark_table, frame->method_name.base_label);
+      }
+      switch (frame->method_qualifier_contents) {
+      case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF:
+        mark_table_refcount_dec(cd->mark_table, frame->method_qualifier.self);
+        break;
+      case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF_CLASS:
+        mark_table_refcount_dec(cd->mark_table, frame->method_qualifier.self_class);
+        break;
+      case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_CME_CLASS:
+        mark_table_refcount_dec(cd->mark_table, frame->method_qualifier.cme_defined_class);
+        break;
+      }
+      mark_table_refcount_dec(cd->mark_table, frame->filename);
+    }
+
+    // We deleted it out of live objects; free the sample
+    mpp_sample_free(sample);
+    cd->heap_samples_count--;
+  }
+}
+
+static void mark_table_refcount_inc(st_table *mark_table, VALUE key) {
+  if (key == Qnil || key == Qundef || key == 0) {
+    return;
+  }
+  st_update(mark_table, key, mark_table_refcount_update, 1);
+}
+static void mark_table_refcount_dec(st_table *mark_table, VALUE key) {
+  if (key == Qnil || key == Qundef || key == 0) {
+    return;
+  }
+  st_update(mark_table, key, mark_table_refcount_update, (st_data_t)-1);
+}
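
The two helpers above give the mark table its refcount semantics: the st_update callback applies a signed delta and drops the entry once the count reaches zero, so a VALUE shared by many samples is marked exactly once per GC. A standalone model of those semantics (not the gem's code; it substitutes a trivial linear-scan array for Ruby's st_table):

// Standalone model: key -> signed refcount map with delete-at-zero.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 64

struct entry { uintptr_t key; long refcount; int used; };
static struct entry table[MAX_ENTRIES];

static void refcount_update(uintptr_t key, long delta) {
  struct entry *free_slot = NULL;
  for (size_t i = 0; i < MAX_ENTRIES; i++) {
    if (table[i].used && table[i].key == key) {
      table[i].refcount += delta; // existing entry: apply the delta
      if (table[i].refcount == 0)
        table[i].used = 0;        // the ST_DELETE equivalent
      return;
    }
    if (!table[i].used && !free_slot) free_slot = &table[i];
  }
  assert(free_slot && "table full");
  free_slot->key = key; free_slot->refcount = delta; free_slot->used = 1;
}

int main(void) {
  uintptr_t shared_filename = 0x1234;   // stands in for a VALUE shared by two samples
  refcount_update(shared_filename, +1); // first sample captured
  refcount_update(shared_filename, +1); // second sample shares the same string
  refcount_update(shared_filename, -1); // first sample freed
  printf("still referenced: %d\n", table[0].used); // 1: mark it once per GC
  refcount_update(shared_filename, -1); // second sample freed
  printf("still referenced: %d\n", table[0].used); // 0: no longer marked
  return 0;
}
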
+
+static void collector_tphook_newobj(VALUE tpval, void *data) {
+  // If an object is created or freed during our newobj hook, Ruby refuses to recursively run
+  // the newobj/freeobj hook! It's just silently skipped. Thus, we can wind up missing
+  // some objects (not good), or we can wind up missing the fact that some objects
+  // were freed (even worse!).
+  // Thus, we have to make sure that
+  //   1) No Ruby objects are created in this method,
+  //   2) No Ruby objects are freed in this method,
+  // We achieve 1 by, well, not creating any Ruby objects. 2 would happen if the GC runs; one
+  // of the things that _can_ trigger the GC to run, unfortunately, is ruby_xmalloc() and
+  // friends (used internally by backtracie, and by st_hash, and also by this gem's malloc
+  // wrapper). So, we need to disable the GC, and re-enable it at the end of our hook.
+  // The normal "disable the GC" function, rb_gc_disable(), doesn't quite do it, because _that_
+  // calls rb_gc_rest() to finish off any in-progress collection! If we called that, we'd free
+  // objects, and miss removing them from our sample map. So, instead, we twiddle the dont_gc
+  // flag on the objspace directly with this compat wrapper.
+  VALUE gc_was_already_disabled = mpp_rb_gc_disable_no_rest();
+
+  rb_trace_arg_t *tparg;
+  VALUE newobj;
+  struct collector_cdata *cd = (struct collector_cdata *)data;
+
+#ifdef HAVE_WORKING_RB_GC_FORCE_RECYCLE
+  tparg = rb_tracearg_from_tracepoint(tpval);
+  newobj = rb_tracearg_object(tparg);
+
+  // Normally, any object allocated that calls the newobj hook will be freed during GC,
+  // and the freeobj tracepoint will then be called. Thus, any object added to the heap sample
+  // map will be removed before a different object in the same slot is created.
+  // Unfortunately, the one place that _isn't_ true is if an object is freed manually with
+  // rb_gc_force_recycle(). This is deprecated in Ruby >= 3.1, but before that the only time
+  // we find out that such an object is freed is when a new object is created in the same
+  // slot. Handle that by marking an existing object in the sample map as "free'd".
+  collector_mark_sample_value_as_freed(cd, newobj);
+#endif
+  // Skip the rest of this method if we're not sampling.
+  if (mpp_rand() > cd->u32_sample_rate) {
+    goto out;
+  }
+  // Don't profile allocations that were caused by the flusher; these allocations are
+  //   1) numerous,
+  //   2) probably not of interest,
+  //   3) guaranteed not to actually make it into a heap usage profile anyway, since
+  //      they get freed at the end of the flushing routine.
+  if (rb_thread_current() == cd->flush_thread) {
+    goto out;
+  }
+  // Make sure there's enough space in our buffer
+  if (cd->heap_samples_count >= cd->max_heap_samples) {
+    cd->dropped_samples_heap_bufsize++;
+    goto out;
+  }
+
+#ifndef HAVE_WORKING_RB_GC_FORCE_RECYCLE
+  // If we don't need to do the "check every object to see if it was freed" check,
+  // we can defer actually calling these functions until we've decided whether or not
+  // to sample.
+  tparg = rb_tracearg_from_tracepoint(tpval);
+  newobj = rb_tracearg_object(tparg);
+#endif
+
+  // OK, now it's time to add to our sample buffer.
+  struct mpp_sample *sample = mpp_sample_capture(newobj);
+  sample->flush_epoch = cd->current_flush_epoch;
+  // insert into live sample map
+  int already_existed = st_insert(cd->heap_samples, newobj, (st_data_t)sample);
+  MPP_ASSERT_MSG(already_existed == 0, "st_insert did an update in the newobj hook");
+  cd->heap_samples_count++;
+
+  // Add them to the list of things we will GC mark
+  for (size_t i = 0; i < sample->frames_count; i++) {
+    minimal_location_t *frame = &sample->frames[i];
+    if (frame->method_name_contents == BACKTRACIE_METHOD_NAME_CONTENTS_BASE_LABEL) {
+      mark_table_refcount_inc(cd->mark_table, frame->method_name.base_label);
+    }
+    switch (frame->method_qualifier_contents) {
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF:
+      mark_table_refcount_inc(cd->mark_table, frame->method_qualifier.self);
+      break;
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_SELF_CLASS:
+      mark_table_refcount_inc(cd->mark_table, frame->method_qualifier.self_class);
+      break;
+    case BACKTRACIE_METHOD_QUALIFIER_CONTENTS_CME_CLASS:
+      mark_table_refcount_inc(cd->mark_table, frame->method_qualifier.cme_defined_class);
+      break;
+    }
+    mark_table_refcount_inc(cd->mark_table, frame->filename);
+  }
+out:
+  if (!RTEST(gc_was_already_disabled)) {
+    rb_gc_enable();
+  }
+}
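
The hook samples by comparing one uniform random 32-bit draw against the precomputed u32_sample_rate threshold, keeping the unsampled fast path to a single integer compare. A standalone sketch of that decision, assuming mpp_rand() returns a uniformly distributed uint32_t (the rand_u32() stand-in here is illustrative only, not the gem's PRNG):

// Standalone sketch: pre-scaled threshold sampling as used in the newobj hook.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Crude stand-in for mpp_rand(), built from 15-bit rand() draws; demo only.
static uint32_t rand_u32(void) {
  return ((uint32_t)(rand() & 0x7fff) << 17) | ((uint32_t)(rand() & 0x7fff) << 2) |
         ((uint32_t)rand() & 0x3);
}

int main(void) {
  double sample_rate = 0.01; // sample ~1% of allocations
  uint32_t u32_sample_rate = (uint32_t)(UINT32_MAX * sample_rate);

  long sampled = 0, total = 1000000;
  for (long i = 0; i < total; i++) {
    // Same shape as the hook: skip unless the draw lands under the threshold.
    if (rand_u32() > u32_sample_rate) continue;
    sampled++;
  }
  printf("sampled %ld of %ld (~%.2f%%)\n", sampled, total, 100.0 * sampled / total);
  return 0;
}
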
+
+static void collector_tphook_freeobj(VALUE tpval, void *data) {
+  // See discussion about disabling GC in newobj tracepoint, it all applies here too.
+  VALUE gc_was_already_disabled = mpp_rb_gc_disable_no_rest();
+
+  struct collector_cdata *cd = (struct collector_cdata *)data;
+
+  // Definitely do _NOT_ try and run any Ruby code in here. Any allocation will crash
+  // the process.
+  rb_trace_arg_t *tparg = rb_tracearg_from_tracepoint(tpval);
+  VALUE freed_obj = rb_tracearg_object(tparg);
+  collector_mark_sample_value_as_freed(cd, freed_obj);
+
+  if (!RTEST(gc_was_already_disabled)) {
+    rb_gc_enable();
+  }
+}
+
+static VALUE collector_start(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  if (cd->is_tracing)
+    return Qnil;
+
+  // Don't needlessly double-initialize everything
+  if (cd->heap_samples_count > 0) {
+    collector_gc_free_heap_samples(cd);
+    cd->heap_samples = st_init_numtable();
+    cd->heap_samples_count = 0;
+  }
+  cd->dropped_samples_heap_bufsize = 0;
+
+  if (cd->newobj_trace == Qnil) {
+    cd->newobj_trace = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, collector_tphook_newobj, cd);
+  }
+  if (cd->freeobj_trace == Qnil) {
+    cd->freeobj_trace = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_FREEOBJ, collector_tphook_freeobj, cd);
+  }
+
+  rb_tracepoint_enable(cd->newobj_trace);
+  rb_tracepoint_enable(cd->freeobj_trace);
+
+  cd->is_tracing = true;
+  return Qnil;
+}
+
+static VALUE collector_stop(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  if (!cd->is_tracing)
+    return Qnil;
+  rb_tracepoint_disable(cd->newobj_trace);
+  rb_tracepoint_disable(cd->freeobj_trace);
+  cd->is_tracing = false;
+  // Don't clear any of our buffers - it's OK to access the profiling info after calling stop!
+  return Qnil;
+}
+
+static VALUE collector_is_running(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return cd->is_tracing ? Qtrue : Qfalse;
+}
+
+static VALUE collector_flush(int argc, VALUE *argv, VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+
+  // kwarg handling
+  VALUE kwargs_hash = Qnil;
+  rb_scan_args_kw(RB_SCAN_ARGS_LAST_HASH_KEYWORDS, argc, argv, "00:", &kwargs_hash);
+  VALUE kwarg_values[2];
+  ID kwarg_ids[2];
+  kwarg_ids[0] = rb_intern("yield_gvl");
+  kwarg_ids[1] = rb_intern("proactively_yield_gvl");
+  rb_get_kwargs(kwargs_hash, kwarg_ids, 0, 2, kwarg_values);
+
+  bool yield_gvl = false;
+  bool proactively_yield_gvl = false;
+
+  if (kwarg_values[0] != Qundef) {
+    yield_gvl = RTEST(kwarg_values[0]);
+  }
+  if (kwarg_values[1] != Qundef) {
+    proactively_yield_gvl = RTEST(kwarg_values[1]);
+  }
+
+  struct flush_protected_ctx ctx;
+  ctx.cd = cd;
+  ctx.proactively_yield_gvl = proactively_yield_gvl;
+  ctx.yield_gvl = yield_gvl;
+  ctx.serctx = NULL;
+  int jump_tag = 0;
+  VALUE retval = rb_protect(flush_protected, (VALUE)&ctx, &jump_tag);
+
+  if (ctx.serctx)
+    mpp_pprof_serctx_destroy(ctx.serctx);
+  cd->flush_thread = Qnil;
+
+  // Now return-or-raise back to ruby.
+  if (jump_tag) {
+    rb_jump_tag(jump_tag);
+  }
+  return retval;
+}
+
+int flush_each_sample(st_data_t key, st_data_t value, st_data_t ctxarg) {
+  struct flush_each_sample_ctx *ctx = (struct flush_each_sample_ctx *)ctxarg;
+  struct collector_cdata *cd = ctx->cd;
+  struct mpp_sample *sample = (struct mpp_sample *)value;
+  int ret;
+
+  if (ctx->proactively_yield_gvl && (ctx->i % 25 == 0)) {
+    ctx->gvl_check_yield_count++;
+    if (mpp_is_someone_else_waiting_for_gvl()) {
+      ctx->gvl_yield_count++;
+      struct timespec t1 = mpp_gettime_monotonic();
+      rb_thread_schedule();
+      struct timespec t2 = mpp_gettime_monotonic();
+      ctx->nogvl_duration += mpp_time_delta_nsec(t1, t2);
+    }
+  }
+  ctx->i++;
+
+  if (sample->flush_epoch > ctx->flush_epoch) {
+    // This is a new sample captured since we started calling #flush; skip it.
+    return ST_CONTINUE;
+  }
+
+  // Need to disable GC so that our freeobj tracepoint hook can't delete the sample out of the map
+  // after we've decided we're _also_ going to delete the sample out of the map.
+  VALUE gc_was_already_disabled = mpp_rb_gc_disable_no_rest();
+
+  if (!mpp_is_value_still_validish(sample->allocated_value_weak)) {
+    mpp_sample_free(sample);
+    cd->heap_samples_count--;
+    ret = ST_DELETE;
+  } else {
+    sample->allocated_value_objsize = mpp_rb_obj_memsize_of(sample->allocated_value_weak);
+    ctx->r = mpp_pprof_serctx_add_sample(ctx->serctx, sample, ctx->errbuf, ctx->sizeof_errbuf);
+    if (ctx->r == -1) {
+      ret = ST_STOP;
+    } else {
+      ctx->actual_sample_count++;
+      ret = ST_CONTINUE;
+    }
+  }
+
+  if (!RTEST(gc_was_already_disabled)) {
+    rb_gc_enable();
+  }
+
+  return ret;
+}
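
flush_each_sample skips anything whose flush_epoch is newer than the epoch being flushed, so allocations made while #flush runs don't leak into the profile being written. A standalone model of that filtering, using hypothetical sample structs rather than the gem's types:

// Standalone model: epoch-tagged samples and the snapshot-then-bump flush.
#include <stdio.h>

struct sample { unsigned int flush_epoch; const char *name; };

int main(void) {
  unsigned int current_flush_epoch = 0;

  struct sample before = {current_flush_epoch, "captured before flush"};
  unsigned int flush_epoch = current_flush_epoch++; // snapshot, then bump
  struct sample during = {current_flush_epoch, "captured during flush"};

  struct sample *all[] = {&before, &during};
  for (int i = 0; i < 2; i++) {
    if (all[i]->flush_epoch > flush_epoch) {
      printf("skip:    %s\n", all[i]->name); // too new for this profile
    } else {
      printf("include: %s\n", all[i]->name);
    }
  }
  return 0;
}
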
+
+static VALUE flush_protected(VALUE ctxarg) {
+  struct timespec t_start = mpp_gettime_monotonic();
+
+  struct flush_protected_ctx *ctx = (struct flush_protected_ctx *)ctxarg;
+  struct collector_cdata *cd = ctx->cd;
+  bool proactively_yield_gvl = ctx->proactively_yield_gvl;
+  cd->flush_thread = rb_thread_current();
+  unsigned int flush_epoch = cd->current_flush_epoch++;
+
+  size_t dropped_samples_bufsize = cd->dropped_samples_heap_bufsize;
+  cd->dropped_samples_heap_bufsize = 0;
+
+  // Begin setting up pprof serialisation.
+  char errbuf[256];
+  ctx->serctx = mpp_pprof_serctx_new(errbuf, sizeof(errbuf));
+  if (!ctx->serctx) {
+    rb_raise(rb_eRuntimeError, "ruby_memprofiler_pprof: setting up serialisation: %s", errbuf);
+  }
+  struct mpp_pprof_serctx *serctx = ctx->serctx;
+  struct flush_each_sample_ctx sample_ctx;
+  sample_ctx.r = 0;
+  sample_ctx.i = 0;
+  sample_ctx.actual_sample_count = 0;
+  sample_ctx.errbuf = errbuf;
+  sample_ctx.sizeof_errbuf = sizeof(errbuf);
+  sample_ctx.serctx = serctx;
+  sample_ctx.cd = cd;
+  sample_ctx.proactively_yield_gvl = proactively_yield_gvl;
+  sample_ctx.nogvl_duration = 0;
+  sample_ctx.gvl_yield_count = 0;
+  sample_ctx.gvl_check_yield_count = 0;
+  sample_ctx.flush_epoch = flush_epoch;
+  st_foreach(cd->heap_samples, flush_each_sample, (st_data_t)&sample_ctx);
+  if (sample_ctx.r == -1) {
+    rb_raise(rb_eRuntimeError, "ruby_memprofiler_pprof: failed preparing samples for serialisation: %s",
+             sample_ctx.errbuf);
+  }
+
+  struct flush_nogvl_ctx nogvl_ctx;
+  nogvl_ctx.errbuf = errbuf;
+  nogvl_ctx.sizeof_errbuf = sizeof(errbuf);
+  nogvl_ctx.serctx = serctx;
+  nogvl_ctx.cd = cd;
+
+  struct timespec t_serialize_start = mpp_gettime_monotonic();
+
+  if (ctx->yield_gvl) {
+    rb_thread_call_without_gvl(flush_nogvl, &nogvl_ctx, flush_nogvl_unblock, &nogvl_ctx);
+  } else {
+    flush_nogvl(&nogvl_ctx);
+  }
+
+  if (nogvl_ctx.r == -1) {
+    rb_raise(rb_eRuntimeError, "ruby_memprofiler_pprof: failed serialising samples: %s", nogvl_ctx.errbuf);
+  }
+
+  VALUE pprof_data = rb_str_new(nogvl_ctx.pprof_outbuf, nogvl_ctx.pprof_outbuf_len);
+
+  struct timespec t_end = mpp_gettime_monotonic();
+
+  VALUE profile_data = rb_class_new_instance(0, NULL, cd->cProfileData);
+  rb_funcall(profile_data, rb_intern("pprof_data="), 1, pprof_data);
+  rb_funcall(profile_data, rb_intern("heap_samples_count="), 1, SIZET2NUM(sample_ctx.actual_sample_count));
+  rb_funcall(profile_data, rb_intern("dropped_samples_heap_bufsize="), 1, SIZET2NUM(dropped_samples_bufsize));
+  rb_funcall(profile_data, rb_intern("flush_duration_nsecs="), 1, INT2NUM(mpp_time_delta_nsec(t_start, t_end)));
+  rb_funcall(profile_data, rb_intern("pprof_serialization_nsecs="), 1,
+             INT2NUM(mpp_time_delta_nsec(t_serialize_start, t_end)));
+  rb_funcall(profile_data, rb_intern("sample_add_nsecs="), 1, INT2NUM(mpp_time_delta_nsec(t_start, t_serialize_start)));
+  rb_funcall(profile_data, rb_intern("sample_add_without_gvl_nsecs="), 1, INT2NUM(sample_ctx.nogvl_duration));
+  rb_funcall(profile_data, rb_intern("gvl_proactive_yield_count="), 1, INT2NUM(sample_ctx.gvl_yield_count));
+  rb_funcall(profile_data, rb_intern("gvl_proactive_check_yield_count="), 1, INT2NUM(sample_ctx.gvl_check_yield_count));
+
+  return profile_data;
+}
+
+static void *flush_nogvl(void *ctxarg) {
+  struct flush_nogvl_ctx *ctx = (struct flush_nogvl_ctx *)ctxarg;
+
+  ctx->r = mpp_pprof_serctx_serialize(ctx->serctx, &ctx->pprof_outbuf, &ctx->pprof_outbuf_len, ctx->errbuf,
+                                      ctx->sizeof_errbuf);
+  return NULL;
+}
+
+static void flush_nogvl_unblock(void *ctxarg) {
+  struct flush_nogvl_ctx *ctx = (struct flush_nogvl_ctx *)ctxarg;
+  uint8_t one = 1;
+  __atomic_store(&ctx->serctx->interrupt, &one, __ATOMIC_SEQ_CST);
+}
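
Because serialisation can run without the GVL via rb_thread_call_without_gvl, Ruby may invoke flush_nogvl_unblock from another thread; it raises an atomic interrupt flag that the serialiser presumably polls so it can bail out early. A standalone sketch of the same pattern using the GCC/Clang __atomic builtins the gem relies on (compile with -pthread):

// Standalone sketch: one thread interrupts another via an atomic flag.
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint8_t interrupt_flag = 0;

static void *worker(void *arg) {
  (void)arg;
  uint8_t interrupted;
  long iterations = 0;
  do {
    iterations++; // stands in for serialising one chunk of the profile
    __atomic_load(&interrupt_flag, &interrupted, __ATOMIC_SEQ_CST);
  } while (!interrupted);
  printf("worker interrupted after %ld iterations\n", iterations);
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  usleep(10000); // let the worker run briefly
  uint8_t one = 1;
  __atomic_store(&interrupt_flag, &one, __ATOMIC_SEQ_CST); // the "unblock" call
  pthread_join(t, NULL);
  return 0;
}
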
+
+static VALUE collector_profile(VALUE self) {
+  rb_need_block();
+
+  rb_funcall(self, rb_intern("start!"), 0);
+  rb_yield_values(0);
+  VALUE profile_output = rb_funcall(self, rb_intern("flush"), 0);
+  rb_funcall(self, rb_intern("stop!"), 0);
+
+  return profile_output;
+}
+
+static VALUE collector_live_heap_samples_count(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return SIZET2NUM(cd->heap_samples_count);
+}
+
+static VALUE collector_get_sample_rate(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return DBL2NUM(((double)cd->u32_sample_rate) / UINT32_MAX);
+}
+
+static VALUE collector_set_sample_rate(VALUE self, VALUE newval) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  // Convert the double sample rate (between 0 and 1) to a value between 0 and UINT32_MAX
+  cd->u32_sample_rate = UINT32_MAX * NUM2DBL(newval);
+  return newval;
+}
+
+static VALUE collector_get_max_heap_samples(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return SIZET2NUM(cd->max_heap_samples);
+}
+
+static VALUE collector_set_max_heap_samples(VALUE self, VALUE newval) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  cd->max_heap_samples = NUM2SIZET(newval);
+  return newval;
+}
+
+static VALUE collector_get_pretty_backtraces(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return cd->pretty_backtraces ? Qtrue : Qfalse;
+}
+
+static VALUE collector_set_pretty_backtraces(VALUE self, VALUE newval) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  cd->pretty_backtraces = RTEST(newval);
+  return newval;
+}
+
+static VALUE collector_get_last_mark_nsecs(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return INT2NUM(cd->last_gc_mark_ns);
+}
+
+static VALUE collector_get_mark_table_size(VALUE self) {
+  struct collector_cdata *cd = collector_cdata_get(self);
+  return UINT2NUM(cd->mark_table->num_entries);
+}