mediapipe-nightly 0.10.21.post20241223__cp310-cp310-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,192 @@
|
|
1
|
+
# Copyright 2020-2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""MediaPipe Pose."""
|
16
|
+
|
17
|
+
import enum
|
18
|
+
from typing import NamedTuple
|
19
|
+
|
20
|
+
import numpy as np
|
21
|
+
|
22
|
+
# The following imports are needed because python pb2 silently discards
|
23
|
+
# unknown protobuf fields.
|
24
|
+
# pylint: disable=unused-import
|
25
|
+
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
|
26
|
+
from mediapipe.calculators.core import gate_calculator_pb2
|
27
|
+
from mediapipe.calculators.core import split_vector_calculator_pb2
|
28
|
+
from mediapipe.calculators.image import warp_affine_calculator_pb2
|
29
|
+
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
|
30
|
+
from mediapipe.calculators.tensor import inference_calculator_pb2
|
31
|
+
from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2
|
32
|
+
from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2
|
33
|
+
from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2
|
34
|
+
from mediapipe.calculators.tensor import tensors_to_segmentation_calculator_pb2
|
35
|
+
from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2
|
36
|
+
from mediapipe.calculators.util import detections_to_rects_calculator_pb2
|
37
|
+
from mediapipe.calculators.util import landmarks_smoothing_calculator_pb2
|
38
|
+
from mediapipe.calculators.util import local_file_contents_calculator_pb2
|
39
|
+
from mediapipe.calculators.util import logic_calculator_pb2
|
40
|
+
from mediapipe.calculators.util import non_max_suppression_calculator_pb2
|
41
|
+
from mediapipe.calculators.util import rect_transformation_calculator_pb2
|
42
|
+
from mediapipe.calculators.util import thresholding_calculator_pb2
|
43
|
+
from mediapipe.calculators.util import visibility_smoothing_calculator_pb2
|
44
|
+
from mediapipe.framework.tool import switch_container_pb2
|
45
|
+
# pylint: enable=unused-import
|
46
|
+
from mediapipe.python.solution_base import SolutionBase
|
47
|
+
from mediapipe.python.solutions import download_utils
|
48
|
+
# pylint: disable=unused-import
|
49
|
+
from mediapipe.python.solutions.pose_connections import POSE_CONNECTIONS
|
50
|
+
# pylint: enable=unused-import
|
51
|
+
|
52
|
+
|
53
|
+
class PoseLandmark(enum.IntEnum):
|
54
|
+
"""The 33 pose landmarks."""
|
55
|
+
NOSE = 0
|
56
|
+
LEFT_EYE_INNER = 1
|
57
|
+
LEFT_EYE = 2
|
58
|
+
LEFT_EYE_OUTER = 3
|
59
|
+
RIGHT_EYE_INNER = 4
|
60
|
+
RIGHT_EYE = 5
|
61
|
+
RIGHT_EYE_OUTER = 6
|
62
|
+
LEFT_EAR = 7
|
63
|
+
RIGHT_EAR = 8
|
64
|
+
MOUTH_LEFT = 9
|
65
|
+
MOUTH_RIGHT = 10
|
66
|
+
LEFT_SHOULDER = 11
|
67
|
+
RIGHT_SHOULDER = 12
|
68
|
+
LEFT_ELBOW = 13
|
69
|
+
RIGHT_ELBOW = 14
|
70
|
+
LEFT_WRIST = 15
|
71
|
+
RIGHT_WRIST = 16
|
72
|
+
LEFT_PINKY = 17
|
73
|
+
RIGHT_PINKY = 18
|
74
|
+
LEFT_INDEX = 19
|
75
|
+
RIGHT_INDEX = 20
|
76
|
+
LEFT_THUMB = 21
|
77
|
+
RIGHT_THUMB = 22
|
78
|
+
LEFT_HIP = 23
|
79
|
+
RIGHT_HIP = 24
|
80
|
+
LEFT_KNEE = 25
|
81
|
+
RIGHT_KNEE = 26
|
82
|
+
LEFT_ANKLE = 27
|
83
|
+
RIGHT_ANKLE = 28
|
84
|
+
LEFT_HEEL = 29
|
85
|
+
RIGHT_HEEL = 30
|
86
|
+
LEFT_FOOT_INDEX = 31
|
87
|
+
RIGHT_FOOT_INDEX = 32
|
88
|
+
|
89
|
+
|
90
|
+
_BINARYPB_FILE_PATH = 'mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb'
|
91
|
+
|
92
|
+
|
93
|
+
def _download_oss_pose_landmark_model(model_complexity):
|
94
|
+
"""Downloads the pose landmark lite/heavy model from the MediaPipe Github repo if it doesn't exist in the package."""
|
95
|
+
|
96
|
+
if model_complexity == 0:
|
97
|
+
download_utils.download_oss_model(
|
98
|
+
'mediapipe/modules/pose_landmark/pose_landmark_lite.tflite')
|
99
|
+
elif model_complexity == 2:
|
100
|
+
download_utils.download_oss_model(
|
101
|
+
'mediapipe/modules/pose_landmark/pose_landmark_heavy.tflite')
|
102
|
+
|
103
|
+
|
104
|
+
class Pose(SolutionBase):
|
105
|
+
"""MediaPipe Pose.
|
106
|
+
|
107
|
+
MediaPipe Pose processes an RGB image and returns pose landmarks on the most
|
108
|
+
prominent person detected.
|
109
|
+
|
110
|
+
Please refer to https://solutions.mediapipe.dev/pose#python-solution-api for
|
111
|
+
usage examples.
|
112
|
+
"""
|
113
|
+
|
114
|
+
def __init__(self,
|
115
|
+
static_image_mode=False,
|
116
|
+
model_complexity=1,
|
117
|
+
smooth_landmarks=True,
|
118
|
+
enable_segmentation=False,
|
119
|
+
smooth_segmentation=True,
|
120
|
+
min_detection_confidence=0.5,
|
121
|
+
min_tracking_confidence=0.5):
|
122
|
+
"""Initializes a MediaPipe Pose object.
|
123
|
+
|
124
|
+
Args:
|
125
|
+
static_image_mode: Whether to treat the input images as a batch of static
|
126
|
+
and possibly unrelated images, or a video stream. See details in
|
127
|
+
https://solutions.mediapipe.dev/pose#static_image_mode.
|
128
|
+
model_complexity: Complexity of the pose landmark model: 0, 1 or 2. See
|
129
|
+
details in https://solutions.mediapipe.dev/pose#model_complexity.
|
130
|
+
smooth_landmarks: Whether to filter landmarks across different input
|
131
|
+
images to reduce jitter. See details in
|
132
|
+
https://solutions.mediapipe.dev/pose#smooth_landmarks.
|
133
|
+
enable_segmentation: Whether to predict segmentation mask. See details in
|
134
|
+
https://solutions.mediapipe.dev/pose#enable_segmentation.
|
135
|
+
smooth_segmentation: Whether to filter segmentation across different input
|
136
|
+
images to reduce jitter. See details in
|
137
|
+
https://solutions.mediapipe.dev/pose#smooth_segmentation.
|
138
|
+
min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for person
|
139
|
+
detection to be considered successful. See details in
|
140
|
+
https://solutions.mediapipe.dev/pose#min_detection_confidence.
|
141
|
+
min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
|
142
|
+
pose landmarks to be considered tracked successfully. See details in
|
143
|
+
https://solutions.mediapipe.dev/pose#min_tracking_confidence.
|
144
|
+
"""
|
145
|
+
_download_oss_pose_landmark_model(model_complexity)
|
146
|
+
super().__init__(
|
147
|
+
binary_graph_path=_BINARYPB_FILE_PATH,
|
148
|
+
side_inputs={
|
149
|
+
'model_complexity': model_complexity,
|
150
|
+
'smooth_landmarks': smooth_landmarks and not static_image_mode,
|
151
|
+
'enable_segmentation': enable_segmentation,
|
152
|
+
'smooth_segmentation':
|
153
|
+
smooth_segmentation and not static_image_mode,
|
154
|
+
'use_prev_landmarks': not static_image_mode,
|
155
|
+
},
|
156
|
+
calculator_params={
|
157
|
+
'posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
|
158
|
+
min_detection_confidence,
|
159
|
+
'poselandmarkbyroicpu__tensorstoposelandmarksandsegmentation__ThresholdingCalculator.threshold':
|
160
|
+
min_tracking_confidence,
|
161
|
+
},
|
162
|
+
outputs=['pose_landmarks', 'pose_world_landmarks', 'segmentation_mask'])
|
163
|
+
|
164
|
+
def process(self, image: np.ndarray) -> NamedTuple:
|
165
|
+
"""Processes an RGB image and returns the pose landmarks on the most prominent person detected.
|
166
|
+
|
167
|
+
Args:
|
168
|
+
image: An RGB image represented as a numpy ndarray.
|
169
|
+
|
170
|
+
Raises:
|
171
|
+
RuntimeError: If the underlying graph throws any error.
|
172
|
+
ValueError: If the input image is not three channel RGB.
|
173
|
+
|
174
|
+
Returns:
|
175
|
+
A NamedTuple with fields describing the landmarks on the most prominate
|
176
|
+
person detected:
|
177
|
+
1) "pose_landmarks" field that contains the pose landmarks.
|
178
|
+
2) "pose_world_landmarks" field that contains the pose landmarks in
|
179
|
+
real-world 3D coordinates that are in meters with the origin at the
|
180
|
+
center between hips.
|
181
|
+
3) "segmentation_mask" field that contains the segmentation mask if
|
182
|
+
"enable_segmentation" is set to true.
|
183
|
+
"""
|
184
|
+
|
185
|
+
results = super().process(input_data={'image': image})
|
186
|
+
if results.pose_landmarks: # pytype: disable=attribute-error
|
187
|
+
for landmark in results.pose_landmarks.landmark: # pytype: disable=attribute-error
|
188
|
+
landmark.ClearField('presence')
|
189
|
+
if results.pose_world_landmarks: # pytype: disable=attribute-error
|
190
|
+
for landmark in results.pose_world_landmarks.landmark: # pytype: disable=attribute-error
|
191
|
+
landmark.ClearField('presence')
|
192
|
+
return results
|
@@ -0,0 +1,22 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe Pose connections."""
|
15
|
+
|
16
|
+
POSE_CONNECTIONS = frozenset([(0, 1), (1, 2), (2, 3), (3, 7), (0, 4), (4, 5),
|
17
|
+
(5, 6), (6, 8), (9, 10), (11, 12), (11, 13),
|
18
|
+
(13, 15), (15, 17), (15, 19), (15, 21), (17, 19),
|
19
|
+
(12, 14), (14, 16), (16, 18), (16, 20), (16, 22),
|
20
|
+
(18, 20), (11, 23), (12, 24), (23, 24), (23, 25),
|
21
|
+
(24, 26), (25, 27), (26, 28), (27, 29), (28, 30),
|
22
|
+
(29, 31), (30, 32), (27, 31), (28, 32)])
|
@@ -0,0 +1,262 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for mediapipe.python.solutions.pose."""
|
15
|
+
|
16
|
+
import json
|
17
|
+
import os
|
18
|
+
# pylint: disable=unused-import
|
19
|
+
import tempfile
|
20
|
+
# pylint: enable=unused-import
|
21
|
+
from typing import NamedTuple
|
22
|
+
|
23
|
+
from absl.testing import absltest
|
24
|
+
from absl.testing import parameterized
|
25
|
+
import cv2
|
26
|
+
import numpy as np
|
27
|
+
import numpy.testing as npt
|
28
|
+
from PIL import Image
|
29
|
+
|
30
|
+
# resources dependency
|
31
|
+
# undeclared dependency
|
32
|
+
from mediapipe.python.solutions import drawing_styles
|
33
|
+
from mediapipe.python.solutions import drawing_utils as mp_drawing
|
34
|
+
from mediapipe.python.solutions import pose as mp_pose
|
35
|
+
|
36
|
+
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
|
37
|
+
DIFF_THRESHOLD = 15 # pixels
|
38
|
+
EXPECTED_POSE_LANDMARKS = np.array([[460, 283], [467, 273], [471, 273],
|
39
|
+
[474, 273], [465, 273], [465, 273],
|
40
|
+
[466, 273], [491, 277], [480, 277],
|
41
|
+
[470, 294], [465, 294], [545, 319],
|
42
|
+
[453, 329], [622, 323], [375, 316],
|
43
|
+
[696, 316], [299, 307], [719, 316],
|
44
|
+
[278, 306], [721, 311], [274, 304],
|
45
|
+
[713, 313], [283, 306], [520, 476],
|
46
|
+
[467, 471], [612, 550], [358, 490],
|
47
|
+
[701, 613], [349, 611], [709, 624],
|
48
|
+
[363, 630], [730, 633], [303, 628]])
|
49
|
+
WORLD_DIFF_THRESHOLD = 0.2 # meters
|
50
|
+
EXPECTED_POSE_WORLD_LANDMARKS = np.array([
|
51
|
+
[-0.11, -0.59, -0.15], [-0.09, -0.64, -0.16], [-0.09, -0.64, -0.16],
|
52
|
+
[-0.09, -0.64, -0.16], [-0.11, -0.64, -0.14], [-0.11, -0.64, -0.14],
|
53
|
+
[-0.11, -0.64, -0.14], [0.01, -0.65, -0.15], [-0.06, -0.64, -0.05],
|
54
|
+
[-0.07, -0.57, -0.15], [-0.09, -0.57, -0.12], [0.18, -0.49, -0.09],
|
55
|
+
[-0.14, -0.5, -0.03], [0.41, -0.48, -0.11], [-0.42, -0.5, -0.02],
|
56
|
+
[0.64, -0.49, -0.17], [-0.63, -0.51, -0.13], [0.7, -0.5, -0.19],
|
57
|
+
[-0.71, -0.53, -0.15], [0.72, -0.51, -0.23], [-0.69, -0.54, -0.19],
|
58
|
+
[0.66, -0.49, -0.19], [-0.64, -0.52, -0.15], [0.09, 0., -0.04],
|
59
|
+
[-0.09, -0., 0.03], [0.41, 0.23, -0.09], [-0.43, 0.1, -0.11],
|
60
|
+
[0.69, 0.49, -0.04], [-0.48, 0.47, -0.02], [0.72, 0.52, -0.04],
|
61
|
+
[-0.48, 0.51, -0.02], [0.8, 0.5, -0.14], [-0.59, 0.52, -0.11],
|
62
|
+
])
|
63
|
+
IOU_THRESHOLD = 0.85 # percents
|
64
|
+
|
65
|
+
|
66
|
+
class PoseTest(parameterized.TestCase):
|
67
|
+
|
68
|
+
def _landmarks_list_to_array(self, landmark_list, image_shape):
|
69
|
+
rows, cols, _ = image_shape
|
70
|
+
return np.asarray([(lmk.x * cols, lmk.y * rows, lmk.z * cols)
|
71
|
+
for lmk in landmark_list.landmark])
|
72
|
+
|
73
|
+
def _world_landmarks_list_to_array(self, landmark_list):
|
74
|
+
return np.asarray([(lmk.x, lmk.y, lmk.z)
|
75
|
+
for lmk in landmark_list.landmark])
|
76
|
+
|
77
|
+
def _assert_diff_less(self, array1, array2, threshold):
|
78
|
+
npt.assert_array_less(np.abs(array1 - array2), threshold)
|
79
|
+
|
80
|
+
def _get_output_path(self, name):
|
81
|
+
return os.path.join(tempfile.gettempdir(), self.id().split('.')[-1] + name)
|
82
|
+
|
83
|
+
def _annotate(self, frame: np.ndarray, results: NamedTuple, idx: int):
|
84
|
+
mp_drawing.draw_landmarks(
|
85
|
+
frame,
|
86
|
+
results.pose_landmarks,
|
87
|
+
mp_pose.POSE_CONNECTIONS,
|
88
|
+
landmark_drawing_spec=drawing_styles.get_default_pose_landmarks_style())
|
89
|
+
path = self._get_output_path('_frame_{}.png'.format(idx))
|
90
|
+
cv2.imwrite(path, frame)
|
91
|
+
|
92
|
+
def _annotate_segmentation(self, segmentation, expected_segmentation,
|
93
|
+
idx: int):
|
94
|
+
path = self._get_output_path('_segmentation_{}.png'.format(idx))
|
95
|
+
self._segmentation_to_rgb(segmentation).save(path)
|
96
|
+
path = self._get_output_path('_segmentation_diff_{}.png'.format(idx))
|
97
|
+
self._segmentation_diff_to_rgb(
|
98
|
+
expected_segmentation, segmentation).save(path)
|
99
|
+
|
100
|
+
def _rgb_to_segmentation(self, img, back_color=(255, 0, 0),
|
101
|
+
front_color=(0, 0, 255)):
|
102
|
+
img = np.array(img)
|
103
|
+
# Check all pixels are either front or back.
|
104
|
+
is_back = (img == back_color).all(axis=2)
|
105
|
+
is_front = (img == front_color).all(axis=2)
|
106
|
+
np.logical_or(is_back, is_front).all()
|
107
|
+
segm = np.zeros(img.shape[:2], dtype=np.uint8)
|
108
|
+
segm[is_front] = 1
|
109
|
+
return segm
|
110
|
+
|
111
|
+
def _segmentation_to_rgb(self, segm, back_color=(255, 0, 0),
|
112
|
+
front_color=(0, 0, 255)):
|
113
|
+
height, width = segm.shape
|
114
|
+
img = np.zeros((height, width, 3), dtype=np.uint8)
|
115
|
+
img[:, :] = back_color
|
116
|
+
img[segm == 1] = front_color
|
117
|
+
return Image.fromarray(img)
|
118
|
+
|
119
|
+
def _segmentation_iou(self, segm_expected, segm_actual):
|
120
|
+
intersection = segm_expected * segm_actual
|
121
|
+
expected_dot = segm_expected * segm_expected
|
122
|
+
actual_dot = segm_actual * segm_actual
|
123
|
+
eps = np.finfo(np.float32).eps
|
124
|
+
result = intersection.sum() / (expected_dot.sum() +
|
125
|
+
actual_dot.sum() -
|
126
|
+
intersection.sum() + eps)
|
127
|
+
return result
|
128
|
+
|
129
|
+
def _segmentation_diff_to_rgb(self, segm_expected, segm_actual,
|
130
|
+
expected_color=(0, 255, 0),
|
131
|
+
actual_color=(255, 0, 0)):
|
132
|
+
height, width = segm_expected.shape
|
133
|
+
img = np.zeros((height, width, 3), dtype=np.uint8)
|
134
|
+
img[np.logical_and(segm_expected == 1, segm_actual == 0)] = expected_color
|
135
|
+
img[np.logical_and(segm_expected == 0, segm_actual == 1)] = actual_color
|
136
|
+
return Image.fromarray(img)
|
137
|
+
|
138
|
+
def test_invalid_image_shape(self):
|
139
|
+
with mp_pose.Pose() as pose:
|
140
|
+
with self.assertRaisesRegex(
|
141
|
+
ValueError, 'Input image must contain three channel rgb data.'):
|
142
|
+
pose.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4))
|
143
|
+
|
144
|
+
def test_blank_image(self):
|
145
|
+
with mp_pose.Pose(enable_segmentation=True) as pose:
|
146
|
+
image = np.zeros([100, 100, 3], dtype=np.uint8)
|
147
|
+
image.fill(255)
|
148
|
+
results = pose.process(image)
|
149
|
+
self.assertIsNone(results.pose_landmarks)
|
150
|
+
self.assertIsNone(results.segmentation_mask)
|
151
|
+
|
152
|
+
@parameterized.named_parameters(('static_lite', True, 0, 3),
|
153
|
+
('static_full', True, 1, 3),
|
154
|
+
('static_heavy', True, 2, 3),
|
155
|
+
('video_lite', False, 0, 3),
|
156
|
+
('video_full', False, 1, 3),
|
157
|
+
('video_heavy', False, 2, 3))
|
158
|
+
def test_on_image(self, static_image_mode, model_complexity, num_frames):
|
159
|
+
image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg')
|
160
|
+
expected_segmentation_path = os.path.join(
|
161
|
+
os.path.dirname(__file__), 'testdata/pose_segmentation.png')
|
162
|
+
image = cv2.imread(image_path)
|
163
|
+
expected_segmentation = self._rgb_to_segmentation(
|
164
|
+
Image.open(expected_segmentation_path).convert('RGB'))
|
165
|
+
|
166
|
+
with mp_pose.Pose(static_image_mode=static_image_mode,
|
167
|
+
model_complexity=model_complexity,
|
168
|
+
enable_segmentation=True) as pose:
|
169
|
+
for idx in range(num_frames):
|
170
|
+
results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
|
171
|
+
segmentation = results.segmentation_mask.round().astype(np.uint8)
|
172
|
+
|
173
|
+
# TODO: Add rendering of world 3D when supported.
|
174
|
+
self._annotate(image.copy(), results, idx)
|
175
|
+
self._annotate_segmentation(segmentation, expected_segmentation, idx)
|
176
|
+
|
177
|
+
self._assert_diff_less(
|
178
|
+
self._landmarks_list_to_array(results.pose_landmarks,
|
179
|
+
image.shape)[:, :2],
|
180
|
+
EXPECTED_POSE_LANDMARKS, DIFF_THRESHOLD)
|
181
|
+
self._assert_diff_less(
|
182
|
+
self._world_landmarks_list_to_array(results.pose_world_landmarks),
|
183
|
+
EXPECTED_POSE_WORLD_LANDMARKS, WORLD_DIFF_THRESHOLD)
|
184
|
+
self.assertGreaterEqual(
|
185
|
+
self._segmentation_iou(expected_segmentation, segmentation),
|
186
|
+
IOU_THRESHOLD)
|
187
|
+
|
188
|
+
@parameterized.named_parameters(
|
189
|
+
('full', 1, 'pose_squats.full.npz'))
|
190
|
+
def test_on_video(self, model_complexity, expected_name):
|
191
|
+
"""Tests pose models on a video."""
|
192
|
+
# Set threshold for comparing actual and expected predictions in pixels.
|
193
|
+
diff_threshold = 15
|
194
|
+
world_diff_threshold = 0.1
|
195
|
+
|
196
|
+
video_path = os.path.join(os.path.dirname(__file__),
|
197
|
+
'testdata/pose_squats.mp4')
|
198
|
+
expected_path = os.path.join(os.path.dirname(__file__),
|
199
|
+
'testdata/{}'.format(expected_name))
|
200
|
+
|
201
|
+
# Predict pose landmarks for each frame.
|
202
|
+
video_cap = cv2.VideoCapture(video_path)
|
203
|
+
actual_per_frame = []
|
204
|
+
actual_world_per_frame = []
|
205
|
+
frame_idx = 0
|
206
|
+
with mp_pose.Pose(static_image_mode=False,
|
207
|
+
model_complexity=model_complexity) as pose:
|
208
|
+
while True:
|
209
|
+
# Get next frame of the video.
|
210
|
+
success, input_frame = video_cap.read()
|
211
|
+
if not success:
|
212
|
+
break
|
213
|
+
|
214
|
+
# Run pose tracker.
|
215
|
+
input_frame = cv2.cvtColor(input_frame, cv2.COLOR_BGR2RGB)
|
216
|
+
result = pose.process(image=input_frame)
|
217
|
+
pose_landmarks = self._landmarks_list_to_array(result.pose_landmarks,
|
218
|
+
input_frame.shape)
|
219
|
+
pose_world_landmarks = self._world_landmarks_list_to_array(
|
220
|
+
result.pose_world_landmarks)
|
221
|
+
|
222
|
+
actual_per_frame.append(pose_landmarks)
|
223
|
+
actual_world_per_frame.append(pose_world_landmarks)
|
224
|
+
|
225
|
+
input_frame = cv2.cvtColor(input_frame, cv2.COLOR_RGB2BGR)
|
226
|
+
self._annotate(input_frame, result, frame_idx)
|
227
|
+
frame_idx += 1
|
228
|
+
actual = np.array(actual_per_frame)
|
229
|
+
actual_world = np.array(actual_world_per_frame)
|
230
|
+
|
231
|
+
# Dump actual .npz.
|
232
|
+
npz_path = self._get_output_path(expected_name)
|
233
|
+
np.savez(npz_path, predictions=actual, predictions_world=actual_world)
|
234
|
+
|
235
|
+
# Dump actual JSON.
|
236
|
+
json_path = self._get_output_path(expected_name.replace('.npz', '.json'))
|
237
|
+
with open(json_path, 'w') as fl:
|
238
|
+
dump_data = {
|
239
|
+
'predictions': np.around(actual, 3).tolist(),
|
240
|
+
'predictions_world': np.around(actual_world, 3).tolist()
|
241
|
+
}
|
242
|
+
fl.write(json.dumps(dump_data, indent=2, separators=(',', ': ')))
|
243
|
+
|
244
|
+
# Validate actual vs. expected landmarks.
|
245
|
+
expected = np.load(expected_path)['predictions']
|
246
|
+
assert actual.shape == expected.shape, (
|
247
|
+
'Unexpected shape of predictions: {} instead of {}'.format(
|
248
|
+
actual.shape, expected.shape))
|
249
|
+
self._assert_diff_less(
|
250
|
+
actual[..., :2], expected[..., :2], threshold=diff_threshold)
|
251
|
+
|
252
|
+
# Validate actual vs. expected world landmarks.
|
253
|
+
expected_world = np.load(expected_path)['predictions_world']
|
254
|
+
assert actual_world.shape == expected_world.shape, (
|
255
|
+
'Unexpected shape of world predictions: {} instead of {}'.format(
|
256
|
+
actual_world.shape, expected_world.shape))
|
257
|
+
self._assert_diff_less(
|
258
|
+
actual_world, expected_world, threshold=world_diff_threshold)
|
259
|
+
|
260
|
+
|
261
|
+
if __name__ == '__main__':
|
262
|
+
absltest.main()
|
@@ -0,0 +1,76 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe Selfie Segmentation."""
|
15
|
+
|
16
|
+
from typing import NamedTuple
|
17
|
+
|
18
|
+
import numpy as np
|
19
|
+
# The following imports are needed because python pb2 silently discards
|
20
|
+
# unknown protobuf fields.
|
21
|
+
# pylint: disable=unused-import
|
22
|
+
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
|
23
|
+
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
|
24
|
+
from mediapipe.calculators.tensor import inference_calculator_pb2
|
25
|
+
from mediapipe.calculators.tensor import tensors_to_segmentation_calculator_pb2
|
26
|
+
from mediapipe.calculators.util import local_file_contents_calculator_pb2
|
27
|
+
from mediapipe.framework.tool import switch_container_pb2
|
28
|
+
# pylint: enable=unused-import
|
29
|
+
|
30
|
+
from mediapipe.python.solution_base import SolutionBase
|
31
|
+
|
32
|
+
_BINARYPB_FILE_PATH = 'mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb'
|
33
|
+
|
34
|
+
|
35
|
+
class SelfieSegmentation(SolutionBase):
  """MediaPipe Selfie Segmentation solution.

  Runs the MediaPipe selfie-segmentation graph on RGB images and produces a
  per-pixel segmentation mask.

  Please refer to
  https://solutions.mediapipe.dev/selfie_segmentation#python-solution-api for
  usage examples.
  """

  def __init__(self, model_selection=0):
    """Initializes a MediaPipe Selfie Segmentation object.

    Args:
      model_selection: 0 or 1. 0 to select a general-purpose model, and 1 to
        select a model more optimized for landscape images. See details in
        https://solutions.mediapipe.dev/selfie_segmentation#model_selection.
    """
    # The model choice is handed to the graph as a side packet; the graph
    # exposes a single 'segmentation_mask' output stream.
    graph_side_inputs = {
        'model_selection': model_selection,
    }
    super().__init__(
        binary_graph_path=_BINARYPB_FILE_PATH,
        side_inputs=graph_side_inputs,
        outputs=['segmentation_mask'])

  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns a segmentation mask.

    Args:
      image: An RGB image represented as a numpy ndarray.

    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.

    Returns:
      A NamedTuple object with a "segmentation_mask" field that contains a
      float type 2d np array representing the mask.
    """
    input_packets = {'image': image}
    return super().process(input_data=input_packets)
|
@@ -0,0 +1,68 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for mediapipe.python.solutions.selfie_segmentation."""
|
15
|
+
|
16
|
+
import os
import tempfile

from absl.testing import absltest
from absl.testing import parameterized
import cv2
import numpy as np

# resources dependency
# undeclared dependency
from mediapipe.python.solutions import selfie_segmentation as mp_selfie_segmentation
|
26
|
+
|
27
|
+
# Directory holding the test images.
# NOTE(review): this constant is unused in the visible tests —
# test_segmentation builds its path from __file__ instead; confirm whether it
# is referenced elsewhere before removing.
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
|
28
|
+
|
29
|
+
|
30
|
+
class SelfieSegmentationTest(parameterized.TestCase):
  """Tests for the SelfieSegmentation solution wrapper."""

  def _draw(self, frame: np.ndarray, mask: np.ndarray):
    """Composites the mask onto the frame and writes a debug PNG.

    Args:
      frame: uint8 image array; combined channel-wise with the mask via
        element-wise minimum, darkening pixels outside the segmented region.
      mask: 2-D mask array, stacked to three channels before combining.
    """
    frame = np.minimum(frame, np.stack((mask,) * 3, axis=-1))
    # Bug fix: `tempfile` was referenced here without ever being imported,
    # so this line raised NameError. `import tempfile` is now in the
    # module's import block.
    path = os.path.join(tempfile.gettempdir(),
                        self.id().split('.')[-1] + '.png')
    cv2.imwrite(path, frame)

  def test_invalid_image_shape(self):
    """A four-channel input must be rejected with a ValueError."""
    with mp_selfie_segmentation.SelfieSegmentation() as selfie_segmentation:
      with self.assertRaisesRegex(
          ValueError, 'Input image must contain three channel rgb data.'):
        selfie_segmentation.process(
            np.arange(36, dtype=np.uint8).reshape(3, 3, 4))

  def test_blank_image(self):
    """An all-white image should yield an effectively empty mask."""
    with mp_selfie_segmentation.SelfieSegmentation() as selfie_segmentation:
      image = np.zeros([100, 100, 3], dtype=np.uint8)
      image.fill(255)
      results = selfie_segmentation.process(image)
      normalized_segmentation_mask = (results.segmentation_mask *
                                      255).astype(int)
      # Every normalized mask value must truncate to 0 for a blank input.
      self.assertLess(np.amax(normalized_segmentation_mask), 1)

  @parameterized.named_parameters(('general', 0), ('landscape', 1))
  def test_segmentation(self, model_selection):
    """Runs both model variants on the portrait image and dumps the result."""
    image_path = os.path.join(os.path.dirname(__file__),
                              'testdata/portrait.jpg')
    image = cv2.imread(image_path)
    with mp_selfie_segmentation.SelfieSegmentation(
        model_selection=model_selection) as selfie_segmentation:
      results = selfie_segmentation.process(
          cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
      normalized_segmentation_mask = (results.segmentation_mask *
                                      255).astype(int)
      self._draw(image.copy(), normalized_segmentation_mask)
|
65
|
+
|
66
|
+
|
67
|
+
if __name__ == '__main__':
  # Delegate to absl's test runner when executed as a script.
  absltest.main()
|