mediapipe-nightly 0.10.21.post20241223__cp312-cp312-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,258 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""Tests for mediapipe.python.solutions.drawing_utils."""
|
16
|
+
|
17
|
+
from absl.testing import absltest
|
18
|
+
from absl.testing import parameterized
|
19
|
+
import cv2
|
20
|
+
import numpy as np
|
21
|
+
|
22
|
+
from google.protobuf import text_format
|
23
|
+
from mediapipe.framework.formats import detection_pb2
|
24
|
+
from mediapipe.framework.formats import landmark_pb2
|
25
|
+
from mediapipe.python.solutions import drawing_utils
|
26
|
+
|
27
|
+
# Drawing specs mirroring drawing_utils' defaults; the tests use these to
# build the expected images that the drawn output is compared against.
DEFAULT_BBOX_DRAWING_SPEC = drawing_utils.DrawingSpec()
DEFAULT_CONNECTION_DRAWING_SPEC = drawing_utils.DrawingSpec()
DEFAULT_CIRCLE_DRAWING_SPEC = drawing_utils.DrawingSpec(
    color=drawing_utils.RED_COLOR)
DEFAULT_AXIS_DRAWING_SPEC = drawing_utils.DrawingSpec()
# BGR color of the border ring drawn around each landmark circle
# (radius + 1 in the tests below).
DEFAULT_CYCLE_BORDER_COLOR = (224, 224, 224)
|
33
|
+
|
34
|
+
|
35
|
+
class DrawingUtilTest(parameterized.TestCase):
  """Tests for drawing_utils.draw_landmarks/draw_detection/draw_axis.

  Each positive test draws the expected image directly with cv2 using the
  default drawing specs, then asserts the drawing_utils output is pixel-equal.
  """

  def test_invalid_input_image(self):
    # A 2-channel image must be rejected by all three drawing entry points.
    image = np.arange(18, dtype=np.uint8).reshape(3, 3, 2)
    with self.assertRaisesRegex(
        ValueError, 'Input image must contain three channel bgr data.'):
      drawing_utils.draw_landmarks(image, landmark_pb2.NormalizedLandmarkList())
    with self.assertRaisesRegex(
        ValueError, 'Input image must contain three channel bgr data.'):
      drawing_utils.draw_detection(image, detection_pb2.Detection())
    with self.assertRaisesRegex(
        ValueError, 'Input image must contain three channel bgr data.'):
      rotation = np.eye(3, dtype=np.float32)
      translation = np.array([0., 0., 1.])
      drawing_utils.draw_axis(image, rotation, translation)

  def test_invalid_connection(self):
    # Connection (0, 2) references landmark index 2, but only 0 and 1 exist.
    landmark_list = text_format.Parse(
        'landmark {x: 0.5 y: 0.5} landmark {x: 0.2 y: 0.2}',
        landmark_pb2.NormalizedLandmarkList())
    image = np.arange(27, dtype=np.uint8).reshape(3, 3, 3)
    with self.assertRaisesRegex(ValueError, 'Landmark index is out of range.'):
      drawing_utils.draw_landmarks(image, landmark_list, [(0, 2)])

  def test_unqualified_detection(self):
    # GLOBAL location data (absolute pixels) is not drawable; only
    # relative coordinates are accepted.
    detection = text_format.Parse('location_data {format: GLOBAL}',
                                  detection_pb2.Detection())
    image = np.arange(27, dtype=np.uint8).reshape(3, 3, 3)
    with self.assertRaisesRegex(ValueError, 'LocationData must be relative'):
      drawing_utils.draw_detection(image, detection)

  def test_draw_keypoints_only(self):
    # Detection with keypoints but no bounding box: only circles are drawn.
    # Normalized (0, 1) / (1, 0) map to pixel corners (0, 99) / (99, 0).
    detection = text_format.Parse(
        'location_data {'
        '  format: RELATIVE_BOUNDING_BOX'
        '  relative_keypoints {x: 0 y: 1}'
        '  relative_keypoints {x: 1 y: 0}}', detection_pb2.Detection())
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    cv2.circle(expected_result, (0, 99),
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, (99, 0),
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    drawing_utils.draw_detection(image, detection)
    np.testing.assert_array_equal(image, expected_result)

  def test_draw_bboxs_only(self):
    # Detection with only a relative bounding box: a single rectangle
    # spanning the full image is drawn.
    detection = text_format.Parse(
        'location_data {'
        '  format: RELATIVE_BOUNDING_BOX'
        '  relative_bounding_box {xmin: 0 ymin: 0 width: 1 height: 1}}',
        detection_pb2.Detection())
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    cv2.rectangle(expected_result, (0, 0), (99, 99),
                  DEFAULT_BBOX_DRAWING_SPEC.color,
                  DEFAULT_BBOX_DRAWING_SPEC.thickness)
    drawing_utils.draw_detection(image, detection)
    np.testing.assert_array_equal(image, expected_result)

  @parameterized.named_parameters(
      ('landmark_list_has_only_one_element', 'landmark {x: 0.1 y: 0.1}'),
      ('second_landmark_is_invisible',
       'landmark {x: 0.1 y: 0.1} landmark {x: 0.5 y: 0.5 visibility: 0.0}'))
  def test_draw_single_landmark_point(self, landmark_list_text):
    # Only the visible landmark at (0.1, 0.1) -> pixel (10, 10) is drawn:
    # border ring (radius + 1) first, then the filled landmark circle.
    landmark_list = text_format.Parse(landmark_list_text,
                                      landmark_pb2.NormalizedLandmarkList())
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    cv2.circle(expected_result, (10, 10),
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, (10, 10),
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    drawing_utils.draw_landmarks(image, landmark_list)
    np.testing.assert_array_equal(image, expected_result)

  @parameterized.named_parameters(
      ('landmarks_have_x_and_y_only',
       'landmark {x: 0.1 y: 0.5} landmark {x: 0.5 y: 0.1}'),
      ('landmark_zero_visibility_and_presence',
       'landmark {x: 0.1 y: 0.5 presence: 0.5}'
       'landmark {x: 0.5 y: 0.1 visibility: 0.5}'))
  def test_draw_landmarks_and_connections(self, landmark_list_text):
    # Expected drawing order: connection line first, then border rings,
    # then landmark circles — mirroring draw_landmarks' layering.
    landmark_list = text_format.Parse(landmark_list_text,
                                      landmark_pb2.NormalizedLandmarkList())
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    start_point = (10, 50)
    end_point = (50, 10)
    cv2.line(expected_result, start_point, end_point,
             DEFAULT_CONNECTION_DRAWING_SPEC.color,
             DEFAULT_CONNECTION_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, start_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, end_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, start_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, end_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    drawing_utils.draw_landmarks(
        image=image, landmark_list=landmark_list, connections=[(0, 1)])
    np.testing.assert_array_equal(image, expected_result)

  def test_draw_axis(self):
    # Axis arrow endpoints below were precomputed for this rotation (a
    # rotation about the x axis by 45 degrees) and translation; x/y/z axes
    # are drawn red/green/blue respectively.
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    origin = (50, 50)
    x_axis = (75, 50)
    y_axis = (50, 22)
    z_axis = (50, 77)
    cv2.arrowedLine(expected_result, origin, x_axis, drawing_utils.RED_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    cv2.arrowedLine(expected_result, origin, y_axis, drawing_utils.GREEN_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    cv2.arrowedLine(expected_result, origin, z_axis, drawing_utils.BLUE_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    r = np.sqrt(2.) / 2.
    rotation = np.array([[1., 0., 0.], [0., r, -r], [0., r, r]])
    translation = np.array([0, 0, -0.2])
    drawing_utils.draw_axis(image, rotation, translation)
    np.testing.assert_array_equal(image, expected_result)

  def test_draw_axis_zero_translation(self):
    # With identity rotation and zero translation the projection degenerates;
    # the expected arrow endpoints below encode that degenerate case.
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    origin = (50, 50)
    x_axis = (0, 50)
    y_axis = (50, 100)
    z_axis = (50, 50)
    cv2.arrowedLine(expected_result, origin, x_axis, drawing_utils.RED_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    cv2.arrowedLine(expected_result, origin, y_axis, drawing_utils.GREEN_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    cv2.arrowedLine(expected_result, origin, z_axis, drawing_utils.BLUE_COLOR,
                    DEFAULT_AXIS_DRAWING_SPEC.thickness)
    rotation = np.eye(3, dtype=np.float32)
    translation = np.zeros((3,), dtype=np.float32)
    drawing_utils.draw_axis(image, rotation, translation)
    np.testing.assert_array_equal(image, expected_result)

  def test_min_and_max_coordinate_values(self):
    # Normalized coordinate extremes 0.0 and 1.0 must clamp into the valid
    # pixel range: (0.0, 1.0) -> (0, 99) and (1.0, 0.0) -> (99, 0).
    landmark_list = text_format.Parse(
        'landmark {x: 0.0 y: 1.0}'
        'landmark {x: 1.0 y: 0.0}', landmark_pb2.NormalizedLandmarkList())
    image = np.zeros((100, 100, 3), np.uint8)
    expected_result = np.copy(image)
    start_point = (0, 99)
    end_point = (99, 0)
    cv2.line(expected_result, start_point, end_point,
             DEFAULT_CONNECTION_DRAWING_SPEC.color,
             DEFAULT_CONNECTION_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, start_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, end_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, start_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    cv2.circle(expected_result, end_point,
               DEFAULT_CIRCLE_DRAWING_SPEC.circle_radius,
               DEFAULT_CIRCLE_DRAWING_SPEC.color,
               DEFAULT_CIRCLE_DRAWING_SPEC.thickness)
    drawing_utils.draw_landmarks(
        image=image, landmark_list=landmark_list, connections=[(0, 1)])
    np.testing.assert_array_equal(image, expected_result)

  def test_drawing_spec(self):
    # Custom landmark/connection specs must override the defaults for
    # color and thickness; the border ring still uses the default color.
    landmark_list = text_format.Parse(
        'landmark {x: 0.1 y: 0.1}'
        'landmark {x: 0.8 y: 0.8}', landmark_pb2.NormalizedLandmarkList())
    image = np.zeros((100, 100, 3), np.uint8)
    landmark_drawing_spec = drawing_utils.DrawingSpec(
        color=(0, 0, 255), thickness=5)
    connection_drawing_spec = drawing_utils.DrawingSpec(
        color=(255, 0, 0), thickness=3)
    expected_result = np.copy(image)
    start_point = (10, 10)
    end_point = (80, 80)
    cv2.line(expected_result, start_point, end_point,
             connection_drawing_spec.color, connection_drawing_spec.thickness)
    cv2.circle(expected_result, start_point,
               landmark_drawing_spec.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR, landmark_drawing_spec.thickness)
    cv2.circle(expected_result, end_point,
               landmark_drawing_spec.circle_radius + 1,
               DEFAULT_CYCLE_BORDER_COLOR, landmark_drawing_spec.thickness)
    cv2.circle(expected_result, start_point,
               landmark_drawing_spec.circle_radius, landmark_drawing_spec.color,
               landmark_drawing_spec.thickness)
    cv2.circle(expected_result, end_point, landmark_drawing_spec.circle_radius,
               landmark_drawing_spec.color, landmark_drawing_spec.thickness)
    drawing_utils.draw_landmarks(
        image=image,
        landmark_list=landmark_list,
        connections=[(0, 1)],
        landmark_drawing_spec=landmark_drawing_spec,
        connection_drawing_spec=connection_drawing_spec)
    np.testing.assert_array_equal(image, expected_result)
|
255
|
+
|
256
|
+
|
257
|
+
# Script entry point: run the test suite under absltest.
if __name__ == '__main__':
  absltest.main()
|
@@ -0,0 +1,105 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe Face Detection."""
|
15
|
+
|
16
|
+
import enum
|
17
|
+
from typing import NamedTuple, Union
|
18
|
+
|
19
|
+
import numpy as np
|
20
|
+
from mediapipe.framework.formats import detection_pb2
|
21
|
+
from mediapipe.framework.formats import location_data_pb2
|
22
|
+
from mediapipe.modules.face_detection import face_detection_pb2
|
23
|
+
from mediapipe.python.solution_base import SolutionBase
|
24
|
+
|
25
|
+
# Paths (relative to the mediapipe package root) of the two bundled CPU
# binary graphs; one is selected by the `model_selection` constructor arg.
_SHORT_RANGE_GRAPH_FILE_PATH = 'mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb'
_FULL_RANGE_GRAPH_FILE_PATH = 'mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb'
|
27
|
+
|
28
|
+
|
29
|
+
def get_key_point(
    detection: detection_pb2.Detection, key_point_enum: 'FaceKeyPoint'
) -> Union[None, location_data_pb2.LocationData.RelativeKeypoint]:
  """Returns the face key point selected by a FaceKeyPoint value.

  Args:
    detection: A detection proto message that contains face key points.
    key_point_enum: A FaceKeyPoint type.

  Returns:
    A RelativeKeypoint proto message, or None when the detection carries no
    location data.
  """
  # NOTE(review): protobuf sub-messages are typically truthy even when unset,
  # so the second condition may never trigger — confirm whether
  # detection.HasField('location_data') was intended. Behavior kept as-is.
  has_location_data = bool(detection) and bool(detection.location_data)
  if not has_location_data:
    return None
  return detection.location_data.relative_keypoints[key_point_enum]
|
44
|
+
|
45
|
+
|
46
|
+
class FaceKeyPoint(enum.IntEnum):
  """Indices of the six key points reported by the face detection model.

  The values index directly into a detection's
  `location_data.relative_keypoints` list.
  """

  RIGHT_EYE = 0          # Subject's right eye.
  LEFT_EYE = 1           # Subject's left eye.
  NOSE_TIP = 2           # Tip of the nose.
  MOUTH_CENTER = 3       # Center of the mouth.
  RIGHT_EAR_TRAGION = 4  # Tragion of the right ear.
  LEFT_EAR_TRAGION = 5   # Tragion of the left ear.
|
54
|
+
|
55
|
+
|
56
|
+
class FaceDetection(SolutionBase):
  """MediaPipe Face Detection.

  MediaPipe Face Detection processes an RGB image and returns a list of the
  detected face location data.

  Please refer to
  https://solutions.mediapipe.dev/face_detection#python-solution-api
  for usage examples.
  """

  def __init__(self, min_detection_confidence=0.5, model_selection=0):
    """Initializes a MediaPipe Face Detection object.

    Args:
      min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for face
        detection to be considered successful. See details in
        https://solutions.mediapipe.dev/face_detection#min_detection_confidence.
      model_selection: 0 or 1. 0 to select a short-range model that works
        best for faces within 2 meters from the camera, and 1 for a full-range
        model best for faces within 5 meters. See details in
        https://solutions.mediapipe.dev/face_detection#model_selection.
    """
    # Choose the bundled binary graph matching the requested model range.
    if model_selection == 1:
      graph_path = _FULL_RANGE_GRAPH_FILE_PATH
    else:
      graph_path = _SHORT_RANGE_GRAPH_FILE_PATH

    graph_options = self.create_graph_options(
        face_detection_pb2.FaceDetectionOptions(),
        {'min_score_thresh': min_detection_confidence})
    super().__init__(
        binary_graph_path=graph_path,
        graph_options=graph_options,
        outputs=['detections'])

  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns a list of the detected face location data.

    Args:
      image: An RGB image represented as a numpy ndarray.

    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.

    Returns:
      A NamedTuple object with a "detections" field that contains a list of the
      detected face location data.
    """
    return super().process(input_data={'image': image})
|
@@ -0,0 +1,92 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for mediapipe.python.solutions.face_detection."""
|
15
|
+
|
16
|
+
import os
|
17
|
+
import tempfile # pylint: disable=unused-import
|
18
|
+
from typing import NamedTuple
|
19
|
+
|
20
|
+
from absl.testing import absltest
|
21
|
+
from absl.testing import parameterized
|
22
|
+
import cv2
|
23
|
+
import numpy as np
|
24
|
+
import numpy.testing as npt
|
25
|
+
|
26
|
+
# resources dependency
|
27
|
+
# undeclared dependency
|
28
|
+
from mediapipe.python.solutions import drawing_utils as mp_drawing
|
29
|
+
from mediapipe.python.solutions import face_detection as mp_faces
|
30
|
+
|
31
|
+
# Directory holding the test images (resources-style path).
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
# Golden (x, y) pixel coordinates of the six detected face key points on
# testdata/portrait.jpg, one list per model variant. Order follows
# FaceKeyPoint: right eye, left eye, nose tip, mouth center,
# right ear tragion, left ear tragion.
SHORT_RANGE_EXPECTED_FACE_KEY_POINTS = [[363, 182], [460, 186], [420, 241],
                                        [417, 284], [295, 199], [502, 198]]
FULL_RANGE_EXPECTED_FACE_KEY_POINTS = [[363, 181], [455, 181], [413, 233],
                                       [411, 278], [306, 204], [499, 207]]
# Maximum tolerated per-coordinate deviation from the golden values.
DIFF_THRESHOLD = 5  # pixels
|
37
|
+
|
38
|
+
|
39
|
+
class FaceDetectionTest(parameterized.TestCase):
  """End-to-end tests for the mp_faces.FaceDetection solution."""

  def _annotate(self, frame: np.ndarray, results: NamedTuple, idx: int):
    # Debug helper: draw all detections on a copy of the frame and dump the
    # annotated image to the temp dir, named after the test case and frame idx.
    for detection in results.detections:
      mp_drawing.draw_detection(frame, detection)
    path = os.path.join(tempfile.gettempdir(), self.id().split('.')[-1] +
                        '_frame_{}.png'.format(idx))
    cv2.imwrite(path, frame)

  def test_invalid_image_shape(self):
    # A 4-channel input must be rejected before reaching the graph.
    with mp_faces.FaceDetection() as faces:
      with self.assertRaisesRegex(
          ValueError, 'Input image must contain three channel rgb data.'):
        faces.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4))

  def test_blank_image(self):
    # An all-white image contains no face; the detections output is None.
    image = np.zeros([100, 100, 3], dtype=np.uint8)
    image.fill(255)
    with mp_faces.FaceDetection(min_detection_confidence=0.5) as faces:
      results = faces.process(image)
      self.assertIsNone(results.detections)

  @parameterized.named_parameters(('short_range_model', 0),
                                  ('full_range_model', 1))
  def test_face(self, model_selection):
    # Runs the portrait through the selected model 5 times and checks each
    # run's 6 key points stay within DIFF_THRESHOLD of the golden values.
    image_path = os.path.join(os.path.dirname(__file__),
                              'testdata/portrait.jpg')
    image = cv2.imread(image_path)
    rows, cols, _ = image.shape
    with mp_faces.FaceDetection(
        min_detection_confidence=0.5, model_selection=model_selection) as faces:
      for idx in range(5):
        # cv2 loads BGR; the solution expects RGB.
        results = faces.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        self._annotate(image.copy(), results, idx)
        location_data = results.detections[0].location_data
        # Scale normalized keypoint coordinates back to pixels.
        x = [keypoint.x * cols for keypoint in location_data.relative_keypoints]
        y = [keypoint.y * rows for keypoint in location_data.relative_keypoints]
        face_keypoints = np.column_stack((x, y))
        if model_selection == 0:
          prediction_error = np.abs(
              np.asarray(face_keypoints) -
              np.asarray(SHORT_RANGE_EXPECTED_FACE_KEY_POINTS))
        else:
          prediction_error = np.abs(
              np.asarray(face_keypoints) -
              np.asarray(FULL_RANGE_EXPECTED_FACE_KEY_POINTS))

        self.assertLen(results.detections, 1)
        self.assertLen(location_data.relative_keypoints, 6)
        npt.assert_array_less(prediction_error, DIFF_THRESHOLD)
|
89
|
+
|
90
|
+
|
91
|
+
# Script entry point: run the test suite under absltest.
if __name__ == '__main__':
  absltest.main()
|
@@ -0,0 +1,125 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""MediaPipe Face Mesh."""
|
16
|
+
|
17
|
+
from typing import NamedTuple
|
18
|
+
|
19
|
+
import numpy as np
|
20
|
+
|
21
|
+
# pylint: disable=unused-import
|
22
|
+
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
|
23
|
+
from mediapipe.calculators.core import gate_calculator_pb2
|
24
|
+
from mediapipe.calculators.core import split_vector_calculator_pb2
|
25
|
+
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
|
26
|
+
from mediapipe.calculators.tensor import inference_calculator_pb2
|
27
|
+
from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2
|
28
|
+
from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2
|
29
|
+
from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2
|
30
|
+
from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2
|
31
|
+
from mediapipe.calculators.util import association_calculator_pb2
|
32
|
+
from mediapipe.calculators.util import detections_to_rects_calculator_pb2
|
33
|
+
from mediapipe.calculators.util import landmarks_refinement_calculator_pb2
|
34
|
+
from mediapipe.calculators.util import logic_calculator_pb2
|
35
|
+
from mediapipe.calculators.util import non_max_suppression_calculator_pb2
|
36
|
+
from mediapipe.calculators.util import rect_transformation_calculator_pb2
|
37
|
+
from mediapipe.calculators.util import thresholding_calculator_pb2
|
38
|
+
# pylint: enable=unused-import
|
39
|
+
from mediapipe.python.solution_base import SolutionBase
|
40
|
+
# pylint: disable=unused-import
|
41
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_CONTOURS
|
42
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_FACE_OVAL
|
43
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_IRISES
|
44
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_LEFT_EYE
|
45
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_LEFT_EYEBROW
|
46
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_LEFT_IRIS
|
47
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_LIPS
|
48
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_NOSE
|
49
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_RIGHT_EYE
|
50
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_RIGHT_EYEBROW
|
51
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_RIGHT_IRIS
|
52
|
+
from mediapipe.python.solutions.face_mesh_connections import FACEMESH_TESSELATION
|
53
|
+
# pylint: enable=unused-import
|
54
|
+
|
55
|
+
# Number of landmarks produced by the base face-mesh model (no iris refinement).
FACEMESH_NUM_LANDMARKS = 468
# Landmark count when `refine_landmarks=True` adds the extra iris landmarks.
FACEMESH_NUM_LANDMARKS_WITH_IRISES = 478
# Package-relative path of the serialized CalculatorGraphConfig that
# SolutionBase loads to build the CPU face-landmark pipeline.
_BINARYPB_FILE_PATH = 'mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb'
|
58
|
+
|
59
|
+
|
60
|
+
class FaceMesh(SolutionBase):
  """MediaPipe Face Mesh.

  MediaPipe Face Mesh processes an RGB image and returns the face landmarks on
  each detected face.

  Please refer to https://solutions.mediapipe.dev/face_mesh#python-solution-api
  for usage examples.
  """

  def __init__(self,
               static_image_mode=False,
               max_num_faces=1,
               refine_landmarks=False,
               min_detection_confidence=0.5,
               min_tracking_confidence=0.5):
    """Initializes a MediaPipe Face Mesh object.

    Args:
      static_image_mode: Whether to treat the input images as a batch of static
        and possibly unrelated images, or a video stream. See details in
        https://solutions.mediapipe.dev/face_mesh#static_image_mode.
      max_num_faces: Maximum number of faces to detect. See details in
        https://solutions.mediapipe.dev/face_mesh#max_num_faces.
      refine_landmarks: Whether to further refine the landmark coordinates
        around the eyes and lips, and output additional landmarks around the
        irises. Default to False. See details in
        https://solutions.mediapipe.dev/face_mesh#refine_landmarks.
      min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for face
        detection to be considered successful. See details in
        https://solutions.mediapipe.dev/face_mesh#min_detection_confidence.
      min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
        face landmarks to be considered tracked successfully. See details in
        https://solutions.mediapipe.dev/face_mesh#min_tracking_confidence.
    """
    # Graph-level side packets consumed by the face-landmark graph.
    graph_side_inputs = {
        'num_faces': max_num_faces,
        'with_attention': refine_landmarks,
        # A static image gets no temporal context, so landmark reuse from a
        # "previous frame" is disabled in that mode.
        'use_prev_landmarks': not static_image_mode,
    }
    # Confidence thresholds injected into the named calculator nodes of the
    # loaded binary graph.
    node_overrides = {
        'facedetectionshortrangecpu__facedetectionshortrange__facedetection__TensorsToDetectionsCalculator.min_score_thresh':
            min_detection_confidence,
        'facelandmarkcpu__ThresholdingCalculator.threshold':
            min_tracking_confidence,
    }
    super().__init__(
        binary_graph_path=_BINARYPB_FILE_PATH,
        side_inputs=graph_side_inputs,
        calculator_params=node_overrides,
        outputs=['multi_face_landmarks'])

  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns the face landmarks on each detected face.

    Args:
      image: An RGB image represented as a numpy ndarray.

    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.

    Returns:
      A NamedTuple object with a "multi_face_landmarks" field that contains the
      face landmarks on each detected face.
    """
    return super().process(input_data={'image': image})
|