mediapipe-nightly 0.10.21.post20241223__cp310-cp310-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,170 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""Tests for mediapipe.python.solutions.face_mesh."""
|
16
|
+
|
17
|
+
import os
|
18
|
+
import tempfile # pylint: disable=unused-import
|
19
|
+
from typing import NamedTuple
|
20
|
+
|
21
|
+
from absl.testing import absltest
|
22
|
+
from absl.testing import parameterized
|
23
|
+
import cv2
|
24
|
+
import numpy as np
|
25
|
+
import numpy.testing as npt
|
26
|
+
|
27
|
+
# resources dependency
|
28
|
+
# undeclared dependency
|
29
|
+
from mediapipe.python.solutions import drawing_styles
|
30
|
+
from mediapipe.python.solutions import drawing_utils as mp_drawing
|
31
|
+
from mediapipe.python.solutions import face_mesh as mp_faces
|
32
|
+
|
33
|
+
# Directory holding the test images, relative to the MediaPipe source root.
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
# Maximum tolerated pixel distance between predicted and golden landmarks.
DIFF_THRESHOLD = 5  # pixels

# Golden (x, y) pixel coordinates for the eye-contour landmark indices of the
# single face in testdata/portrait.jpg.
EYE_INDICES_TO_LANDMARKS = {
    33: [345, 178],
    7: [348, 179],
    163: [352, 178],
    144: [357, 179],
    145: [365, 179],
    153: [371, 179],
    154: [378, 178],
    155: [381, 177],
    133: [383, 177],
    246: [347, 175],
    161: [350, 174],
    160: [355, 172],
    159: [362, 170],
    158: [368, 171],
    157: [375, 172],
    173: [380, 175],
    263: [467, 176],
    249: [464, 177],
    390: [460, 177],
    373: [455, 178],
    374: [448, 179],
    380: [441, 179],
    381: [435, 178],
    382: [432, 177],
    362: [430, 177],
    466: [465, 175],
    388: [462, 173],
    387: [457, 171],
    386: [450, 170],
    385: [444, 171],
    384: [437, 172],
    398: [432, 175],
}

# Golden (x, y) pixel coordinates for the iris landmark indices (only present
# when the model runs with refined landmarks / attention).
IRIS_INDICES_TO_LANDMARKS = {
    468: [362, 175],
    469: [371, 175],
    470: [362, 167],
    471: [354, 175],
    472: [363, 182],
    473: [449, 174],
    474: [458, 174],
    475: [449, 167],
    476: [440, 174],
    477: [449, 181],
}
|
82
|
+
|
83
|
+
|
84
|
+
class FaceMeshTest(parameterized.TestCase):
  """Tests for the MediaPipe FaceMesh Python solution API."""

  def _annotate(self, frame: np.ndarray, results: NamedTuple, idx: int,
                draw_iris: bool):
    """Draws the detected face meshes on `frame` and dumps it to a temp PNG.

    Args:
      frame: BGR image to draw on (modified in place).
      results: Output of `FaceMesh.process`.
      idx: Frame index, used in the output file name.
      draw_iris: Whether iris connections should also be drawn.
    """
    for face_landmarks in results.multi_face_landmarks:
      # Tesselation and contours are always drawn; irises only on request.
      overlays = [
          (mp_faces.FACEMESH_TESSELATION,
           drawing_styles.get_default_face_mesh_tesselation_style()),
          (mp_faces.FACEMESH_CONTOURS,
           drawing_styles.get_default_face_mesh_contours_style()),
      ]
      if draw_iris:
        overlays.append(
            (mp_faces.FACEMESH_IRISES,
             drawing_styles.get_default_face_mesh_iris_connections_style()))
      for connections, style in overlays:
        mp_drawing.draw_landmarks(
            frame,
            face_landmarks,
            connections,
            landmark_drawing_spec=None,
            connection_drawing_spec=style)
    file_name = '{}_frame_{}.png'.format(self.id().split('.')[-1], idx)
    cv2.imwrite(os.path.join(tempfile.gettempdir(), file_name), frame)

  def test_invalid_image_shape(self):
    """A non three-channel input must be rejected with ValueError."""
    with mp_faces.FaceMesh() as faces:
      bad_image = np.arange(36, dtype=np.uint8).reshape(3, 3, 4)
      with self.assertRaisesRegex(
          ValueError, 'Input image must contain three channel rgb data.'):
        faces.process(bad_image)

  def test_blank_image(self):
    """No face should be reported for a uniformly white image."""
    with mp_faces.FaceMesh() as faces:
      blank = np.full([100, 100, 3], 255, dtype=np.uint8)
      results = faces.process(blank)
      self.assertIsNone(results.multi_face_landmarks)

  @parameterized.named_parameters(
      ('static_image_mode_no_attention', True, False, 5),
      ('static_image_mode_with_attention', True, True, 5),
      ('streaming_mode_no_attention', False, False, 10),
      ('streaming_mode_with_attention', False, True, 10))
  def test_face(self, static_image_mode: bool, refine_landmarks: bool,
                num_frames: int):
    """Runs FaceMesh on a portrait and checks eye/iris landmark accuracy."""
    image_path = os.path.join(os.path.dirname(__file__),
                              'testdata/portrait.jpg')
    image = cv2.imread(image_path)
    rows, cols, _ = image.shape
    expected_count = (
        mp_faces.FACEMESH_NUM_LANDMARKS_WITH_IRISES
        if refine_landmarks else mp_faces.FACEMESH_NUM_LANDMARKS)
    with mp_faces.FaceMesh(
        static_image_mode=static_image_mode,
        refine_landmarks=refine_landmarks,
        min_detection_confidence=0.5) as faces:
      for idx in range(num_frames):
        results = faces.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        self._annotate(image.copy(), results, idx, refine_landmarks)
        multi_face_landmarks = []
        for landmarks in results.multi_face_landmarks:
          self.assertLen(landmarks.landmark, expected_count)
          xs = [landmark.x * cols for landmark in landmarks.landmark]
          ys = [landmark.y * rows for landmark in landmarks.landmark]
          multi_face_landmarks.append(np.column_stack((xs, ys)))
        self.assertLen(multi_face_landmarks, 1)
        # Verify the eye landmarks are correct as sanity check.
        for eye_idx, gt_lds in EYE_INDICES_TO_LANDMARKS.items():
          prediction_error = np.abs(
              np.asarray(multi_face_landmarks[0][eye_idx]) -
              np.asarray(gt_lds))
          npt.assert_array_less(prediction_error, DIFF_THRESHOLD)
        if refine_landmarks:
          for iris_idx, gt_lds in IRIS_INDICES_TO_LANDMARKS.items():
            prediction_error = np.abs(
                np.asarray(multi_face_landmarks[0][iris_idx]) -
                np.asarray(gt_lds))
            npt.assert_array_less(prediction_error, DIFF_THRESHOLD)
|
167
|
+
|
168
|
+
|
169
|
+
# Script entry point: run all tests under absl's test runner.
if __name__ == '__main__':
  absltest.main()
|
@@ -0,0 +1,153 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""MediaPipe Hands."""
|
16
|
+
|
17
|
+
import enum
|
18
|
+
from typing import NamedTuple
|
19
|
+
|
20
|
+
import numpy as np
|
21
|
+
|
22
|
+
# pylint: disable=unused-import
|
23
|
+
from mediapipe.calculators.core import constant_side_packet_calculator_pb2
|
24
|
+
from mediapipe.calculators.core import gate_calculator_pb2
|
25
|
+
from mediapipe.calculators.core import split_vector_calculator_pb2
|
26
|
+
from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2
|
27
|
+
from mediapipe.calculators.tensor import inference_calculator_pb2
|
28
|
+
from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2
|
29
|
+
from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2
|
30
|
+
from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2
|
31
|
+
from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2
|
32
|
+
from mediapipe.calculators.util import association_calculator_pb2
|
33
|
+
from mediapipe.calculators.util import detections_to_rects_calculator_pb2
|
34
|
+
from mediapipe.calculators.util import logic_calculator_pb2
|
35
|
+
from mediapipe.calculators.util import non_max_suppression_calculator_pb2
|
36
|
+
from mediapipe.calculators.util import rect_transformation_calculator_pb2
|
37
|
+
from mediapipe.calculators.util import thresholding_calculator_pb2
|
38
|
+
# pylint: enable=unused-import
|
39
|
+
from mediapipe.python.solution_base import SolutionBase
|
40
|
+
# pylint: disable=unused-import
|
41
|
+
from mediapipe.python.solutions.hands_connections import HAND_CONNECTIONS
|
42
|
+
# pylint: enable=unused-import
|
43
|
+
|
44
|
+
|
45
|
+
class HandLandmark(enum.IntEnum):
  """The 21 hand landmarks.

  WRIST comes first, then four joints per finger ordered from the palm
  outward to the fingertip.
  """
  WRIST = 0
  # Thumb.
  THUMB_CMC = 1
  THUMB_MCP = 2
  THUMB_IP = 3
  THUMB_TIP = 4
  # Index finger.
  INDEX_FINGER_MCP = 5
  INDEX_FINGER_PIP = 6
  INDEX_FINGER_DIP = 7
  INDEX_FINGER_TIP = 8
  # Middle finger.
  MIDDLE_FINGER_MCP = 9
  MIDDLE_FINGER_PIP = 10
  MIDDLE_FINGER_DIP = 11
  MIDDLE_FINGER_TIP = 12
  # Ring finger.
  RING_FINGER_MCP = 13
  RING_FINGER_PIP = 14
  RING_FINGER_DIP = 15
  RING_FINGER_TIP = 16
  # Pinky.
  PINKY_MCP = 17
  PINKY_PIP = 18
  PINKY_DIP = 19
  PINKY_TIP = 20
|
68
|
+
|
69
|
+
|
70
|
+
_BINARYPB_FILE_PATH = 'mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb'
|
71
|
+
|
72
|
+
|
73
|
+
class Hands(SolutionBase):
  """MediaPipe Hands.

  MediaPipe Hands processes an RGB image and returns the hand landmarks and
  handedness (left v.s. right hand) of each detected hand.

  Note that it determines handedness assuming the input image is mirrored,
  i.e., taken with a front-facing/selfie camera (
  https://en.wikipedia.org/wiki/Front-facing_camera) with images flipped
  horizontally. If that is not the case, use, for instance, cv2.flip(image, 1)
  to flip the image first for a correct handedness output.

  Please refer to https://solutions.mediapipe.dev/hands#python-solution-api for
  usage examples.
  """

  def __init__(self,
               static_image_mode=False,
               max_num_hands=2,
               model_complexity=1,
               min_detection_confidence=0.5,
               min_tracking_confidence=0.5):
    """Initializes a MediaPipe Hand object.

    Args:
      static_image_mode: Whether to treat the input images as a batch of static
        and possibly unrelated images, or a video stream. See details in
        https://solutions.mediapipe.dev/hands#static_image_mode.
      max_num_hands: Maximum number of hands to detect. See details in
        https://solutions.mediapipe.dev/hands#max_num_hands.
      model_complexity: Complexity of the hand landmark model: 0 or 1.
        Landmark accuracy as well as inference latency generally go up with the
        model complexity. See details in
        https://solutions.mediapipe.dev/hands#model_complexity.
      min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for hand
        detection to be considered successful. See details in
        https://solutions.mediapipe.dev/hands#min_detection_confidence.
      min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the
        hand landmarks to be considered tracked successfully. See details in
        https://solutions.mediapipe.dev/hands#min_tracking_confidence.
    """
    # Graph-level side packets configuring the hand tracking pipeline.
    graph_side_inputs = {
        'model_complexity': model_complexity,
        'num_hands': max_num_hands,
        # In static-image mode every frame is processed independently, so the
        # tracker must not seed detection from the previous frame's landmarks.
        'use_prev_landmarks': not static_image_mode,
    }
    # Thresholds patched onto individual calculator nodes inside the graph.
    node_params = {
        'palmdetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
            min_detection_confidence,
        'handlandmarkcpu__ThresholdingCalculator.threshold':
            min_tracking_confidence,
    }
    super().__init__(
        binary_graph_path=_BINARYPB_FILE_PATH,
        side_inputs=graph_side_inputs,
        calculator_params=node_params,
        outputs=[
            'multi_hand_landmarks', 'multi_hand_world_landmarks',
            'multi_handedness'
        ])

  def process(self, image: np.ndarray) -> NamedTuple:
    """Processes an RGB image and returns the hand landmarks and handedness of each detected hand.

    Args:
      image: An RGB image represented as a numpy ndarray.

    Raises:
      RuntimeError: If the underlying graph throws any error.
      ValueError: If the input image is not three channel RGB.

    Returns:
      A NamedTuple object with the following fields:
        1) a "multi_hand_landmarks" field that contains the hand landmarks on
           each detected hand.
        2) a "multi_hand_world_landmarks" field that contains the hand
           landmarks on each detected hand in real-world 3D coordinates that
           are in meters with the origin at the hand's approximate geometric
           center.
        3) a "multi_handedness" field that contains the handedness (left v.s.
           right hand) of the detected hand.
    """
    return super().process(input_data={'image': image})
|
@@ -0,0 +1,32 @@
|
|
1
|
+
# Copyright 2021 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe Hands connections."""
|
15
|
+
|
16
|
+
# Landmark-index pairs describing the hand skeleton, grouped per anatomical
# region. Indices follow the 21-landmark hand model (0 = wrist).
HAND_PALM_CONNECTIONS = ((0, 1), (0, 5), (9, 13), (13, 17), (5, 9), (0, 17))

HAND_THUMB_CONNECTIONS = ((1, 2), (2, 3), (3, 4))

HAND_INDEX_FINGER_CONNECTIONS = ((5, 6), (6, 7), (7, 8))

HAND_MIDDLE_FINGER_CONNECTIONS = ((9, 10), (10, 11), (11, 12))

HAND_RING_FINGER_CONNECTIONS = ((13, 14), (14, 15), (15, 16))

HAND_PINKY_FINGER_CONNECTIONS = ((17, 18), (18, 19), (19, 20))

# The full skeleton: the union of every per-region connection group.
HAND_CONNECTIONS = frozenset(
    HAND_PALM_CONNECTIONS + HAND_THUMB_CONNECTIONS +
    HAND_INDEX_FINGER_CONNECTIONS + HAND_MIDDLE_FINGER_CONNECTIONS +
    HAND_RING_FINGER_CONNECTIONS + HAND_PINKY_FINGER_CONNECTIONS)
|
@@ -0,0 +1,219 @@
|
|
1
|
+
# Copyright 2020 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""Tests for mediapipe.python.solutions.hands."""
|
16
|
+
|
17
|
+
import json
|
18
|
+
import os
|
19
|
+
import tempfile # pylint: disable=unused-import
|
20
|
+
from typing import NamedTuple
|
21
|
+
|
22
|
+
from absl.testing import absltest
|
23
|
+
from absl.testing import parameterized
|
24
|
+
import cv2
|
25
|
+
import numpy as np
|
26
|
+
import numpy.testing as npt
|
27
|
+
|
28
|
+
# resources dependency
|
29
|
+
# undeclared dependency
|
30
|
+
from mediapipe.python.solutions import drawing_styles
|
31
|
+
from mediapipe.python.solutions import drawing_utils as mp_drawing
|
32
|
+
from mediapipe.python.solutions import hands as mp_hands
|
33
|
+
|
34
|
+
|
35
|
+
# Directory holding the test images, relative to the MediaPipe source root.
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
# Maximum tolerated pixel distance between predicted and golden landmarks,
# per model complexity (the lite model is allowed a looser tolerance).
LITE_MODEL_DIFF_THRESHOLD = 25  # pixels
FULL_MODEL_DIFF_THRESHOLD = 20  # pixels
# Golden (x, y) pixel coordinates of the 21 landmarks for each of the two
# hands in testdata/hands.jpg.
EXPECTED_HAND_COORDINATES_PREDICTION = [[[580, 34], [504, 50], [459, 94],
                                         [429, 146], [397, 182], [507, 167],
                                         [479, 245], [469, 292], [464, 330],
                                         [545, 180], [534, 265], [533, 319],
                                         [536, 360], [581, 172], [587, 252],
                                         [593, 304], [599, 346], [615, 168],
                                         [628, 223], [638, 258], [648, 288]],
                                        [[138, 343], [211, 330], [257, 286],
                                         [289, 237], [322, 203], [219, 216],
                                         [238, 138], [249, 90], [253, 51],
                                         [177, 204], [184, 115], [187, 60],
                                         [185, 19], [138, 208], [131, 127],
                                         [124, 77], [117, 36], [106, 222],
                                         [92, 159], [79, 124], [68, 93]]]
|
52
|
+
|
53
|
+
|
54
|
+
class HandsTest(parameterized.TestCase):
  """Tests for the MediaPipe Hands Python solution API."""

  def _get_output_path(self, name):
    """Returns a temp-dir path whose basename is the test name plus `name`."""
    return os.path.join(tempfile.gettempdir(), self.id().split('.')[-1] + name)

  def _landmarks_list_to_array(self, landmark_list, image_shape):
    """Converts normalized landmarks to pixel-space (x, y, z) rows."""
    height, width, _ = image_shape
    # z is scaled by image width, matching the solution's convention.
    return np.asarray([(lmk.x * width, lmk.y * height, lmk.z * width)
                       for lmk in landmark_list.landmark])

  def _world_landmarks_list_to_array(self, landmark_list):
    """Converts world landmarks (already metric) to an (N, 3) array."""
    return np.asarray([(lmk.x, lmk.y, lmk.z)
                       for lmk in landmark_list.landmark])

  def _assert_diff_less(self, array1, array2, threshold):
    """Asserts that |array1 - array2| < threshold element-wise."""
    npt.assert_array_less(np.abs(array1 - array2), threshold)

  def _annotate(self, frame: np.ndarray, results: NamedTuple, idx: int):
    """Draws detected hands on `frame` and dumps it to a temp PNG."""
    for hand_landmarks in results.multi_hand_landmarks:
      mp_drawing.draw_landmarks(
          frame, hand_landmarks, mp_hands.HAND_CONNECTIONS,
          drawing_styles.get_default_hand_landmarks_style(),
          drawing_styles.get_default_hand_connections_style())
    file_name = '{}_frame_{}.png'.format(self.id().split('.')[-1], idx)
    cv2.imwrite(os.path.join(tempfile.gettempdir(), file_name), frame)

  def test_invalid_image_shape(self):
    """A non three-channel input must be rejected with ValueError."""
    with mp_hands.Hands() as hands:
      bad_image = np.arange(36, dtype=np.uint8).reshape(3, 3, 4)
      with self.assertRaisesRegex(
          ValueError, 'Input image must contain three channel rgb data.'):
        hands.process(bad_image)

  def test_blank_image(self):
    """No hands should be reported for a uniformly white image."""
    with mp_hands.Hands() as hands:
      blank = np.full([100, 100, 3], 255, dtype=np.uint8)
      results = hands.process(blank)
      self.assertIsNone(results.multi_hand_landmarks)
      self.assertIsNone(results.multi_handedness)

  @parameterized.named_parameters(
      ('static_image_mode_with_lite_model', True, 0, 5),
      ('video_mode_with_lite_model', False, 0, 10),
      ('static_image_mode_with_full_model', True, 1, 5),
      ('video_mode_with_full_model', False, 1, 10))
  def test_multi_hands(self, static_image_mode, model_complexity, num_frames):
    """Runs Hands on a two-hand image and checks landmark accuracy."""
    image_path = os.path.join(os.path.dirname(__file__), 'testdata/hands.jpg')
    image = cv2.imread(image_path)
    rows, cols, _ = image.shape
    diff_threshold = (
        LITE_MODEL_DIFF_THRESHOLD
        if model_complexity == 0 else FULL_MODEL_DIFF_THRESHOLD)
    with mp_hands.Hands(
        static_image_mode=static_image_mode,
        max_num_hands=2,
        model_complexity=model_complexity,
        min_detection_confidence=0.5) as hands:
      for idx in range(num_frames):
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        self._annotate(image.copy(), results, idx)
        handedness = [
            hand.classification[0].label for hand in results.multi_handedness
        ]
        multi_hand_coordinates = []
        for landmarks in results.multi_hand_landmarks:
          self.assertLen(landmarks.landmark, 21)
          xs = [landmark.x * cols for landmark in landmarks.landmark]
          ys = [landmark.y * rows for landmark in landmarks.landmark]
          multi_hand_coordinates.append(np.column_stack((xs, ys)))
        self.assertLen(handedness, 2)
        self.assertLen(multi_hand_coordinates, 2)
        prediction_error = np.abs(
            np.asarray(multi_hand_coordinates) -
            np.asarray(EXPECTED_HAND_COORDINATES_PREDICTION))
        npt.assert_array_less(prediction_error, diff_threshold)

  def _process_video(self, model_complexity, video_path,
                     max_num_hands=1,
                     num_landmarks=21,
                     num_dimensions=3):
    """Runs hand tracking over every frame of a video.

    Returns:
      A pair of arrays shaped (frames, max_num_hands, num_landmarks,
      num_dimensions): pixel-space landmarks and world landmarks. Slots for
      undetected hands are NaN.
    """
    video_cap = cv2.VideoCapture(video_path)
    landmarks_per_frame = []
    w_landmarks_per_frame = []
    with mp_hands.Hands(
        static_image_mode=False,
        max_num_hands=max_num_hands,
        model_complexity=model_complexity,
        min_detection_confidence=0.5) as hands:
      while True:
        # Get next frame of the video.
        success, input_frame = video_cap.read()
        if not success:
          break

        # Run the hand tracker on the RGB-converted frame.
        input_frame = cv2.cvtColor(input_frame, cv2.COLOR_BGR2RGB)
        frame_shape = input_frame.shape
        result = hands.process(image=input_frame)
        # NaN-fill so every frame keeps one slot per possible hand even when
        # fewer (or no) hands are detected.
        frame_landmarks = np.full(
            [max_num_hands, num_landmarks, num_dimensions], np.nan)
        frame_w_landmarks = np.full(
            [max_num_hands, num_landmarks, num_dimensions], np.nan)

        if result.multi_hand_landmarks:
          for idx, landmarks in enumerate(result.multi_hand_landmarks):
            frame_landmarks[idx] = self._landmarks_list_to_array(
                landmarks, frame_shape)
        if result.multi_hand_world_landmarks:
          for idx, w_landmarks in enumerate(result.multi_hand_world_landmarks):
            frame_w_landmarks[idx] = self._world_landmarks_list_to_array(
                w_landmarks)

        landmarks_per_frame.append(frame_landmarks)
        w_landmarks_per_frame.append(frame_w_landmarks)
    return (np.array(landmarks_per_frame), np.array(w_landmarks_per_frame))

  @parameterized.named_parameters(
      ('full', 1, 'asl_hand.full.npz'))
  def test_on_video(self, model_complexity, expected_name):
    """Tests hand models on a video."""
    video_path = os.path.join(os.path.dirname(__file__),
                              'testdata/asl_hand.25fps.mp4')
    expected_path = os.path.join(os.path.dirname(__file__),
                                 'testdata/{}'.format(expected_name))
    actual, actual_world = self._process_video(model_complexity, video_path)

    # Dump actual .npz.
    np.savez(self._get_output_path(expected_name),
             predictions=actual, w_predictions=actual_world)

    # Dump actual JSON.
    json_path = self._get_output_path(expected_name.replace('.npz', '.json'))
    dump_data = {
        'predictions': np.around(actual, 3).tolist(),
        'predictions_world': np.around(actual_world, 3).tolist(),
    }
    with open(json_path, 'w') as fl:
      fl.write(json.dumps(dump_data, indent=2, separators=(',', ': ')))

    # Validate actual vs. expected landmarks.
    expected = np.load(expected_path)['predictions']
    assert (
        actual.shape == expected.shape
    ), 'Unexpected shape of predictions: {} instead of {}'.format(
        actual.shape, expected.shape
    )
    # large values, use relative tolerance for testing.
    np.testing.assert_allclose(actual[..., :2], expected[..., :2], rtol=0.1)

    # Validate actual vs. expected world landmarks.
    expected_world = np.load(expected_path)['w_predictions']
    assert (
        actual_world.shape == expected_world.shape
    ), 'Unexpected shape of world predictions: {} instead of {}'.format(
        actual_world.shape, expected_world.shape
    )
    # small values, use absolute tolerance for testing.
    np.testing.assert_array_almost_equal(
        actual_world, expected_world, decimal=1
    )
|
216
|
+
|
217
|
+
|
218
|
+
# Run the absl test runner when this module is executed as a script.
if __name__ == '__main__':
  absltest.main()
|