mediapipe-nightly 0.10.21.post20241223__cp310-cp310-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-310-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,191 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors. All Rights Reserved.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for face stylizer."""
|
15
|
+
|
16
|
+
import enum
|
17
|
+
import os
|
18
|
+
|
19
|
+
from absl.testing import absltest
|
20
|
+
from absl.testing import parameterized
|
21
|
+
|
22
|
+
from mediapipe.python._framework_bindings import image as image_module
|
23
|
+
from mediapipe.tasks.python.components.containers import rect
|
24
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
25
|
+
from mediapipe.tasks.python.test import test_utils
|
26
|
+
from mediapipe.tasks.python.vision import face_stylizer
|
27
|
+
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
|
28
|
+
|
29
|
+
|
30
|
+
_BaseOptions = base_options_module.BaseOptions
|
31
|
+
_Rect = rect.Rect
|
32
|
+
_Image = image_module.Image
|
33
|
+
_FaceStylizer = face_stylizer.FaceStylizer
|
34
|
+
_FaceStylizerOptions = face_stylizer.FaceStylizerOptions
|
35
|
+
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
|
36
|
+
|
37
|
+
_MODEL = 'face_stylizer_color_ink.task'
|
38
|
+
_LARGE_FACE_IMAGE = 'portrait.jpg'
|
39
|
+
_MODEL_IMAGE_SIZE = 256
|
40
|
+
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
|
41
|
+
|
42
|
+
|
43
|
+
class ModelFileType(enum.Enum):
|
44
|
+
FILE_CONTENT = 1
|
45
|
+
FILE_NAME = 2
|
46
|
+
|
47
|
+
|
48
|
+
class FaceStylizerTest(parameterized.TestCase):
|
49
|
+
|
50
|
+
def setUp(self):
|
51
|
+
super().setUp()
|
52
|
+
self.test_image = _Image.create_from_file(
|
53
|
+
test_utils.get_test_data_path(
|
54
|
+
os.path.join(_TEST_DATA_DIR, _LARGE_FACE_IMAGE)
|
55
|
+
)
|
56
|
+
)
|
57
|
+
self.model_path = test_utils.get_test_data_path(
|
58
|
+
os.path.join(_TEST_DATA_DIR, _MODEL)
|
59
|
+
)
|
60
|
+
|
61
|
+
def test_create_from_file_succeeds_with_valid_model_path(self):
|
62
|
+
# Creates with default option and valid model file successfully.
|
63
|
+
with _FaceStylizer.create_from_model_path(self.model_path) as stylizer:
|
64
|
+
self.assertIsInstance(stylizer, _FaceStylizer)
|
65
|
+
|
66
|
+
def test_create_from_options_succeeds_with_valid_model_path(self):
|
67
|
+
# Creates with options containing model file successfully.
|
68
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
69
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
70
|
+
with _FaceStylizer.create_from_options(options) as stylizer:
|
71
|
+
self.assertIsInstance(stylizer, _FaceStylizer)
|
72
|
+
|
73
|
+
def test_create_from_options_fails_with_invalid_model_path(self):
|
74
|
+
with self.assertRaisesRegex(
|
75
|
+
RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'
|
76
|
+
):
|
77
|
+
base_options = _BaseOptions(
|
78
|
+
model_asset_path='/path/to/invalid/model.tflite'
|
79
|
+
)
|
80
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
81
|
+
_FaceStylizer.create_from_options(options)
|
82
|
+
|
83
|
+
def test_create_from_options_succeeds_with_valid_model_content(self):
|
84
|
+
# Creates with options containing model content successfully.
|
85
|
+
with open(self.model_path, 'rb') as f:
|
86
|
+
base_options = _BaseOptions(model_asset_buffer=f.read())
|
87
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
88
|
+
stylizer = _FaceStylizer.create_from_options(options)
|
89
|
+
self.assertIsInstance(stylizer, _FaceStylizer)
|
90
|
+
|
91
|
+
@parameterized.parameters(
|
92
|
+
(ModelFileType.FILE_NAME, _LARGE_FACE_IMAGE),
|
93
|
+
(ModelFileType.FILE_CONTENT, _LARGE_FACE_IMAGE),
|
94
|
+
)
|
95
|
+
def test_stylize(self, model_file_type, image_file_name):
|
96
|
+
# Load the test image.
|
97
|
+
self.test_image = _Image.create_from_file(
|
98
|
+
test_utils.get_test_data_path(
|
99
|
+
os.path.join(_TEST_DATA_DIR, image_file_name)
|
100
|
+
)
|
101
|
+
)
|
102
|
+
# Creates stylizer.
|
103
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
104
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
105
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
106
|
+
with open(self.model_path, 'rb') as f:
|
107
|
+
model_content = f.read()
|
108
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
109
|
+
else:
|
110
|
+
# Should never happen
|
111
|
+
raise ValueError('model_file_type is invalid.')
|
112
|
+
|
113
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
114
|
+
stylizer = _FaceStylizer.create_from_options(options)
|
115
|
+
|
116
|
+
# Performs face stylization on the input.
|
117
|
+
stylized_image = stylizer.stylize(self.test_image)
|
118
|
+
self.assertIsInstance(stylized_image, _Image)
|
119
|
+
# Closes the stylizer explicitly when the stylizer is not used in
|
120
|
+
# a context.
|
121
|
+
stylizer.close()
|
122
|
+
|
123
|
+
@parameterized.parameters(
|
124
|
+
(ModelFileType.FILE_NAME, _LARGE_FACE_IMAGE),
|
125
|
+
(ModelFileType.FILE_CONTENT, _LARGE_FACE_IMAGE),
|
126
|
+
)
|
127
|
+
def test_stylize_in_context(self, model_file_type, image_file_name):
|
128
|
+
# Load the test image.
|
129
|
+
self.test_image = _Image.create_from_file(
|
130
|
+
test_utils.get_test_data_path(
|
131
|
+
os.path.join(_TEST_DATA_DIR, image_file_name)
|
132
|
+
)
|
133
|
+
)
|
134
|
+
# Creates stylizer.
|
135
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
136
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
137
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
138
|
+
with open(self.model_path, 'rb') as f:
|
139
|
+
model_content = f.read()
|
140
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
141
|
+
else:
|
142
|
+
# Should never happen
|
143
|
+
raise ValueError('model_file_type is invalid.')
|
144
|
+
|
145
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
146
|
+
with _FaceStylizer.create_from_options(options) as stylizer:
|
147
|
+
# Performs face stylization on the input.
|
148
|
+
stylized_image = stylizer.stylize(self.test_image)
|
149
|
+
self.assertIsInstance(stylized_image, _Image)
|
150
|
+
self.assertEqual(stylized_image.width, _MODEL_IMAGE_SIZE)
|
151
|
+
self.assertEqual(stylized_image.height, _MODEL_IMAGE_SIZE)
|
152
|
+
|
153
|
+
def test_stylize_succeeds_with_region_of_interest(self):
|
154
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
155
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
156
|
+
with _FaceStylizer.create_from_options(options) as stylizer:
|
157
|
+
# Load the test image.
|
158
|
+
test_image = _Image.create_from_file(
|
159
|
+
test_utils.get_test_data_path(
|
160
|
+
os.path.join(_TEST_DATA_DIR, _LARGE_FACE_IMAGE)
|
161
|
+
)
|
162
|
+
)
|
163
|
+
# Region-of-interest around the face.
|
164
|
+
roi = _Rect(left=0.32, top=0.02, right=0.67, bottom=0.32)
|
165
|
+
image_processing_options = _ImageProcessingOptions(roi)
|
166
|
+
# Performs face stylization on the input.
|
167
|
+
stylized_image = stylizer.stylize(test_image, image_processing_options)
|
168
|
+
self.assertIsInstance(stylized_image, _Image)
|
169
|
+
self.assertEqual(stylized_image.width, _MODEL_IMAGE_SIZE)
|
170
|
+
self.assertEqual(stylized_image.height, _MODEL_IMAGE_SIZE)
|
171
|
+
|
172
|
+
def test_stylize_succeeds_with_no_face_detected(self):
|
173
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
174
|
+
options = _FaceStylizerOptions(base_options=base_options)
|
175
|
+
with _FaceStylizer.create_from_options(options) as stylizer:
|
176
|
+
# Load the test image.
|
177
|
+
test_image = _Image.create_from_file(
|
178
|
+
test_utils.get_test_data_path(
|
179
|
+
os.path.join(_TEST_DATA_DIR, _LARGE_FACE_IMAGE)
|
180
|
+
)
|
181
|
+
)
|
182
|
+
# Region-of-interest that doesn't contain a human face.
|
183
|
+
roi = _Rect(left=0.1, top=0.1, right=0.2, bottom=0.2)
|
184
|
+
image_processing_options = _ImageProcessingOptions(roi)
|
185
|
+
# Performs face stylization on the input.
|
186
|
+
stylized_image = stylizer.stylize(test_image, image_processing_options)
|
187
|
+
self.assertIsNone(stylized_image)
|
188
|
+
|
189
|
+
|
190
|
+
if __name__ == '__main__':
|
191
|
+
absltest.main()
|
@@ -0,0 +1,437 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for hand landmarker."""
|
15
|
+
|
16
|
+
import enum
|
17
|
+
from unittest import mock
|
18
|
+
|
19
|
+
from absl.testing import absltest
|
20
|
+
from absl.testing import parameterized
|
21
|
+
import numpy as np
|
22
|
+
|
23
|
+
from google.protobuf import text_format
|
24
|
+
from mediapipe.python._framework_bindings import image as image_module
|
25
|
+
from mediapipe.tasks.cc.components.containers.proto import landmarks_detection_result_pb2
|
26
|
+
from mediapipe.tasks.python.components.containers import landmark as landmark_module
|
27
|
+
from mediapipe.tasks.python.components.containers import landmark_detection_result as landmark_detection_result_module
|
28
|
+
from mediapipe.tasks.python.components.containers import rect as rect_module
|
29
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
30
|
+
from mediapipe.tasks.python.test import test_utils
|
31
|
+
from mediapipe.tasks.python.vision import hand_landmarker
|
32
|
+
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
|
33
|
+
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
|
34
|
+
|
35
|
+
_LandmarksDetectionResultProto = (
|
36
|
+
landmarks_detection_result_pb2.LandmarksDetectionResult)
|
37
|
+
_BaseOptions = base_options_module.BaseOptions
|
38
|
+
_Rect = rect_module.Rect
|
39
|
+
_Landmark = landmark_module.Landmark
|
40
|
+
_NormalizedLandmark = landmark_module.NormalizedLandmark
|
41
|
+
_LandmarksDetectionResult = (
|
42
|
+
landmark_detection_result_module.LandmarksDetectionResult)
|
43
|
+
_Image = image_module.Image
|
44
|
+
_HandLandmarker = hand_landmarker.HandLandmarker
|
45
|
+
_HandLandmarkerOptions = hand_landmarker.HandLandmarkerOptions
|
46
|
+
_HandLandmarkerResult = hand_landmarker.HandLandmarkerResult
|
47
|
+
_RUNNING_MODE = running_mode_module.VisionTaskRunningMode
|
48
|
+
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
|
49
|
+
|
50
|
+
_HAND_LANDMARKER_BUNDLE_ASSET_FILE = 'hand_landmarker.task'
|
51
|
+
_NO_HANDS_IMAGE = 'cats_and_dogs.jpg'
|
52
|
+
_TWO_HANDS_IMAGE = 'right_hands.jpg'
|
53
|
+
_THUMB_UP_IMAGE = 'thumb_up.jpg'
|
54
|
+
_THUMB_UP_LANDMARKS = 'thumb_up_landmarks.pbtxt'
|
55
|
+
_POINTING_UP_IMAGE = 'pointing_up.jpg'
|
56
|
+
_POINTING_UP_LANDMARKS = 'pointing_up_landmarks.pbtxt'
|
57
|
+
_POINTING_UP_ROTATED_IMAGE = 'pointing_up_rotated.jpg'
|
58
|
+
_POINTING_UP_ROTATED_LANDMARKS = 'pointing_up_rotated_landmarks.pbtxt'
|
59
|
+
_LANDMARKS_MARGIN = 0.03
|
60
|
+
_HANDEDNESS_MARGIN = 0.05
|
61
|
+
|
62
|
+
|
63
|
+
def _get_expected_hand_landmarker_result(
|
64
|
+
file_path: str) -> _HandLandmarkerResult:
|
65
|
+
landmarks_detection_result_file_path = test_utils.get_test_data_path(
|
66
|
+
file_path)
|
67
|
+
with open(landmarks_detection_result_file_path, 'rb') as f:
|
68
|
+
landmarks_detection_result_proto = _LandmarksDetectionResultProto()
|
69
|
+
# Use this if a .pb file is available.
|
70
|
+
# landmarks_detection_result_proto.ParseFromString(f.read())
|
71
|
+
text_format.Parse(f.read(), landmarks_detection_result_proto)
|
72
|
+
landmarks_detection_result = _LandmarksDetectionResult.create_from_pb2(
|
73
|
+
landmarks_detection_result_proto)
|
74
|
+
return _HandLandmarkerResult(
|
75
|
+
handedness=[landmarks_detection_result.categories],
|
76
|
+
hand_landmarks=[landmarks_detection_result.landmarks],
|
77
|
+
hand_world_landmarks=[landmarks_detection_result.world_landmarks])
|
78
|
+
|
79
|
+
|
80
|
+
class ModelFileType(enum.Enum):
|
81
|
+
FILE_CONTENT = 1
|
82
|
+
FILE_NAME = 2
|
83
|
+
|
84
|
+
|
85
|
+
class HandLandmarkerTest(parameterized.TestCase):
|
86
|
+
|
87
|
+
def setUp(self):
|
88
|
+
super().setUp()
|
89
|
+
self.test_image = _Image.create_from_file(
|
90
|
+
test_utils.get_test_data_path(_THUMB_UP_IMAGE))
|
91
|
+
self.model_path = test_utils.get_test_data_path(
|
92
|
+
_HAND_LANDMARKER_BUNDLE_ASSET_FILE)
|
93
|
+
|
94
|
+
def _expect_hand_landmarks_correct(
|
95
|
+
self, actual_landmarks, expected_landmarks, margin
|
96
|
+
):
|
97
|
+
# Expects to have the same number of hands detected.
|
98
|
+
self.assertLen(actual_landmarks, len(expected_landmarks))
|
99
|
+
|
100
|
+
for i, _ in enumerate(actual_landmarks):
|
101
|
+
for j, elem in enumerate(actual_landmarks[i]):
|
102
|
+
self.assertAlmostEqual(elem.x, expected_landmarks[i][j].x, delta=margin)
|
103
|
+
self.assertAlmostEqual(elem.y, expected_landmarks[i][j].y, delta=margin)
|
104
|
+
|
105
|
+
def _expect_handedness_correct(
|
106
|
+
self, actual_handedness, expected_handedness, margin
|
107
|
+
):
|
108
|
+
# Actual top handedness matches expected top handedness.
|
109
|
+
actual_top_handedness = actual_handedness[0][0]
|
110
|
+
expected_top_handedness = expected_handedness[0][0]
|
111
|
+
self.assertEqual(actual_top_handedness.index, expected_top_handedness.index)
|
112
|
+
self.assertEqual(actual_top_handedness.category_name,
|
113
|
+
expected_top_handedness.category_name)
|
114
|
+
self.assertAlmostEqual(
|
115
|
+
actual_top_handedness.score, expected_top_handedness.score, delta=margin
|
116
|
+
)
|
117
|
+
|
118
|
+
def _expect_hand_landmarker_results_correct(
|
119
|
+
self,
|
120
|
+
actual_result: _HandLandmarkerResult,
|
121
|
+
expected_result: _HandLandmarkerResult,
|
122
|
+
):
|
123
|
+
self._expect_hand_landmarks_correct(
|
124
|
+
actual_result.hand_landmarks,
|
125
|
+
expected_result.hand_landmarks,
|
126
|
+
_LANDMARKS_MARGIN,
|
127
|
+
)
|
128
|
+
self._expect_handedness_correct(
|
129
|
+
actual_result.handedness, expected_result.handedness, _HANDEDNESS_MARGIN
|
130
|
+
)
|
131
|
+
|
132
|
+
def test_create_from_file_succeeds_with_valid_model_path(self):
|
133
|
+
# Creates with default option and valid model file successfully.
|
134
|
+
with _HandLandmarker.create_from_model_path(self.model_path) as landmarker:
|
135
|
+
self.assertIsInstance(landmarker, _HandLandmarker)
|
136
|
+
|
137
|
+
def test_create_from_options_succeeds_with_valid_model_path(self):
|
138
|
+
# Creates with options containing model file successfully.
|
139
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
140
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
141
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
142
|
+
self.assertIsInstance(landmarker, _HandLandmarker)
|
143
|
+
|
144
|
+
def test_create_from_options_fails_with_invalid_model_path(self):
|
145
|
+
# Invalid empty model path.
|
146
|
+
with self.assertRaisesRegex(
|
147
|
+
RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
|
148
|
+
base_options = _BaseOptions(
|
149
|
+
model_asset_path='/path/to/invalid/model.tflite')
|
150
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
151
|
+
_HandLandmarker.create_from_options(options)
|
152
|
+
|
153
|
+
def test_create_from_options_succeeds_with_valid_model_content(self):
|
154
|
+
# Creates with options containing model content successfully.
|
155
|
+
with open(self.model_path, 'rb') as f:
|
156
|
+
base_options = _BaseOptions(model_asset_buffer=f.read())
|
157
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
158
|
+
landmarker = _HandLandmarker.create_from_options(options)
|
159
|
+
self.assertIsInstance(landmarker, _HandLandmarker)
|
160
|
+
|
161
|
+
@parameterized.parameters(
|
162
|
+
(ModelFileType.FILE_NAME,
|
163
|
+
_get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)),
|
164
|
+
(ModelFileType.FILE_CONTENT,
|
165
|
+
_get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)))
|
166
|
+
def test_detect(self, model_file_type, expected_detection_result):
|
167
|
+
# Creates hand landmarker.
|
168
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
169
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
170
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
171
|
+
with open(self.model_path, 'rb') as f:
|
172
|
+
model_content = f.read()
|
173
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
174
|
+
else:
|
175
|
+
# Should never happen
|
176
|
+
raise ValueError('model_file_type is invalid.')
|
177
|
+
|
178
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
179
|
+
landmarker = _HandLandmarker.create_from_options(options)
|
180
|
+
|
181
|
+
# Performs hand landmarks detection on the input.
|
182
|
+
detection_result = landmarker.detect(self.test_image)
|
183
|
+
# Comparing results.
|
184
|
+
self._expect_hand_landmarker_results_correct(
|
185
|
+
detection_result, expected_detection_result
|
186
|
+
)
|
187
|
+
# Closes the hand landmarker explicitly when the hand landmarker is not used
|
188
|
+
# in a context.
|
189
|
+
landmarker.close()
|
190
|
+
|
191
|
+
@parameterized.parameters(
|
192
|
+
(ModelFileType.FILE_NAME,
|
193
|
+
_get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)),
|
194
|
+
(ModelFileType.FILE_CONTENT,
|
195
|
+
_get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)))
|
196
|
+
def test_detect_in_context(self, model_file_type, expected_detection_result):
|
197
|
+
# Creates hand landmarker.
|
198
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
199
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
200
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
201
|
+
with open(self.model_path, 'rb') as f:
|
202
|
+
model_content = f.read()
|
203
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
204
|
+
else:
|
205
|
+
# Should never happen
|
206
|
+
raise ValueError('model_file_type is invalid.')
|
207
|
+
|
208
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
209
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
210
|
+
# Performs hand landmarks detection on the input.
|
211
|
+
detection_result = landmarker.detect(self.test_image)
|
212
|
+
# Comparing results.
|
213
|
+
self._expect_hand_landmarker_results_correct(
|
214
|
+
detection_result, expected_detection_result
|
215
|
+
)
|
216
|
+
|
217
|
+
def test_detect_succeeds_with_num_hands(self):
|
218
|
+
# Creates hand landmarker.
|
219
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
220
|
+
options = _HandLandmarkerOptions(base_options=base_options, num_hands=2)
|
221
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
222
|
+
# Load the two hands image.
|
223
|
+
test_image = _Image.create_from_file(
|
224
|
+
test_utils.get_test_data_path(_TWO_HANDS_IMAGE))
|
225
|
+
# Performs hand landmarks detection on the input.
|
226
|
+
detection_result = landmarker.detect(test_image)
|
227
|
+
# Comparing results.
|
228
|
+
self.assertLen(detection_result.handedness, 2)
|
229
|
+
|
230
|
+
def test_detect_succeeds_with_rotation(self):
|
231
|
+
# Creates hand landmarker.
|
232
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
233
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
234
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
235
|
+
# Load the pointing up rotated image.
|
236
|
+
test_image = _Image.create_from_file(
|
237
|
+
test_utils.get_test_data_path(_POINTING_UP_ROTATED_IMAGE))
|
238
|
+
# Set rotation parameters using ImageProcessingOptions.
|
239
|
+
image_processing_options = _ImageProcessingOptions(rotation_degrees=-90)
|
240
|
+
# Performs hand landmarks detection on the input.
|
241
|
+
detection_result = landmarker.detect(test_image, image_processing_options)
|
242
|
+
expected_detection_result = _get_expected_hand_landmarker_result(
|
243
|
+
_POINTING_UP_ROTATED_LANDMARKS)
|
244
|
+
# Comparing results.
|
245
|
+
self._expect_hand_landmarker_results_correct(
|
246
|
+
detection_result, expected_detection_result
|
247
|
+
)
|
248
|
+
|
249
|
+
def test_detect_fails_with_region_of_interest(self):
|
250
|
+
# Creates hand landmarker.
|
251
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
252
|
+
options = _HandLandmarkerOptions(base_options=base_options)
|
253
|
+
with self.assertRaisesRegex(
|
254
|
+
ValueError, "This task doesn't support region-of-interest."):
|
255
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
256
|
+
# Set the `region_of_interest` parameter using `ImageProcessingOptions`.
|
257
|
+
image_processing_options = _ImageProcessingOptions(
|
258
|
+
region_of_interest=_Rect(0, 0, 1, 1))
|
259
|
+
# Attempt to perform hand landmarks detection on the cropped input.
|
260
|
+
landmarker.detect(self.test_image, image_processing_options)
|
261
|
+
|
262
|
+
def test_empty_detection_outputs(self):
|
263
|
+
options = _HandLandmarkerOptions(
|
264
|
+
base_options=_BaseOptions(model_asset_path=self.model_path))
|
265
|
+
with _HandLandmarker.create_from_options(options) as landmarker:
|
266
|
+
# Load the image with no hands.
|
267
|
+
no_hands_test_image = _Image.create_from_file(
|
268
|
+
test_utils.get_test_data_path(_NO_HANDS_IMAGE))
|
269
|
+
# Performs hand landmarks detection on the input.
|
270
|
+
detection_result = landmarker.detect(no_hands_test_image)
|
271
|
+
self.assertEmpty(detection_result.hand_landmarks)
|
272
|
+
self.assertEmpty(detection_result.hand_world_landmarks)
|
273
|
+
self.assertEmpty(detection_result.handedness)
|
274
|
+
|
275
|
+
def test_missing_result_callback(self):
|
276
|
+
options = _HandLandmarkerOptions(
|
277
|
+
base_options=_BaseOptions(model_asset_path=self.model_path),
|
278
|
+
running_mode=_RUNNING_MODE.LIVE_STREAM)
|
279
|
+
with self.assertRaisesRegex(ValueError,
|
280
|
+
r'result callback must be provided'):
|
281
|
+
with _HandLandmarker.create_from_options(options) as unused_landmarker:
|
282
|
+
pass
|
283
|
+
|
284
|
+
@parameterized.parameters((_RUNNING_MODE.IMAGE), (_RUNNING_MODE.VIDEO))
|
285
|
+
def test_illegal_result_callback(self, running_mode):
|
286
|
+
options = _HandLandmarkerOptions(
|
287
|
+
base_options=_BaseOptions(model_asset_path=self.model_path),
|
288
|
+
running_mode=running_mode,
|
289
|
+
result_callback=mock.MagicMock())
|
290
|
+
with self.assertRaisesRegex(ValueError,
|
291
|
+
r'result callback should not be provided'):
|
292
|
+
with _HandLandmarker.create_from_options(options) as unused_landmarker:
|
293
|
+
pass
|
294
|
+
|
295
|
+
def test_calling_detect_for_video_in_image_mode(self):
  """detect_for_video() is rejected on a task created in IMAGE mode."""
  image_mode_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.IMAGE)
  # The video API must not be usable on an image-mode task.
  with _HandLandmarker.create_from_options(
      image_mode_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the video mode'):
    landmarker.detect_for_video(self.test_image, 0)
|
303
|
+
|
304
|
+
def test_calling_detect_async_in_image_mode(self):
  """detect_async() is rejected on a task created in IMAGE mode."""
  image_mode_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.IMAGE)
  # The streaming API must not be usable on an image-mode task.
  with _HandLandmarker.create_from_options(
      image_mode_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the live stream mode'):
    landmarker.detect_async(self.test_image, 0)
|
312
|
+
|
313
|
+
def test_calling_detect_in_video_mode(self):
  """detect() is rejected on a task created in VIDEO mode."""
  video_mode_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.VIDEO)
  # The single-image API must not be usable on a video-mode task.
  with _HandLandmarker.create_from_options(
      video_mode_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the image mode'):
    landmarker.detect(self.test_image)
|
321
|
+
|
322
|
+
def test_calling_detect_async_in_video_mode(self):
  """detect_async() is rejected on a task created in VIDEO mode."""
  video_mode_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.VIDEO)
  # The streaming API must not be usable on a video-mode task.
  with _HandLandmarker.create_from_options(
      video_mode_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the live stream mode'):
    landmarker.detect_async(self.test_image, 0)
|
330
|
+
|
331
|
+
def test_detect_for_video_with_out_of_order_timestamp(self):
  """Timestamps handed to detect_for_video must strictly increase."""
  options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.VIDEO)
  with _HandLandmarker.create_from_options(options) as landmarker:
    # The first call at timestamp 1 succeeds and sets the high-water mark.
    unused_result = landmarker.detect_for_video(self.test_image, 1)
    # A later call going back to timestamp 0 must be rejected.
    with self.assertRaisesRegex(
        ValueError, r'Input timestamp must be monotonically increasing'):
      landmarker.detect_for_video(self.test_image, 0)
|
340
|
+
|
341
|
+
@parameterized.parameters(
    (_THUMB_UP_IMAGE, 0,
     _get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)),
    (_POINTING_UP_IMAGE, 0,
     _get_expected_hand_landmarker_result(_POINTING_UP_LANDMARKS)),
    (_POINTING_UP_ROTATED_IMAGE, -90,
     _get_expected_hand_landmarker_result(_POINTING_UP_ROTATED_LANDMARKS)),
    (_NO_HANDS_IMAGE, 0, _HandLandmarkerResult([], [], [])))
def test_detect_for_video(self, image_path, rotation, expected_result):
  """Runs detection over successive video frames and checks each result."""
  test_image = _Image.create_from_file(
      test_utils.get_test_data_path(image_path))
  # The rotation is applied per frame via ImageProcessingOptions.
  image_processing_options = _ImageProcessingOptions(
      rotation_degrees=rotation)
  options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.VIDEO)
  with _HandLandmarker.create_from_options(options) as landmarker:
    # Feed the same frame at 30 ms intervals, as a real video stream would.
    for timestamp in range(0, 300, 30):
      result = landmarker.detect_for_video(test_image, timestamp,
                                           image_processing_options)
      has_detection = bool(result.hand_landmarks) and bool(
          result.hand_world_landmarks) and bool(result.handedness)
      if has_detection:
        # A populated result is compared field-by-field with tolerances.
        self._expect_hand_landmarker_results_correct(result, expected_result)
      else:
        # An empty result must match the expected empty result exactly.
        self.assertEqual(result, expected_result)
|
367
|
+
|
368
|
+
def test_calling_detect_in_live_stream_mode(self):
  """detect() is rejected on a task created in LIVE_STREAM mode."""
  live_stream_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.LIVE_STREAM,
      result_callback=mock.MagicMock())
  # The single-image API must not be usable on a live-stream task.
  with _HandLandmarker.create_from_options(
      live_stream_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the image mode'):
    landmarker.detect(self.test_image)
|
377
|
+
|
378
|
+
def test_calling_detect_for_video_in_live_stream_mode(self):
  """detect_for_video() is rejected on a task created in LIVE_STREAM mode."""
  live_stream_options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.LIVE_STREAM,
      result_callback=mock.MagicMock())
  # The video API must not be usable on a live-stream task.
  with _HandLandmarker.create_from_options(
      live_stream_options) as landmarker, self.assertRaisesRegex(
          ValueError, r'not initialized with the video mode'):
    landmarker.detect_for_video(self.test_image, 0)
|
387
|
+
|
388
|
+
def test_detect_async_calls_with_illegal_timestamp(self):
  """detect_async rejects a timestamp earlier than the previous one."""
  options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.LIVE_STREAM,
      result_callback=mock.MagicMock())
  with _HandLandmarker.create_from_options(options) as landmarker:
    # Establish 100 as the latest timestamp seen by the stream.
    landmarker.detect_async(self.test_image, 100)
    # Going back to timestamp 0 violates the monotonicity requirement.
    with self.assertRaisesRegex(
        ValueError, r'Input timestamp must be monotonically increasing'):
      landmarker.detect_async(self.test_image, 0)
|
398
|
+
|
399
|
+
@parameterized.parameters(
    (_THUMB_UP_IMAGE, 0,
     _get_expected_hand_landmarker_result(_THUMB_UP_LANDMARKS)),
    (_POINTING_UP_IMAGE, 0,
     _get_expected_hand_landmarker_result(_POINTING_UP_LANDMARKS)),
    (_POINTING_UP_ROTATED_IMAGE, -90,
     _get_expected_hand_landmarker_result(_POINTING_UP_ROTATED_LANDMARKS)),
    (_NO_HANDS_IMAGE, 0, _HandLandmarkerResult([], [], [])))
def test_detect_async_calls(self, image_path, rotation, expected_result):
  """Streams frames through detect_async and validates each callback.

  The result callback checks the detection result, verifies the echoed
  output image, and asserts that callback timestamps strictly increase.
  """
  test_image = _Image.create_from_file(
      test_utils.get_test_data_path(image_path))
  # Set rotation parameters using ImageProcessingOptions.
  image_processing_options = _ImageProcessingOptions(
      rotation_degrees=rotation)
  # BUG FIX: the original kept `observed_timestamp_ms` as a closure local
  # but the callback wrote `self.observed_timestamp_ms`, so the local was
  # never updated and assertLess always compared against -1. Keeping the
  # high-water mark on `self` makes the monotonicity check effective.
  self.observed_timestamp_ms = -1

  def check_result(result: _HandLandmarkerResult, output_image: _Image,
                   timestamp_ms: int):
    if (result.hand_landmarks and result.hand_world_landmarks and
        result.handedness):
      self._expect_hand_landmarker_results_correct(result, expected_result)
    else:
      self.assertEqual(result, expected_result)
    # The callback must receive the same pixels that were sent in.
    self.assertTrue(
        np.array_equal(output_image.numpy_view(), test_image.numpy_view()))
    # Callback timestamps must strictly increase across invocations.
    self.assertLess(self.observed_timestamp_ms, timestamp_ms)
    self.observed_timestamp_ms = timestamp_ms

  options = _HandLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path=self.model_path),
      running_mode=_RUNNING_MODE.LIVE_STREAM,
      result_callback=check_result)
  with _HandLandmarker.create_from_options(options) as landmarker:
    # Feed the same frame at 30 ms intervals, as a live stream would.
    for timestamp in range(0, 300, 30):
      landmarker.detect_async(test_image, timestamp, image_processing_options)
|
434
|
+
|
435
|
+
|
436
|
+
# Run all tests via absl's test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
|