mediapipe-nightly 0.10.21.post20241223__cp311-cp311-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-311-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-311-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-311-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,576 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe holistic landmarker task."""
|
15
|
+
|
16
|
+
import dataclasses
|
17
|
+
from typing import Callable, List, Mapping, Optional
|
18
|
+
|
19
|
+
from mediapipe.framework.formats import classification_pb2
|
20
|
+
from mediapipe.framework.formats import landmark_pb2
|
21
|
+
from mediapipe.python import packet_creator
|
22
|
+
from mediapipe.python import packet_getter
|
23
|
+
from mediapipe.python._framework_bindings import image as image_module
|
24
|
+
from mediapipe.python._framework_bindings import packet as packet_module
|
25
|
+
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_landmarker_graph_options_pb2
|
26
|
+
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_result_pb2
|
27
|
+
from mediapipe.tasks.python.components.containers import category as category_module
|
28
|
+
from mediapipe.tasks.python.components.containers import landmark as landmark_module
|
29
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
30
|
+
from mediapipe.tasks.python.core import task_info as task_info_module
|
31
|
+
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
|
32
|
+
from mediapipe.tasks.python.vision.core import base_vision_task_api
|
33
|
+
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
|
34
|
+
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
|
35
|
+
|
36
|
+
_BaseOptions = base_options_module.BaseOptions
|
37
|
+
_HolisticResultProto = holistic_result_pb2.HolisticResult
|
38
|
+
_HolisticLandmarkerGraphOptionsProto = (
|
39
|
+
holistic_landmarker_graph_options_pb2.HolisticLandmarkerGraphOptions
|
40
|
+
)
|
41
|
+
_RunningMode = running_mode_module.VisionTaskRunningMode
|
42
|
+
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
|
43
|
+
_TaskInfo = task_info_module.TaskInfo
|
44
|
+
|
45
|
+
_IMAGE_IN_STREAM_NAME = 'image_in'
|
46
|
+
_IMAGE_OUT_STREAM_NAME = 'image_out'
|
47
|
+
_IMAGE_TAG = 'IMAGE'
|
48
|
+
|
49
|
+
_POSE_LANDMARKS_STREAM_NAME = 'pose_landmarks'
|
50
|
+
_POSE_LANDMARKS_TAG_NAME = 'POSE_LANDMARKS'
|
51
|
+
_POSE_WORLD_LANDMARKS_STREAM_NAME = 'pose_world_landmarks'
|
52
|
+
_POSE_WORLD_LANDMARKS_TAG = 'POSE_WORLD_LANDMARKS'
|
53
|
+
_POSE_SEGMENTATION_MASK_STREAM_NAME = 'pose_segmentation_mask'
|
54
|
+
_POSE_SEGMENTATION_MASK_TAG = 'POSE_SEGMENTATION_MASK'
|
55
|
+
_FACE_LANDMARKS_STREAM_NAME = 'face_landmarks'
|
56
|
+
_FACE_LANDMARKS_TAG = 'FACE_LANDMARKS'
|
57
|
+
_FACE_BLENDSHAPES_STREAM_NAME = 'extra_blendshapes'
|
58
|
+
_FACE_BLENDSHAPES_TAG = 'FACE_BLENDSHAPES'
|
59
|
+
_LEFT_HAND_LANDMARKS_STREAM_NAME = 'left_hand_landmarks'
|
60
|
+
_LEFT_HAND_LANDMARKS_TAG = 'LEFT_HAND_LANDMARKS'
|
61
|
+
_LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME = 'left_hand_world_landmarks'
|
62
|
+
_LEFT_HAND_WORLD_LANDMARKS_TAG = 'LEFT_HAND_WORLD_LANDMARKS'
|
63
|
+
_RIGHT_HAND_LANDMARKS_STREAM_NAME = 'right_hand_landmarks'
|
64
|
+
_RIGHT_HAND_LANDMARKS_TAG = 'RIGHT_HAND_LANDMARKS'
|
65
|
+
_RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME = 'right_hand_world_landmarks'
|
66
|
+
_RIGHT_HAND_WORLD_LANDMARKS_TAG = 'RIGHT_HAND_WORLD_LANDMARKS'
|
67
|
+
|
68
|
+
_TASK_GRAPH_NAME = (
|
69
|
+
'mediapipe.tasks.vision.holistic_landmarker.HolisticLandmarkerGraph'
|
70
|
+
)
|
71
|
+
_MICRO_SECONDS_PER_MILLISECOND = 1000
|
72
|
+
|
73
|
+
|
74
|
+
@dataclasses.dataclass
|
75
|
+
class HolisticLandmarkerResult:
|
76
|
+
"""The holistic landmarks result from HolisticLandmarker, where each vector element represents a single holistic detected in the image.
|
77
|
+
|
78
|
+
Attributes:
|
79
|
+
face_landmarks: Detected face landmarks in normalized image coordinates.
|
80
|
+
pose_landmarks: Detected pose landmarks in normalized image coordinates.
|
81
|
+
pose_world_landmarks: Detected pose world landmarks in image coordinates.
|
82
|
+
left_hand_landmarks: Detected left hand landmarks in normalized image
|
83
|
+
coordinates.
|
84
|
+
left_hand_world_landmarks: Detected left hand landmarks in image
|
85
|
+
coordinates.
|
86
|
+
right_hand_landmarks: Detected right hand landmarks in normalized image
|
87
|
+
coordinates.
|
88
|
+
right_hand_world_landmarks: Detected right hand landmarks in image
|
89
|
+
coordinates.
|
90
|
+
face_blendshapes: Optional face blendshapes.
|
91
|
+
segmentation_mask: Optional segmentation mask for pose.
|
92
|
+
"""
|
93
|
+
|
94
|
+
face_landmarks: List[landmark_module.NormalizedLandmark]
|
95
|
+
pose_landmarks: List[landmark_module.NormalizedLandmark]
|
96
|
+
pose_world_landmarks: List[landmark_module.Landmark]
|
97
|
+
left_hand_landmarks: List[landmark_module.NormalizedLandmark]
|
98
|
+
left_hand_world_landmarks: List[landmark_module.Landmark]
|
99
|
+
right_hand_landmarks: List[landmark_module.NormalizedLandmark]
|
100
|
+
right_hand_world_landmarks: List[landmark_module.Landmark]
|
101
|
+
face_blendshapes: Optional[List[category_module.Category]] = None
|
102
|
+
segmentation_mask: Optional[image_module.Image] = None
|
103
|
+
|
104
|
+
@classmethod
|
105
|
+
@doc_controls.do_not_generate_docs
|
106
|
+
def create_from_pb2(
|
107
|
+
cls, pb2_obj: _HolisticResultProto
|
108
|
+
) -> 'HolisticLandmarkerResult':
|
109
|
+
"""Creates a `HolisticLandmarkerResult` object from the given protobuf object."""
|
110
|
+
face_blendshapes = None
|
111
|
+
if hasattr(pb2_obj, 'face_blendshapes'):
|
112
|
+
face_blendshapes = [
|
113
|
+
category_module.Category(
|
114
|
+
score=classification.score,
|
115
|
+
index=classification.index,
|
116
|
+
category_name=classification.label,
|
117
|
+
display_name=classification.display_name,
|
118
|
+
)
|
119
|
+
for classification in pb2_obj.face_blendshapes.classification
|
120
|
+
]
|
121
|
+
|
122
|
+
return HolisticLandmarkerResult(
|
123
|
+
face_landmarks=[
|
124
|
+
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
|
125
|
+
for landmark in pb2_obj.face_landmarks.landmark
|
126
|
+
],
|
127
|
+
pose_landmarks=[
|
128
|
+
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
|
129
|
+
for landmark in pb2_obj.pose_landmarks.landmark
|
130
|
+
],
|
131
|
+
pose_world_landmarks=[
|
132
|
+
landmark_module.Landmark.create_from_pb2(landmark)
|
133
|
+
for landmark in pb2_obj.pose_world_landmarks.landmark
|
134
|
+
],
|
135
|
+
left_hand_landmarks=[
|
136
|
+
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
|
137
|
+
for landmark in pb2_obj.left_hand_landmarks.landmark
|
138
|
+
],
|
139
|
+
left_hand_world_landmarks=[],
|
140
|
+
right_hand_landmarks=[
|
141
|
+
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
|
142
|
+
for landmark in pb2_obj.right_hand_landmarks.landmark
|
143
|
+
],
|
144
|
+
right_hand_world_landmarks=[],
|
145
|
+
face_blendshapes=face_blendshapes,
|
146
|
+
segmentation_mask=None,
|
147
|
+
)
|
148
|
+
|
149
|
+
|
150
|
+
def _build_landmarker_result(
|
151
|
+
output_packets: Mapping[str, packet_module.Packet]
|
152
|
+
) -> HolisticLandmarkerResult:
|
153
|
+
"""Constructs a `HolisticLandmarksDetectionResult` from output packets."""
|
154
|
+
holistic_landmarker_result = HolisticLandmarkerResult(
|
155
|
+
[], [], [], [], [], [], []
|
156
|
+
)
|
157
|
+
|
158
|
+
face_landmarks_proto_list = packet_getter.get_proto(
|
159
|
+
output_packets[_FACE_LANDMARKS_STREAM_NAME]
|
160
|
+
)
|
161
|
+
|
162
|
+
pose_landmarks_proto_list = packet_getter.get_proto(
|
163
|
+
output_packets[_POSE_LANDMARKS_STREAM_NAME]
|
164
|
+
)
|
165
|
+
|
166
|
+
pose_world_landmarks_proto_list = packet_getter.get_proto(
|
167
|
+
output_packets[_POSE_WORLD_LANDMARKS_STREAM_NAME]
|
168
|
+
)
|
169
|
+
|
170
|
+
left_hand_landmarks_proto_list = packet_getter.get_proto(
|
171
|
+
output_packets[_LEFT_HAND_LANDMARKS_STREAM_NAME]
|
172
|
+
)
|
173
|
+
|
174
|
+
left_hand_world_landmarks_proto_list = packet_getter.get_proto(
|
175
|
+
output_packets[_LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME]
|
176
|
+
)
|
177
|
+
|
178
|
+
right_hand_landmarks_proto_list = packet_getter.get_proto(
|
179
|
+
output_packets[_RIGHT_HAND_LANDMARKS_STREAM_NAME]
|
180
|
+
)
|
181
|
+
|
182
|
+
right_hand_world_landmarks_proto_list = packet_getter.get_proto(
|
183
|
+
output_packets[_RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME]
|
184
|
+
)
|
185
|
+
|
186
|
+
face_landmarks = landmark_pb2.NormalizedLandmarkList()
|
187
|
+
face_landmarks.MergeFrom(face_landmarks_proto_list)
|
188
|
+
for face_landmark in face_landmarks.landmark:
|
189
|
+
holistic_landmarker_result.face_landmarks.append(
|
190
|
+
landmark_module.NormalizedLandmark.create_from_pb2(face_landmark)
|
191
|
+
)
|
192
|
+
|
193
|
+
pose_landmarks = landmark_pb2.NormalizedLandmarkList()
|
194
|
+
pose_landmarks.MergeFrom(pose_landmarks_proto_list)
|
195
|
+
for pose_landmark in pose_landmarks.landmark:
|
196
|
+
holistic_landmarker_result.pose_landmarks.append(
|
197
|
+
landmark_module.NormalizedLandmark.create_from_pb2(pose_landmark)
|
198
|
+
)
|
199
|
+
|
200
|
+
pose_world_landmarks = landmark_pb2.LandmarkList()
|
201
|
+
pose_world_landmarks.MergeFrom(pose_world_landmarks_proto_list)
|
202
|
+
for pose_world_landmark in pose_world_landmarks.landmark:
|
203
|
+
holistic_landmarker_result.pose_world_landmarks.append(
|
204
|
+
landmark_module.Landmark.create_from_pb2(pose_world_landmark)
|
205
|
+
)
|
206
|
+
|
207
|
+
left_hand_landmarks = landmark_pb2.NormalizedLandmarkList()
|
208
|
+
left_hand_landmarks.MergeFrom(left_hand_landmarks_proto_list)
|
209
|
+
for hand_landmark in left_hand_landmarks.landmark:
|
210
|
+
holistic_landmarker_result.left_hand_landmarks.append(
|
211
|
+
landmark_module.NormalizedLandmark.create_from_pb2(hand_landmark)
|
212
|
+
)
|
213
|
+
|
214
|
+
left_hand_world_landmarks = landmark_pb2.LandmarkList()
|
215
|
+
left_hand_world_landmarks.MergeFrom(left_hand_world_landmarks_proto_list)
|
216
|
+
for left_hand_world_landmark in left_hand_world_landmarks.landmark:
|
217
|
+
holistic_landmarker_result.left_hand_world_landmarks.append(
|
218
|
+
landmark_module.Landmark.create_from_pb2(left_hand_world_landmark)
|
219
|
+
)
|
220
|
+
|
221
|
+
right_hand_landmarks = landmark_pb2.NormalizedLandmarkList()
|
222
|
+
right_hand_landmarks.MergeFrom(right_hand_landmarks_proto_list)
|
223
|
+
for hand_landmark in right_hand_landmarks.landmark:
|
224
|
+
holistic_landmarker_result.right_hand_landmarks.append(
|
225
|
+
landmark_module.NormalizedLandmark.create_from_pb2(hand_landmark)
|
226
|
+
)
|
227
|
+
|
228
|
+
right_hand_world_landmarks = landmark_pb2.LandmarkList()
|
229
|
+
right_hand_world_landmarks.MergeFrom(right_hand_world_landmarks_proto_list)
|
230
|
+
for right_hand_world_landmark in right_hand_world_landmarks.landmark:
|
231
|
+
holistic_landmarker_result.right_hand_world_landmarks.append(
|
232
|
+
landmark_module.Landmark.create_from_pb2(right_hand_world_landmark)
|
233
|
+
)
|
234
|
+
|
235
|
+
if _FACE_BLENDSHAPES_STREAM_NAME in output_packets:
|
236
|
+
face_blendshapes_proto_list = packet_getter.get_proto(
|
237
|
+
output_packets[_FACE_BLENDSHAPES_STREAM_NAME]
|
238
|
+
)
|
239
|
+
face_blendshapes_classifications = classification_pb2.ClassificationList()
|
240
|
+
face_blendshapes_classifications.MergeFrom(face_blendshapes_proto_list)
|
241
|
+
holistic_landmarker_result.face_blendshapes = []
|
242
|
+
for face_blendshapes in face_blendshapes_classifications.classification:
|
243
|
+
holistic_landmarker_result.face_blendshapes.append(
|
244
|
+
category_module.Category(
|
245
|
+
index=face_blendshapes.index,
|
246
|
+
score=face_blendshapes.score,
|
247
|
+
display_name=face_blendshapes.display_name,
|
248
|
+
category_name=face_blendshapes.label,
|
249
|
+
)
|
250
|
+
)
|
251
|
+
|
252
|
+
if _POSE_SEGMENTATION_MASK_STREAM_NAME in output_packets:
|
253
|
+
holistic_landmarker_result.segmentation_mask = packet_getter.get_image(
|
254
|
+
output_packets[_POSE_SEGMENTATION_MASK_STREAM_NAME]
|
255
|
+
)
|
256
|
+
|
257
|
+
return holistic_landmarker_result
|
258
|
+
|
259
|
+
|
260
|
+
@dataclasses.dataclass
|
261
|
+
class HolisticLandmarkerOptions:
|
262
|
+
"""Options for the holistic landmarker task.
|
263
|
+
|
264
|
+
Attributes:
|
265
|
+
base_options: Base options for the holistic landmarker task.
|
266
|
+
running_mode: The running mode of the task. Default to the image mode.
|
267
|
+
HolisticLandmarker has three running modes: 1) The image mode for
|
268
|
+
detecting holistic landmarks on single image inputs. 2) The video mode for
|
269
|
+
      detecting holistic landmarks on the decoded frames of a video. 3) The live
      stream mode for detecting holistic landmarks on the live stream of input
      data, such as from camera. In this mode, the "result_callback" below must
      be specified to receive the detection results asynchronously.
    min_face_detection_confidence: The minimum confidence score for the face
      detection to be considered successful.
    min_face_suppression_threshold: The minimum non-maximum-suppression
      threshold for face detection to be considered overlapped.
    min_face_landmarks_confidence: The minimum confidence score for the face
      landmark detection to be considered successful.
    min_pose_detection_confidence: The minimum confidence score for the pose
      detection to be considered successful.
    min_pose_suppression_threshold: The minimum non-maximum-suppression
      threshold for pose detection to be considered overlapped.
    min_pose_landmarks_confidence: The minimum confidence score for the pose
      landmark detection to be considered successful.
    min_hand_landmarks_confidence: The minimum confidence score for the hand
      landmark detection to be considered successful.
    output_face_blendshapes: Whether HolisticLandmarker outputs face blendshapes
      classification. Face blendshapes are used for rendering the 3D face model.
    output_segmentation_mask: Whether to output segmentation masks.
    result_callback: The user-defined result callback for processing live stream
      data. The result callback should only be specified when the running mode
      is set to the live stream mode.
  """

  base_options: _BaseOptions
  running_mode: _RunningMode = _RunningMode.IMAGE
  min_face_detection_confidence: float = 0.5
  min_face_suppression_threshold: float = 0.5
  min_face_landmarks_confidence: float = 0.5
  min_pose_detection_confidence: float = 0.5
  min_pose_suppression_threshold: float = 0.5
  min_pose_landmarks_confidence: float = 0.5
  min_hand_landmarks_confidence: float = 0.5
  output_face_blendshapes: bool = False
  output_segmentation_mask: bool = False
  result_callback: Optional[
      Callable[[HolisticLandmarkerResult, image_module.Image, int], None]
  ] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _HolisticLandmarkerGraphOptionsProto:
    """Generates a HolisticLandmarkerGraphOptions protobuf object."""
    base_options_proto = self.base_options.to_pb2()
    base_options_proto.use_stream_mode = (
        False if self.running_mode == _RunningMode.IMAGE else True
    )

    # Initialize the holistic landmarker options from base options.
    holistic_landmarker_options_proto = _HolisticLandmarkerGraphOptionsProto(
        base_options=base_options_proto
    )
    # Configure face detector and face landmarks detector options.
    holistic_landmarker_options_proto.face_detector_graph_options.min_detection_confidence = (
        self.min_face_detection_confidence
    )
    holistic_landmarker_options_proto.face_detector_graph_options.min_suppression_threshold = (
        self.min_face_suppression_threshold
    )
    holistic_landmarker_options_proto.face_landmarks_detector_graph_options.min_detection_confidence = (
        self.min_face_landmarks_confidence
    )
    # Configure pose detector and pose landmarks detector options.
    holistic_landmarker_options_proto.pose_detector_graph_options.min_detection_confidence = (
        self.min_pose_detection_confidence
    )
    holistic_landmarker_options_proto.pose_detector_graph_options.min_suppression_threshold = (
        self.min_pose_suppression_threshold
    )
    holistic_landmarker_options_proto.pose_landmarks_detector_graph_options.min_detection_confidence = (
        self.min_pose_landmarks_confidence
    )
    # Configure hand landmarks detector options.
    holistic_landmarker_options_proto.hand_landmarks_detector_graph_options.min_detection_confidence = (
        self.min_hand_landmarks_confidence
    )
    return holistic_landmarker_options_proto

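# Editor's illustrative sketch (not part of the upstream file): constructing
# the options above and converting them to graph options via to_pb2(). The
# model asset path is a hypothetical placeholder.
#
#   options = HolisticLandmarkerOptions(
#       base_options=_BaseOptions(
#           model_asset_path='holistic_landmarker.task'  # hypothetical path
#       ),
#       running_mode=_RunningMode.IMAGE,
#       min_face_detection_confidence=0.5,
#       output_face_blendshapes=True,
#   )
#   graph_options_proto = options.to_pb2()
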
class HolisticLandmarker(base_vision_task_api.BaseVisionTaskApi):
  """Class that performs holistic landmarks detection on images."""

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'HolisticLandmarker':
    """Creates a `HolisticLandmarker` object from a TensorFlow Lite model and the default `HolisticLandmarkerOptions`.

    Note that the created `HolisticLandmarker` instance is in image mode, for
    detecting holistic landmarks on single image inputs.

    Args:
      model_path: Path to the model.

    Returns:
      `HolisticLandmarker` object that's created from the model file and the
      default `HolisticLandmarkerOptions`.

    Raises:
      ValueError: If the `HolisticLandmarker` object failed to be created from
        the provided file, such as an invalid file path.
      RuntimeError: If other types of error occurred.
    """
    base_options = _BaseOptions(model_asset_path=model_path)
    options = HolisticLandmarkerOptions(
        base_options=base_options, running_mode=_RunningMode.IMAGE
    )
    return cls.create_from_options(options)

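  # Editor's illustrative sketch (not part of the upstream file): the shortest
  # path to an image-mode landmarker via create_from_model_path. The model
  # file name and image path are hypothetical placeholders, and "mp" assumes
  # "import mediapipe as mp" on the caller's side.
  #
  #   landmarker = HolisticLandmarker.create_from_model_path(
  #       'holistic_landmarker.task')
  #   result = landmarker.detect(mp.Image.create_from_file('person.jpg'))
  #   landmarker.close()
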
  @classmethod
  def create_from_options(
      cls, options: HolisticLandmarkerOptions
  ) -> 'HolisticLandmarker':
    """Creates the `HolisticLandmarker` object from holistic landmarker options.

    Args:
      options: Options for the holistic landmarker task.

    Returns:
      `HolisticLandmarker` object that's created from `options`.

    Raises:
      ValueError: If the `HolisticLandmarker` object failed to be created from
        `HolisticLandmarkerOptions`, such as a missing model.
      RuntimeError: If other types of error occurred.
    """

    def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
      if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
        return

      image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])

      if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
        empty_packet = output_packets[_FACE_LANDMARKS_STREAM_NAME]
        options.result_callback(
            HolisticLandmarkerResult([], [], [], [], [], [], []),
            image,
            empty_packet.timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
        )
        return

      holistic_landmarks_detection_result = _build_landmarker_result(
          output_packets
      )
      timestamp = output_packets[_FACE_LANDMARKS_STREAM_NAME].timestamp
      options.result_callback(
          holistic_landmarks_detection_result,
          image,
          timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
      )

    output_streams = [
        ':'.join([_FACE_LANDMARKS_TAG, _FACE_LANDMARKS_STREAM_NAME]),
        ':'.join([_POSE_LANDMARKS_TAG_NAME, _POSE_LANDMARKS_STREAM_NAME]),
        ':'.join(
            [_POSE_WORLD_LANDMARKS_TAG, _POSE_WORLD_LANDMARKS_STREAM_NAME]
        ),
        ':'.join([_LEFT_HAND_LANDMARKS_TAG, _LEFT_HAND_LANDMARKS_STREAM_NAME]),
        ':'.join([
            _LEFT_HAND_WORLD_LANDMARKS_TAG,
            _LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME,
        ]),
        ':'.join(
            [_RIGHT_HAND_LANDMARKS_TAG, _RIGHT_HAND_LANDMARKS_STREAM_NAME]
        ),
        ':'.join([
            _RIGHT_HAND_WORLD_LANDMARKS_TAG,
            _RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME,
        ]),
        ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]),
    ]

    if options.output_segmentation_mask:
      output_streams.append(
          ':'.join(
              [_POSE_SEGMENTATION_MASK_TAG, _POSE_SEGMENTATION_MASK_STREAM_NAME]
          )
      )

    if options.output_face_blendshapes:
      output_streams.append(
          ':'.join([_FACE_BLENDSHAPES_TAG, _FACE_BLENDSHAPES_STREAM_NAME])
      )

    task_info = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[
            ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
        ],
        output_streams=output_streams,
        task_options=options,
    )
    return cls(
        task_info.generate_graph_config(
            enable_flow_limiting=options.running_mode
            == _RunningMode.LIVE_STREAM
        ),
        options.running_mode,
        packets_callback if options.result_callback else None,
    )

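  # Editor's illustrative sketch (not part of the upstream file): wiring a
  # live-stream landmarker through create_from_options. The callback signature
  # mirrors the result_callback type in HolisticLandmarkerOptions; the model
  # path and callback body are hypothetical.
  #
  #   def on_result(result, output_image, timestamp_ms):
  #     print('received result at', timestamp_ms, 'ms')
  #
  #   live_options = HolisticLandmarkerOptions(
  #       base_options=_BaseOptions(model_asset_path='holistic_landmarker.task'),
  #       running_mode=_RunningMode.LIVE_STREAM,
  #       result_callback=on_result,
  #   )
  #   with HolisticLandmarker.create_from_options(live_options) as landmarker:
  #     ...  # feed frames with detect_async()
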
  def detect(
      self,
      image: image_module.Image,
  ) -> HolisticLandmarkerResult:
    """Performs holistic landmarks detection on the given image.

    Only use this method when the HolisticLandmarker is created with the image
    running mode.

    The image can be of any size with format RGB or RGBA.

    Args:
      image: MediaPipe Image.

    Returns:
      The holistic landmarks detection results.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If holistic landmarker detection failed to run.
    """
    output_packets = self._process_image_data({
        _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image),
    })

    if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
      return HolisticLandmarkerResult([], [], [], [], [], [], [])

    return _build_landmarker_result(output_packets)

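  # Editor's illustrative sketch (not part of the upstream file): single-image
  # detection, given a `landmarker` created in image mode as shown above. The
  # image path is hypothetical and "mp" assumes "import mediapipe as mp".
  #
  #   mp_image = mp.Image.create_from_file('person.jpg')
  #   result = landmarker.detect(mp_image)
  #   if result.pose_landmarks:
  #     print(len(result.pose_landmarks), 'pose landmarks detected')
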
  def detect_for_video(
      self,
      image: image_module.Image,
      timestamp_ms: int,
  ) -> HolisticLandmarkerResult:
    """Performs holistic landmarks detection on the provided video frame.

    Only use this method when the HolisticLandmarker is created with the video
    running mode. It's required to provide the video frame's timestamp (in
    milliseconds) along with the video frame. The input timestamps should be
    monotonically increasing for adjacent calls of this method.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input video frame in milliseconds.

    Returns:
      The holistic landmarks detection results.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If holistic landmarker detection failed to run.
    """
    output_packets = self._process_video_data({
        _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
            timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
        ),
    })

    if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
      return HolisticLandmarkerResult([], [], [], [], [], [], [])

    return _build_landmarker_result(output_packets)

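  # Editor's illustrative sketch (not part of the upstream file): video-mode
  # detection with monotonically increasing timestamps derived from the frame
  # index and frame rate. `frames`, `fps`, and the video-mode `landmarker` are
  # assumed to come from the caller's own decoding and setup code.
  #
  #   for index, frame_rgb in enumerate(frames):  # RGB numpy arrays
  #     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame_rgb)
  #     timestamp_ms = int(index * 1000 / fps)
  #     result = landmarker.detect_for_video(mp_image, timestamp_ms)
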
  def detect_async(
      self,
      image: image_module.Image,
      timestamp_ms: int,
  ) -> None:
    """Sends live image data to perform holistic landmarks detection.

    Only use this method when the HolisticLandmarker is created with the live
    stream running mode. The input timestamps should be monotonically increasing
    for adjacent calls of this method. This method will return immediately after
    the input image is accepted. The results will be available via the
    `result_callback` provided in the `HolisticLandmarkerOptions`. The
    `detect_async` method is designed to process live stream data such as
    camera input. To lower the overall latency, holistic landmarker may drop the
    input images if needed. In other words, it's not guaranteed to have output
    per input image.

    The `result_callback` provides:
      - The holistic landmarks detection results.
      - The input image that the holistic landmarker runs on.
      - The input timestamp in milliseconds.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input image in milliseconds.

    Raises:
      ValueError: If the current input timestamp is smaller than what the
        holistic landmarker has already processed.
    """
    self._send_live_stream_data({
        _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
            timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
        ),
    })
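  # Editor's illustrative sketch (not part of the upstream file): feeding
  # detect_async from a capture loop with wall-clock timestamps. Results arrive
  # through the options' result_callback rather than a return value;
  # `camera_frames()` and the live-stream `landmarker` are hypothetical.
  #
  #   import time
  #   start = time.monotonic()
  #   for frame_rgb in camera_frames():
  #     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame_rgb)
  #     landmarker.detect_async(mp_image, int((time.monotonic() - start) * 1000))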