mediapipe-nightly 0.10.21.post20241223__cp312-cp312-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,71 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
"""Utility methods for creating the model asset bundles."""
|
16
|
+
|
17
|
+
from typing import Dict
|
18
|
+
import zipfile
|
19
|
+
|
20
|
+
# Alignment that ensures that all uncompressed files in the model bundle file
# are aligned relative to the start of the file. This lets the files be
# accessed directly via mmap. Value is in bytes.
_ALIGNMENT = 4
|
24
|
+
|
25
|
+
|
26
|
+
class AlignZipFile(zipfile.ZipFile):
  """ZipFile that stores uncompressed files at a particular alignment.

  Zero bytes are inserted before the local file header of each ZIP_STORED
  entry so that the entry's *data* begins at a multiple of `alignment`
  relative to the start of the archive, allowing the stored files to be
  accessed directly via mmap.
  """

  def __init__(self, *args, alignment: int, **kwargs) -> None:
    """Initializes the aligned zip file.

    Args:
      *args: positional arguments forwarded to `zipfile.ZipFile`.
      alignment: byte alignment for stored file contents; must be positive.
      **kwargs: keyword arguments forwarded to `zipfile.ZipFile`.

    Raises:
      ValueError: if `alignment` is not positive.
    """
    super().__init__(*args, **kwargs)
    # Validate explicitly instead of `assert`: asserts are stripped under -O.
    if alignment <= 0:
      raise ValueError(f"alignment must be positive, got {alignment}.")
    self._alignment = alignment

  def _writecheck(self, zinfo: zipfile.ZipInfo) -> None:
    # NOTE: `_writecheck` is a private zipfile hook called just before the
    # local file header is written — the last point at which the header
    # offset can still be adjusted.
    if zinfo.compress_type == zipfile.ZIP_STORED:
      offset = self.fp.tell()
      header_length = len(zinfo.FileHeader())
      # Pad so the first data byte (header start + header length) lands on a
      # multiple of the alignment. The trailing `% self._alignment` makes the
      # padding 0 when the data is already aligned; without it a full
      # alignment block of useless padding was written in that case.
      padding_length = (
          self._alignment - (offset + header_length) % self._alignment
      ) % self._alignment
      if padding_length:
        offset += padding_length
        self.fp.write(b"\x00" * padding_length)
        assert self.fp.tell() == offset
      zinfo.header_offset = offset
    else:
      raise ValueError(
          "Only support the uncompressed file (compress_type =="
          " zipfile.ZIP_STORED) in zip. The current file compress type is "
          + str(zinfo.compress_type)
      )
    super()._writecheck(zinfo)
|
54
|
+
|
55
|
+
|
56
|
+
def create_model_asset_bundle(
    input_models: Dict[str, bytes], output_path: str
) -> None:
  """Packs the given models into one model asset bundle file.

  Args:
    input_models: maps each model file name to its raw model content.
    output_path: destination file path for the model asset bundle.

  Raises:
    ValueError: if fewer than two input models are supplied.
  """
  if not input_models or len(input_models) < 2:
    raise ValueError("Needs at least two input models for model asset bundle.")

  # Store every model uncompressed and aligned so it can be mmap'ed directly
  # out of the bundle at load time.
  with AlignZipFile(output_path, mode="w", alignment=_ALIGNMENT) as bundle:
    for model_name, model_content in input_models.items():
      bundle.writestr(model_name, model_content)
|
@@ -0,0 +1,331 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
"""Writes metadata and label file to the Object Detector models."""
|
16
|
+
|
17
|
+
import dataclasses
|
18
|
+
from typing import List, Optional
|
19
|
+
|
20
|
+
import flatbuffers
|
21
|
+
|
22
|
+
from mediapipe.tasks.metadata import metadata_schema_py_generated as _metadata_fb
|
23
|
+
from mediapipe.tasks.metadata import object_detector_metadata_schema_py_generated as _detector_metadata_fb
|
24
|
+
from mediapipe.tasks.python.metadata import metadata
|
25
|
+
from mediapipe.tasks.python.metadata.metadata_writers import metadata_info
|
26
|
+
from mediapipe.tasks.python.metadata.metadata_writers import metadata_writer
|
27
|
+
|
28
|
+
# Human-readable model name and description embedded into the model's general
# metadata.
_MODEL_NAME = "ObjectDetector"
_MODEL_DESCRIPTION = (
    "Identify which of a known set of objects might be present and provide "
    "information about their positions within the given image or a video "
    "stream."
)

# Metadata Schema file for object detector, resolved relative to the installed
# package data files.
_FLATC_METADATA_SCHEMA_FILE = metadata.get_path_to_datafile(
    "../../../metadata/object_detector_metadata_schema.fbs",
)

# Metadata name in custom metadata field. The metadata name is used to get
# object detector metadata from SubGraphMetadata.custom_metadata and shouldn't
# be changed.
_METADATA_NAME = "DETECTOR_METADATA"
|
44
|
+
|
45
|
+
|
46
|
+
@dataclasses.dataclass
class FixedAnchor:
  """A fixed size anchor."""

  # Center of the anchor box. Presumably in normalized image coordinates —
  # TODO(review): confirm against the anchor generation code.
  x_center: float
  y_center: float
  # Anchor box size; may be None (e.g. when the size is implied elsewhere in
  # the decoding pipeline — cannot be confirmed from this file).
  width: Optional[float]
  height: Optional[float]
|
54
|
+
|
55
|
+
|
56
|
+
@dataclasses.dataclass
class FixedAnchorsSchema:
  """The schema for a list of anchors with fixed size."""

  # One entry per predicted box of the detection model.
  anchors: List[FixedAnchor]
|
61
|
+
|
62
|
+
|
63
|
+
@dataclasses.dataclass
class SsdAnchorsOptions:
  """The ssd anchors options used in object detector model."""

  # Only the fixed-anchors representation is currently supported; a None value
  # is rejected by ObjectDetectorOptionsMd.
  fixed_anchors_schema: Optional[FixedAnchorsSchema]
|
68
|
+
|
69
|
+
|
70
|
+
@dataclasses.dataclass
class TensorsDecodingOptions:
  """The decoding options to convert model output tensors to detections."""

  # The number of output classes predicted by the detection model.
  num_classes: int
  # The number of output boxes predicted by the detection model.
  num_boxes: int
  # The number of output values per boxes predicted by the detection
  # model. The values contain bounding boxes, keypoints, etc.
  num_coords: int
  # The offset of keypoint coordinates in the location tensor.
  keypoint_coord_offset: int
  # The number of predicted keypoints.
  num_keypoints: int
  # The dimension of each keypoint, e.g. number of values predicted for each
  # keypoint.
  num_values_per_keypoint: int
  # Scale parameters for decoding SSD detection model box coordinates.
  x_scale: float
  y_scale: float
  w_scale: float
  h_scale: float
  # Whether to apply exponential on box size.
  apply_exponential_on_box_size: bool
  # Whether to apply the sigmoid function on the score.
  sigmoid_score: bool
|
97
|
+
|
98
|
+
|
99
|
+
# Create an individual method for getting the metadata json file, so that it can
|
100
|
+
# be used as a standalone util.
|
101
|
+
# Create an individual method for getting the metadata json file, so that it can
# be used as a standalone util.
def convert_to_json(metadata_buffer: bytearray) -> str:
  """Converts the metadata into a json string.

  Args:
    metadata_buffer: valid metadata buffer in bytes.

  Returns:
    Metadata in JSON format.

  Raises:
    ValueError: error occurred when parsing the metadata schema file.
  """
  # The detector options live in a custom metadata entry; supply its schema so
  # the generic converter can render that entry as JSON too.
  custom_schemas = {_METADATA_NAME: _FLATC_METADATA_SCHEMA_FILE}
  return metadata.convert_to_json(
      metadata_buffer, custom_metadata_schema=custom_schemas
  )
|
117
|
+
|
118
|
+
|
119
|
+
class ObjectDetectorOptionsMd(metadata_info.CustomMetadataMd):
  """Object detector options metadata.

  Packs `SsdAnchorsOptions` and `TensorsDecodingOptions` into the
  `ObjectDetectorOptions` flatbuffer, exposed by `create_metadata` as a
  custom metadata entry named `DETECTOR_METADATA`.
  """

  # File identifier written into the options flatbuffer; versions the custom
  # metadata schema.
  _METADATA_FILE_IDENTIFIER = b"V001"

  def __init__(
      self,
      ssd_anchors_options: SsdAnchorsOptions,
      tensors_decoding_options: TensorsDecodingOptions,
  ) -> None:
    """Creates an ObjectDetectorOptionsMd object.

    Args:
      ssd_anchors_options: the ssd anchors options associated to the object
        detector model.
      tensors_decoding_options: the tensors decoding options used to decode the
        object detector model output.

    Raises:
      ValueError: if `ssd_anchors_options` does not carry a
        `fixed_anchors_schema`, the only anchor representation currently
        supported.
    """
    if ssd_anchors_options.fixed_anchors_schema is None:
      raise ValueError(
          "Currently only support FixedAnchorsSchema, which cannot be found"
          " in ssd_anchors_options."
      )
    self.ssd_anchors_options = ssd_anchors_options
    self.tensors_decoding_options = tensors_decoding_options
    super().__init__(name=_METADATA_NAME)

  def create_metadata(self) -> _metadata_fb.CustomMetadataT:
    """Creates the object detector options metadata.

    Returns:
      A Flatbuffers Python object of the custom metadata including object
      detector options metadata.
    """
    detector_options = _detector_metadata_fb.ObjectDetectorOptionsT()

    # Set ssd_anchors_options. The flatbuffer object-API fields use camelCase
    # while the Python dataclasses use snake_case; each field is mapped across
    # explicitly.
    ssd_anchors_options = _detector_metadata_fb.SsdAnchorsOptionsT()
    fixed_anchors_schema = _detector_metadata_fb.FixedAnchorsSchemaT()
    fixed_anchors_schema.anchors = []
    for anchor in self.ssd_anchors_options.fixed_anchors_schema.anchors:
      anchor_t = _detector_metadata_fb.FixedAnchorT()
      anchor_t.xCenter = anchor.x_center
      anchor_t.yCenter = anchor.y_center
      anchor_t.width = anchor.width
      anchor_t.height = anchor.height
      fixed_anchors_schema.anchors.append(anchor_t)
    ssd_anchors_options.fixedAnchorsSchema = fixed_anchors_schema
    detector_options.ssdAnchorsOptions = ssd_anchors_options

    # Set tensors_decoding_options.
    tensors_decoding_options = _detector_metadata_fb.TensorsDecodingOptionsT()
    tensors_decoding_options.numClasses = (
        self.tensors_decoding_options.num_classes
    )
    tensors_decoding_options.numBoxes = self.tensors_decoding_options.num_boxes
    tensors_decoding_options.numCoords = (
        self.tensors_decoding_options.num_coords
    )
    tensors_decoding_options.keypointCoordOffset = (
        self.tensors_decoding_options.keypoint_coord_offset
    )
    tensors_decoding_options.numKeypoints = (
        self.tensors_decoding_options.num_keypoints
    )
    tensors_decoding_options.numValuesPerKeypoint = (
        self.tensors_decoding_options.num_values_per_keypoint
    )
    tensors_decoding_options.xScale = self.tensors_decoding_options.x_scale
    tensors_decoding_options.yScale = self.tensors_decoding_options.y_scale
    tensors_decoding_options.wScale = self.tensors_decoding_options.w_scale
    tensors_decoding_options.hScale = self.tensors_decoding_options.h_scale
    tensors_decoding_options.applyExponentialOnBoxSize = (
        self.tensors_decoding_options.apply_exponential_on_box_size
    )
    tensors_decoding_options.sigmoidScore = (
        self.tensors_decoding_options.sigmoid_score
    )
    detector_options.tensorsDecodingOptions = tensors_decoding_options

    # Get the object detector options flatbuffer, stamped with the file
    # identifier so readers can validate the buffer version.
    b = flatbuffers.Builder(0)
    b.Finish(detector_options.Pack(b), self._METADATA_FILE_IDENTIFIER)
    detector_options_buf = b.Output()

    # Add the object detector options flatbuffer in custom metadata.
    custom_metadata = _metadata_fb.CustomMetadataT()
    custom_metadata.name = self.name
    custom_metadata.data = detector_options_buf
    return custom_metadata
|
209
|
+
|
210
|
+
|
211
|
+
class MetadataWriter(metadata_writer.MetadataWriterBase):
  """MetadataWriter to write the metadata into the object detector."""

  @classmethod
  def create_for_models_with_nms(
      cls,
      model_buffer: bytearray,
      input_norm_mean: List[float],
      input_norm_std: List[float],
      labels: metadata_writer.Labels,
      score_calibration: Optional[metadata_writer.ScoreCalibration] = None,
  ) -> "MetadataWriter":
    """Creates MetadataWriter to write the metadata for object detector with postprocessing in the model.

    This method creates a metadata writer for the models with postprocessing
    [1].

    The parameters required in this method are mandatory when using MediaPipe
    Tasks.

    Example usage:
      metadata_writer = object_detector.MetadataWriter.create_for_models_with_nms(
          model_buffer, ...)
      tflite_content, json_content = metadata_writer.populate()

    When calling `populate` function in this class, it returns TfLite content
    and JSON content. Note that only the output TFLite is used for deployment.
    The output JSON content is used to interpret the metadata content.

    Args:
      model_buffer: A valid flatbuffer loaded from the TFLite model file.
      input_norm_mean: the mean value used in the input tensor normalization
        [2].
      input_norm_std: the std value used in the input tensor normalization [2].
      labels: an instance of Labels helper class used in the output
        classification tensor [3].
      score_calibration: A container of the score calibration operation [4] in
        the classification tensor. Optional if the model does not use score
        calibration.
    [1]:
      https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/kernels/detection_postprocess.cc
    [2]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L389
    [3]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L99
    [4]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L456

    Returns:
      A MetadataWriter object.
    """
    writer = metadata_writer.MetadataWriter(model_buffer)
    writer.add_general_info(_MODEL_NAME, _MODEL_DESCRIPTION)
    writer.add_image_input(input_norm_mean, input_norm_std)
    writer.add_detection_output(labels, score_calibration)
    return cls(writer)

  @classmethod
  def create_for_models_without_nms(
      cls,
      model_buffer: bytearray,
      input_norm_mean: List[float],
      input_norm_std: List[float],
      labels: metadata_writer.Labels,
      ssd_anchors_options: SsdAnchorsOptions,
      tensors_decoding_options: TensorsDecodingOptions,
      output_tensors_order: metadata_info.RawDetectionOutputTensorsOrder = metadata_info.RawDetectionOutputTensorsOrder.UNSPECIFIED,
  ) -> "MetadataWriter":
    """Creates MetadataWriter to write the metadata for object detector without postprocessing in the model.

    This method creates a metadata writer for the models without postprocessing
    [1].

    The parameters required in this method are mandatory when using MediaPipe
    Tasks.

    Example usage:
      metadata_writer = object_detector.MetadataWriter.create_for_models_without_nms(
          model_buffer, ...)
      tflite_content, json_content = metadata_writer.populate()

    When calling `populate` function in this class, it returns TfLite content
    and JSON content. Note that only the output TFLite is used for deployment.
    The output JSON content is used to interpret the metadata content.

    Args:
      model_buffer: A valid flatbuffer loaded from the TFLite model file.
      input_norm_mean: the mean value used in the input tensor normalization
        [2].
      input_norm_std: the std value used in the input tensor normalization [2].
      labels: an instance of Labels helper class used in the output
        classification tensor [3].
      ssd_anchors_options: the ssd anchors options associated to the object
        detector model.
      tensors_decoding_options: the tensors decoding options used to decode the
        object detector model output.
      output_tensors_order: the order of the output tensors.
    [1]:
      https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/kernels/detection_postprocess.cc
    [2]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L389
    [3]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L99

    Returns:
      A MetadataWriter object.
    """
    writer = metadata_writer.MetadataWriter(model_buffer)
    writer.add_general_info(_MODEL_NAME, _MODEL_DESCRIPTION)
    writer.add_image_input(input_norm_mean, input_norm_std)
    writer.add_raw_detection_output(
        labels, output_tensors_order=output_tensors_order
    )
    option_md = ObjectDetectorOptionsMd(
        ssd_anchors_options, tensors_decoding_options
    )
    writer.add_custom_metadata(option_md)
    return cls(writer)

  def populate(self) -> "tuple[bytearray, str]":
    """Populates the metadata into the model.

    Overridden to re-derive the JSON from the populated model buffer with
    `convert_to_json`, so the custom DETECTOR_METADATA entry is rendered using
    the object detector schema instead of as opaque bytes.

    Returns:
      A tuple of (TFLite model buffer with metadata, metadata JSON content).
    """
    model_buf, _ = super().populate()
    metadata_buf = metadata.get_metadata_buffer(model_buf)
    json_content = convert_to_json(metadata_buf)
    return model_buf, json_content
|
@@ -0,0 +1,119 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
"""Writes metadata and label file to the Text classifier models."""
|
16
|
+
|
17
|
+
from typing import Union
|
18
|
+
|
19
|
+
from mediapipe.tasks.python.metadata.metadata_writers import metadata_writer
|
20
|
+
|
21
|
+
# Human-readable model name and description embedded into the model's general
# metadata.
_MODEL_NAME = "TextClassifier"
_MODEL_DESCRIPTION = ("Classify the input text into a set of known categories.")

# The input tensor names of models created by Model Maker.
_DEFAULT_ID_NAME = "serving_default_input_word_ids:0"
_DEFAULT_MASK_NAME = "serving_default_input_mask:0"
_DEFAULT_SEGMENT_ID_NAME = "serving_default_input_type_ids:0"
|
28
|
+
|
29
|
+
|
30
|
+
class MetadataWriter(metadata_writer.MetadataWriterBase):
  """MetadataWriter to write the metadata into the text classifier."""

  @classmethod
  def create_for_regex_model(
      cls, model_buffer: bytearray,
      regex_tokenizer: metadata_writer.RegexTokenizer,
      labels: metadata_writer.Labels) -> "MetadataWriter":
    """Creates MetadataWriter for TFLite model with regex tokenizer.

    The parameters required in this method are mandatory when using MediaPipe
    Tasks.

    Note that only the output TFLite is used for deployment. The output JSON
    content is used to interpret the metadata content.

    Args:
      model_buffer: A valid flatbuffer loaded from the TFLite model file.
      regex_tokenizer: information of the regex tokenizer [1] used to process
        the input string. If the tokenizer is `BertTokenizer` [2] or
        `SentencePieceTokenizer` [3], please refer to
        `create_for_bert_model`.
      labels: an instance of Labels helper class used in the output
        classification tensor [4].

    [1]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L500
    [2]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L477
    [3]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L485
    [4]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L99

    Returns:
      A MetadataWriter object.
    """
    writer = metadata_writer.MetadataWriter(model_buffer)
    writer.add_general_info(_MODEL_NAME, _MODEL_DESCRIPTION)
    writer.add_regex_text_input(regex_tokenizer)
    writer.add_classification_output(labels)
    return cls(writer)

  @classmethod
  def create_for_bert_model(
      cls,
      model_buffer: bytearray,
      tokenizer: Union[metadata_writer.BertTokenizer,
                       metadata_writer.SentencePieceTokenizer],
      labels: metadata_writer.Labels,
      ids_name: str = _DEFAULT_ID_NAME,
      mask_name: str = _DEFAULT_MASK_NAME,
      segment_name: str = _DEFAULT_SEGMENT_ID_NAME,
  ) -> "MetadataWriter":
    """Creates MetadataWriter for models with {Bert/SentencePiece}Tokenizer.

    `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name`
    in the TFLite schema, which help to determine the tensor order when
    populating metadata. The default values come from Model Maker.

    Args:
      model_buffer: valid buffer of the model file.
      tokenizer: information of the tokenizer used to process the input string,
        if any. Supported tokenizers are: `BertTokenizer` [1] and
        `SentencePieceTokenizer` [2]. If the tokenizer is `RegexTokenizer` [3],
        refer to `create_for_regex_model`.
      labels: an instance of Labels helper class used in the output
        classification tensor [4].
      ids_name: name of the ids tensor, which represents the tokenized ids of
        the input text.
      mask_name: name of the mask tensor, which represents the mask with `1` for
        real tokens and `0` for padding tokens.
      segment_name: name of the segment ids tensor, where `0` stands for the
        first sequence, and `1` stands for the second sequence if exists. [1]:
        https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L477
      [2]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L485
      [3]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L500
      [4]:
      https://github.com/google/mediapipe/blob/f8af41b1eb49ff4bdad756ff19d1d36f486be614/mediapipe/tasks/metadata/metadata_schema.fbs#L99

    Returns:
      A MetadataWriter object.
    """
    writer = metadata_writer.MetadataWriter(model_buffer)
    writer.add_general_info(_MODEL_NAME, _MODEL_DESCRIPTION)
    writer.add_bert_text_input(tokenizer, ids_name, mask_name, segment_name)
    writer.add_classification_output(labels)
    return cls(writer)
|
@@ -0,0 +1,91 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# ==============================================================================
|
15
|
+
"""Helper methods for writing metadata into TFLite models."""
|
16
|
+
|
17
|
+
from typing import List
|
18
|
+
|
19
|
+
from mediapipe.tasks.metadata import schema_py_generated as _schema_fb
|
20
|
+
|
21
|
+
|
22
|
+
def get_input_tensor_names(model_buffer: bytearray) -> List[str]:
  """Returns the names of the subgraph's input tensors, in input order.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    A list of UTF-8 decoded tensor names, one per subgraph input.
  """
  subgraph = get_subgraph(model_buffer)
  # Each subgraph input is an index into the subgraph's tensor table.
  return [
      subgraph.Tensors(subgraph.Inputs(i)).Name().decode("utf-8")
      for i in range(subgraph.InputsLength())
  ]
|
30
|
+
|
31
|
+
|
32
|
+
def get_output_tensor_names(model_buffer: bytearray) -> List[str]:
  """Returns the names of the subgraph's output tensors, in output order.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    A list of UTF-8 decoded tensor names, one per subgraph output.
  """
  subgraph = get_subgraph(model_buffer)
  # Each subgraph output is an index into the subgraph's tensor table.
  return [
      subgraph.Tensors(subgraph.Outputs(i)).Name().decode("utf-8")
      for i in range(subgraph.OutputsLength())
  ]
|
40
|
+
|
41
|
+
|
42
|
+
def get_input_tensor_types(
    model_buffer: bytearray) -> List[_schema_fb.TensorType]:
  """Returns the element types of the subgraph's input tensors, in order.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    A list of `TensorType` values, one per subgraph input.
  """
  subgraph = get_subgraph(model_buffer)
  return [
      subgraph.Tensors(subgraph.Inputs(i)).Type()
      for i in range(subgraph.InputsLength())
  ]
|
51
|
+
|
52
|
+
|
53
|
+
def get_output_tensor_types(
    model_buffer: bytearray) -> List[_schema_fb.TensorType]:
  """Returns the element types of the subgraph's output tensors, in order.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    A list of `TensorType` values, one per subgraph output.
  """
  subgraph = get_subgraph(model_buffer)
  return [
      subgraph.Tensors(subgraph.Outputs(i)).Type()
      for i in range(subgraph.OutputsLength())
  ]
|
62
|
+
|
63
|
+
|
64
|
+
def get_output_tensor_indices(model_buffer: bytearray) -> List[int]:
  """Gets the indices of the subgraph's output tensors.

  NOTE(review): the flatbuffers `OutputsAsNumpy` accessor returns a numpy
  array rather than a Python list despite the `List[int]` annotation —
  confirm callers rely only on iteration/indexing before tightening this.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    The output tensor indices of the model's subgraph.
  """
  return get_subgraph(model_buffer).OutputsAsNumpy()
|
68
|
+
|
69
|
+
|
70
|
+
def get_subgraph(model_buffer: bytearray) -> _schema_fb.SubGraph:
  """Gets the primary subgraph of the model.

  The first subgraph is used as the default. The TFLite Interpreter doesn't
  support multiple subgraphs yet, but models with mini-benchmark may have
  multiple subgraphs for acceleration evaluation purposes, so extra
  subgraphs are tolerated (and ignored) here.

  Args:
    model_buffer: valid buffer of the model file.

  Returns:
    The first subgraph of the model.

  Raises:
    ValueError: if the model has no subgraph.
  """
  model = _schema_fb.Model.GetRootAsModel(model_buffer, 0)

  # Guard against a malformed model: without this check, Subgraphs(0) on a
  # model with no subgraph would fail with an opaque flatbuffers error.
  if model.SubgraphsLength() == 0:
    raise ValueError("The model has no subgraph.")

  return model.Subgraphs(0)
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|