mediapipe-nightly 0.10.21.post20241223__cp312-cp312-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,196 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Test util for MediaPipe Tasks."""
|
15
|
+
|
16
|
+
import difflib
|
17
|
+
import os
|
18
|
+
|
19
|
+
from absl import flags
|
20
|
+
import six
|
21
|
+
|
22
|
+
from google.protobuf import descriptor
|
23
|
+
from google.protobuf import descriptor_pool
|
24
|
+
from google.protobuf import text_format
|
25
|
+
from mediapipe.python._framework_bindings import image as image_module
|
26
|
+
from mediapipe.python._framework_bindings import image_frame as image_frame_module
|
27
|
+
|
28
|
+
FLAGS = flags.FLAGS
|
29
|
+
_Image = image_module.Image
|
30
|
+
_ImageFormat = image_frame_module.ImageFormat
|
31
|
+
_RGB_CHANNELS = 3
|
32
|
+
|
33
|
+
|
34
|
+
def test_srcdir():
  """Returns the directory in which test data files should be looked up.

  Preference order: the absl `test_srcdir` flag if it is defined, then the
  TEST_SRCDIR environment variable.

  Raises:
    RuntimeError: if neither the flag nor the environment variable is set.
  """
  if "test_srcdir" in flags.FLAGS:
    return flags.FLAGS["test_srcdir"].value
  if "TEST_SRCDIR" in os.environ:
    return os.environ["TEST_SRCDIR"]
  raise RuntimeError("Missing TEST_SRCDIR environment.")
|
42
|
+
|
43
|
+
|
44
|
+
def get_test_data_path(file_or_dirname_path: str) -> str:
  """Returns the full path of a test data file or directory.

  Walks the fixed source tree root and returns the first entry (directory or
  file) whose path ends with `file_or_dirname_path`.

  Args:
    file_or_dirname_path: relative path suffix of the wanted file/directory.

  Raises:
    ValueError: if no matching entry exists under the search root.
  """
  for parent_dir, child_dirs, child_files in os.walk(
      '/tmp/root/sources/mediapipe'
  ):
    for entry_name in child_dirs + child_files:
      candidate = os.path.join(parent_dir, entry_name)
      if candidate.endswith(file_or_dirname_path):
        return candidate
  raise ValueError(
      "No %s in test directory: %s." % (file_or_dirname_path, test_srcdir())
  )
|
54
|
+
|
55
|
+
|
56
|
+
def create_calibration_file(
    file_dir: str,
    file_name: str = "score_calibration.txt",
    content: str = "1.0,2.0,3.0,4.0",
) -> str:
  """Writes a score-calibration file and returns its path.

  Args:
    file_dir: directory in which to create the file (must already exist).
    file_name: name of the calibration file to create.
    content: text written verbatim into the file.

  Returns:
    Absolute/joined path of the file that was written.
  """
  output_path = os.path.join(file_dir, file_name)
  with open(output_path, mode="w") as calibration_stream:
    calibration_stream.write(content)
  return output_path
|
66
|
+
|
67
|
+
|
68
|
+
def assert_proto_equals(
    self, a, b, check_initialized=True, normalize_numbers=True, msg=None
):
  """assert_proto_equals() is useful for unit tests.

  It produces much more helpful output than assertEqual() for proto2 messages.
  Fails with a useful error if a and b aren't equal. Comparison of repeated
  fields matches the semantics of unittest.TestCase.assertEqual(), ie order and
  extra duplicates fields matter.

  This is a fork of https://github.com/tensorflow/tensorflow/blob/
  master/tensorflow/python/util/protobuf/compare.py#L73. We use slightly
  different rounding cutoffs to support Mac usage.

  Args:
    self: absltest.testing.parameterized.TestCase
    a: proto2 PB instance, or text string representing one.
    b: proto2 PB instance -- message.Message or subclass thereof.
    check_initialized: boolean, whether to fail if either a or b isn't
      initialized.
    normalize_numbers: boolean, whether to normalize types and precision of
      numbers before comparison.
    msg: if specified, is used as the error message on failure.
  """
  pool = descriptor_pool.Default()
  # If `a` is given as a textproto string, parse it into the same message
  # class as `b` so that the two can be compared field by field.
  if isinstance(a, six.string_types):
    a = text_format.Parse(a, b.__class__(), descriptor_pool=pool)

  # Validate and (optionally) normalize BOTH messages in place before
  # rendering them to text.
  for pb in a, b:
    if check_initialized:
      errors = pb.FindInitializationErrors()
      if errors:
        self.fail("Initialization errors: %s\n%s" % (errors, pb))
    if normalize_numbers:
      _normalize_number_fields(pb)

  # Compare the canonical text renderings rather than the messages
  # themselves, which yields readable line-by-line failure output.
  a_str = text_format.MessageToString(a, descriptor_pool=pool)
  b_str = text_format.MessageToString(b, descriptor_pool=pool)

  # Some Python versions would perform regular diff instead of multi-line
  # diff if string is longer than 2**16. We substitute this behavior
  # with a call to unified_diff instead to have easier-to-read diffs.
  # For context, see: https://bugs.python.org/issue11763.
  if len(a_str) < 2**16 and len(b_str) < 2**16:
    self.assertMultiLineEqual(a_str, b_str, msg=msg)
  else:
    diff = "".join(
        difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True))
    )
    if diff:
      self.fail("%s :\n%s" % (msg, diff))
|
119
|
+
|
120
|
+
|
121
|
+
def _normalize_number_fields(pb):
  """Normalizes types and precisions of number fields in a protocol buffer.

  Due to subtleties in the python protocol buffer implementation, it is possible
  for values to have different types and precision depending on whether they
  were set and retrieved directly or deserialized from a protobuf. This function
  normalizes integer values to ints, rounds 32-bit floats to 4 decimal places
  (64-bit doubles to 6) to account for python always storing floats as 64-bit,
  and ensures doubles are floating point for when they're set to integers.
  Modifies pb in place. Recurses into nested objects. https://github.com/tensorf
  low/tensorflow/blob/master/tensorflow/python/util/protobuf/compare.py#L118

  Args:
    pb: proto2 message.

  Returns:
    the given pb, modified in place.
  """
  for desc, values in pb.ListFields():
    # Treat scalar fields as single-element lists so that the normalization
    # code below can handle both cases uniformly.
    is_repeated = True
    if desc.label != descriptor.FieldDescriptor.LABEL_REPEATED:
      is_repeated = False
      values = [values]

    normalized_values = None

    # We force 32-bit values to int and 64-bit values to long to make
    # alternate implementations where the distinction is more significant
    # (e.g. the C++ implementation) simpler.
    if desc.type in (
        descriptor.FieldDescriptor.TYPE_INT64,
        descriptor.FieldDescriptor.TYPE_UINT64,
        descriptor.FieldDescriptor.TYPE_SINT64,
    ):
      normalized_values = [int(x) for x in values]
    elif desc.type in (
        descriptor.FieldDescriptor.TYPE_INT32,
        descriptor.FieldDescriptor.TYPE_UINT32,
        descriptor.FieldDescriptor.TYPE_SINT32,
        descriptor.FieldDescriptor.TYPE_ENUM,
    ):
      normalized_values = [int(x) for x in values]
    elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
      # 32-bit floats are stored as 64-bit doubles in Python; rounding to 4
      # decimal places hides the representation noise (looser than the
      # upstream TF fork to tolerate Mac differences).
      normalized_values = [round(x, 4) for x in values]
    elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
      # float(x) coerces doubles that were assigned integer values.
      normalized_values = [round(float(x), 6) for x in values]

    if normalized_values is not None:
      if is_repeated:
        # Repeated fields cannot be assigned directly; clear and re-extend.
        pb.ClearField(desc.name)
        getattr(pb, desc.name).extend(normalized_values)
      else:
        setattr(pb, desc.name, normalized_values[0])

    # Recurse into nested messages (and groups) so the whole tree is
    # normalized.
    if (
        desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE
        or desc.type == descriptor.FieldDescriptor.TYPE_GROUP
    ):
      if (
          desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE
          and desc.message_type.has_options
          and desc.message_type.GetOptions().map_entry
      ):
        # This is a map, only recurse if the values have a message type.
        # (Field number 2 of a synthesized map entry is its value field.)
        if (
            desc.message_type.fields_by_number[2].type
            == descriptor.FieldDescriptor.TYPE_MESSAGE
        ):
          for v in six.itervalues(values):
            _normalize_number_fields(v)
      else:
        for v in values:
          # recursive step
          _normalize_number_fields(v)

  return pb
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
@@ -0,0 +1,228 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for language detector."""
|
15
|
+
|
16
|
+
import enum
|
17
|
+
import os
|
18
|
+
|
19
|
+
from absl.testing import absltest
|
20
|
+
from absl.testing import parameterized
|
21
|
+
|
22
|
+
from mediapipe.tasks.python.components.containers import category
|
23
|
+
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
|
24
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
25
|
+
from mediapipe.tasks.python.test import test_utils
|
26
|
+
from mediapipe.tasks.python.text import language_detector
|
27
|
+
|
28
|
+
# Short aliases for the task API types used throughout this test module.
LanguageDetectorResult = language_detector.LanguageDetectorResult
LanguageDetectorPrediction = (
    language_detector.LanguageDetectorResult.Detection
)
_BaseOptions = base_options_module.BaseOptions
_Category = category.Category
_Classifications = classification_result_module.Classifications
_LanguageDetector = language_detector.LanguageDetector
_LanguageDetectorOptions = language_detector.LanguageDetectorOptions

# Model file and test-data location (resolved via test_utils at setUp time).
_LANGUAGE_DETECTOR_MODEL = "language_detector.tflite"
_TEST_DATA_DIR = "mediapipe/tasks/testdata/text"

# Detections below this score are dropped by the detector options.
_SCORE_THRESHOLD = 0.3
# Input texts and their golden predictions (language code + probability).
_EN_TEXT = "To be, or not to be, that is the question"
_EN_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("en", 0.999856)]
)
_FR_TEXT = (
    "Il y a beaucoup de bouches qui parlent et fort peu de têtes qui pensent."
)
_FR_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("fr", 0.999781)]
)
_RU_TEXT = "это какой-то английский язык"
_RU_EXPECTED_RESULT = LanguageDetectorResult(
    [LanguageDetectorPrediction("ru", 0.993362)]
)
# Ambiguous CJK input: expected to yield both zh and ja predictions.
_MIXED_TEXT = "分久必合合久必分"
_MIXED_EXPECTED_RESULT = LanguageDetectorResult([
    LanguageDetectorPrediction("zh", 0.505424),
    LanguageDetectorPrediction("ja", 0.481617),
])
# Tolerance for probability comparisons in assertAlmostEqual.
_TOLERANCE = 1e-6
|
62
|
+
|
63
|
+
|
64
|
+
class ModelFileType(enum.Enum):
  """How the test supplies the model to the task under test."""

  # Model passed as an in-memory buffer (BaseOptions.model_asset_buffer).
  FILE_CONTENT = 1
  # Model passed as a file path (BaseOptions.model_asset_path).
  FILE_NAME = 2
|
67
|
+
|
68
|
+
|
69
|
+
class LanguageDetectorTest(parameterized.TestCase):
  """Unit tests for the MediaPipe LanguageDetector text task."""

  def setUp(self):
    """Resolves the language detector model path before each test."""
    super().setUp()
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _LANGUAGE_DETECTOR_MODEL)
    )

  def _expect_language_detector_result_correct(
      self,
      actual_result: LanguageDetectorResult,
      expect_result: LanguageDetectorResult,
  ):
    """Asserts each actual detection matches the expected one in order.

    Compares language codes exactly and probabilities within _TOLERANCE.
    Note: iterates over the actual detections, so extra expected entries
    beyond len(actual) are not checked.
    """
    for i, prediction in enumerate(actual_result.detections):
      expected_prediction = expect_result.detections[i]
      self.assertEqual(
          prediction.language_code,
          expected_prediction.language_code,
      )
      self.assertAlmostEqual(
          prediction.probability,
          expected_prediction.probability,
          delta=_TOLERANCE,
      )

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _LanguageDetector.create_from_model_path(self.model_path) as detector:
      self.assertIsInstance(detector, _LanguageDetector)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(base_options=base_options)
    with _LanguageDetector.create_from_options(options) as detector:
      self.assertIsInstance(detector, _LanguageDetector)

  def test_create_from_options_fails_with_invalid_model_path(self):
    # A nonexistent model path must surface as a RuntimeError at creation.
    with self.assertRaisesRegex(
        RuntimeError, "Unable to open file at /path/to/invalid/model.tflite"
    ):
      base_options = _BaseOptions(
          model_asset_path="/path/to/invalid/model.tflite"
      )
      options = _LanguageDetectorOptions(base_options=base_options)
      _LanguageDetector.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, "rb") as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _LanguageDetectorOptions(base_options=base_options)
      detector = _LanguageDetector.create_from_options(options)
      self.assertIsInstance(detector, _LanguageDetector)

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
  )
  def test_detect(self, model_file_type, text, expected_result):
    """Detects language with the detector managed manually (no `with`)."""
    # Creates detector.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, "rb") as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError("model_file_type is invalid.")

    options = _LanguageDetectorOptions(
        base_options=base_options, score_threshold=_SCORE_THRESHOLD
    )
    detector = _LanguageDetector.create_from_options(options)

    # Performs language detection on the input.
    text_result = detector.detect(text)
    # Comparing results.
    self._expect_language_detector_result_correct(text_result, expected_result)
    # Closes the detector explicitly when the detector is not used in
    # a context.
    detector.close()

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _EN_TEXT, _EN_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _FR_TEXT, _FR_EXPECTED_RESULT),
      (ModelFileType.FILE_NAME, _RU_TEXT, _RU_EXPECTED_RESULT),
      (ModelFileType.FILE_CONTENT, _MIXED_TEXT, _MIXED_EXPECTED_RESULT),
  )
  def test_detect_in_context(self, model_file_type, text, expected_result):
    """Detects language with the detector used as a context manager."""
    # Creates detector.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, "rb") as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError("model_file_type is invalid.")

    options = _LanguageDetectorOptions(
        base_options=base_options, score_threshold=_SCORE_THRESHOLD
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(text)
      # Comparing results.
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )

  def test_allowlist_option(self):
    """Only categories in the allowlist ("ja") should be returned."""
    # Creates detector.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(
        base_options=base_options,
        score_threshold=_SCORE_THRESHOLD,
        category_allowlist=["ja"],
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(_MIXED_TEXT)
      # Comparing results.
      expected_result = LanguageDetectorResult(
          [LanguageDetectorPrediction("ja", 0.481617)]
      )
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )

  def test_denylist_option(self):
    """Categories in the denylist ("ja") should be filtered out."""
    # Creates detector.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _LanguageDetectorOptions(
        base_options=base_options,
        score_threshold=_SCORE_THRESHOLD,
        category_denylist=["ja"],
    )
    with _LanguageDetector.create_from_options(options) as detector:
      # Performs language detection on the input.
      text_result = detector.detect(_MIXED_TEXT)
      # Comparing results.
      expected_result = LanguageDetectorResult(
          [LanguageDetectorPrediction("zh", 0.505424)]
      )
      self._expect_language_detector_result_correct(
          text_result, expected_result
      )
|
225
|
+
|
226
|
+
|
227
|
+
# Run the absl test runner when invoked as a script.
if __name__ == "__main__":
  absltest.main()
|
@@ -0,0 +1,235 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""Tests for text classifier."""
|
15
|
+
|
16
|
+
import enum
|
17
|
+
import os
|
18
|
+
|
19
|
+
from absl.testing import absltest
|
20
|
+
from absl.testing import parameterized
|
21
|
+
|
22
|
+
from mediapipe.tasks.python.components.containers import category
|
23
|
+
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
|
24
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
25
|
+
from mediapipe.tasks.python.test import test_utils
|
26
|
+
from mediapipe.tasks.python.text import text_classifier
|
27
|
+
|
28
|
+
# Short aliases for the task API types used throughout this test module.
TextClassifierResult = classification_result_module.ClassificationResult
_BaseOptions = base_options_module.BaseOptions
_Category = category.Category
_Classifications = classification_result_module.Classifications
_TextClassifier = text_classifier.TextClassifier
_TextClassifierOptions = text_classifier.TextClassifierOptions

# Model files and test-data location (resolved via test_utils at setUp time).
_BERT_MODEL_FILE = 'bert_text_classifier.tflite'
_REGEX_MODEL_FILE = 'test_model_text_classifier_with_regex_tokenizer.tflite'
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/text'

# Sentiment inputs used by the parameterized tests below.
_NEGATIVE_TEXT = 'What a waste of my time.'
_POSITIVE_TEXT = ('This is the best movie I’ve seen in recent years.'
                  'Strongly recommend it!')

# Golden classification results for the BERT model (lowercase category
# names) and the regex-tokenizer model (capitalized category names).
_BERT_NEGATIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=0,
                    score=0.9995,
                    display_name='',
                    category_name='negative'),
                _Category(
                    index=1,
                    score=0.0005,
                    display_name='',
                    category_name='positive')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
_BERT_POSITIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=1,
                    score=0.9994,
                    display_name='',
                    category_name='positive',
                ),
                _Category(
                    index=0,
                    score=0.0006,
                    display_name='',
                    category_name='negative',
                ),
            ],
            head_index=0,
            head_name='probability',
        )
    ],
    timestamp_ms=0,
)
_REGEX_NEGATIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=0,
                    score=0.81313,
                    display_name='',
                    category_name='Negative'),
                _Category(
                    index=1,
                    score=0.1868704,
                    display_name='',
                    category_name='Positive')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
_REGEX_POSITIVE_RESULTS = TextClassifierResult(
    classifications=[
        _Classifications(
            categories=[
                _Category(
                    index=1,
                    score=0.5134273,
                    display_name='',
                    category_name='Positive'),
                _Category(
                    index=0,
                    score=0.486573,
                    display_name='',
                    category_name='Negative')
            ],
            head_index=0,
            head_name='probability')
    ],
    timestamp_ms=0)
|
123
|
+
|
124
|
+
|
125
|
+
class ModelFileType(enum.Enum):
  """How the test supplies the model to the task under test."""

  # Model passed as an in-memory buffer (BaseOptions.model_asset_buffer).
  FILE_CONTENT = 1
  # Model passed as a file path (BaseOptions.model_asset_path).
  FILE_NAME = 2
|
128
|
+
|
129
|
+
|
130
|
+
class TextClassifierTest(parameterized.TestCase):
|
131
|
+
|
132
|
+
def setUp(self):
  """Resolves the BERT classifier model path before each test."""
  super().setUp()
  self.model_path = test_utils.get_test_data_path(
      os.path.join(_TEST_DATA_DIR, _BERT_MODEL_FILE))
|
136
|
+
|
137
|
+
def test_create_from_file_succeeds_with_valid_model_path(self):
|
138
|
+
# Creates with default option and valid model file successfully.
|
139
|
+
with _TextClassifier.create_from_model_path(self.model_path) as classifier:
|
140
|
+
self.assertIsInstance(classifier, _TextClassifier)
|
141
|
+
|
142
|
+
def test_create_from_options_succeeds_with_valid_model_path(self):
|
143
|
+
# Creates with options containing model file successfully.
|
144
|
+
base_options = _BaseOptions(model_asset_path=self.model_path)
|
145
|
+
options = _TextClassifierOptions(base_options=base_options)
|
146
|
+
with _TextClassifier.create_from_options(options) as classifier:
|
147
|
+
self.assertIsInstance(classifier, _TextClassifier)
|
148
|
+
|
149
|
+
def test_create_from_options_fails_with_invalid_model_path(self):
|
150
|
+
with self.assertRaisesRegex(
|
151
|
+
RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
|
152
|
+
base_options = _BaseOptions(
|
153
|
+
model_asset_path='/path/to/invalid/model.tflite')
|
154
|
+
options = _TextClassifierOptions(base_options=base_options)
|
155
|
+
_TextClassifier.create_from_options(options)
|
156
|
+
|
157
|
+
def test_create_from_options_succeeds_with_valid_model_content(self):
|
158
|
+
# Creates with options containing model content successfully.
|
159
|
+
with open(self.model_path, 'rb') as f:
|
160
|
+
base_options = _BaseOptions(model_asset_buffer=f.read())
|
161
|
+
options = _TextClassifierOptions(base_options=base_options)
|
162
|
+
classifier = _TextClassifier.create_from_options(options)
|
163
|
+
self.assertIsInstance(classifier, _TextClassifier)
|
164
|
+
|
165
|
+
@parameterized.parameters(
|
166
|
+
(ModelFileType.FILE_NAME, _BERT_MODEL_FILE, _NEGATIVE_TEXT,
|
167
|
+
_BERT_NEGATIVE_RESULTS), (ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE,
|
168
|
+
_NEGATIVE_TEXT, _BERT_NEGATIVE_RESULTS),
|
169
|
+
(ModelFileType.FILE_NAME, _BERT_MODEL_FILE, _POSITIVE_TEXT,
|
170
|
+
_BERT_POSITIVE_RESULTS), (ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE,
|
171
|
+
_POSITIVE_TEXT, _BERT_POSITIVE_RESULTS),
|
172
|
+
(ModelFileType.FILE_NAME, _REGEX_MODEL_FILE, _NEGATIVE_TEXT,
|
173
|
+
_REGEX_NEGATIVE_RESULTS), (ModelFileType.FILE_CONTENT, _REGEX_MODEL_FILE,
|
174
|
+
_NEGATIVE_TEXT, _REGEX_NEGATIVE_RESULTS),
|
175
|
+
(ModelFileType.FILE_NAME, _REGEX_MODEL_FILE, _POSITIVE_TEXT,
|
176
|
+
_REGEX_POSITIVE_RESULTS), (ModelFileType.FILE_CONTENT, _REGEX_MODEL_FILE,
|
177
|
+
_POSITIVE_TEXT, _REGEX_POSITIVE_RESULTS))
|
178
|
+
def test_classify(self, model_file_type, model_name, text,
|
179
|
+
expected_classification_result):
|
180
|
+
# Creates classifier.
|
181
|
+
model_path = test_utils.get_test_data_path(
|
182
|
+
os.path.join(_TEST_DATA_DIR, model_name))
|
183
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
184
|
+
base_options = _BaseOptions(model_asset_path=model_path)
|
185
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
186
|
+
with open(model_path, 'rb') as f:
|
187
|
+
model_content = f.read()
|
188
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
189
|
+
else:
|
190
|
+
# Should never happen
|
191
|
+
raise ValueError('model_file_type is invalid.')
|
192
|
+
|
193
|
+
options = _TextClassifierOptions(base_options=base_options)
|
194
|
+
classifier = _TextClassifier.create_from_options(options)
|
195
|
+
|
196
|
+
# Performs text classification on the input.
|
197
|
+
text_result = classifier.classify(text)
|
198
|
+
# Comparing results.
|
199
|
+
test_utils.assert_proto_equals(self, text_result.to_pb2(),
|
200
|
+
expected_classification_result.to_pb2())
|
201
|
+
# Closes the classifier explicitly when the classifier is not used in
|
202
|
+
# a context.
|
203
|
+
classifier.close()
|
204
|
+
|
205
|
+
@parameterized.parameters((ModelFileType.FILE_NAME, _BERT_MODEL_FILE,
|
206
|
+
_NEGATIVE_TEXT, _BERT_NEGATIVE_RESULTS),
|
207
|
+
(ModelFileType.FILE_CONTENT, _BERT_MODEL_FILE,
|
208
|
+
_NEGATIVE_TEXT, _BERT_NEGATIVE_RESULTS))
|
209
|
+
def test_classify_in_context(self, model_file_type, model_name, text,
|
210
|
+
expected_classification_result):
|
211
|
+
# Creates classifier.
|
212
|
+
model_path = test_utils.get_test_data_path(
|
213
|
+
os.path.join(_TEST_DATA_DIR, model_name))
|
214
|
+
if model_file_type is ModelFileType.FILE_NAME:
|
215
|
+
base_options = _BaseOptions(model_asset_path=model_path)
|
216
|
+
elif model_file_type is ModelFileType.FILE_CONTENT:
|
217
|
+
with open(model_path, 'rb') as f:
|
218
|
+
model_content = f.read()
|
219
|
+
base_options = _BaseOptions(model_asset_buffer=model_content)
|
220
|
+
else:
|
221
|
+
# Should never happen
|
222
|
+
raise ValueError('model_file_type is invalid.')
|
223
|
+
|
224
|
+
options = _TextClassifierOptions(base_options=base_options)
|
225
|
+
|
226
|
+
with _TextClassifier.create_from_options(options) as classifier:
|
227
|
+
# Performs text classification on the input.
|
228
|
+
text_result = classifier.classify(text)
|
229
|
+
# Comparing results.
|
230
|
+
test_utils.assert_proto_equals(self, text_result.to_pb2(),
|
231
|
+
expected_classification_result.to_pb2())
|
232
|
+
|
233
|
+
|
234
|
+
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
|