mediapipe-nightly 0.10.21.post20241223__cp312-cp312-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,220 @@
|
|
1
|
+
# Copyright 2023 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe language detector task."""
|
15
|
+
|
16
|
+
import dataclasses
|
17
|
+
from typing import List, Optional
|
18
|
+
|
19
|
+
from mediapipe.python import packet_creator
|
20
|
+
from mediapipe.python import packet_getter
|
21
|
+
from mediapipe.tasks.cc.components.containers.proto import classifications_pb2
|
22
|
+
from mediapipe.tasks.cc.components.processors.proto import classifier_options_pb2
|
23
|
+
from mediapipe.tasks.cc.text.text_classifier.proto import text_classifier_graph_options_pb2
|
24
|
+
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
|
25
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
26
|
+
from mediapipe.tasks.python.core import task_info as task_info_module
|
27
|
+
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
|
28
|
+
from mediapipe.tasks.python.text.core import base_text_task_api
|
29
|
+
|
30
|
+
_ClassificationResult = classification_result_module.ClassificationResult
|
31
|
+
_BaseOptions = base_options_module.BaseOptions
|
32
|
+
_TextClassifierGraphOptionsProto = (
|
33
|
+
text_classifier_graph_options_pb2.TextClassifierGraphOptions
|
34
|
+
)
|
35
|
+
_ClassifierOptionsProto = classifier_options_pb2.ClassifierOptions
|
36
|
+
_TaskInfo = task_info_module.TaskInfo
|
37
|
+
|
38
|
+
_CLASSIFICATIONS_STREAM_NAME = 'classifications_out'
|
39
|
+
_CLASSIFICATIONS_TAG = 'CLASSIFICATIONS'
|
40
|
+
_TEXT_IN_STREAM_NAME = 'text_in'
|
41
|
+
_TEXT_TAG = 'TEXT'
|
42
|
+
_TASK_GRAPH_NAME = 'mediapipe.tasks.text.text_classifier.TextClassifierGraph'
|
43
|
+
|
44
|
+
|
45
|
+
@dataclasses.dataclass
class LanguageDetectorResult:
  """The prediction result produced by `LanguageDetector.detect`.

  Attributes:
    detections: The list of predicted languages, each paired with its
      probability.
  """

  @dataclasses.dataclass
  class Detection:
    """A language code and its probability."""

    # An i18n language / locale code, e.g. "en" for English, "uz" for Uzbek,
    # "ja"-Latn for Japanese (romaji).
    language_code: str
    # Probability score for this language, as produced by the model's
    # classification head.
    probability: float

  # One entry per language predicted for the input text.
  detections: List[Detection]
|
58
|
+
|
59
|
+
|
60
|
+
def _extract_language_detector_result(
    classification_result: classification_result_module.ClassificationResult,
) -> LanguageDetectorResult:
  """Converts a ClassificationResult into a LanguageDetectorResult.

  Args:
    classification_result: The raw result produced by the underlying
      TextClassifierGraph; expected to hold exactly one classification head.

  Returns:
    A `LanguageDetectorResult` with one `Detection` per category.

  Raises:
    ValueError: If the result does not have exactly one classification head,
      or if any category is missing its language code.
  """
  classifications = classification_result.classifications
  if len(classifications) != 1:
    raise ValueError(
        'The LanguageDetector TextClassifierGraph should have exactly one '
        'classification head.'
    )
  result = LanguageDetectorResult([])
  for entry in classifications[0].categories:
    if entry.category_name is None:
      raise ValueError(
          'LanguageDetector ClassificationResult has a missing language code.'
      )
    result.detections.append(
        LanguageDetectorResult.Detection(entry.category_name, entry.score)
    )
  return result
|
81
|
+
|
82
|
+
|
83
|
+
@dataclasses.dataclass
class LanguageDetectorOptions:
  """Options for the language detector task.

  Attributes:
    base_options: Base options for the language detector task.
    display_names_locale: Locale used for display names specified through the
      TFLite Model Metadata.
    max_results: Maximum number of top-scored classification results to
      return.
    score_threshold: Overrides the score threshold provided in the model
      metadata; results below this value are rejected.
    category_allowlist: Allowlist of category names. If non-empty,
      classification results whose category name is not in this set will be
      filtered out. Duplicate or unknown category names are ignored. Mutually
      exclusive with `category_denylist`.
    category_denylist: Denylist of category names. If non-empty,
      classification results whose category name is in this set will be
      filtered out. Duplicate or unknown category names are ignored. Mutually
      exclusive with `category_allowlist`.
  """

  base_options: _BaseOptions
  display_names_locale: Optional[str] = None
  max_results: Optional[int] = None
  score_threshold: Optional[float] = None
  category_allowlist: Optional[List[str]] = None
  category_denylist: Optional[List[str]] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _TextClassifierGraphOptionsProto:
    """Generates a TextClassifierGraphOptions protobuf object."""
    # LanguageDetector reuses the TextClassifierGraph, so these options are
    # serialized into the text-classifier option protos.
    classifier_options = _ClassifierOptionsProto(
        display_names_locale=self.display_names_locale,
        max_results=self.max_results,
        score_threshold=self.score_threshold,
        category_allowlist=self.category_allowlist,
        category_denylist=self.category_denylist,
    )
    return _TextClassifierGraphOptionsProto(
        base_options=self.base_options.to_pb2(),
        classifier_options=classifier_options,
    )
|
128
|
+
|
129
|
+
|
130
|
+
class LanguageDetector(base_text_task_api.BaseTextTaskApi):
  """Class that predicts the language of an input text.

  This API expects a TFLite model with TFLite Model Metadata that contains the
  mandatory (described below) input tensors, output tensor, and the language
  codes in an AssociatedFile.

  Input tensors:
    (kTfLiteString)
    - 1 input tensor that is scalar or has shape [1] containing the input
      string.
  Output tensor:
    (kTfLiteFloat32)
    - 1 output tensor of shape `[1 x N]` where `N` is the number of languages.
  """

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'LanguageDetector':
    """Creates a `LanguageDetector` from a model file with default options.

    Args:
      model_path: Path to the model.

    Returns:
      `LanguageDetector` object that's created from the model file and the
      default `LanguageDetectorOptions`.

    Raises:
      ValueError: If failed to create `LanguageDetector` object from the
        provided file such as invalid file path.
      RuntimeError: If other types of error occurred.
    """
    default_options = LanguageDetectorOptions(
        base_options=_BaseOptions(model_asset_path=model_path)
    )
    return cls.create_from_options(default_options)

  @classmethod
  def create_from_options(
      cls, options: LanguageDetectorOptions
  ) -> 'LanguageDetector':
    """Creates the `LanguageDetector` object from language detector options.

    Args:
      options: Options for the language detector task.

    Returns:
      `LanguageDetector` object that's created from `options`.

    Raises:
      ValueError: If failed to create `LanguageDetector` object from
        `LanguageDetectorOptions` such as missing the model.
      RuntimeError: If other types of error occurred.
    """
    graph_config = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[f'{_TEXT_TAG}:{_TEXT_IN_STREAM_NAME}'],
        output_streams=[
            f'{_CLASSIFICATIONS_TAG}:{_CLASSIFICATIONS_STREAM_NAME}'
        ],
        task_options=options,
    ).generate_graph_config()
    return cls(graph_config)

  def detect(self, text: str) -> LanguageDetectorResult:
    """Predicts the language of the input `text`.

    Args:
      text: The input text.

    Returns:
      A `LanguageDetectorResult` object that contains a list of languages and
      scores.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If language detection failed to run.
    """
    outputs = self._runner.process(
        {_TEXT_IN_STREAM_NAME: packet_creator.create_string(text)}
    )
    # Copy out of the packet so the proto outlives the runner's buffers.
    result_proto = classifications_pb2.ClassificationResult()
    result_proto.CopyFrom(
        packet_getter.get_proto(outputs[_CLASSIFICATIONS_STREAM_NAME])
    )
    return _extract_language_detector_result(
        _ClassificationResult.create_from_pb2(result_proto)
    )
|
@@ -0,0 +1,187 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe text classifier task."""
|
15
|
+
|
16
|
+
import dataclasses
|
17
|
+
from typing import Optional, List
|
18
|
+
|
19
|
+
from mediapipe.python import packet_creator
|
20
|
+
from mediapipe.python import packet_getter
|
21
|
+
from mediapipe.tasks.cc.components.containers.proto import classifications_pb2
|
22
|
+
from mediapipe.tasks.cc.components.processors.proto import classifier_options_pb2
|
23
|
+
from mediapipe.tasks.cc.text.text_classifier.proto import text_classifier_graph_options_pb2
|
24
|
+
from mediapipe.tasks.python.components.containers import classification_result as classification_result_module
|
25
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
26
|
+
from mediapipe.tasks.python.core import task_info as task_info_module
|
27
|
+
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
|
28
|
+
from mediapipe.tasks.python.text.core import base_text_task_api
|
29
|
+
|
30
|
+
TextClassifierResult = classification_result_module.ClassificationResult
|
31
|
+
_BaseOptions = base_options_module.BaseOptions
|
32
|
+
_TextClassifierGraphOptionsProto = text_classifier_graph_options_pb2.TextClassifierGraphOptions
|
33
|
+
_ClassifierOptionsProto = classifier_options_pb2.ClassifierOptions
|
34
|
+
_TaskInfo = task_info_module.TaskInfo
|
35
|
+
|
36
|
+
_CLASSIFICATIONS_STREAM_NAME = 'classifications_out'
|
37
|
+
_CLASSIFICATIONS_TAG = 'CLASSIFICATIONS'
|
38
|
+
_TEXT_IN_STREAM_NAME = 'text_in'
|
39
|
+
_TEXT_TAG = 'TEXT'
|
40
|
+
_TASK_GRAPH_NAME = 'mediapipe.tasks.text.text_classifier.TextClassifierGraph'
|
41
|
+
|
42
|
+
|
43
|
+
@dataclasses.dataclass
class TextClassifierOptions:
  """Options for the text classifier task.

  Attributes:
    base_options: Base options for the text classifier task.
    display_names_locale: Locale used for display names specified through the
      TFLite Model Metadata.
    max_results: Maximum number of top-scored classification results to
      return.
    score_threshold: Overrides the score threshold provided in the model
      metadata; results below this value are rejected.
    category_allowlist: Allowlist of category names. If non-empty,
      classification results whose category name is not in this set will be
      filtered out. Duplicate or unknown category names are ignored. Mutually
      exclusive with `category_denylist`.
    category_denylist: Denylist of category names. If non-empty,
      classification results whose category name is in this set will be
      filtered out. Duplicate or unknown category names are ignored. Mutually
      exclusive with `category_allowlist`.
  """
  base_options: _BaseOptions
  display_names_locale: Optional[str] = None
  max_results: Optional[int] = None
  score_threshold: Optional[float] = None
  category_allowlist: Optional[List[str]] = None
  category_denylist: Optional[List[str]] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _TextClassifierGraphOptionsProto:
    """Generates a TextClassifierGraphOptions protobuf object."""
    classifier_options = _ClassifierOptionsProto(
        display_names_locale=self.display_names_locale,
        max_results=self.max_results,
        score_threshold=self.score_threshold,
        category_allowlist=self.category_allowlist,
        category_denylist=self.category_denylist)

    return _TextClassifierGraphOptionsProto(
        base_options=self.base_options.to_pb2(),
        classifier_options=classifier_options)
|
85
|
+
|
86
|
+
|
87
|
+
class TextClassifier(base_text_task_api.BaseTextTaskApi):
  """Class that performs classification on text.

  This API expects a TFLite model with (optional) TFLite Model Metadata that
  contains the mandatory (described below) input tensors, output tensor, and
  the optional (but recommended) category labels as AssociatedFiles with type
  TENSOR_AXIS_LABELS per output classification tensor. Metadata is required
  for models with int32 input tensors because it contains the input process
  unit for the model's Tokenizer. No metadata is required for models with
  string input tensors.

  Input tensors:
    (kTfLiteInt32)
    - 3 input tensors of size `[batch_size x bert_max_seq_len]` representing
      the input ids, segment ids, and mask ids
    - or 1 input tensor of size `[batch_size x max_seq_len]` representing the
      input ids
    or (kTfLiteString)
    - 1 input tensor that is shapeless or has shape [1] containing the input
      string
  At least one output tensor with:
    (kTfLiteFloat32/kBool)
    - `[1 x N]` array with `N` represents the number of categories.
    - optional (but recommended) category labels as AssociatedFiles with type
      TENSOR_AXIS_LABELS, containing one label per line. The first such
      AssociatedFile (if any) is used to fill the `category_name` field of the
      results. The `display_name` field is filled from the AssociatedFile (if
      any) whose locale matches the `display_names_locale` field of the
      `TextClassifierOptions` used at creation time ("en" by default, i.e.
      English). If none of these are available, only the `index` field of the
      results will be filled.
  """

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'TextClassifier':
    """Creates a `TextClassifier` from a model file with default options.

    Args:
      model_path: Path to the model.

    Returns:
      `TextClassifier` object that's created from the model file and the
      default `TextClassifierOptions`.

    Raises:
      ValueError: If failed to create `TextClassifier` object from the
        provided file such as invalid file path.
      RuntimeError: If other types of error occurred.
    """
    default_options = TextClassifierOptions(
        base_options=_BaseOptions(model_asset_path=model_path)
    )
    return cls.create_from_options(default_options)

  @classmethod
  def create_from_options(cls,
                          options: TextClassifierOptions) -> 'TextClassifier':
    """Creates the `TextClassifier` object from text classifier options.

    Args:
      options: Options for the text classifier task.

    Returns:
      `TextClassifier` object that's created from `options`.

    Raises:
      ValueError: If failed to create `TextClassifier` object from
        `TextClassifierOptions` such as missing the model.
      RuntimeError: If other types of error occurred.
    """
    graph_config = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[f'{_TEXT_TAG}:{_TEXT_IN_STREAM_NAME}'],
        output_streams=[
            f'{_CLASSIFICATIONS_TAG}:{_CLASSIFICATIONS_STREAM_NAME}'
        ],
        task_options=options,
    ).generate_graph_config()
    return cls(graph_config)

  def classify(self, text: str) -> TextClassifierResult:
    """Performs classification on the input `text`.

    Args:
      text: The input text.

    Returns:
      A `TextClassifierResult` object that contains a list of text
      classifications.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If text classification failed to run.
    """
    input_packet = packet_creator.create_string(text)
    outputs = self._runner.process({_TEXT_IN_STREAM_NAME: input_packet})

    # Copy out of the packet so the proto outlives the runner's buffers.
    result_proto = classifications_pb2.ClassificationResult()
    result_proto.CopyFrom(
        packet_getter.get_proto(outputs[_CLASSIFICATIONS_STREAM_NAME])
    )
    return TextClassifierResult.create_from_pb2(result_proto)
|
@@ -0,0 +1,188 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
"""MediaPipe text embedder task."""
|
15
|
+
|
16
|
+
import dataclasses
|
17
|
+
from typing import Optional
|
18
|
+
|
19
|
+
from mediapipe.python import packet_creator
|
20
|
+
from mediapipe.python import packet_getter
|
21
|
+
from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2
|
22
|
+
from mediapipe.tasks.cc.components.processors.proto import embedder_options_pb2
|
23
|
+
from mediapipe.tasks.cc.text.text_embedder.proto import text_embedder_graph_options_pb2
|
24
|
+
from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module
|
25
|
+
from mediapipe.tasks.python.components.utils import cosine_similarity
|
26
|
+
from mediapipe.tasks.python.core import base_options as base_options_module
|
27
|
+
from mediapipe.tasks.python.core import task_info as task_info_module
|
28
|
+
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
|
29
|
+
from mediapipe.tasks.python.text.core import base_text_task_api
|
30
|
+
|
31
|
+
TextEmbedderResult = embedding_result_module.EmbeddingResult
|
32
|
+
_BaseOptions = base_options_module.BaseOptions
|
33
|
+
_TextEmbedderGraphOptionsProto = text_embedder_graph_options_pb2.TextEmbedderGraphOptions
|
34
|
+
_EmbedderOptionsProto = embedder_options_pb2.EmbedderOptions
|
35
|
+
_TaskInfo = task_info_module.TaskInfo
|
36
|
+
|
37
|
+
_EMBEDDINGS_OUT_STREAM_NAME = 'embeddings_out'
|
38
|
+
_EMBEDDINGS_TAG = 'EMBEDDINGS'
|
39
|
+
_TEXT_IN_STREAM_NAME = 'text_in'
|
40
|
+
_TEXT_TAG = 'TEXT'
|
41
|
+
_TASK_GRAPH_NAME = 'mediapipe.tasks.text.text_embedder.TextEmbedderGraph'
|
42
|
+
|
43
|
+
|
44
|
+
@dataclasses.dataclass
class TextEmbedderOptions:
  """Options for the text embedder task.

  Attributes:
    base_options: Base options for the text embedder task.
    l2_normalize: Whether to normalize the returned feature vector with L2
      norm. Use this option only if the model does not already contain a
      native L2_NORMALIZATION TF Lite Op. In most cases, this is already the
      case and L2 norm is thus achieved through TF Lite inference.
    quantize: Whether the returned embedding should be quantized to bytes via
      scalar quantization. Embeddings are implicitly assumed to be unit-norm
      and therefore any dimension is guaranteed to have a value in
      [-1.0, 1.0]. Use the l2_normalize option if this is not the case.
  """
  base_options: _BaseOptions
  l2_normalize: Optional[bool] = None
  quantize: Optional[bool] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _TextEmbedderGraphOptionsProto:
    """Generates a TextEmbedderGraphOptions protobuf object."""
    embedder_options = _EmbedderOptionsProto(
        l2_normalize=self.l2_normalize, quantize=self.quantize)
    return _TextEmbedderGraphOptionsProto(
        base_options=self.base_options.to_pb2(),
        embedder_options=embedder_options)
|
73
|
+
|
74
|
+
|
75
|
+
class TextEmbedder(base_text_task_api.BaseTextTaskApi):
  """Class that performs embedding extraction on text.

  This API expects a TFLite model with TFLite Model Metadata that contains the
  mandatory (described below) input tensors and output tensors. Metadata should
  contain the input process unit for the model's Tokenizer as well as input /
  output tensor metadata.

  Input tensors:
    (kTfLiteInt32)
    - 3 input tensors of size `[batch_size x bert_max_seq_len]` with names
      "ids", "mask", and "segment_ids" representing the input ids, mask ids, and
      segment ids respectively.
    - or 1 input tensor of size `[batch_size x max_seq_len]` representing the
      input ids.

  At least one output tensor with:
    (kTfLiteFloat32)
    - `N` components corresponding to the `N` dimensions of the returned
      feature vector for this output layer.
    - Either 2 or 4 dimensions, i.e. `[1 x N]` or `[1 x 1 x 1 x N]`.
  """

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'TextEmbedder':
    """Creates a `TextEmbedder` object from a TensorFlow Lite model and the default `TextEmbedderOptions`.

    Args:
      model_path: Path to the model.

    Returns:
      `TextEmbedder` object that's created from the model file and the default
      `TextEmbedderOptions`.

    Raises:
      ValueError: If failed to create `TextEmbedder` object from the provided
        file such as invalid file path.
      RuntimeError: If other types of error occurred.
    """
    # Delegate to the options-based factory with everything defaulted.
    return cls.create_from_options(
        TextEmbedderOptions(
            base_options=_BaseOptions(model_asset_path=model_path)))

  @classmethod
  def create_from_options(cls, options: TextEmbedderOptions) -> 'TextEmbedder':
    """Creates the `TextEmbedder` object from text embedder options.

    Args:
      options: Options for the text embedder task.

    Returns:
      `TextEmbedder` object that's created from `options`.

    Raises:
      ValueError: If failed to create `TextEmbedder` object from
        `TextEmbedderOptions` such as missing the model.
      RuntimeError: If other types of error occurred.
    """
    # Streams are named "TAG:stream_name" per MediaPipe graph conventions.
    input_stream = ':'.join([_TEXT_TAG, _TEXT_IN_STREAM_NAME])
    output_stream = ':'.join([_EMBEDDINGS_TAG, _EMBEDDINGS_OUT_STREAM_NAME])
    task_info = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[input_stream],
        output_streams=[output_stream],
        task_options=options)
    return cls(task_info.generate_graph_config())

  def embed(self, text: str) -> TextEmbedderResult:
    """Performs text embedding extraction on the provided text.

    Args:
      text: The input text.

    Returns:
      An embedding result object that contains a list of embeddings.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If text embedder failed to run.
    """
    input_packet = packet_creator.create_string(text)
    output_packets = self._runner.process({_TEXT_IN_STREAM_NAME: input_packet})
    # Copy the proto out of the packet so the result outlives the packet.
    result_proto = embeddings_pb2.EmbeddingResult()
    result_proto.CopyFrom(
        packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME]))
    return TextEmbedderResult.create_from_pb2(result_proto)

  @classmethod
  def cosine_similarity(cls, u: embedding_result_module.Embedding,
                        v: embedding_result_module.Embedding) -> float:
    """Utility function to compute cosine similarity between two embedding entries.

    May return an InvalidArgumentError if e.g. the feature vectors are
    of different types (quantized vs. float), have different sizes, or have
    an L2-norm of 0.

    Args:
      u: An embedding entry.
      v: An embedding entry.

    Returns:
      The cosine similarity for the two embeddings.

    Raises:
      ValueError: May return an error if e.g. the feature vectors are of
        different types (quantized vs. float), have different sizes, or have
        an L2-norm of 0.
    """
    # Thin pass-through to the shared cosine-similarity helper module.
    return cosine_similarity.cosine_similarity(u, v)
|
@@ -0,0 +1,90 @@
|
|
1
|
+
# Copyright 2022 The MediaPipe Authors.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
"""MediaPipe Tasks Vision API."""
|
16
|
+
|
17
|
+
import mediapipe.tasks.python.vision.core
|
18
|
+
import mediapipe.tasks.python.vision.face_aligner
|
19
|
+
import mediapipe.tasks.python.vision.face_detector
|
20
|
+
import mediapipe.tasks.python.vision.face_landmarker
|
21
|
+
import mediapipe.tasks.python.vision.face_stylizer
|
22
|
+
import mediapipe.tasks.python.vision.gesture_recognizer
|
23
|
+
import mediapipe.tasks.python.vision.hand_landmarker
|
24
|
+
import mediapipe.tasks.python.vision.holistic_landmarker
|
25
|
+
import mediapipe.tasks.python.vision.image_classifier
|
26
|
+
import mediapipe.tasks.python.vision.image_embedder
|
27
|
+
import mediapipe.tasks.python.vision.image_segmenter
|
28
|
+
import mediapipe.tasks.python.vision.interactive_segmenter
|
29
|
+
import mediapipe.tasks.python.vision.object_detector
|
30
|
+
import mediapipe.tasks.python.vision.pose_landmarker
|
31
|
+
|
32
|
+
FaceAligner = face_aligner.FaceAligner
|
33
|
+
FaceAlignerOptions = face_aligner.FaceAlignerOptions
|
34
|
+
FaceDetector = face_detector.FaceDetector
|
35
|
+
FaceDetectorOptions = face_detector.FaceDetectorOptions
|
36
|
+
FaceDetectorResult = face_detector.FaceDetectorResult
|
37
|
+
FaceLandmarker = face_landmarker.FaceLandmarker
|
38
|
+
FaceLandmarkerOptions = face_landmarker.FaceLandmarkerOptions
|
39
|
+
FaceLandmarkerResult = face_landmarker.FaceLandmarkerResult
|
40
|
+
FaceLandmarksConnections = face_landmarker.FaceLandmarksConnections
|
41
|
+
FaceStylizer = face_stylizer.FaceStylizer
|
42
|
+
FaceStylizerOptions = face_stylizer.FaceStylizerOptions
|
43
|
+
GestureRecognizer = gesture_recognizer.GestureRecognizer
|
44
|
+
GestureRecognizerOptions = gesture_recognizer.GestureRecognizerOptions
|
45
|
+
GestureRecognizerResult = gesture_recognizer.GestureRecognizerResult
|
46
|
+
HandLandmarker = hand_landmarker.HandLandmarker
|
47
|
+
HandLandmarkerOptions = hand_landmarker.HandLandmarkerOptions
|
48
|
+
HandLandmarkerResult = hand_landmarker.HandLandmarkerResult
|
49
|
+
HandLandmarksConnections = hand_landmarker.HandLandmarksConnections
|
50
|
+
ImageClassifier = image_classifier.ImageClassifier
|
51
|
+
ImageClassifierOptions = image_classifier.ImageClassifierOptions
|
52
|
+
ImageClassifierResult = image_classifier.ImageClassifierResult
|
53
|
+
ImageEmbedder = image_embedder.ImageEmbedder
|
54
|
+
ImageEmbedderOptions = image_embedder.ImageEmbedderOptions
|
55
|
+
ImageEmbedderResult = image_embedder.ImageEmbedderResult
|
56
|
+
ImageSegmenter = image_segmenter.ImageSegmenter
|
57
|
+
ImageSegmenterOptions = image_segmenter.ImageSegmenterOptions
|
58
|
+
ImageProcessingOptions = core.image_processing_options.ImageProcessingOptions
|
59
|
+
InteractiveSegmenter = interactive_segmenter.InteractiveSegmenter
|
60
|
+
InteractiveSegmenterOptions = interactive_segmenter.InteractiveSegmenterOptions
|
61
|
+
InteractiveSegmenterRegionOfInterest = interactive_segmenter.RegionOfInterest
|
62
|
+
ObjectDetector = object_detector.ObjectDetector
|
63
|
+
ObjectDetectorOptions = object_detector.ObjectDetectorOptions
|
64
|
+
ObjectDetectorResult = object_detector.ObjectDetectorResult
|
65
|
+
PoseLandmarker = pose_landmarker.PoseLandmarker
|
66
|
+
PoseLandmarkerOptions = pose_landmarker.PoseLandmarkerOptions
|
67
|
+
PoseLandmarkerResult = pose_landmarker.PoseLandmarkerResult
|
68
|
+
PoseLandmarksConnections = pose_landmarker.PoseLandmarksConnections
|
69
|
+
HolisticLandmarker = holistic_landmarker.HolisticLandmarker
|
70
|
+
HolisticLandmarkerOptions = holistic_landmarker.HolisticLandmarkerOptions
|
71
|
+
HolisticLandmarkerResult = holistic_landmarker.HolisticLandmarkerResult
|
72
|
+
|
73
|
+
RunningMode = core.vision_task_running_mode.VisionTaskRunningMode
|
74
|
+
|
75
|
+
# Remove unnecessary modules to avoid duplication in API docs.
|
76
|
+
del core
|
77
|
+
del face_aligner
|
78
|
+
del face_detector
|
79
|
+
del face_landmarker
|
80
|
+
del face_stylizer
|
81
|
+
del gesture_recognizer
|
82
|
+
del hand_landmarker
|
83
|
+
del holistic_landmarker
|
84
|
+
del image_classifier
|
85
|
+
del image_embedder
|
86
|
+
del image_segmenter
|
87
|
+
del interactive_segmenter
|
88
|
+
del object_detector
|
89
|
+
del pose_landmarker
|
90
|
+
del mediapipe
|