mediapipe-nightly 0.10.21.post20241223__cp312-cp312-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mediapipe/__init__.py +26 -0
- mediapipe/calculators/__init__.py +0 -0
- mediapipe/calculators/audio/__init__.py +0 -0
- mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
- mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
- mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
- mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
- mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
- mediapipe/calculators/core/__init__.py +0 -0
- mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
- mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
- mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
- mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
- mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
- mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
- mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
- mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
- mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
- mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
- mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
- mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
- mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
- mediapipe/calculators/image/__init__.py +0 -0
- mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
- mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
- mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
- mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
- mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
- mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
- mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
- mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
- mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
- mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
- mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
- mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
- mediapipe/calculators/internal/__init__.py +0 -0
- mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/__init__.py +0 -0
- mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
- mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
- mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
- mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
- mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
- mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
- mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
- mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
- mediapipe/calculators/tflite/__init__.py +0 -0
- mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
- mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
- mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
- mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/__init__.py +0 -0
- mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
- mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
- mediapipe/calculators/util/association_calculator_pb2.py +31 -0
- mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
- mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
- mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
- mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
- mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
- mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
- mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
- mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
- mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
- mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
- mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
- mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
- mediapipe/calculators/util/latency_pb2.py +26 -0
- mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
- mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
- mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
- mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
- mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
- mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
- mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
- mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
- mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
- mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
- mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
- mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
- mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
- mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
- mediapipe/calculators/video/__init__.py +0 -0
- mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
- mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
- mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
- mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
- mediapipe/calculators/video/tool/__init__.py +0 -0
- mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
- mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
- mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
- mediapipe/examples/__init__.py +14 -0
- mediapipe/examples/desktop/__init__.py +14 -0
- mediapipe/framework/__init__.py +0 -0
- mediapipe/framework/calculator_options_pb2.py +29 -0
- mediapipe/framework/calculator_pb2.py +59 -0
- mediapipe/framework/calculator_profile_pb2.py +48 -0
- mediapipe/framework/deps/__init__.py +0 -0
- mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
- mediapipe/framework/formats/__init__.py +0 -0
- mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
- mediapipe/framework/formats/annotation/__init__.py +0 -0
- mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
- mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
- mediapipe/framework/formats/body_rig_pb2.py +28 -0
- mediapipe/framework/formats/classification_pb2.py +31 -0
- mediapipe/framework/formats/detection_pb2.py +36 -0
- mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
- mediapipe/framework/formats/image_format_pb2.py +29 -0
- mediapipe/framework/formats/landmark_pb2.py +37 -0
- mediapipe/framework/formats/location_data_pb2.py +38 -0
- mediapipe/framework/formats/matrix_data_pb2.py +31 -0
- mediapipe/framework/formats/motion/__init__.py +0 -0
- mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
- mediapipe/framework/formats/object_detection/__init__.py +0 -0
- mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
- mediapipe/framework/formats/rect_pb2.py +29 -0
- mediapipe/framework/formats/time_series_header_pb2.py +28 -0
- mediapipe/framework/graph_runtime_info_pb2.py +31 -0
- mediapipe/framework/mediapipe_options_pb2.py +27 -0
- mediapipe/framework/packet_factory_pb2.py +31 -0
- mediapipe/framework/packet_generator_pb2.py +33 -0
- mediapipe/framework/status_handler_pb2.py +28 -0
- mediapipe/framework/stream_handler/__init__.py +0 -0
- mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
- mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
- mediapipe/framework/stream_handler_pb2.py +30 -0
- mediapipe/framework/test_calculators_pb2.py +31 -0
- mediapipe/framework/thread_pool_executor_pb2.py +29 -0
- mediapipe/framework/tool/__init__.py +0 -0
- mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
- mediapipe/framework/tool/field_data_pb2.py +28 -0
- mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
- mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
- mediapipe/framework/tool/source_pb2.py +33 -0
- mediapipe/framework/tool/switch_container_pb2.py +32 -0
- mediapipe/gpu/__init__.py +0 -0
- mediapipe/gpu/copy_calculator_pb2.py +33 -0
- mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
- mediapipe/gpu/gl_context_options_pb2.py +31 -0
- mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
- mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
- mediapipe/gpu/gpu_origin_pb2.py +29 -0
- mediapipe/gpu/scale_mode_pb2.py +28 -0
- mediapipe/model_maker/__init__.py +27 -0
- mediapipe/model_maker/setup.py +107 -0
- mediapipe/modules/__init__.py +0 -0
- mediapipe/modules/face_detection/__init__.py +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
- mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
- mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
- mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
- mediapipe/modules/face_geometry/__init__.py +0 -0
- mediapipe/modules/face_geometry/data/__init__.py +0 -0
- mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
- mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
- mediapipe/modules/face_geometry/libs/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/__init__.py +0 -0
- mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
- mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
- mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
- mediapipe/modules/face_landmark/__init__.py +0 -0
- mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
- mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
- mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
- mediapipe/modules/hand_landmark/__init__.py +0 -0
- mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
- mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
- mediapipe/modules/hand_landmark/handedness.txt +2 -0
- mediapipe/modules/holistic_landmark/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
- mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
- mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
- mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
- mediapipe/modules/iris_landmark/__init__.py +0 -0
- mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
- mediapipe/modules/objectron/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/__init__.py +0 -0
- mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
- mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
- mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
- mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
- mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
- mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
- mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
- mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
- mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
- mediapipe/modules/palm_detection/__init__.py +0 -0
- mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
- mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
- mediapipe/modules/pose_detection/__init__.py +0 -0
- mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
- mediapipe/modules/pose_landmark/__init__.py +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
- mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
- mediapipe/modules/selfie_segmentation/__init__.py +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
- mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
- mediapipe/python/__init__.py +29 -0
- mediapipe/python/_framework_bindings.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/python/calculator_graph_test.py +251 -0
- mediapipe/python/image_frame_test.py +194 -0
- mediapipe/python/image_test.py +218 -0
- mediapipe/python/packet_creator.py +275 -0
- mediapipe/python/packet_getter.py +120 -0
- mediapipe/python/packet_test.py +533 -0
- mediapipe/python/solution_base.py +604 -0
- mediapipe/python/solution_base_test.py +396 -0
- mediapipe/python/solutions/__init__.py +27 -0
- mediapipe/python/solutions/download_utils.py +37 -0
- mediapipe/python/solutions/drawing_styles.py +249 -0
- mediapipe/python/solutions/drawing_utils.py +320 -0
- mediapipe/python/solutions/drawing_utils_test.py +258 -0
- mediapipe/python/solutions/face_detection.py +105 -0
- mediapipe/python/solutions/face_detection_test.py +92 -0
- mediapipe/python/solutions/face_mesh.py +125 -0
- mediapipe/python/solutions/face_mesh_connections.py +500 -0
- mediapipe/python/solutions/face_mesh_test.py +170 -0
- mediapipe/python/solutions/hands.py +153 -0
- mediapipe/python/solutions/hands_connections.py +32 -0
- mediapipe/python/solutions/hands_test.py +219 -0
- mediapipe/python/solutions/holistic.py +167 -0
- mediapipe/python/solutions/holistic_test.py +142 -0
- mediapipe/python/solutions/objectron.py +288 -0
- mediapipe/python/solutions/objectron_test.py +81 -0
- mediapipe/python/solutions/pose.py +192 -0
- mediapipe/python/solutions/pose_connections.py +22 -0
- mediapipe/python/solutions/pose_test.py +262 -0
- mediapipe/python/solutions/selfie_segmentation.py +76 -0
- mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
- mediapipe/python/timestamp_test.py +78 -0
- mediapipe/tasks/__init__.py +14 -0
- mediapipe/tasks/cc/__init__.py +0 -0
- mediapipe/tasks/cc/audio/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/audio/core/__init__.py +0 -0
- mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
- mediapipe/tasks/cc/components/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
- mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/components/containers/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
- mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
- mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
- mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
- mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
- mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
- mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/components/utils/__init__.py +0 -0
- mediapipe/tasks/cc/core/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/__init__.py +0 -0
- mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
- mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
- mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
- mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
- mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
- mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
- mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
- mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
- mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
- mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
- mediapipe/tasks/cc/text/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/__init__.py +0 -0
- mediapipe/tasks/cc/vision/core/__init__.py +0 -0
- mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
- mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
- mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
- mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
- mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
- mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
- mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
- mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
- mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
- mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
- mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
- mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
- mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
- mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
- mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
- mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
- mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
- mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
- mediapipe/tasks/python/__init__.py +27 -0
- mediapipe/tasks/python/audio/__init__.py +33 -0
- mediapipe/tasks/python/audio/audio_classifier.py +324 -0
- mediapipe/tasks/python/audio/audio_embedder.py +285 -0
- mediapipe/tasks/python/audio/core/__init__.py +16 -0
- mediapipe/tasks/python/audio/core/audio_record.py +125 -0
- mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
- mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
- mediapipe/tasks/python/benchmark/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
- mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
- mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
- mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
- mediapipe/tasks/python/components/__init__.py +13 -0
- mediapipe/tasks/python/components/containers/__init__.py +53 -0
- mediapipe/tasks/python/components/containers/audio_data.py +137 -0
- mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
- mediapipe/tasks/python/components/containers/category.py +78 -0
- mediapipe/tasks/python/components/containers/classification_result.py +111 -0
- mediapipe/tasks/python/components/containers/detections.py +181 -0
- mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
- mediapipe/tasks/python/components/containers/keypoint.py +77 -0
- mediapipe/tasks/python/components/containers/landmark.py +122 -0
- mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
- mediapipe/tasks/python/components/containers/rect.py +109 -0
- mediapipe/tasks/python/components/processors/__init__.py +23 -0
- mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
- mediapipe/tasks/python/components/utils/__init__.py +13 -0
- mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
- mediapipe/tasks/python/core/__init__.py +13 -0
- mediapipe/tasks/python/core/base_options.py +121 -0
- mediapipe/tasks/python/core/optional_dependencies.py +25 -0
- mediapipe/tasks/python/core/task_info.py +139 -0
- mediapipe/tasks/python/genai/__init__.py +14 -0
- mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
- mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
- mediapipe/tasks/python/genai/converter/__init__.py +24 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
- mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
- mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
- mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
- mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
- mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
- mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
- mediapipe/tasks/python/metadata/__init__.py +13 -0
- mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-312-x86_64-linux-gnu.so +0 -0
- mediapipe/tasks/python/metadata/metadata.py +928 -0
- mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
- mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
- mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
- mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
- mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
- mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
- mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
- mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
- mediapipe/tasks/python/test/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/__init__.py +13 -0
- mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
- mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
- mediapipe/tasks/python/test/test_utils.py +196 -0
- mediapipe/tasks/python/test/text/__init__.py +13 -0
- mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
- mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
- mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
- mediapipe/tasks/python/test/vision/__init__.py +13 -0
- mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
- mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
- mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
- mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
- mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
- mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
- mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
- mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
- mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
- mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
- mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
- mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
- mediapipe/tasks/python/text/__init__.py +35 -0
- mediapipe/tasks/python/text/core/__init__.py +16 -0
- mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
- mediapipe/tasks/python/text/language_detector.py +220 -0
- mediapipe/tasks/python/text/text_classifier.py +187 -0
- mediapipe/tasks/python/text/text_embedder.py +188 -0
- mediapipe/tasks/python/vision/__init__.py +90 -0
- mediapipe/tasks/python/vision/core/__init__.py +14 -0
- mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
- mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
- mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
- mediapipe/tasks/python/vision/face_aligner.py +158 -0
- mediapipe/tasks/python/vision/face_detector.py +332 -0
- mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
- mediapipe/tasks/python/vision/face_stylizer.py +158 -0
- mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
- mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
- mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
- mediapipe/tasks/python/vision/image_classifier.py +358 -0
- mediapipe/tasks/python/vision/image_embedder.py +362 -0
- mediapipe/tasks/python/vision/image_segmenter.py +433 -0
- mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
- mediapipe/tasks/python/vision/object_detector.py +389 -0
- mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
- mediapipe/util/__init__.py +0 -0
- mediapipe/util/analytics/__init__.py +0 -0
- mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
- mediapipe/util/audio_decoder_pb2.py +33 -0
- mediapipe/util/color_pb2.py +33 -0
- mediapipe/util/label_map_pb2.py +27 -0
- mediapipe/util/render_data_pb2.py +58 -0
- mediapipe/util/sequence/__init__.py +14 -0
- mediapipe/util/sequence/media_sequence.py +716 -0
- mediapipe/util/sequence/media_sequence_test.py +290 -0
- mediapipe/util/sequence/media_sequence_util.py +800 -0
- mediapipe/util/sequence/media_sequence_util_test.py +389 -0
- mediapipe/util/tracking/__init__.py +0 -0
- mediapipe/util/tracking/box_detector_pb2.py +39 -0
- mediapipe/util/tracking/box_tracker_pb2.py +32 -0
- mediapipe/util/tracking/camera_motion_pb2.py +31 -0
- mediapipe/util/tracking/flow_packager_pb2.py +60 -0
- mediapipe/util/tracking/frame_selection_pb2.py +35 -0
- mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
- mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
- mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
- mediapipe/util/tracking/motion_models_pb2.py +42 -0
- mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
- mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
- mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
- mediapipe/util/tracking/region_flow_pb2.py +49 -0
- mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
- mediapipe/util/tracking/tone_models_pb2.py +32 -0
- mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
- mediapipe/util/tracking/tracking_pb2.py +73 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
- mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
- mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
- mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
- mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
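Every entry above is a new file ("+N -0") because the wheel contents are diffed against an empty base. The counts can be reproduced from the wheel itself; a minimal sketch in Python, assuming a locally downloaded copy (the filename below is illustrative):

import zipfile

# Illustrative local filename; a .whl is a standard zip archive.
WHEEL = (
    "mediapipe_nightly-0.10.21.post20241223-cp312-cp312"
    "-manylinux_2_28_x86_64.whl"
)

with zipfile.ZipFile(WHEEL) as whl:
  for info in whl.infolist():
    if info.filename.endswith(".py"):
      # Against an empty base, every line of a new text file is an
      # addition, which is why the Python entries above all read "+N -0".
      added = whl.read(info.filename).count(b"\n")
      print(f"- {info.filename} +{added} -0")

Binary payloads (.so, .tflite, .binarypb) carry no line diff, hence their "+0 -0" entries.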
--- /dev/null
+++ mediapipe/tasks/python/genai/converter/converter_base.py
@@ -0,0 +1,179 @@
# Copyright 2024 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Defines a couple of base classes for the conversion/quantization process."""

import os
from typing import Dict, Iterator, List, Optional, Tuple

import numpy as np


class QuantizationAction:
  """Container of the tensor values and their corresponding quantization settings.

  The container is responsible for hosting all of the information that is
  required to execute the weight-only quantization.

  Attributes:
    tensor_name: A string that represents the input tensor name.
    tensor_value: A numpy array that contains the unquantized tensor values.
    target_name: A string that represents the updated tensor name.
    quantize_axis: A list of integers representing the dimensions to be
      quantized along. For example, if an input tensor has shape [128, 256] and
      quantize_axis == [0], the quantization happens along the 0-th dimension,
      resulting in [256] scaling factors.
    quantize_bits: An integer that specifies the target quantization bits.
      Currently only 8 or 4 bits are supported.
    pack_dim: An integer specifying which dimension to pack the quantized bits.
      This is only applicable when quantize_bits == 4.
  """

  def __init__(
      self,
      tensor_name: str,
      tensor_value: Optional[np.ndarray] = None,
      target_name: Optional[str] = None,
      quantize_axis: Optional[List[int]] = None,
      quantize_bits: Optional[int] = None,
      pack_dim: Optional[int] = 0,
  ):
    """Initializes the model attributes."""
    self.tensor_name = tensor_name
    self.tensor_value = tensor_value
    self.target_name = target_name
    self.quantize_axis = quantize_axis
    self.quantize_bits = quantize_bits
    self.pack_dim = pack_dim

  def __str__(self) -> str:
    output_string = "QuantizationAction(\n"
    output_string += f" tensor_name: {self.tensor_name}\n"
    output_string += f" target_name: {self.target_name}\n"
    output_string += f" quantize_axis: {self.quantize_axis}\n"
    output_string += f" quantize_bits: {self.quantize_bits}\n"
    output_string += f" pack_dim: {self.pack_dim}\n"
    if self.tensor_value is not None:
      output_string += f" tensor_value: {self.tensor_value.shape}\n"
    output_string += ")\n"
    return output_string


class CkptLoaderBase:
  """Base class for loading the checkpoint.

  This class is responsible for loading the checkpoint files into the layer
  weight tensors (as numpy arrays) plus the quantization setting information
  (8/4 bits). The returned data should be a list of QuantizationAction that
  describes how to quantize each layer's weights.
  """

  def __init__(
      self,
      ckpt_path: str,
      is_symmetric: bool,
      attention_quant_bits: int,
      feedforward_quant_bits: int,
      embedding_quant_bits: int,
  ):
    """Initializes the loader.

    Args:
      ckpt_path: The filepath to the checkpoint.
      is_symmetric: Whether to apply symmetric or asymmetric quantization.
      attention_quant_bits: An integer that specifies the target quantization
        bits (8 or 4) for the attention layers.
      feedforward_quant_bits: An integer that specifies the target quantization
        bits (8 or 4) for the feedforward layers in each Transformer block.
      embedding_quant_bits: An integer that specifies the target quantization
        bits (8 or 4) for the embedding (and the final projection) layers.
    """
    self._ckpt_path = ckpt_path
    self._is_symmetric = is_symmetric
    self._attention_quant_bits = attention_quant_bits
    self._feedforward_quant_bits = feedforward_quant_bits
    self._embedding_quant_bits = embedding_quant_bits

  def load_to_actions(
      self,
  ) -> Iterator[Optional[List[QuantizationAction]]]:
    """Loads the checkpoint and returns the quantization actions."""
    raise NotImplementedError("The load_to_actions method is not implemented.")


class LayerActionMapperBase:
  """Base class for mapping the layer weights to quantization actions.

  This class is responsible for mapping from each layer to its corresponding
  quantization information (e.g. target quantization bits / updated tensor
  name...).
  """

  def __init__(
      self,
      is_symmetric: bool,
      attention_quant_bits: int,
      feedforward_quant_bits: int,
      embedding_quant_bits: int,
      backend: str,
  ):
    self._is_symmetric = is_symmetric
    self._attention_quant_bits = attention_quant_bits
    self._feedforward_quant_bits = feedforward_quant_bits
    self._embedding_quant_bits = embedding_quant_bits
    self._backend = backend

  def map_to_actions(
      self, layer_name: str
  ) -> Optional[List[QuantizationAction]]:
    """Maps the layer weights to quantization actions.

    Args:
      layer_name: A string representing the name of the layer weight. Note
        that the layer information is expected to be contained in the name,
        which is enough to determine the target quantization information. Any
        child class is expected to implement this function.
    """
    raise NotImplementedError("The map_to_actions method is not implemented.")


class ModelWriterBase:
  """Base class for writing the quantized model.

  This class is responsible for taking a dictionary of the quantized
  tensors/names and writing them into the format that can be loaded by the
  on-device inference engine.
  """

  def __init__(self, output_dir: str, backend: str):
    """Initializes the class.

    Args:
      output_dir: A string that represents the output directory to write the
        resulting file(s).
      backend: A string that represents the target backend to run the output
        file(s).
    """
    self._output_dir = output_dir
    if not os.path.exists(self._output_dir):
      os.mkdir(self._output_dir)
    self._backend = backend

  def write_variables(
      self,
      variables: Dict[str, Tuple[np.ndarray, bool]],
      use_fake_values: bool = False,
  ):
    raise NotImplementedError("The write_variables method is not implemented.")
--- /dev/null
+++ mediapipe/tasks/python/genai/converter/converter_factory.py
@@ -0,0 +1,79 @@
# Copyright 2024 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility library that helps create the converter instances."""
from mediapipe.tasks.python.genai.converter import converter_base
from mediapipe.tasks.python.genai.converter import pytorch_converter
from mediapipe.tasks.python.genai.converter import safetensors_converter
from mediapipe.tasks.python.genai.converter import weight_bins_writer


def create_ckpt_loader(
    ckpt_format: str, *args, **kwargs
) -> converter_base.CkptLoaderBase:
  """Creates the checkpoint loader.

  Args:
    ckpt_format: A string that indicates the input checkpoint format.
    *args: Additional arguments to be passed into the loader.
    **kwargs: Additional arguments to be passed into the loader.

  Returns:
    A created CkptLoader instance.
  """
  del args
  if ckpt_format == "pytorch":
    return pytorch_converter.PytorchCkptLoader(
        ckpt_path=kwargs["ckpt_path"],
        is_symmetric=kwargs["is_symmetric"],
        attention_quant_bits=kwargs["attention_quant_bits"],
        feedforward_quant_bits=kwargs["feedforward_quant_bits"],
        embedding_quant_bits=kwargs["embedding_quant_bits"],
        special_model=kwargs["special_model"],
        backend=kwargs["backend"],
    )
  elif ckpt_format == "safetensors":
    return safetensors_converter.SafetensorsCkptLoader(
        ckpt_path=kwargs["ckpt_path"],
        is_symmetric=kwargs["is_symmetric"],
        attention_quant_bits=kwargs["attention_quant_bits"],
        feedforward_quant_bits=kwargs["feedforward_quant_bits"],
        embedding_quant_bits=kwargs["embedding_quant_bits"],
        special_model=kwargs["special_model"],
        backend=kwargs["backend"],
    )
  else:
    raise ValueError(f"Unknown checkpoint format: {ckpt_format}")


def create_writer(
    writer_type: str, *args, **kwargs
) -> converter_base.ModelWriterBase:
  """Creates the model writer.

  Args:
    writer_type: A string that indicates which model writer to create.
    *args: Additional arguments to be passed into the writer.
    **kwargs: Additional arguments to be passed into the writer.

  Returns:
    A created ModelWriter instance.
  """
  del args
  if writer_type == "weight_bins":
    return weight_bins_writer.WeightBinsWriter(
        output_dir=kwargs["output_dir"], backend=kwargs["backend"]
    )
  else:
    raise ValueError(f"Unknown writer type: {writer_type}")
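Both factory functions discard *args and dispatch purely on keyword arguments, so callers pass every loader parameter by name. A hypothetical end-to-end call (all paths and flag values below are placeholders, not values shipped with this package):

from mediapipe.tasks.python.genai.converter import converter_factory

# The keyword names are the ones create_ckpt_loader reads out of kwargs;
# the concrete values are made up for illustration.
loader = converter_factory.create_ckpt_loader(
    "safetensors",
    ckpt_path="/tmp/checkpoint/",  # placeholder
    is_symmetric=True,
    attention_quant_bits=8,
    feedforward_quant_bits=8,
    embedding_quant_bits=8,
    special_model="GEMMA_2B",  # placeholder
    backend="gpu",
)
writer = converter_factory.create_writer(
    "weight_bins",
    output_dir="/tmp/converted/",  # placeholder
    backend="gpu",
)
for actions in loader.load_to_actions():
  # Each yielded item is an Optional[List[QuantizationAction]], per the
  # CkptLoaderBase.load_to_actions signature in converter_base.py above.
  print(actions)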
@@ -0,0 +1,374 @@
|
|
1
|
+
"""Functions to perform the checkpoint conversion."""
|
2
|
+
|
3
|
+
import contextlib
|
4
|
+
import os
|
5
|
+
from typing import List, Optional
|
6
|
+
|
7
|
+
from absl import logging
|
8
|
+
import numpy as np
|
9
|
+
|
10
|
+
from mediapipe.python._framework_bindings import model_ckpt_util
|
11
|
+
from mediapipe.tasks.python.genai.converter import converter_base
|
12
|
+
from mediapipe.tasks.python.genai.converter import converter_factory
|
13
|
+
from mediapipe.tasks.python.genai.converter import quantization_util
|
14
|
+
|
15
|
+
|
16
|
+
class ConversionConfig(object):
|
17
|
+
"""Config for checkpoint conversion.
|
18
|
+
|
19
|
+
Attributes:
|
20
|
+
input_ckpt: Directory or path for the input checkpoint.
|
21
|
+
ckpt_format: Checkpoint format, e.g. 'safetensors', 'pytorch'.
|
22
|
+
model_type: Name of the model, e.g. GEMMA_2B.
|
23
|
+
backend: Target backend to run the model. Can be either "cpu" or "gpu".
|
24
|
+
output_dir: Where the output file(s) to be stored.
|
25
|
+
is_symmetric: Whether to quantize symmetrically.
|
26
|
+
attention_quant_bits: Target quantization bits for the attention layers.
|
27
|
+
feedforward_quant_bits: Target quantization bits for the feedforward layers.
|
28
|
+
embedding_quant_bits: Target quantization bits for the embedding layers.
|
29
|
+
combine_file_only: Whether to combine the weight files only (assuming the
|
30
|
+
weight files are already existed).
|
31
|
+
vocab_model_file: The file path to the 1) SentencePiece vocab model; 2)
|
32
|
+
Hugging Face BPE tokenizer files; 1) is applicable for the Gemma model and
|
33
|
+
2) is applicable for other models. When 2) is used, the provided path is
|
34
|
+
expected to point to a directory that contains both tokenizer.json and
|
35
|
+
tokenizer_config.json files.
|
36
|
+
obfuscate: Whether to obfuscate the model.
|
37
|
+
output_tflite_file: (optional) the output tflite filename. If not provided,
|
38
|
+
the output will be `model.tflite` stored in the output_dir.
|
39
|
+
fp16_scale: A scalar value between [0, 1]. Some models can run into
|
40
|
+
activation overflow issue when running in 16-bit floating point mode. To
|
41
|
+
solve this, we need to scale down the weights of certain layers. See
|
42
|
+
go/llm-on-device-fp16 for more detailed explanation.
|
43
|
+
lora_ckpt: The directory or path for the lora checkpoint. Required in order
|
44
|
+
to convert the lora weights.
|
45
|
+
lora_rank: An integer representing the rank of LoRA. Required in order to
|
46
|
+
convert the lora weights.If not provided, then the converter assumes there
|
47
|
+
is no LoRA weights. Note that only the GPU backend supports LoRA.
|
48
|
+
lora_output_tflite_file: A string indicating the name of the generated
|
49
|
+
tflite file for the LoRA weight. Only applicable when the lora_rank is not
|
50
|
+
zero.
|
51
|
+
image_encoder_file: A string with the name of the image encoder tflite file.
|
52
|
+
image_adapter_file: A string with the name of the image adapter tflite file.
|
53
|
+
submodel_type: Name of submodel, e.g. GEMMA_2B.
|
54
|
+
use_fake_weights: Whether to use fake weights. If set to True, the weights
|
55
|
+
will be filled with zeros.
|
56
|
+
"""
|
57
|
+
|
58
|
+
def __init__(
|
59
|
+
self,
|
60
|
+
input_ckpt: str,
|
61
|
+
ckpt_format: str,
|
62
|
+
model_type: str,
|
63
|
+
backend: str,
|
64
|
+
output_dir: str,
|
65
|
+
is_symmetric: bool = True,
|
66
|
+
attention_quant_bits: int = 8,
|
67
|
+
feedforward_quant_bits: int = 8,
|
68
|
+
embedding_quant_bits: int = 8,
|
69
|
+
combine_file_only: bool = False,
|
70
|
+
vocab_model_file: str = '',
|
71
|
+
obfuscate: bool = False,
|
72
|
+
output_tflite_file: Optional[str] = None,
|
73
|
+
fp16_scale: Optional[float] = None,
|
74
|
+
lora_ckpt: Optional[str] = None,
|
75
|
+
lora_rank: Optional[int] = None,
|
76
|
+
lora_output_tflite_file: Optional[str] = None,
|
77
|
+
image_encoder_file: Optional[str] = None,
|
78
|
+
image_adapter_file: Optional[str] = None,
|
79
|
+
submodel_type: Optional[str] = None,
|
80
|
+
use_fake_weights: bool = False,
|
81
|
+
):
|
82
|
+
self.input_ckpt = input_ckpt
|
83
|
+
self.ckpt_format = ckpt_format
|
84
|
+
self.model_type = model_type
|
85
|
+
self.backend = backend
|
86
|
+
if os.path.isfile(output_dir):
|
87
|
+
raise ValueError('Output directory mush not point to an existing file.')
|
88
|
+
if not os.path.isdir(output_dir):
|
89
|
+
logging.info('Creating output directory: %s', output_dir)
|
90
|
+
os.makedirs(output_dir, exist_ok=True)
|
91
|
+
self.output_dir = output_dir
|
92
|
+
self.is_symmetric = is_symmetric
|
93
|
+
self.attention_quant_bits = attention_quant_bits
|
94
|
+
self.feedforward_quant_bits = feedforward_quant_bits
|
95
|
+
self.embedding_quant_bits = embedding_quant_bits
|
96
|
+
self.combine_file_only = combine_file_only
|
97
|
+
self.vocab_model_file = vocab_model_file
|
98
|
+
self.obfuscate = obfuscate
|
99
|
+
self.image_encoder_file = image_encoder_file
|
100
|
+
self.image_adapter_file = image_adapter_file
|
101
|
+
self.submodel_type = submodel_type
|
102
|
+
self.use_fake_weights = use_fake_weights
|
103
|
+
if output_tflite_file:
|
104
|
+
parent_dir = os.path.dirname(output_tflite_file)
|
105
|
+
if not os.path.isdir(parent_dir):
|
106
|
+
logging.info('Creating tflite parent directory: %s', parent_dir)
|
107
|
+
os.makedirs(parent_dir, exist_ok=True)
|
108
|
+
self.output_tflite_file = output_tflite_file
|
109
|
+
else:
|
110
|
+
self.output_tflite_file = os.path.join(output_dir, 'model.tflite')
|
111
|
+
|
112
|
+
    self.fp16_scale = fp16_scale
    self.lora_ckpt = lora_ckpt
    self.lora_rank = lora_rank
    self.lora_output_tflite_file = lora_output_tflite_file
    if (self.lora_ckpt is None) ^ (self.lora_rank is None):
      raise ValueError(
          'lora_ckpt and lora_rank must be either both provided or both not'
          ' provided.'
      )
    if self.lora_rank is not None:
      if backend == 'cpu':
        raise ValueError('LoRA is not supported for CPU backend.')
      lora_applicable_models = ['GEMMA_2B', 'GEMMA2_2B', 'PHI_2']
      if model_type not in lora_applicable_models:
        raise ValueError(
            'LoRA is only applicable for the model_type:'
            f' {", ".join(lora_applicable_models)}, but got model_type:'
            f' {model_type}.'
        )


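# Illustrative usage (a sketch, not part of the released module): constructing
# a ConversionConfig for a GPU conversion. All paths below are hypothetical
# placeholders, and 'safetensors' is assumed to be one of the checkpoint
# formats the converter factory accepts.
#
#   config = ConversionConfig(
#       input_ckpt='/tmp/gemma_ckpt/',
#       ckpt_format='safetensors',
#       model_type='GEMMA_2B',
#       backend='gpu',
#       output_dir='/tmp/converted/',
#   )
#   convert_checkpoint(config)

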
def quantize_by_actions(
    actions: List[converter_base.QuantizationAction],
    backend: str,
    is_symmetric: bool,
):
  """Quantizes the weights by actions.

  Args:
    actions: A list of QuantizationAction that contains the information and
      tensor values to be quantized.
    backend: Target backend to run the model. Can be either "cpu" or "gpu".
    is_symmetric: Whether to quantize symmetrically.

  Returns:
    A dictionary that maps from the updated tensor names to the quantized
    tensor values plus a boolean that indicates whether the tensor values need
    to be packed (only applicable for the 4-bit quantized weights).
  """
  output_tensors = {}
  for action in actions:
    if action.tensor_value is None:
      continue
    # The dtype needs to be compared as a string since it is a custom numpy
    # dtype. Explicitly cast the bfloat16 and float16 dtypes to float32 to
    # make sure their values are converted and serialized correctly.
    if (
        str(action.tensor_value.dtype) == 'bfloat16'
        or action.tensor_value.dtype == np.float16
    ):
      action.tensor_value = action.tensor_value.astype(np.float32)
    if (
        action.tensor_value.dtype != np.float32
        and action.tensor_value.dtype != np.int8
    ):
      raise ValueError(
          'All tensors should be cast to either float32 or int8, but got: %s'
          % action.tensor_value.dtype
      )
    if action.quantize_axis:
      pack = action.quantize_bits == 4
      if action.tensor_value.dtype == np.int8:
        # Pre-quantized checkpoint: pass the int8 values through unchanged.
        if backend == 'cpu' and pack:
          raise ValueError(
              'Converting pre-quantized checkpoint into 4-bit is not supported'
              ' for CPU backend.'
          )
        output_tensors[action.target_name] = (action.tensor_value, pack)
      else:
        if is_symmetric:
          target_var, scale = quantization_util.quantize_tensor(
              var=action.tensor_value,
              axis=action.quantize_axis,
              sym=is_symmetric,
              number_bits=action.quantize_bits,
          )
          output_tensors[action.target_name] = (target_var, pack)
          output_tensors[action.target_name + '_quantized_scale'] = (
              scale,
              False,
          )
          zp = None
        else:
          target_var, scale, zp = quantization_util.quantize_tensor(
              var=action.tensor_value,
              axis=action.quantize_axis,
              sym=is_symmetric,
              number_bits=action.quantize_bits,
          )
          if backend == 'cpu' and pack:
            target_var, scale, zp = quantization_util.update_to_uint4(
                target_var, scale, zp
            )
          output_tensors[action.target_name] = (target_var, pack)
          output_tensors[action.target_name + '_quantized_scale'] = (
              scale,
              False,
          )
          if zp is not None:
            output_tensors[action.target_name + '_quantized_zp'] = (zp, False)
    else:
      output_tensors[action.target_name] = (action.tensor_value, False)
  return output_tensors


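# Illustrative arithmetic (a sketch, not part of the released module): for
# symmetric 8-bit quantization, the scale for a tensor is expected to be
# roughly max(abs(w)) / 127, which is what the unit test at the end of this
# diff asserts. Assuming quantization_util follows that convention:
#
#   import numpy as np
#   w = np.array([1.0, -1.0, 2.0, -2.0], dtype=np.float32)
#   scale = np.abs(w).max() / 127.0           # ~0.015748
#   q = np.round(w / scale).astype(np.int8)   # [64, -64, 127, -127]

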
def combined_weight_bins_to_tflite(
    model_type: str,
    backend: str,
    weight_path: str,
    output_tflite_file: str,
    obfuscate: bool,
    vocab_model_file: str,
    lora_rank: Optional[int] = None,
    lora_weight_path: Optional[str] = None,
    lora_output_tflite_file: Optional[str] = None,
    image_encoder_file: Optional[str] = None,
    image_adapter_file: Optional[str] = None,
    submodel_type: Optional[str] = None,
):
  """Combines the weight files into a tflite file."""
  if backend == 'cpu':
    if lora_rank is not None:
      logging.fatal('LoRA is not supported for CPU backend.')
    model_ckpt_util.GenerateCpuTfLite(
        model_type,
        weight_path,
        vocab_model_file,
        True,
        output_tflite_file,
    )
  elif backend == 'gpu':
    model_ckpt_util.GenerateGpuTfLite(
        model_type,
        weight_path,
        vocab_model_file,
        True,
        obfuscate,
        output_tflite_file,
        0 if lora_rank is None else lora_rank,
        '' if lora_weight_path is None else lora_weight_path,
        '' if lora_output_tflite_file is None else lora_output_tflite_file,
        '' if image_encoder_file is None else image_encoder_file,
        '' if image_adapter_file is None else image_adapter_file,
        '' if submodel_type is None else submodel_type,
    )
  else:
    raise ValueError('Unsupported backend: %s' % backend)


def convert_bpe_vocab(vocab_model_file: str, output_dir: str) -> str:
  """Converts a Hugging Face BPE tokenizer directory into an spm.model file."""
  if not os.path.isdir(vocab_model_file):
    raise ValueError(
        'The input BPE vocab model file path is expected to be a directory'
        ' that contains both tokenizer.json and tokenizer_config.json files.'
    )
  output_vocab_file = os.path.join(output_dir, 'spm.model')
  model_ckpt_util.ConvertHfTokenizer(vocab_model_file, output_vocab_file)
  return output_vocab_file


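# Illustrative usage (a sketch, not part of the released module): converting a
# Hugging Face tokenizer directory into the spm.model file used by the tflite
# packaging step. The input directory is a hypothetical placeholder and must
# contain tokenizer.json and tokenizer_config.json.
#
#   vocab_path = convert_bpe_vocab('/tmp/hf_tokenizer/', '/tmp/converted/')
#   # vocab_path == '/tmp/converted/spm.model'

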
@contextlib.contextmanager
def filemanager(filename: str, mode: str):
  """Opens `filename` in `mode` and yields the file object."""
  try:
    with open(filename, mode) as f:
      yield f
  finally:
    pass


def sort_layer_info(layer_info_file: str) -> None:
  """Deduplicates and sorts the entries of the layer info file in place."""
  layer_info = []
  with filemanager(layer_info_file, 'r') as finfo:
    for line in finfo:
      line = line.strip()
      if line:
        layer_info.append(line)
  # Remove duplicates and write the entries back in sorted order.
  layer_info = list(set(layer_info))
  layer_info.sort()
  with filemanager(layer_info_file, 'w') as finfo:
    for line in layer_info:
      finfo.write(line + '\n')
    finfo.write('\n')


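# Illustrative behavior (a sketch, not part of the released module), assuming
# a hypothetical layer_info.txt path:
#
#   with open('/tmp/converted/layer_info.txt', 'w') as f:
#     f.write('params.lm.b\nparams.lm.a\nparams.lm.a\n')
#   sort_layer_info('/tmp/converted/layer_info.txt')
#   # The file now contains: 'params.lm.a\nparams.lm.b\n\n'

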
def maybe_quantize_and_write_tensors_to_bins(
    ckpt_loader: converter_base.CkptLoaderBase,
    config: ConversionConfig,
) -> None:
  """Quantizes the weight tensors according to the loader and writes them to bins."""
  actions = ckpt_loader.load_to_actions()

  for action in actions:
    # Quantize the weights. Each item yielded by the loader is expected to be
    # a batch (list) of QuantizationActions, so it can be passed to
    # quantize_by_actions directly.
    quantized_tensors = quantize_by_actions(
        action, config.backend, config.is_symmetric
    )
    del action
    # Write the tensors into file(s), then free them so only one batch of
    # quantized tensors is held in memory at a time.
    writer = converter_factory.create_writer(
        writer_type='weight_bins',
        output_dir=config.output_dir,
        backend=config.backend,
    )
    writer.write_variables(quantized_tensors, config.use_fake_weights)
    del quantized_tensors
    del writer


def convert_checkpoint(config: ConversionConfig) -> None:
  """Converts the checkpoint into a tflite file."""
  logging.info('input folder: %s', config.input_ckpt)

  if os.path.isdir(config.vocab_model_file):
    vocab_model_path = convert_bpe_vocab(
        config.vocab_model_file, config.output_dir
    )
  else:
    vocab_model_path = config.vocab_model_file

  if not config.combine_file_only:
    # Load the layer weights and prepare the quantization configurations.
    loader = converter_factory.create_ckpt_loader(
        config.ckpt_format,
        ckpt_path=config.input_ckpt,
        is_symmetric=config.is_symmetric,
        backend=config.backend,
        attention_quant_bits=config.attention_quant_bits,
        feedforward_quant_bits=config.feedforward_quant_bits,
        embedding_quant_bits=config.embedding_quant_bits,
        special_model=config.model_type,
        fp16_scale=config.fp16_scale,
    )
    maybe_quantize_and_write_tensors_to_bins(loader, config)

    if config.lora_ckpt is not None and config.lora_ckpt != config.input_ckpt:
      # If the LoRA ckpt and the input ckpt are the same, the LoRA conversion
      # has already been handled by the loader above.
      lora_loader = converter_factory.create_ckpt_loader(
          config.ckpt_format,
          ckpt_path=config.lora_ckpt,
          is_symmetric=config.is_symmetric,
          backend=config.backend,
          attention_quant_bits=config.attention_quant_bits,
          feedforward_quant_bits=config.feedforward_quant_bits,
          embedding_quant_bits=config.embedding_quant_bits,
          special_model=config.model_type,
      )
      maybe_quantize_and_write_tensors_to_bins(lora_loader, config)

  sort_layer_info(os.path.join(config.output_dir, 'layer_info.txt'))

  combined_weight_bins_to_tflite(
      config.model_type,
      config.backend,
      weight_path=config.output_dir,
      output_tflite_file=config.output_tflite_file,
      obfuscate=config.obfuscate,
      vocab_model_file=vocab_model_path,
      lora_rank=config.lora_rank,
      lora_weight_path=config.output_dir,
      lora_output_tflite_file=config.lora_output_tflite_file,
      image_encoder_file=config.image_encoder_file,
      image_adapter_file=config.image_adapter_file,
      submodel_type=config.submodel_type,
  )

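# Illustrative usage (a sketch, not part of the released module): a
# LoRA-enabled conversion. lora_ckpt and lora_rank must be provided together,
# and the GPU backend is required (see the checks in ConversionConfig). All
# paths below are hypothetical placeholders.
#
#   lora_config = ConversionConfig(
#       input_ckpt='/tmp/gemma_ckpt/',
#       ckpt_format='safetensors',
#       model_type='GEMMA_2B',
#       backend='gpu',
#       output_dir='/tmp/converted/',
#       lora_ckpt='/tmp/lora_ckpt/',
#       lora_rank=4,
#       lora_output_tflite_file='/tmp/converted/lora.tflite',
#   )
#   convert_checkpoint(lora_config)
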
@@ -0,0 +1,63 @@
"""Tests for llm_converter."""

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

import unittest
from mediapipe.tasks.python.genai.converter import converter_base
from mediapipe.tasks.python.genai.converter import llm_converter


# parameterized.TestCase already derives from unittest's TestCase, so it is
# the only base class needed here.
class LlmConverterTest(parameterized.TestCase):

  def get_fake_action(self, input_dtype):
    if input_dtype == 'bfloat16':
      # Create a TensorFlow bfloat16 tensor.
      bfloat16_tensor = tf.constant([1.0, -1.0, 2.0, -2.0], dtype=tf.bfloat16)
      # Convert the TensorFlow tensor to a NumPy array; this keeps the custom
      # bfloat16 dtype, which quantize_by_actions casts to float32.
      tensor_value = bfloat16_tensor.numpy()
    else:
      tensor_value = np.array(
          [1.0, -1.0, 2.0, -2.0], dtype=np.dtype(input_dtype)
      )
    return converter_base.QuantizationAction(
        tensor_name='mdl_vars.params.lm.softmax.logits_ffn.w',
        target_name='params.lm.softmax.logits_ffn.w',
        quantize_axis=[0],
        quantize_bits=8,
        pack_dim=0,
        tensor_value=tensor_value,
    )

  @parameterized.parameters(
      {'input_dtype': 'float32'},
      {'input_dtype': 'float16'},
      {'input_dtype': 'bfloat16'},
      {'input_dtype': 'int8'},
  )
  def test_quantize_by_actions(self, input_dtype):
    out = llm_converter.quantize_by_actions(
        [self.get_fake_action(input_dtype)], backend='gpu', is_symmetric=True
    )

    if input_dtype == 'int8':
      # The values are pre-quantized and should be passed through unchanged.
      np.testing.assert_allclose(
          out['params.lm.softmax.logits_ffn.w'][0],
          np.array([1, -1, 2, -2], dtype=np.int8),
      )
    else:
      np.testing.assert_allclose(
          out['params.lm.softmax.logits_ffn.w'][0],
          np.array([64, -64, 127, -127], dtype=np.int8),
      )
      np.testing.assert_allclose(
          out['params.lm.softmax.logits_ffn.w_quantized_scale'][0],
          np.array(0.015748, dtype=np.float32),
          rtol=1e-03,
      )


if __name__ == '__main__':
  unittest.main()