mediapipe_nightly-0.10.21.post20241223-cp310-cp310-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (593)
  1. mediapipe/__init__.py +26 -0
  2. mediapipe/calculators/__init__.py +0 -0
  3. mediapipe/calculators/audio/__init__.py +0 -0
  4. mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
  5. mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
  6. mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
  7. mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
  8. mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
  9. mediapipe/calculators/core/__init__.py +0 -0
  10. mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
  11. mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
  12. mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
  13. mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
  14. mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
  15. mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
  16. mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
  17. mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
  18. mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
  19. mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
  20. mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
  21. mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
  22. mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
  23. mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
  24. mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
  25. mediapipe/calculators/image/__init__.py +0 -0
  26. mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
  27. mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
  28. mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
  29. mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
  30. mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
  31. mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
  32. mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
  33. mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
  34. mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
  35. mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
  36. mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
  37. mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
  38. mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
  39. mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
  40. mediapipe/calculators/internal/__init__.py +0 -0
  41. mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
  42. mediapipe/calculators/tensor/__init__.py +0 -0
  43. mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
  44. mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
  45. mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
  46. mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
  47. mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
  48. mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
  49. mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
  50. mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
  51. mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
  52. mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
  53. mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
  54. mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
  55. mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
  56. mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
  57. mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
  58. mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
  59. mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
  60. mediapipe/calculators/tflite/__init__.py +0 -0
  61. mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
  62. mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
  63. mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
  64. mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
  65. mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
  66. mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
  67. mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
  68. mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
  69. mediapipe/calculators/util/__init__.py +0 -0
  70. mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
  71. mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
  72. mediapipe/calculators/util/association_calculator_pb2.py +31 -0
  73. mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
  74. mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
  75. mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
  76. mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
  77. mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
  78. mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
  79. mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
  80. mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
  81. mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
  82. mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
  83. mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
  84. mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
  85. mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
  86. mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
  87. mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
  88. mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
  89. mediapipe/calculators/util/latency_pb2.py +26 -0
  90. mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
  91. mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
  92. mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
  93. mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
  94. mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
  95. mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
  96. mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
  97. mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
  98. mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
  99. mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
  100. mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
  101. mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
  102. mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
  103. mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
  104. mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
  105. mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
  106. mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
  107. mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
  108. mediapipe/calculators/video/__init__.py +0 -0
  109. mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
  110. mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
  111. mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
  112. mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
  113. mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
  114. mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
  115. mediapipe/calculators/video/tool/__init__.py +0 -0
  116. mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
  117. mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
  118. mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
  119. mediapipe/examples/__init__.py +14 -0
  120. mediapipe/examples/desktop/__init__.py +14 -0
  121. mediapipe/framework/__init__.py +0 -0
  122. mediapipe/framework/calculator_options_pb2.py +29 -0
  123. mediapipe/framework/calculator_pb2.py +59 -0
  124. mediapipe/framework/calculator_profile_pb2.py +48 -0
  125. mediapipe/framework/deps/__init__.py +0 -0
  126. mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
  127. mediapipe/framework/formats/__init__.py +0 -0
  128. mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
  129. mediapipe/framework/formats/annotation/__init__.py +0 -0
  130. mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
  131. mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
  132. mediapipe/framework/formats/body_rig_pb2.py +28 -0
  133. mediapipe/framework/formats/classification_pb2.py +31 -0
  134. mediapipe/framework/formats/detection_pb2.py +36 -0
  135. mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
  136. mediapipe/framework/formats/image_format_pb2.py +29 -0
  137. mediapipe/framework/formats/landmark_pb2.py +37 -0
  138. mediapipe/framework/formats/location_data_pb2.py +38 -0
  139. mediapipe/framework/formats/matrix_data_pb2.py +31 -0
  140. mediapipe/framework/formats/motion/__init__.py +0 -0
  141. mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
  142. mediapipe/framework/formats/object_detection/__init__.py +0 -0
  143. mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
  144. mediapipe/framework/formats/rect_pb2.py +29 -0
  145. mediapipe/framework/formats/time_series_header_pb2.py +28 -0
  146. mediapipe/framework/graph_runtime_info_pb2.py +31 -0
  147. mediapipe/framework/mediapipe_options_pb2.py +27 -0
  148. mediapipe/framework/packet_factory_pb2.py +31 -0
  149. mediapipe/framework/packet_generator_pb2.py +33 -0
  150. mediapipe/framework/status_handler_pb2.py +28 -0
  151. mediapipe/framework/stream_handler/__init__.py +0 -0
  152. mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
  153. mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
  154. mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
  155. mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
  156. mediapipe/framework/stream_handler_pb2.py +30 -0
  157. mediapipe/framework/test_calculators_pb2.py +31 -0
  158. mediapipe/framework/thread_pool_executor_pb2.py +29 -0
  159. mediapipe/framework/tool/__init__.py +0 -0
  160. mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
  161. mediapipe/framework/tool/field_data_pb2.py +28 -0
  162. mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
  163. mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
  164. mediapipe/framework/tool/source_pb2.py +33 -0
  165. mediapipe/framework/tool/switch_container_pb2.py +32 -0
  166. mediapipe/gpu/__init__.py +0 -0
  167. mediapipe/gpu/copy_calculator_pb2.py +33 -0
  168. mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
  169. mediapipe/gpu/gl_context_options_pb2.py +31 -0
  170. mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
  171. mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
  172. mediapipe/gpu/gpu_origin_pb2.py +29 -0
  173. mediapipe/gpu/scale_mode_pb2.py +28 -0
  174. mediapipe/model_maker/__init__.py +27 -0
  175. mediapipe/model_maker/setup.py +107 -0
  176. mediapipe/modules/__init__.py +0 -0
  177. mediapipe/modules/face_detection/__init__.py +0 -0
  178. mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
  179. mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
  180. mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
  181. mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
  182. mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
  183. mediapipe/modules/face_geometry/__init__.py +0 -0
  184. mediapipe/modules/face_geometry/data/__init__.py +0 -0
  185. mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
  186. mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
  187. mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
  188. mediapipe/modules/face_geometry/libs/__init__.py +0 -0
  189. mediapipe/modules/face_geometry/protos/__init__.py +0 -0
  190. mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
  191. mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
  192. mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
  193. mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
  194. mediapipe/modules/face_landmark/__init__.py +0 -0
  195. mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
  196. mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
  197. mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
  198. mediapipe/modules/hand_landmark/__init__.py +0 -0
  199. mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
  200. mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
  201. mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
  202. mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
  203. mediapipe/modules/hand_landmark/handedness.txt +2 -0
  204. mediapipe/modules/holistic_landmark/__init__.py +0 -0
  205. mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
  206. mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
  207. mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
  208. mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
  209. mediapipe/modules/iris_landmark/__init__.py +0 -0
  210. mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
  211. mediapipe/modules/objectron/__init__.py +0 -0
  212. mediapipe/modules/objectron/calculators/__init__.py +0 -0
  213. mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
  214. mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
  215. mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
  216. mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
  217. mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
  218. mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
  219. mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
  220. mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
  221. mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
  222. mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
  223. mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
  224. mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
  225. mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
  226. mediapipe/modules/palm_detection/__init__.py +0 -0
  227. mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
  228. mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
  229. mediapipe/modules/pose_detection/__init__.py +0 -0
  230. mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
  231. mediapipe/modules/pose_landmark/__init__.py +0 -0
  232. mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
  233. mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
  234. mediapipe/modules/selfie_segmentation/__init__.py +0 -0
  235. mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
  236. mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
  237. mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
  238. mediapipe/python/__init__.py +29 -0
  239. mediapipe/python/_framework_bindings.cpython-310-x86_64-linux-gnu.so +0 -0
  240. mediapipe/python/calculator_graph_test.py +251 -0
  241. mediapipe/python/image_frame_test.py +194 -0
  242. mediapipe/python/image_test.py +218 -0
  243. mediapipe/python/packet_creator.py +275 -0
  244. mediapipe/python/packet_getter.py +120 -0
  245. mediapipe/python/packet_test.py +533 -0
  246. mediapipe/python/solution_base.py +604 -0
  247. mediapipe/python/solution_base_test.py +396 -0
  248. mediapipe/python/solutions/__init__.py +27 -0
  249. mediapipe/python/solutions/download_utils.py +37 -0
  250. mediapipe/python/solutions/drawing_styles.py +249 -0
  251. mediapipe/python/solutions/drawing_utils.py +320 -0
  252. mediapipe/python/solutions/drawing_utils_test.py +258 -0
  253. mediapipe/python/solutions/face_detection.py +105 -0
  254. mediapipe/python/solutions/face_detection_test.py +92 -0
  255. mediapipe/python/solutions/face_mesh.py +125 -0
  256. mediapipe/python/solutions/face_mesh_connections.py +500 -0
  257. mediapipe/python/solutions/face_mesh_test.py +170 -0
  258. mediapipe/python/solutions/hands.py +153 -0
  259. mediapipe/python/solutions/hands_connections.py +32 -0
  260. mediapipe/python/solutions/hands_test.py +219 -0
  261. mediapipe/python/solutions/holistic.py +167 -0
  262. mediapipe/python/solutions/holistic_test.py +142 -0
  263. mediapipe/python/solutions/objectron.py +288 -0
  264. mediapipe/python/solutions/objectron_test.py +81 -0
  265. mediapipe/python/solutions/pose.py +192 -0
  266. mediapipe/python/solutions/pose_connections.py +22 -0
  267. mediapipe/python/solutions/pose_test.py +262 -0
  268. mediapipe/python/solutions/selfie_segmentation.py +76 -0
  269. mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
  270. mediapipe/python/timestamp_test.py +78 -0
  271. mediapipe/tasks/__init__.py +14 -0
  272. mediapipe/tasks/cc/__init__.py +0 -0
  273. mediapipe/tasks/cc/audio/__init__.py +0 -0
  274. mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
  275. mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
  276. mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
  277. mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
  278. mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
  279. mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
  280. mediapipe/tasks/cc/audio/core/__init__.py +0 -0
  281. mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
  282. mediapipe/tasks/cc/components/__init__.py +0 -0
  283. mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
  284. mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
  285. mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
  286. mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
  287. mediapipe/tasks/cc/components/containers/__init__.py +0 -0
  288. mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
  289. mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
  290. mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
  291. mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
  292. mediapipe/tasks/cc/components/processors/__init__.py +0 -0
  293. mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
  294. mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
  295. mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
  296. mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
  297. mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
  298. mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
  299. mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
  300. mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
  301. mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
  302. mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
  303. mediapipe/tasks/cc/components/utils/__init__.py +0 -0
  304. mediapipe/tasks/cc/core/__init__.py +0 -0
  305. mediapipe/tasks/cc/core/proto/__init__.py +0 -0
  306. mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
  307. mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
  308. mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
  309. mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
  310. mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
  311. mediapipe/tasks/cc/genai/__init__.py +0 -0
  312. mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
  313. mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
  314. mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
  315. mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
  316. mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
  317. mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
  318. mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
  319. mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
  320. mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
  321. mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
  322. mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
  323. mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
  324. mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
  325. mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
  326. mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
  327. mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
  328. mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
  329. mediapipe/tasks/cc/metadata/__init__.py +0 -0
  330. mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
  331. mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-310-x86_64-linux-gnu.so +0 -0
  332. mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
  333. mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
  334. mediapipe/tasks/cc/text/__init__.py +0 -0
  335. mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
  336. mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
  337. mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
  338. mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
  339. mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
  340. mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
  341. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
  342. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
  343. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
  344. mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
  345. mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
  346. mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
  347. mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
  348. mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
  349. mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
  350. mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
  351. mediapipe/tasks/cc/text/utils/__init__.py +0 -0
  352. mediapipe/tasks/cc/vision/__init__.py +0 -0
  353. mediapipe/tasks/cc/vision/core/__init__.py +0 -0
  354. mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
  355. mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
  356. mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
  357. mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
  358. mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
  359. mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
  360. mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
  361. mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
  362. mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
  363. mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
  364. mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
  365. mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
  366. mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
  367. mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
  368. mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
  369. mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
  370. mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
  371. mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
  372. mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
  373. mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
  374. mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
  375. mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
  376. mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
  377. mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
  378. mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
  379. mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
  380. mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
  381. mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
  382. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
  383. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
  384. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
  385. mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
  386. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
  387. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
  388. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
  389. mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
  390. mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
  391. mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
  392. mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
  393. mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
  394. mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
  395. mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
  396. mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
  397. mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
  398. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
  399. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
  400. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
  401. mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
  402. mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
  403. mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
  404. mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
  405. mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
  406. mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
  407. mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
  408. mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
  409. mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
  410. mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
  411. mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
  412. mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
  413. mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
  414. mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
  415. mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
  416. mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
  417. mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
  418. mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
  419. mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
  420. mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
  421. mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
  422. mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
  423. mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
  424. mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
  425. mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
  426. mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
  427. mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
  428. mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
  429. mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
  430. mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
  431. mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
  432. mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
  433. mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
  434. mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
  435. mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
  436. mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
  437. mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
  438. mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
  439. mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
  440. mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
  441. mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
  442. mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
  443. mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
  444. mediapipe/tasks/python/__init__.py +27 -0
  445. mediapipe/tasks/python/audio/__init__.py +33 -0
  446. mediapipe/tasks/python/audio/audio_classifier.py +324 -0
  447. mediapipe/tasks/python/audio/audio_embedder.py +285 -0
  448. mediapipe/tasks/python/audio/core/__init__.py +16 -0
  449. mediapipe/tasks/python/audio/core/audio_record.py +125 -0
  450. mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
  451. mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
  452. mediapipe/tasks/python/benchmark/__init__.py +13 -0
  453. mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
  454. mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
  455. mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
  456. mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
  457. mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
  458. mediapipe/tasks/python/components/__init__.py +13 -0
  459. mediapipe/tasks/python/components/containers/__init__.py +53 -0
  460. mediapipe/tasks/python/components/containers/audio_data.py +137 -0
  461. mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
  462. mediapipe/tasks/python/components/containers/category.py +78 -0
  463. mediapipe/tasks/python/components/containers/classification_result.py +111 -0
  464. mediapipe/tasks/python/components/containers/detections.py +181 -0
  465. mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
  466. mediapipe/tasks/python/components/containers/keypoint.py +77 -0
  467. mediapipe/tasks/python/components/containers/landmark.py +122 -0
  468. mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
  469. mediapipe/tasks/python/components/containers/rect.py +109 -0
  470. mediapipe/tasks/python/components/processors/__init__.py +23 -0
  471. mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
  472. mediapipe/tasks/python/components/utils/__init__.py +13 -0
  473. mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
  474. mediapipe/tasks/python/core/__init__.py +13 -0
  475. mediapipe/tasks/python/core/base_options.py +121 -0
  476. mediapipe/tasks/python/core/optional_dependencies.py +25 -0
  477. mediapipe/tasks/python/core/task_info.py +139 -0
  478. mediapipe/tasks/python/genai/__init__.py +14 -0
  479. mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
  480. mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
  481. mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
  482. mediapipe/tasks/python/genai/converter/__init__.py +24 -0
  483. mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
  484. mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
  485. mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
  486. mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
  487. mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
  488. mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
  489. mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
  490. mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
  491. mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
  492. mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
  493. mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
  494. mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
  495. mediapipe/tasks/python/metadata/__init__.py +13 -0
  496. mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-310-x86_64-linux-gnu.so +0 -0
  497. mediapipe/tasks/python/metadata/metadata.py +928 -0
  498. mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
  499. mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
  500. mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
  501. mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
  502. mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
  503. mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
  504. mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
  505. mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
  506. mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
  507. mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
  508. mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
  509. mediapipe/tasks/python/test/__init__.py +13 -0
  510. mediapipe/tasks/python/test/audio/__init__.py +13 -0
  511. mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
  512. mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
  513. mediapipe/tasks/python/test/test_utils.py +196 -0
  514. mediapipe/tasks/python/test/text/__init__.py +13 -0
  515. mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
  516. mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
  517. mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
  518. mediapipe/tasks/python/test/vision/__init__.py +13 -0
  519. mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
  520. mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
  521. mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
  522. mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
  523. mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
  524. mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
  525. mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
  526. mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
  527. mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
  528. mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
  529. mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
  530. mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
  531. mediapipe/tasks/python/text/__init__.py +35 -0
  532. mediapipe/tasks/python/text/core/__init__.py +16 -0
  533. mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
  534. mediapipe/tasks/python/text/language_detector.py +220 -0
  535. mediapipe/tasks/python/text/text_classifier.py +187 -0
  536. mediapipe/tasks/python/text/text_embedder.py +188 -0
  537. mediapipe/tasks/python/vision/__init__.py +90 -0
  538. mediapipe/tasks/python/vision/core/__init__.py +14 -0
  539. mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
  540. mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
  541. mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
  542. mediapipe/tasks/python/vision/face_aligner.py +158 -0
  543. mediapipe/tasks/python/vision/face_detector.py +332 -0
  544. mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
  545. mediapipe/tasks/python/vision/face_stylizer.py +158 -0
  546. mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
  547. mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
  548. mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
  549. mediapipe/tasks/python/vision/image_classifier.py +358 -0
  550. mediapipe/tasks/python/vision/image_embedder.py +362 -0
  551. mediapipe/tasks/python/vision/image_segmenter.py +433 -0
  552. mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
  553. mediapipe/tasks/python/vision/object_detector.py +389 -0
  554. mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
  555. mediapipe/util/__init__.py +0 -0
  556. mediapipe/util/analytics/__init__.py +0 -0
  557. mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
  558. mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
  559. mediapipe/util/audio_decoder_pb2.py +33 -0
  560. mediapipe/util/color_pb2.py +33 -0
  561. mediapipe/util/label_map_pb2.py +27 -0
  562. mediapipe/util/render_data_pb2.py +58 -0
  563. mediapipe/util/sequence/__init__.py +14 -0
  564. mediapipe/util/sequence/media_sequence.py +716 -0
  565. mediapipe/util/sequence/media_sequence_test.py +290 -0
  566. mediapipe/util/sequence/media_sequence_util.py +800 -0
  567. mediapipe/util/sequence/media_sequence_util_test.py +389 -0
  568. mediapipe/util/tracking/__init__.py +0 -0
  569. mediapipe/util/tracking/box_detector_pb2.py +39 -0
  570. mediapipe/util/tracking/box_tracker_pb2.py +32 -0
  571. mediapipe/util/tracking/camera_motion_pb2.py +31 -0
  572. mediapipe/util/tracking/flow_packager_pb2.py +60 -0
  573. mediapipe/util/tracking/frame_selection_pb2.py +35 -0
  574. mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
  575. mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
  576. mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
  577. mediapipe/util/tracking/motion_models_pb2.py +42 -0
  578. mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
  579. mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
  580. mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
  581. mediapipe/util/tracking/region_flow_pb2.py +49 -0
  582. mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
  583. mediapipe/util/tracking/tone_models_pb2.py +32 -0
  584. mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
  585. mediapipe/util/tracking/tracking_pb2.py +73 -0
  586. mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
  587. mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
  588. mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
  589. mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
  590. mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
  591. mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
  592. mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
  593. mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
@@ -0,0 +1,158 @@
+ # Copyright 2023 The MediaPipe Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """MediaPipe face stylizer task."""
+
+ import dataclasses
+ from typing import Optional
+
+ from mediapipe.python import packet_creator
+ from mediapipe.python import packet_getter
+ from mediapipe.python._framework_bindings import image as image_module
+ from mediapipe.tasks.cc.vision.face_stylizer.proto import face_stylizer_graph_options_pb2
+ from mediapipe.tasks.python.core import base_options as base_options_module
+ from mediapipe.tasks.python.core import task_info as task_info_module
+ from mediapipe.tasks.python.core.optional_dependencies import doc_controls
+ from mediapipe.tasks.python.vision.core import base_vision_task_api
+ from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
+ from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
+
+ _BaseOptions = base_options_module.BaseOptions
+ _FaceStylizerGraphOptionsProto = (
+     face_stylizer_graph_options_pb2.FaceStylizerGraphOptions
+ )
+ _ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
+ _TaskInfo = task_info_module.TaskInfo
+
+ _STYLIZED_IMAGE_NAME = 'stylized_image'
+ _STYLIZED_IMAGE_TAG = 'STYLIZED_IMAGE'
+ _NORM_RECT_STREAM_NAME = 'norm_rect_in'
+ _NORM_RECT_TAG = 'NORM_RECT'
+ _IMAGE_IN_STREAM_NAME = 'image_in'
+ _IMAGE_OUT_STREAM_NAME = 'image_out'
+ _IMAGE_TAG = 'IMAGE'
+ _TASK_GRAPH_NAME = 'mediapipe.tasks.vision.face_stylizer.FaceStylizerGraph'
+ _MICRO_SECONDS_PER_MILLISECOND = 1000
+
+
+ @dataclasses.dataclass
+ class FaceStylizerOptions:
+   """Options for the face stylizer task.
+
+   Attributes:
+     base_options: Base options for the face stylizer task.
+   """
+
+   base_options: _BaseOptions
+
+   @doc_controls.do_not_generate_docs
+   def to_pb2(self) -> _FaceStylizerGraphOptionsProto:
+     """Generates an FaceStylizerOptions protobuf object."""
+     base_options_proto = self.base_options.to_pb2()
+     return _FaceStylizerGraphOptionsProto(base_options=base_options_proto)
+
+
+ class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
+   """Class that performs face stylization on images."""
+
+   @classmethod
+   def create_from_model_path(cls, model_path: str) -> 'FaceStylizer':
+     """Creates an `FaceStylizer` object from a TensorFlow Lite model and the default `FaceStylizerOptions`.
+
+     Note that the created `FaceStylizer` instance is in image mode, for
+     stylizing one face on a single image input.
+
+     Args:
+       model_path: Path to the model.
+
+     Returns:
+       `FaceStylizer` object that's created from the model file and the default
+       `FaceStylizerOptions`.
+
+     Raises:
+       ValueError: If failed to create `FaceStylizer` object from the provided
+         file such as invalid file path.
+       RuntimeError: If other types of error occurred.
+     """
+     base_options = _BaseOptions(model_asset_path=model_path)
+     options = FaceStylizerOptions(base_options=base_options)
+     return cls.create_from_options(options)
+
+   @classmethod
+   def create_from_options(cls, options: FaceStylizerOptions) -> 'FaceStylizer':
+     """Creates the `FaceStylizer` object from face stylizer options.
+
+     Args:
+       options: Options for the face stylizer task.
+
+     Returns:
+       `FaceStylizer` object that's created from `options`.
+
+     Raises:
+       ValueError: If failed to create `FaceStylizer` object from
+         `FaceStylizerOptions` such as missing the model.
+       RuntimeError: If other types of error occurred.
+     """
+
+     task_info = _TaskInfo(
+         task_graph=_TASK_GRAPH_NAME,
+         input_streams=[
+             ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
+             ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]),
+         ],
+         output_streams=[
+             ':'.join([_STYLIZED_IMAGE_TAG, _STYLIZED_IMAGE_NAME]),
+             ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]),
+         ],
+         task_options=options,
+     )
+     return cls(
+         task_info.generate_graph_config(),
+         running_mode=running_mode_module.VisionTaskRunningMode.IMAGE,
+     )
+
+   def stylize(
+       self,
+       image: image_module.Image,
+       image_processing_options: Optional[_ImageProcessingOptions] = None,
+   ) -> image_module.Image:
+     """Performs face stylization on the provided MediaPipe Image.
+
+     Only use this method when the FaceStylizer is created with the image
+     running mode.
+
+     Args:
+       image: MediaPipe Image.
+       image_processing_options: Options for image processing.
+
+     Returns:
+       The stylized image of the most visible face. The stylized output image
+       size is the same as the model output size. None if no face is detected
+       on the input image.
+
+     Raises:
+       ValueError: If any of the input arguments is invalid.
+       RuntimeError: If face stylization failed to run.
+     """
+     normalized_rect = self.convert_to_normalized_rect(
+         image_processing_options, image
+     )
+     output_packets = self._process_image_data({
+         _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image),
+         _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
+             normalized_rect.to_pb2()
+         ),
+     })
+     if output_packets[_STYLIZED_IMAGE_NAME].is_empty():
+       return None
+     return packet_getter.get_image(output_packets[_STYLIZED_IMAGE_NAME])
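The hunk above corresponds to the +158-line entry mediapipe/tasks/python/vision/face_stylizer.py in the file list, judging by the matching line count. A minimal usage sketch of the FaceStylizer API it adds follows; it assumes FaceStylizer is re-exported from mediapipe.tasks.python.vision as in released wheels, and the file names 'face_stylizer.task' and 'portrait.jpg' are placeholders, not part of the diff.

import mediapipe as mp
from mediapipe.tasks.python import vision

# Build an image-mode FaceStylizer straight from a model file; as shown in
# create_from_model_path above, this wraps FaceStylizerOptions with defaults.
stylizer = vision.FaceStylizer.create_from_model_path('face_stylizer.task')

# stylize() returns the stylized face as a MediaPipe Image, or None when no
# face is detected in the input.
image = mp.Image.create_from_file('portrait.jpg')
stylized = stylizer.stylize(image)
if stylized is not None:
  print('Stylized output:', stylized.width, 'x', stylized.height)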
@@ -0,0 +1,480 @@
+ # Copyright 2022 The MediaPipe Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """MediaPipe gesture recognizer task."""
+
+ import dataclasses
+ from typing import Callable, Mapping, Optional, List
+
+ from mediapipe.framework.formats import classification_pb2
+ from mediapipe.framework.formats import landmark_pb2
+ from mediapipe.python import packet_creator
+ from mediapipe.python import packet_getter
+ from mediapipe.python._framework_bindings import image as image_module
+ from mediapipe.python._framework_bindings import packet as packet_module
+ from mediapipe.tasks.cc.vision.gesture_recognizer.proto import gesture_recognizer_graph_options_pb2
+ from mediapipe.tasks.python.components.containers import category as category_module
+ from mediapipe.tasks.python.components.containers import landmark as landmark_module
+ from mediapipe.tasks.python.components.processors import classifier_options
+ from mediapipe.tasks.python.core import base_options as base_options_module
+ from mediapipe.tasks.python.core import task_info as task_info_module
+ from mediapipe.tasks.python.core.optional_dependencies import doc_controls
+ from mediapipe.tasks.python.vision.core import base_vision_task_api
+ from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
+ from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
+
+ _BaseOptions = base_options_module.BaseOptions
+ _GestureRecognizerGraphOptionsProto = (
+     gesture_recognizer_graph_options_pb2.GestureRecognizerGraphOptions
+ )
+ _ClassifierOptions = classifier_options.ClassifierOptions
+ _RunningMode = running_mode_module.VisionTaskRunningMode
+ _ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
+ _TaskInfo = task_info_module.TaskInfo
+
+ _IMAGE_IN_STREAM_NAME = 'image_in'
+ _IMAGE_OUT_STREAM_NAME = 'image_out'
+ _IMAGE_TAG = 'IMAGE'
+ _NORM_RECT_STREAM_NAME = 'norm_rect_in'
+ _NORM_RECT_TAG = 'NORM_RECT'
+ _HAND_GESTURE_STREAM_NAME = 'hand_gestures'
+ _HAND_GESTURE_TAG = 'HAND_GESTURES'
+ _HANDEDNESS_STREAM_NAME = 'handedness'
+ _HANDEDNESS_TAG = 'HANDEDNESS'
+ _HAND_LANDMARKS_STREAM_NAME = 'landmarks'
+ _HAND_LANDMARKS_TAG = 'LANDMARKS'
+ _HAND_WORLD_LANDMARKS_STREAM_NAME = 'world_landmarks'
+ _HAND_WORLD_LANDMARKS_TAG = 'WORLD_LANDMARKS'
+ _TASK_GRAPH_NAME = (
+     'mediapipe.tasks.vision.gesture_recognizer.GestureRecognizerGraph'
+ )
+ _MICRO_SECONDS_PER_MILLISECOND = 1000
+ _GESTURE_DEFAULT_INDEX = -1
+
+
+ @dataclasses.dataclass
+ class GestureRecognizerResult:
+   """The gesture recognition result from GestureRecognizer, where each vector element represents a single hand detected in the image.
+
+   Attributes:
+     gestures: Recognized hand gestures of detected hands. Note that the index of
+       the gesture is always -1, because the raw indices from multiple gesture
+       classifiers cannot consolidate to a meaningful index.
+     handedness: Classification of handedness.
+     hand_landmarks: Detected hand landmarks in normalized image coordinates.
+     hand_world_landmarks: Detected hand landmarks in world coordinates.
+   """
+
+   gestures: List[List[category_module.Category]]
+   handedness: List[List[category_module.Category]]
+   hand_landmarks: List[List[landmark_module.NormalizedLandmark]]
+   hand_world_landmarks: List[List[landmark_module.Landmark]]
+
+
+ def _build_recognition_result(
+     output_packets: Mapping[str, packet_module.Packet]
+ ) -> GestureRecognizerResult:
+   """Constructs a `GestureRecognizerResult` from output packets."""
+   gestures_proto_list = packet_getter.get_proto_list(
+       output_packets[_HAND_GESTURE_STREAM_NAME]
+   )
+   handedness_proto_list = packet_getter.get_proto_list(
+       output_packets[_HANDEDNESS_STREAM_NAME]
+   )
+   hand_landmarks_proto_list = packet_getter.get_proto_list(
+       output_packets[_HAND_LANDMARKS_STREAM_NAME]
+   )
+   hand_world_landmarks_proto_list = packet_getter.get_proto_list(
+       output_packets[_HAND_WORLD_LANDMARKS_STREAM_NAME]
+   )
+
+   gesture_results = []
+   for proto in gestures_proto_list:
+     gesture_categories = []
+     gesture_classifications = classification_pb2.ClassificationList()
+     gesture_classifications.MergeFrom(proto)
+     for gesture in gesture_classifications.classification:
+       gesture_categories.append(
+           category_module.Category(
+               index=_GESTURE_DEFAULT_INDEX,
+               score=gesture.score,
+               display_name=gesture.display_name,
+               category_name=gesture.label,
+           )
+       )
+     gesture_results.append(gesture_categories)
+
+   handedness_results = []
+   for proto in handedness_proto_list:
+     handedness_categories = []
+     handedness_classifications = classification_pb2.ClassificationList()
+     handedness_classifications.MergeFrom(proto)
+     for handedness in handedness_classifications.classification:
+       handedness_categories.append(
+           category_module.Category(
+               index=handedness.index,
+               score=handedness.score,
+               display_name=handedness.display_name,
+               category_name=handedness.label,
+           )
+       )
+     handedness_results.append(handedness_categories)
+
+   hand_landmarks_results = []
+   for proto in hand_landmarks_proto_list:
+     hand_landmarks = landmark_pb2.NormalizedLandmarkList()
+     hand_landmarks.MergeFrom(proto)
+     hand_landmarks_list = []
+     for hand_landmark in hand_landmarks.landmark:
+       hand_landmarks_list.append(
+           landmark_module.NormalizedLandmark.create_from_pb2(hand_landmark)
+       )
+     hand_landmarks_results.append(hand_landmarks_list)
+
+   hand_world_landmarks_results = []
+   for proto in hand_world_landmarks_proto_list:
+     hand_world_landmarks = landmark_pb2.LandmarkList()
+     hand_world_landmarks.MergeFrom(proto)
+     hand_world_landmarks_list = []
+     for hand_world_landmark in hand_world_landmarks.landmark:
+       hand_world_landmarks_list.append(
+           landmark_module.Landmark.create_from_pb2(hand_world_landmark)
+       )
+     hand_world_landmarks_results.append(hand_world_landmarks_list)
+
+   return GestureRecognizerResult(
+       gesture_results,
+       handedness_results,
+       hand_landmarks_results,
+       hand_world_landmarks_results,
+   )
+
+
+ @dataclasses.dataclass
+ class GestureRecognizerOptions:
+   """Options for the gesture recognizer task.
+
+   Attributes:
+     base_options: Base options for the hand gesture recognizer task.
+     running_mode: The running mode of the task. Default to the image mode.
+       Gesture recognizer task has three running modes: 1) The image mode for
+       recognizing hand gestures on single image inputs. 2) The video mode for
+       recognizing hand gestures on the decoded frames of a video. 3) The live
+       stream mode for recognizing hand gestures on a live stream of input data,
+       such as from camera.
+     num_hands: The maximum number of hands can be detected by the recognizer.
+     min_hand_detection_confidence: The minimum confidence score for the hand
+       detection to be considered successful.
+     min_hand_presence_confidence: The minimum confidence score of hand presence
+       score in the hand landmark detection.
+     min_tracking_confidence: The minimum confidence score for the hand tracking
+       to be considered successful.
+     canned_gesture_classifier_options: Options for configuring the canned
+       gestures classifier, such as score threshold, allow list and deny list of
+       gestures. The categories for canned gesture classifiers are: ["None",
+       "Closed_Fist", "Open_Palm", "Pointing_Up", "Thumb_Down", "Thumb_Up",
+       "Victory", "ILoveYou"]. Note this option is subject to change.
+     custom_gesture_classifier_options: Options for configuring the custom
+       gestures classifier, such as score threshold, allow list and deny list of
+       gestures. Note this option is subject to change.
+     result_callback: The user-defined result callback for processing live stream
+       data. The result callback should only be specified when the running mode
+       is set to the live stream mode.
+   """
+
+   base_options: _BaseOptions
+   running_mode: _RunningMode = _RunningMode.IMAGE
+   num_hands: int = 1
+   min_hand_detection_confidence: float = 0.5
+   min_hand_presence_confidence: float = 0.5
+   min_tracking_confidence: float = 0.5
+   canned_gesture_classifier_options: _ClassifierOptions = dataclasses.field(
+       default_factory=_ClassifierOptions
+   )
+   custom_gesture_classifier_options: _ClassifierOptions = dataclasses.field(
+       default_factory=_ClassifierOptions
+   )
+   result_callback: Optional[
+       Callable[[GestureRecognizerResult, image_module.Image, int], None]
+   ] = None
+
+   @doc_controls.do_not_generate_docs
+   def to_pb2(self) -> _GestureRecognizerGraphOptionsProto:
+     """Generates an GestureRecognizerOptions protobuf object."""
+     base_options_proto = self.base_options.to_pb2()
+     base_options_proto.use_stream_mode = (
+         False if self.running_mode == _RunningMode.IMAGE else True
+     )
+
+     # Initialize gesture recognizer options from base options.
+     gesture_recognizer_options_proto = _GestureRecognizerGraphOptionsProto(
+         base_options=base_options_proto
+     )
+     # Configure hand detector and hand landmarker options.
+     hand_landmarker_options_proto = (
+         gesture_recognizer_options_proto.hand_landmarker_graph_options
+     )
+     hand_landmarker_options_proto.min_tracking_confidence = (
+         self.min_tracking_confidence
+     )
+     hand_landmarker_options_proto.hand_detector_graph_options.num_hands = (
+         self.num_hands
+     )
+     hand_landmarker_options_proto.hand_detector_graph_options.min_detection_confidence = (
+         self.min_hand_detection_confidence
+     )
+     hand_landmarker_options_proto.hand_landmarks_detector_graph_options.min_detection_confidence = (
+         self.min_hand_presence_confidence
+     )
+
+     # Configure hand gesture recognizer options.
+     hand_gesture_recognizer_options_proto = (
+         gesture_recognizer_options_proto.hand_gesture_recognizer_graph_options
+     )
+     hand_gesture_recognizer_options_proto.canned_gesture_classifier_graph_options.classifier_options.CopyFrom(
+         self.canned_gesture_classifier_options.to_pb2()
+     )
+     hand_gesture_recognizer_options_proto.custom_gesture_classifier_graph_options.classifier_options.CopyFrom(
+         self.custom_gesture_classifier_options.to_pb2()
+     )
+
+     return gesture_recognizer_options_proto
+
253
+
254
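For orientation before the `GestureRecognizer` class itself, here is a minimal sketch of how these options are typically filled in, assuming the public `mediapipe.tasks.python` aliases and a hypothetical `gesture_recognizer.task` model bundle:

    from mediapipe.tasks import python
    from mediapipe.tasks.python import vision

    # Hypothetical model bundle path; any valid gesture recognizer asset works.
    base_options = python.BaseOptions(model_asset_path='gesture_recognizer.task')
    options = vision.GestureRecognizerOptions(
        base_options=base_options,
        running_mode=vision.RunningMode.IMAGE,
        num_hands=2,
        min_hand_detection_confidence=0.5,
    )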
+ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
+   """Class that performs gesture recognition on images."""
+
+   @classmethod
+   def create_from_model_path(cls, model_path: str) -> 'GestureRecognizer':
+     """Creates a `GestureRecognizer` object from a TensorFlow Lite model and the default `GestureRecognizerOptions`.
+
+     Note that the created `GestureRecognizer` instance is in image mode, for
+     recognizing hand gestures on single image inputs.
+
+     Args:
+       model_path: Path to the model.
+
+     Returns:
+       `GestureRecognizer` object that's created from the model file and the
+       default `GestureRecognizerOptions`.
+
+     Raises:
+       ValueError: If the `GestureRecognizer` object cannot be created from
+         the provided file, such as an invalid file path.
+       RuntimeError: If other types of errors occurred.
+     """
+     base_options = _BaseOptions(model_asset_path=model_path)
+     options = GestureRecognizerOptions(
+         base_options=base_options, running_mode=_RunningMode.IMAGE
+     )
+     return cls.create_from_options(options)
+
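The classmethod above reduces the common single-image case to one call; a sketch, again assuming a hypothetical model path:

    from mediapipe.tasks.python import vision

    # Equivalent to create_from_options with default image-mode options.
    recognizer = vision.GestureRecognizer.create_from_model_path(
        'gesture_recognizer.task'  # hypothetical path to the model bundle
    )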
+   @classmethod
+   def create_from_options(
+       cls, options: GestureRecognizerOptions
+   ) -> 'GestureRecognizer':
+     """Creates the `GestureRecognizer` object from gesture recognizer options.
+
+     Args:
+       options: Options for the gesture recognizer task.
+
+     Returns:
+       `GestureRecognizer` object that's created from `options`.
+
+     Raises:
+       ValueError: If the `GestureRecognizer` object cannot be created from
+         `GestureRecognizerOptions`, such as a missing model asset.
+       RuntimeError: If other types of errors occurred.
+     """
+
+     def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
+       if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
+         return
+
+       image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])
+
+       if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
+         empty_packet = output_packets[_HAND_GESTURE_STREAM_NAME]
+         options.result_callback(
+             GestureRecognizerResult([], [], [], []),
+             image,
+             empty_packet.timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
+         )
+         return
+
+       gesture_recognizer_result = _build_recognition_result(output_packets)
+       timestamp = output_packets[_HAND_GESTURE_STREAM_NAME].timestamp
+       options.result_callback(
+           gesture_recognizer_result,
+           image,
+           timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
+       )
+
+     task_info = _TaskInfo(
+         task_graph=_TASK_GRAPH_NAME,
+         input_streams=[
+             ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
+             ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]),
+         ],
+         output_streams=[
+             ':'.join([_HAND_GESTURE_TAG, _HAND_GESTURE_STREAM_NAME]),
+             ':'.join([_HANDEDNESS_TAG, _HANDEDNESS_STREAM_NAME]),
+             ':'.join([_HAND_LANDMARKS_TAG, _HAND_LANDMARKS_STREAM_NAME]),
+             ':'.join(
+                 [_HAND_WORLD_LANDMARKS_TAG, _HAND_WORLD_LANDMARKS_STREAM_NAME]
+             ),
+             ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]),
+         ],
+         task_options=options,
+     )
+     return cls(
+         task_info.generate_graph_config(
+             enable_flow_limiting=options.running_mode
+             == _RunningMode.LIVE_STREAM
+         ),
+         options.running_mode,
+         packets_callback if options.result_callback else None,
+     )
+
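A sketch of creating a recognizer from explicit options (here the video mode), assuming the same `python`/`vision` aliases and hypothetical model path as in the sketches above:

    from mediapipe.tasks import python
    from mediapipe.tasks.python import vision

    options = vision.GestureRecognizerOptions(
        base_options=python.BaseOptions(
            model_asset_path='gesture_recognizer.task'  # hypothetical path
        ),
        running_mode=vision.RunningMode.VIDEO,
        num_hands=2,
    )
    recognizer = vision.GestureRecognizer.create_from_options(options)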
+   def recognize(
+       self,
+       image: image_module.Image,
+       image_processing_options: Optional[_ImageProcessingOptions] = None,
+   ) -> GestureRecognizerResult:
+     """Performs hand gesture recognition on the given image.
+
+     Only use this method when the GestureRecognizer is created with the image
+     running mode.
+
+     The image can be of any size with format RGB or RGBA.
+     TODO: Describes how the input image will be preprocessed after the yuv
+     support is implemented.
+
+     Args:
+       image: MediaPipe Image.
+       image_processing_options: Options for image processing.
+
+     Returns:
+       The hand gesture recognition results.
+
+     Raises:
+       ValueError: If any of the input arguments is invalid.
+       RuntimeError: If gesture recognition failed to run.
+     """
+     normalized_rect = self.convert_to_normalized_rect(
+         image_processing_options, image, roi_allowed=False
+     )
+     output_packets = self._process_image_data({
+         _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image),
+         _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
+             normalized_rect.to_pb2()
+         ),
+     })
+
+     if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
+       return GestureRecognizerResult([], [], [], [])
+
+     return _build_recognition_result(output_packets)
+
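A sketch of single-image recognition, continuing with the `recognizer` from the sketches above, assuming a hypothetical image file and that the result dataclass exposes the four lists in the order they are passed to its constructor (gestures, handedness, hand_landmarks, hand_world_landmarks):

    import mediapipe as mp

    mp_image = mp.Image.create_from_file('hand.jpg')  # hypothetical input image
    result = recognizer.recognize(mp_image)
    for hand_index, gestures in enumerate(result.gestures):
        for category in gestures:
            print(hand_index, category.category_name, round(category.score, 2))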
+   def recognize_for_video(
+       self,
+       image: image_module.Image,
+       timestamp_ms: int,
+       image_processing_options: Optional[_ImageProcessingOptions] = None,
+   ) -> GestureRecognizerResult:
+     """Performs gesture recognition on the provided video frame.
+
+     Only use this method when the GestureRecognizer is created with the video
+     running mode. It's required to provide the video frame's timestamp (in
+     milliseconds) along with the video frame. The input timestamps should be
+     monotonically increasing for adjacent calls of this method.
+
+     Args:
+       image: MediaPipe Image.
+       timestamp_ms: The timestamp of the input video frame in milliseconds.
+       image_processing_options: Options for image processing.
+
+     Returns:
+       The hand gesture recognition results.
+
+     Raises:
+       ValueError: If any of the input arguments is invalid.
+       RuntimeError: If gesture recognition failed to run.
+     """
+     normalized_rect = self.convert_to_normalized_rect(
+         image_processing_options, image, roi_allowed=False
+     )
+     output_packets = self._process_video_data({
+         _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
+             timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
+         ),
+         _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
+             normalized_rect.to_pb2()
+         ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
+     })
+
+     if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
+       return GestureRecognizerResult([], [], [], [])
+
+     return _build_recognition_result(output_packets)
+
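A sketch of per-frame video recognition with monotonically increasing millisecond timestamps, continuing with the video-mode `recognizer` from the earlier sketch and assuming OpenCV for decoding (cv2 is not a dependency of this module) and a hypothetical video file:

    import cv2
    import mediapipe as mp

    cap = cv2.VideoCapture('hands.mp4')  # hypothetical video file
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    frame_index = 0
    while cap.isOpened():
        ok, frame_bgr = cap.read()
        if not ok:
            break
        # MediaPipe expects RGB data; OpenCV decodes to BGR.
        frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame_rgb)
        timestamp_ms = int(frame_index * 1000 / fps)
        result = recognizer.recognize_for_video(mp_image, timestamp_ms)
        frame_index += 1
    cap.release()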
+   def recognize_async(
+       self,
+       image: image_module.Image,
+       timestamp_ms: int,
+       image_processing_options: Optional[_ImageProcessingOptions] = None,
+   ) -> None:
+     """Sends live image data to perform gesture recognition.
+
+     Only use this method when the GestureRecognizer is created with the live
+     stream running mode. The input timestamps should be monotonically
+     increasing for adjacent calls of this method. This method will return
+     immediately after the input image is accepted. The results will be
+     available via the `result_callback` provided in the
+     `GestureRecognizerOptions`. The `recognize_async` method is designed to
+     process live stream data such as camera input. To lower the overall
+     latency, the gesture recognizer may drop input images if needed. In other
+     words, it's not guaranteed to have output per input image.
+
+     The `result_callback` provides:
+       - The hand gesture recognition results.
+       - The input image that the gesture recognizer runs on.
+       - The input timestamp in milliseconds.
+
+     Args:
+       image: MediaPipe Image.
+       timestamp_ms: The timestamp of the input image in milliseconds.
+       image_processing_options: Options for image processing.
+
+     Raises:
+       ValueError: If the current input timestamp is smaller than what the
+         gesture recognizer has already processed.
+     """
+     normalized_rect = self.convert_to_normalized_rect(
+         image_processing_options, image, roi_allowed=False
+     )
+     self._send_live_stream_data({
+         _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
+             timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
+         ),
+         _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
+             normalized_rect.to_pb2()
+         ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
+     })
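A sketch of the live-stream flow: `recognize_async` returns immediately and the callback configured in `GestureRecognizerOptions` receives results asynchronously. Import aliases and the model path are assumptions as in the earlier sketches:

    import mediapipe as mp
    from mediapipe.tasks import python
    from mediapipe.tasks.python import vision

    def on_result(result, image, timestamp_ms):
        # Invoked from the recognizer's worker context; keep the work light.
        print(timestamp_ms, [g[0].category_name for g in result.gestures if g])

    options = vision.GestureRecognizerOptions(
        base_options=python.BaseOptions(
            model_asset_path='gesture_recognizer.task'  # hypothetical path
        ),
        running_mode=vision.RunningMode.LIVE_STREAM,
        result_callback=on_result,
    )
    recognizer = vision.GestureRecognizer.create_from_options(options)
    # For each captured RGB frame, with monotonically increasing timestamps:
    #   mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)
    #   recognizer.recognize_async(mp_image, timestamp_ms)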