mediapipe-nightly 0.10.21.post20241223__cp310-cp310-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (593)
  1. mediapipe/__init__.py +26 -0
  2. mediapipe/calculators/__init__.py +0 -0
  3. mediapipe/calculators/audio/__init__.py +0 -0
  4. mediapipe/calculators/audio/mfcc_mel_calculators_pb2.py +33 -0
  5. mediapipe/calculators/audio/rational_factor_resample_calculator_pb2.py +33 -0
  6. mediapipe/calculators/audio/spectrogram_calculator_pb2.py +37 -0
  7. mediapipe/calculators/audio/stabilized_log_calculator_pb2.py +31 -0
  8. mediapipe/calculators/audio/time_series_framer_calculator_pb2.py +33 -0
  9. mediapipe/calculators/core/__init__.py +0 -0
  10. mediapipe/calculators/core/bypass_calculator_pb2.py +31 -0
  11. mediapipe/calculators/core/clip_vector_size_calculator_pb2.py +31 -0
  12. mediapipe/calculators/core/concatenate_vector_calculator_pb2.py +31 -0
  13. mediapipe/calculators/core/constant_side_packet_calculator_pb2.py +39 -0
  14. mediapipe/calculators/core/dequantize_byte_array_calculator_pb2.py +31 -0
  15. mediapipe/calculators/core/flow_limiter_calculator_pb2.py +32 -0
  16. mediapipe/calculators/core/gate_calculator_pb2.py +33 -0
  17. mediapipe/calculators/core/get_vector_item_calculator_pb2.py +31 -0
  18. mediapipe/calculators/core/graph_profile_calculator_pb2.py +31 -0
  19. mediapipe/calculators/core/packet_cloner_calculator_pb2.py +31 -0
  20. mediapipe/calculators/core/packet_resampler_calculator_pb2.py +33 -0
  21. mediapipe/calculators/core/packet_thinner_calculator_pb2.py +33 -0
  22. mediapipe/calculators/core/quantize_float_vector_calculator_pb2.py +31 -0
  23. mediapipe/calculators/core/sequence_shift_calculator_pb2.py +31 -0
  24. mediapipe/calculators/core/split_vector_calculator_pb2.py +33 -0
  25. mediapipe/calculators/image/__init__.py +0 -0
  26. mediapipe/calculators/image/bilateral_filter_calculator_pb2.py +31 -0
  27. mediapipe/calculators/image/feature_detector_calculator_pb2.py +31 -0
  28. mediapipe/calculators/image/image_clone_calculator_pb2.py +31 -0
  29. mediapipe/calculators/image/image_cropping_calculator_pb2.py +33 -0
  30. mediapipe/calculators/image/image_transformation_calculator_pb2.py +38 -0
  31. mediapipe/calculators/image/mask_overlay_calculator_pb2.py +33 -0
  32. mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator_pb2.py +31 -0
  33. mediapipe/calculators/image/opencv_image_encoder_calculator_pb2.py +35 -0
  34. mediapipe/calculators/image/recolor_calculator_pb2.py +34 -0
  35. mediapipe/calculators/image/rotation_mode_pb2.py +29 -0
  36. mediapipe/calculators/image/scale_image_calculator_pb2.py +34 -0
  37. mediapipe/calculators/image/segmentation_smoothing_calculator_pb2.py +31 -0
  38. mediapipe/calculators/image/set_alpha_calculator_pb2.py +31 -0
  39. mediapipe/calculators/image/warp_affine_calculator_pb2.py +36 -0
  40. mediapipe/calculators/internal/__init__.py +0 -0
  41. mediapipe/calculators/internal/callback_packet_calculator_pb2.py +33 -0
  42. mediapipe/calculators/tensor/__init__.py +0 -0
  43. mediapipe/calculators/tensor/audio_to_tensor_calculator_pb2.py +35 -0
  44. mediapipe/calculators/tensor/bert_preprocessor_calculator_pb2.py +31 -0
  45. mediapipe/calculators/tensor/feedback_tensors_calculator_pb2.py +37 -0
  46. mediapipe/calculators/tensor/image_to_tensor_calculator_pb2.py +40 -0
  47. mediapipe/calculators/tensor/inference_calculator_pb2.py +63 -0
  48. mediapipe/calculators/tensor/landmarks_to_tensor_calculator_pb2.py +33 -0
  49. mediapipe/calculators/tensor/regex_preprocessor_calculator_pb2.py +31 -0
  50. mediapipe/calculators/tensor/tensor_converter_calculator_pb2.py +34 -0
  51. mediapipe/calculators/tensor/tensor_to_joints_calculator_pb2.py +31 -0
  52. mediapipe/calculators/tensor/tensors_readback_calculator_pb2.py +35 -0
  53. mediapipe/calculators/tensor/tensors_to_audio_calculator_pb2.py +33 -0
  54. mediapipe/calculators/tensor/tensors_to_classification_calculator_pb2.py +44 -0
  55. mediapipe/calculators/tensor/tensors_to_detections_calculator_pb2.py +39 -0
  56. mediapipe/calculators/tensor/tensors_to_floats_calculator_pb2.py +33 -0
  57. mediapipe/calculators/tensor/tensors_to_landmarks_calculator_pb2.py +33 -0
  58. mediapipe/calculators/tensor/tensors_to_segmentation_calculator_pb2.py +34 -0
  59. mediapipe/calculators/tensor/vector_to_tensor_calculator_pb2.py +27 -0
  60. mediapipe/calculators/tflite/__init__.py +0 -0
  61. mediapipe/calculators/tflite/ssd_anchors_calculator_pb2.py +32 -0
  62. mediapipe/calculators/tflite/tflite_converter_calculator_pb2.py +33 -0
  63. mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator_pb2.py +31 -0
  64. mediapipe/calculators/tflite/tflite_inference_calculator_pb2.py +49 -0
  65. mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_pb2.py +31 -0
  66. mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator_pb2.py +31 -0
  67. mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator_pb2.py +33 -0
  68. mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator_pb2.py +31 -0
  69. mediapipe/calculators/util/__init__.py +0 -0
  70. mediapipe/calculators/util/align_hand_to_pose_in_world_calculator_pb2.py +31 -0
  71. mediapipe/calculators/util/annotation_overlay_calculator_pb2.py +32 -0
  72. mediapipe/calculators/util/association_calculator_pb2.py +31 -0
  73. mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py +31 -0
  74. mediapipe/calculators/util/combine_joints_calculator_pb2.py +36 -0
  75. mediapipe/calculators/util/detection_label_id_to_text_calculator_pb2.py +36 -0
  76. mediapipe/calculators/util/detections_to_rects_calculator_pb2.py +33 -0
  77. mediapipe/calculators/util/detections_to_render_data_calculator_pb2.py +33 -0
  78. mediapipe/calculators/util/face_to_rect_calculator_pb2.py +26 -0
  79. mediapipe/calculators/util/filter_detections_calculator_pb2.py +31 -0
  80. mediapipe/calculators/util/flat_color_image_calculator_pb2.py +32 -0
  81. mediapipe/calculators/util/labels_to_render_data_calculator_pb2.py +34 -0
  82. mediapipe/calculators/util/landmark_projection_calculator_pb2.py +31 -0
  83. mediapipe/calculators/util/landmarks_refinement_calculator_pb2.py +41 -0
  84. mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py +33 -0
  85. mediapipe/calculators/util/landmarks_to_detection_calculator_pb2.py +31 -0
  86. mediapipe/calculators/util/landmarks_to_floats_calculator_pb2.py +31 -0
  87. mediapipe/calculators/util/landmarks_to_render_data_calculator_pb2.py +32 -0
  88. mediapipe/calculators/util/landmarks_transformation_calculator_pb2.py +37 -0
  89. mediapipe/calculators/util/latency_pb2.py +26 -0
  90. mediapipe/calculators/util/local_file_contents_calculator_pb2.py +31 -0
  91. mediapipe/calculators/util/logic_calculator_pb2.py +34 -0
  92. mediapipe/calculators/util/non_max_suppression_calculator_pb2.py +35 -0
  93. mediapipe/calculators/util/packet_frequency_calculator_pb2.py +31 -0
  94. mediapipe/calculators/util/packet_frequency_pb2.py +26 -0
  95. mediapipe/calculators/util/packet_latency_calculator_pb2.py +31 -0
  96. mediapipe/calculators/util/rect_to_render_data_calculator_pb2.py +32 -0
  97. mediapipe/calculators/util/rect_to_render_scale_calculator_pb2.py +31 -0
  98. mediapipe/calculators/util/rect_transformation_calculator_pb2.py +31 -0
  99. mediapipe/calculators/util/refine_landmarks_from_heatmap_calculator_pb2.py +31 -0
  100. mediapipe/calculators/util/resource_provider_calculator_pb2.py +28 -0
  101. mediapipe/calculators/util/set_joints_visibility_calculator_pb2.py +41 -0
  102. mediapipe/calculators/util/thresholding_calculator_pb2.py +31 -0
  103. mediapipe/calculators/util/timed_box_list_id_to_label_calculator_pb2.py +31 -0
  104. mediapipe/calculators/util/timed_box_list_to_render_data_calculator_pb2.py +32 -0
  105. mediapipe/calculators/util/top_k_scores_calculator_pb2.py +31 -0
  106. mediapipe/calculators/util/visibility_copy_calculator_pb2.py +27 -0
  107. mediapipe/calculators/util/visibility_smoothing_calculator_pb2.py +31 -0
  108. mediapipe/calculators/video/__init__.py +0 -0
  109. mediapipe/calculators/video/box_detector_calculator_pb2.py +32 -0
  110. mediapipe/calculators/video/box_tracker_calculator_pb2.py +32 -0
  111. mediapipe/calculators/video/flow_packager_calculator_pb2.py +32 -0
  112. mediapipe/calculators/video/flow_to_image_calculator_pb2.py +31 -0
  113. mediapipe/calculators/video/motion_analysis_calculator_pb2.py +42 -0
  114. mediapipe/calculators/video/opencv_video_encoder_calculator_pb2.py +31 -0
  115. mediapipe/calculators/video/tool/__init__.py +0 -0
  116. mediapipe/calculators/video/tool/flow_quantizer_model_pb2.py +26 -0
  117. mediapipe/calculators/video/tracked_detection_manager_calculator_pb2.py +32 -0
  118. mediapipe/calculators/video/video_pre_stream_calculator_pb2.py +35 -0
  119. mediapipe/examples/__init__.py +14 -0
  120. mediapipe/examples/desktop/__init__.py +14 -0
  121. mediapipe/framework/__init__.py +0 -0
  122. mediapipe/framework/calculator_options_pb2.py +29 -0
  123. mediapipe/framework/calculator_pb2.py +59 -0
  124. mediapipe/framework/calculator_profile_pb2.py +48 -0
  125. mediapipe/framework/deps/__init__.py +0 -0
  126. mediapipe/framework/deps/proto_descriptor_pb2.py +29 -0
  127. mediapipe/framework/formats/__init__.py +0 -0
  128. mediapipe/framework/formats/affine_transform_data_pb2.py +28 -0
  129. mediapipe/framework/formats/annotation/__init__.py +0 -0
  130. mediapipe/framework/formats/annotation/locus_pb2.py +32 -0
  131. mediapipe/framework/formats/annotation/rasterization_pb2.py +29 -0
  132. mediapipe/framework/formats/body_rig_pb2.py +28 -0
  133. mediapipe/framework/formats/classification_pb2.py +31 -0
  134. mediapipe/framework/formats/detection_pb2.py +36 -0
  135. mediapipe/framework/formats/image_file_properties_pb2.py +26 -0
  136. mediapipe/framework/formats/image_format_pb2.py +29 -0
  137. mediapipe/framework/formats/landmark_pb2.py +37 -0
  138. mediapipe/framework/formats/location_data_pb2.py +38 -0
  139. mediapipe/framework/formats/matrix_data_pb2.py +31 -0
  140. mediapipe/framework/formats/motion/__init__.py +0 -0
  141. mediapipe/framework/formats/motion/optical_flow_field_data_pb2.py +30 -0
  142. mediapipe/framework/formats/object_detection/__init__.py +0 -0
  143. mediapipe/framework/formats/object_detection/anchor_pb2.py +26 -0
  144. mediapipe/framework/formats/rect_pb2.py +29 -0
  145. mediapipe/framework/formats/time_series_header_pb2.py +28 -0
  146. mediapipe/framework/graph_runtime_info_pb2.py +31 -0
  147. mediapipe/framework/mediapipe_options_pb2.py +27 -0
  148. mediapipe/framework/packet_factory_pb2.py +31 -0
  149. mediapipe/framework/packet_generator_pb2.py +33 -0
  150. mediapipe/framework/status_handler_pb2.py +28 -0
  151. mediapipe/framework/stream_handler/__init__.py +0 -0
  152. mediapipe/framework/stream_handler/default_input_stream_handler_pb2.py +27 -0
  153. mediapipe/framework/stream_handler/fixed_size_input_stream_handler_pb2.py +27 -0
  154. mediapipe/framework/stream_handler/sync_set_input_stream_handler_pb2.py +29 -0
  155. mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_pb2.py +27 -0
  156. mediapipe/framework/stream_handler_pb2.py +30 -0
  157. mediapipe/framework/test_calculators_pb2.py +31 -0
  158. mediapipe/framework/thread_pool_executor_pb2.py +29 -0
  159. mediapipe/framework/tool/__init__.py +0 -0
  160. mediapipe/framework/tool/calculator_graph_template_pb2.py +44 -0
  161. mediapipe/framework/tool/field_data_pb2.py +28 -0
  162. mediapipe/framework/tool/node_chain_subgraph_pb2.py +31 -0
  163. mediapipe/framework/tool/packet_generator_wrapper_calculator_pb2.py +28 -0
  164. mediapipe/framework/tool/source_pb2.py +33 -0
  165. mediapipe/framework/tool/switch_container_pb2.py +32 -0
  166. mediapipe/gpu/__init__.py +0 -0
  167. mediapipe/gpu/copy_calculator_pb2.py +33 -0
  168. mediapipe/gpu/gl_animation_overlay_calculator_pb2.py +31 -0
  169. mediapipe/gpu/gl_context_options_pb2.py +31 -0
  170. mediapipe/gpu/gl_scaler_calculator_pb2.py +32 -0
  171. mediapipe/gpu/gl_surface_sink_calculator_pb2.py +32 -0
  172. mediapipe/gpu/gpu_origin_pb2.py +29 -0
  173. mediapipe/gpu/scale_mode_pb2.py +28 -0
  174. mediapipe/model_maker/__init__.py +27 -0
  175. mediapipe/model_maker/setup.py +107 -0
  176. mediapipe/modules/__init__.py +0 -0
  177. mediapipe/modules/face_detection/__init__.py +0 -0
  178. mediapipe/modules/face_detection/face_detection_full_range_cpu.binarypb +0 -0
  179. mediapipe/modules/face_detection/face_detection_full_range_sparse.tflite +0 -0
  180. mediapipe/modules/face_detection/face_detection_pb2.py +30 -0
  181. mediapipe/modules/face_detection/face_detection_short_range.tflite +0 -0
  182. mediapipe/modules/face_detection/face_detection_short_range_cpu.binarypb +0 -0
  183. mediapipe/modules/face_geometry/__init__.py +0 -0
  184. mediapipe/modules/face_geometry/data/__init__.py +0 -0
  185. mediapipe/modules/face_geometry/effect_renderer_calculator_pb2.py +27 -0
  186. mediapipe/modules/face_geometry/env_generator_calculator_pb2.py +28 -0
  187. mediapipe/modules/face_geometry/geometry_pipeline_calculator_pb2.py +27 -0
  188. mediapipe/modules/face_geometry/libs/__init__.py +0 -0
  189. mediapipe/modules/face_geometry/protos/__init__.py +0 -0
  190. mediapipe/modules/face_geometry/protos/environment_pb2.py +31 -0
  191. mediapipe/modules/face_geometry/protos/face_geometry_pb2.py +29 -0
  192. mediapipe/modules/face_geometry/protos/geometry_pipeline_metadata_pb2.py +32 -0
  193. mediapipe/modules/face_geometry/protos/mesh_3d_pb2.py +31 -0
  194. mediapipe/modules/face_landmark/__init__.py +0 -0
  195. mediapipe/modules/face_landmark/face_landmark.tflite +0 -0
  196. mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb +0 -0
  197. mediapipe/modules/face_landmark/face_landmark_with_attention.tflite +0 -0
  198. mediapipe/modules/hand_landmark/__init__.py +0 -0
  199. mediapipe/modules/hand_landmark/calculators/__init__.py +0 -0
  200. mediapipe/modules/hand_landmark/hand_landmark_full.tflite +0 -0
  201. mediapipe/modules/hand_landmark/hand_landmark_lite.tflite +0 -0
  202. mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb +0 -0
  203. mediapipe/modules/hand_landmark/handedness.txt +2 -0
  204. mediapipe/modules/holistic_landmark/__init__.py +0 -0
  205. mediapipe/modules/holistic_landmark/calculators/__init__.py +0 -0
  206. mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator_pb2.py +37 -0
  207. mediapipe/modules/holistic_landmark/hand_recrop.tflite +0 -0
  208. mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb +0 -0
  209. mediapipe/modules/iris_landmark/__init__.py +0 -0
  210. mediapipe/modules/iris_landmark/iris_landmark.tflite +0 -0
  211. mediapipe/modules/objectron/__init__.py +0 -0
  212. mediapipe/modules/objectron/calculators/__init__.py +0 -0
  213. mediapipe/modules/objectron/calculators/a_r_capture_metadata_pb2.py +102 -0
  214. mediapipe/modules/objectron/calculators/annotation_data_pb2.py +38 -0
  215. mediapipe/modules/objectron/calculators/belief_decoder_config_pb2.py +28 -0
  216. mediapipe/modules/objectron/calculators/camera_parameters_pb2.py +30 -0
  217. mediapipe/modules/objectron/calculators/filter_detection_calculator_pb2.py +35 -0
  218. mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator_pb2.py +31 -0
  219. mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator_pb2.py +31 -0
  220. mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator_pb2.py +32 -0
  221. mediapipe/modules/objectron/calculators/object_pb2.py +38 -0
  222. mediapipe/modules/objectron/calculators/tensors_to_objects_calculator_pb2.py +32 -0
  223. mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator_pb2.py +32 -0
  224. mediapipe/modules/objectron/object_detection_oidv4_labelmap.txt +24 -0
  225. mediapipe/modules/objectron/objectron_cpu.binarypb +0 -0
  226. mediapipe/modules/palm_detection/__init__.py +0 -0
  227. mediapipe/modules/palm_detection/palm_detection_full.tflite +0 -0
  228. mediapipe/modules/palm_detection/palm_detection_lite.tflite +0 -0
  229. mediapipe/modules/pose_detection/__init__.py +0 -0
  230. mediapipe/modules/pose_detection/pose_detection.tflite +0 -0
  231. mediapipe/modules/pose_landmark/__init__.py +0 -0
  232. mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb +0 -0
  233. mediapipe/modules/pose_landmark/pose_landmark_full.tflite +0 -0
  234. mediapipe/modules/selfie_segmentation/__init__.py +0 -0
  235. mediapipe/modules/selfie_segmentation/selfie_segmentation.tflite +0 -0
  236. mediapipe/modules/selfie_segmentation/selfie_segmentation_cpu.binarypb +0 -0
  237. mediapipe/modules/selfie_segmentation/selfie_segmentation_landscape.tflite +0 -0
  238. mediapipe/python/__init__.py +29 -0
  239. mediapipe/python/_framework_bindings.cpython-310-x86_64-linux-gnu.so +0 -0
  240. mediapipe/python/calculator_graph_test.py +251 -0
  241. mediapipe/python/image_frame_test.py +194 -0
  242. mediapipe/python/image_test.py +218 -0
  243. mediapipe/python/packet_creator.py +275 -0
  244. mediapipe/python/packet_getter.py +120 -0
  245. mediapipe/python/packet_test.py +533 -0
  246. mediapipe/python/solution_base.py +604 -0
  247. mediapipe/python/solution_base_test.py +396 -0
  248. mediapipe/python/solutions/__init__.py +27 -0
  249. mediapipe/python/solutions/download_utils.py +37 -0
  250. mediapipe/python/solutions/drawing_styles.py +249 -0
  251. mediapipe/python/solutions/drawing_utils.py +320 -0
  252. mediapipe/python/solutions/drawing_utils_test.py +258 -0
  253. mediapipe/python/solutions/face_detection.py +105 -0
  254. mediapipe/python/solutions/face_detection_test.py +92 -0
  255. mediapipe/python/solutions/face_mesh.py +125 -0
  256. mediapipe/python/solutions/face_mesh_connections.py +500 -0
  257. mediapipe/python/solutions/face_mesh_test.py +170 -0
  258. mediapipe/python/solutions/hands.py +153 -0
  259. mediapipe/python/solutions/hands_connections.py +32 -0
  260. mediapipe/python/solutions/hands_test.py +219 -0
  261. mediapipe/python/solutions/holistic.py +167 -0
  262. mediapipe/python/solutions/holistic_test.py +142 -0
  263. mediapipe/python/solutions/objectron.py +288 -0
  264. mediapipe/python/solutions/objectron_test.py +81 -0
  265. mediapipe/python/solutions/pose.py +192 -0
  266. mediapipe/python/solutions/pose_connections.py +22 -0
  267. mediapipe/python/solutions/pose_test.py +262 -0
  268. mediapipe/python/solutions/selfie_segmentation.py +76 -0
  269. mediapipe/python/solutions/selfie_segmentation_test.py +68 -0
  270. mediapipe/python/timestamp_test.py +78 -0
  271. mediapipe/tasks/__init__.py +14 -0
  272. mediapipe/tasks/cc/__init__.py +0 -0
  273. mediapipe/tasks/cc/audio/__init__.py +0 -0
  274. mediapipe/tasks/cc/audio/audio_classifier/__init__.py +0 -0
  275. mediapipe/tasks/cc/audio/audio_classifier/proto/__init__.py +0 -0
  276. mediapipe/tasks/cc/audio/audio_classifier/proto/audio_classifier_graph_options_pb2.py +35 -0
  277. mediapipe/tasks/cc/audio/audio_embedder/__init__.py +0 -0
  278. mediapipe/tasks/cc/audio/audio_embedder/proto/__init__.py +0 -0
  279. mediapipe/tasks/cc/audio/audio_embedder/proto/audio_embedder_graph_options_pb2.py +35 -0
  280. mediapipe/tasks/cc/audio/core/__init__.py +0 -0
  281. mediapipe/tasks/cc/audio/utils/__init__.py +0 -0
  282. mediapipe/tasks/cc/components/__init__.py +0 -0
  283. mediapipe/tasks/cc/components/calculators/__init__.py +0 -0
  284. mediapipe/tasks/cc/components/calculators/classification_aggregation_calculator_pb2.py +31 -0
  285. mediapipe/tasks/cc/components/calculators/score_calibration_calculator_pb2.py +35 -0
  286. mediapipe/tasks/cc/components/calculators/tensors_to_embeddings_calculator_pb2.py +32 -0
  287. mediapipe/tasks/cc/components/containers/__init__.py +0 -0
  288. mediapipe/tasks/cc/components/containers/proto/__init__.py +0 -0
  289. mediapipe/tasks/cc/components/containers/proto/classifications_pb2.py +30 -0
  290. mediapipe/tasks/cc/components/containers/proto/embeddings_pb2.py +35 -0
  291. mediapipe/tasks/cc/components/containers/proto/landmarks_detection_result_pb2.py +32 -0
  292. mediapipe/tasks/cc/components/processors/__init__.py +0 -0
  293. mediapipe/tasks/cc/components/processors/proto/__init__.py +0 -0
  294. mediapipe/tasks/cc/components/processors/proto/classification_postprocessing_graph_options_pb2.py +38 -0
  295. mediapipe/tasks/cc/components/processors/proto/classifier_options_pb2.py +27 -0
  296. mediapipe/tasks/cc/components/processors/proto/detection_postprocessing_graph_options_pb2.py +36 -0
  297. mediapipe/tasks/cc/components/processors/proto/detector_options_pb2.py +27 -0
  298. mediapipe/tasks/cc/components/processors/proto/embedder_options_pb2.py +27 -0
  299. mediapipe/tasks/cc/components/processors/proto/embedding_postprocessing_graph_options_pb2.py +32 -0
  300. mediapipe/tasks/cc/components/processors/proto/image_preprocessing_graph_options_pb2.py +34 -0
  301. mediapipe/tasks/cc/components/processors/proto/text_model_type_pb2.py +28 -0
  302. mediapipe/tasks/cc/components/processors/proto/text_preprocessing_graph_options_pb2.py +32 -0
  303. mediapipe/tasks/cc/components/utils/__init__.py +0 -0
  304. mediapipe/tasks/cc/core/__init__.py +0 -0
  305. mediapipe/tasks/cc/core/proto/__init__.py +0 -0
  306. mediapipe/tasks/cc/core/proto/acceleration_pb2.py +28 -0
  307. mediapipe/tasks/cc/core/proto/base_options_pb2.py +30 -0
  308. mediapipe/tasks/cc/core/proto/external_file_pb2.py +31 -0
  309. mediapipe/tasks/cc/core/proto/inference_subgraph_pb2.py +32 -0
  310. mediapipe/tasks/cc/core/proto/model_resources_calculator_pb2.py +32 -0
  311. mediapipe/tasks/cc/genai/__init__.py +0 -0
  312. mediapipe/tasks/cc/genai/inference/__init__.py +0 -0
  313. mediapipe/tasks/cc/genai/inference/c/__init__.py +0 -0
  314. mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
  315. mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
  316. mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +32 -0
  317. mediapipe/tasks/cc/genai/inference/calculators/model_data_calculator_pb2.py +27 -0
  318. mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
  319. mediapipe/tasks/cc/genai/inference/common/__init__.py +0 -0
  320. mediapipe/tasks/cc/genai/inference/proto/__init__.py +0 -0
  321. mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
  322. mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py +33 -0
  323. mediapipe/tasks/cc/genai/inference/proto/prompt_template_pb2.py +27 -0
  324. mediapipe/tasks/cc/genai/inference/proto/sampler_params_pb2.py +29 -0
  325. mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py +45 -0
  326. mediapipe/tasks/cc/genai/inference/utils/__init__.py +0 -0
  327. mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py +0 -0
  328. mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py +0 -0
  329. mediapipe/tasks/cc/metadata/__init__.py +0 -0
  330. mediapipe/tasks/cc/metadata/python/__init__.py +0 -0
  331. mediapipe/tasks/cc/metadata/python/_pywrap_metadata_version.cpython-310-x86_64-linux-gnu.so +0 -0
  332. mediapipe/tasks/cc/metadata/tests/__init__.py +0 -0
  333. mediapipe/tasks/cc/metadata/utils/__init__.py +0 -0
  334. mediapipe/tasks/cc/text/__init__.py +0 -0
  335. mediapipe/tasks/cc/text/custom_ops/__init__.py +0 -0
  336. mediapipe/tasks/cc/text/custom_ops/ragged/__init__.py +0 -0
  337. mediapipe/tasks/cc/text/custom_ops/sentencepiece/__init__.py +0 -0
  338. mediapipe/tasks/cc/text/custom_ops/sentencepiece/testdata/__init__.py +0 -0
  339. mediapipe/tasks/cc/text/language_detector/__init__.py +0 -0
  340. mediapipe/tasks/cc/text/language_detector/custom_ops/__init__.py +0 -0
  341. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/__init__.py +0 -0
  342. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/hash/__init__.py +0 -0
  343. mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/__init__.py +0 -0
  344. mediapipe/tasks/cc/text/text_classifier/__init__.py +0 -0
  345. mediapipe/tasks/cc/text/text_classifier/proto/__init__.py +0 -0
  346. mediapipe/tasks/cc/text/text_classifier/proto/text_classifier_graph_options_pb2.py +35 -0
  347. mediapipe/tasks/cc/text/text_embedder/__init__.py +0 -0
  348. mediapipe/tasks/cc/text/text_embedder/proto/__init__.py +0 -0
  349. mediapipe/tasks/cc/text/text_embedder/proto/text_embedder_graph_options_pb2.py +35 -0
  350. mediapipe/tasks/cc/text/tokenizers/__init__.py +0 -0
  351. mediapipe/tasks/cc/text/utils/__init__.py +0 -0
  352. mediapipe/tasks/cc/vision/__init__.py +0 -0
  353. mediapipe/tasks/cc/vision/core/__init__.py +0 -0
  354. mediapipe/tasks/cc/vision/custom_ops/__init__.py +0 -0
  355. mediapipe/tasks/cc/vision/face_detector/__init__.py +0 -0
  356. mediapipe/tasks/cc/vision/face_detector/proto/__init__.py +0 -0
  357. mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py +34 -0
  358. mediapipe/tasks/cc/vision/face_geometry/__init__.py +0 -0
  359. mediapipe/tasks/cc/vision/face_geometry/calculators/__init__.py +0 -0
  360. mediapipe/tasks/cc/vision/face_geometry/calculators/env_generator_calculator_pb2.py +28 -0
  361. mediapipe/tasks/cc/vision/face_geometry/calculators/geometry_pipeline_calculator_pb2.py +29 -0
  362. mediapipe/tasks/cc/vision/face_geometry/data/__init__.py +0 -0
  363. mediapipe/tasks/cc/vision/face_geometry/libs/__init__.py +0 -0
  364. mediapipe/tasks/cc/vision/face_geometry/proto/__init__.py +0 -0
  365. mediapipe/tasks/cc/vision/face_geometry/proto/environment_pb2.py +31 -0
  366. mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_graph_options_pb2.py +29 -0
  367. mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry_pb2.py +29 -0
  368. mediapipe/tasks/cc/vision/face_geometry/proto/geometry_pipeline_metadata_pb2.py +32 -0
  369. mediapipe/tasks/cc/vision/face_geometry/proto/mesh_3d_pb2.py +31 -0
  370. mediapipe/tasks/cc/vision/face_landmarker/__init__.py +0 -0
  371. mediapipe/tasks/cc/vision/face_landmarker/proto/__init__.py +0 -0
  372. mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options_pb2.py +34 -0
  373. mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options_pb2.py +37 -0
  374. mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options_pb2.py +35 -0
  375. mediapipe/tasks/cc/vision/face_landmarker/proto/tensors_to_face_landmarks_graph_options_pb2.py +32 -0
  376. mediapipe/tasks/cc/vision/face_stylizer/__init__.py +0 -0
  377. mediapipe/tasks/cc/vision/face_stylizer/calculators/__init__.py +0 -0
  378. mediapipe/tasks/cc/vision/face_stylizer/calculators/tensors_to_image_calculator_pb2.py +36 -0
  379. mediapipe/tasks/cc/vision/face_stylizer/proto/__init__.py +0 -0
  380. mediapipe/tasks/cc/vision/face_stylizer/proto/face_stylizer_graph_options_pb2.py +35 -0
  381. mediapipe/tasks/cc/vision/gesture_recognizer/__init__.py +0 -0
  382. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/__init__.py +0 -0
  383. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/combined_prediction_calculator_pb2.py +33 -0
  384. mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator_pb2.py +31 -0
  385. mediapipe/tasks/cc/vision/gesture_recognizer/proto/__init__.py +0 -0
  386. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_classifier_graph_options_pb2.py +35 -0
  387. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_embedder_graph_options_pb2.py +34 -0
  388. mediapipe/tasks/cc/vision/gesture_recognizer/proto/gesture_recognizer_graph_options_pb2.py +36 -0
  389. mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options_pb2.py +36 -0
  390. mediapipe/tasks/cc/vision/hand_detector/__init__.py +0 -0
  391. mediapipe/tasks/cc/vision/hand_detector/proto/__init__.py +0 -0
  392. mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_graph_options_pb2.py +34 -0
  393. mediapipe/tasks/cc/vision/hand_detector/proto/hand_detector_result_pb2.py +30 -0
  394. mediapipe/tasks/cc/vision/hand_landmarker/__init__.py +0 -0
  395. mediapipe/tasks/cc/vision/hand_landmarker/calculators/__init__.py +0 -0
  396. mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator_pb2.py +31 -0
  397. mediapipe/tasks/cc/vision/hand_landmarker/proto/__init__.py +0 -0
  398. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarker_graph_options_pb2.py +36 -0
  399. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options_pb2.py +34 -0
  400. mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options_pb2.py +28 -0
  401. mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py +0 -0
  402. mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py +0 -0
  403. mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py +34 -0
  404. mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py +29 -0
  405. mediapipe/tasks/cc/vision/image_classifier/__init__.py +0 -0
  406. mediapipe/tasks/cc/vision/image_classifier/proto/__init__.py +0 -0
  407. mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py +35 -0
  408. mediapipe/tasks/cc/vision/image_embedder/__init__.py +0 -0
  409. mediapipe/tasks/cc/vision/image_embedder/proto/__init__.py +0 -0
  410. mediapipe/tasks/cc/vision/image_embedder/proto/image_embedder_graph_options_pb2.py +35 -0
  411. mediapipe/tasks/cc/vision/image_generator/__init__.py +0 -0
  412. mediapipe/tasks/cc/vision/image_generator/diffuser/__init__.py +0 -0
  413. mediapipe/tasks/cc/vision/image_generator/diffuser/stable_diffusion_iterate_calculator_pb2.py +40 -0
  414. mediapipe/tasks/cc/vision/image_generator/proto/__init__.py +0 -0
  415. mediapipe/tasks/cc/vision/image_generator/proto/conditioned_image_graph_options_pb2.py +40 -0
  416. mediapipe/tasks/cc/vision/image_generator/proto/control_plugin_graph_options_pb2.py +34 -0
  417. mediapipe/tasks/cc/vision/image_generator/proto/image_generator_graph_options_pb2.py +30 -0
  418. mediapipe/tasks/cc/vision/image_segmenter/__init__.py +0 -0
  419. mediapipe/tasks/cc/vision/image_segmenter/calculators/__init__.py +0 -0
  420. mediapipe/tasks/cc/vision/image_segmenter/calculators/tensors_to_segmentation_calculator_pb2.py +34 -0
  421. mediapipe/tasks/cc/vision/image_segmenter/proto/__init__.py +0 -0
  422. mediapipe/tasks/cc/vision/image_segmenter/proto/image_segmenter_graph_options_pb2.py +35 -0
  423. mediapipe/tasks/cc/vision/image_segmenter/proto/segmenter_options_pb2.py +33 -0
  424. mediapipe/tasks/cc/vision/interactive_segmenter/__init__.py +0 -0
  425. mediapipe/tasks/cc/vision/object_detector/__init__.py +0 -0
  426. mediapipe/tasks/cc/vision/object_detector/proto/__init__.py +0 -0
  427. mediapipe/tasks/cc/vision/object_detector/proto/object_detector_options_pb2.py +34 -0
  428. mediapipe/tasks/cc/vision/pose_detector/__init__.py +0 -0
  429. mediapipe/tasks/cc/vision/pose_detector/proto/__init__.py +0 -0
  430. mediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options_pb2.py +34 -0
  431. mediapipe/tasks/cc/vision/pose_landmarker/__init__.py +0 -0
  432. mediapipe/tasks/cc/vision/pose_landmarker/proto/__init__.py +0 -0
  433. mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarker_graph_options_pb2.py +36 -0
  434. mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options_pb2.py +34 -0
  435. mediapipe/tasks/cc/vision/utils/__init__.py +0 -0
  436. mediapipe/tasks/cc/vision/utils/ghum/__init__.py +0 -0
  437. mediapipe/tasks/metadata/image_segmenter_metadata_schema.fbs +59 -0
  438. mediapipe/tasks/metadata/image_segmenter_metadata_schema_py_generated.py +108 -0
  439. mediapipe/tasks/metadata/metadata_schema.fbs +732 -0
  440. mediapipe/tasks/metadata/metadata_schema_py_generated.py +3251 -0
  441. mediapipe/tasks/metadata/object_detector_metadata_schema.fbs +98 -0
  442. mediapipe/tasks/metadata/object_detector_metadata_schema_py_generated.py +674 -0
  443. mediapipe/tasks/metadata/schema_py_generated.py +18438 -0
  444. mediapipe/tasks/python/__init__.py +27 -0
  445. mediapipe/tasks/python/audio/__init__.py +33 -0
  446. mediapipe/tasks/python/audio/audio_classifier.py +324 -0
  447. mediapipe/tasks/python/audio/audio_embedder.py +285 -0
  448. mediapipe/tasks/python/audio/core/__init__.py +16 -0
  449. mediapipe/tasks/python/audio/core/audio_record.py +125 -0
  450. mediapipe/tasks/python/audio/core/audio_task_running_mode.py +29 -0
  451. mediapipe/tasks/python/audio/core/base_audio_task_api.py +181 -0
  452. mediapipe/tasks/python/benchmark/__init__.py +13 -0
  453. mediapipe/tasks/python/benchmark/benchmark_utils.py +70 -0
  454. mediapipe/tasks/python/benchmark/vision/__init__.py +13 -0
  455. mediapipe/tasks/python/benchmark/vision/benchmark.py +99 -0
  456. mediapipe/tasks/python/benchmark/vision/core/__init__.py +14 -0
  457. mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +40 -0
  458. mediapipe/tasks/python/components/__init__.py +13 -0
  459. mediapipe/tasks/python/components/containers/__init__.py +53 -0
  460. mediapipe/tasks/python/components/containers/audio_data.py +137 -0
  461. mediapipe/tasks/python/components/containers/bounding_box.py +73 -0
  462. mediapipe/tasks/python/components/containers/category.py +78 -0
  463. mediapipe/tasks/python/components/containers/classification_result.py +111 -0
  464. mediapipe/tasks/python/components/containers/detections.py +181 -0
  465. mediapipe/tasks/python/components/containers/embedding_result.py +89 -0
  466. mediapipe/tasks/python/components/containers/keypoint.py +77 -0
  467. mediapipe/tasks/python/components/containers/landmark.py +122 -0
  468. mediapipe/tasks/python/components/containers/landmark_detection_result.py +106 -0
  469. mediapipe/tasks/python/components/containers/rect.py +109 -0
  470. mediapipe/tasks/python/components/processors/__init__.py +23 -0
  471. mediapipe/tasks/python/components/processors/classifier_options.py +86 -0
  472. mediapipe/tasks/python/components/utils/__init__.py +13 -0
  473. mediapipe/tasks/python/components/utils/cosine_similarity.py +68 -0
  474. mediapipe/tasks/python/core/__init__.py +13 -0
  475. mediapipe/tasks/python/core/base_options.py +121 -0
  476. mediapipe/tasks/python/core/optional_dependencies.py +25 -0
  477. mediapipe/tasks/python/core/task_info.py +139 -0
  478. mediapipe/tasks/python/genai/__init__.py +14 -0
  479. mediapipe/tasks/python/genai/bundler/__init__.py +23 -0
  480. mediapipe/tasks/python/genai/bundler/llm_bundler.py +130 -0
  481. mediapipe/tasks/python/genai/bundler/llm_bundler_test.py +168 -0
  482. mediapipe/tasks/python/genai/converter/__init__.py +24 -0
  483. mediapipe/tasks/python/genai/converter/converter_base.py +179 -0
  484. mediapipe/tasks/python/genai/converter/converter_factory.py +79 -0
  485. mediapipe/tasks/python/genai/converter/llm_converter.py +374 -0
  486. mediapipe/tasks/python/genai/converter/llm_converter_test.py +63 -0
  487. mediapipe/tasks/python/genai/converter/pytorch_converter.py +318 -0
  488. mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +86 -0
  489. mediapipe/tasks/python/genai/converter/quantization_util.py +516 -0
  490. mediapipe/tasks/python/genai/converter/quantization_util_test.py +259 -0
  491. mediapipe/tasks/python/genai/converter/safetensors_converter.py +580 -0
  492. mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +83 -0
  493. mediapipe/tasks/python/genai/converter/weight_bins_writer.py +120 -0
  494. mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py +95 -0
  495. mediapipe/tasks/python/metadata/__init__.py +13 -0
  496. mediapipe/tasks/python/metadata/flatbuffers_lib/_pywrap_flatbuffers.cpython-310-x86_64-linux-gnu.so +0 -0
  497. mediapipe/tasks/python/metadata/metadata.py +928 -0
  498. mediapipe/tasks/python/metadata/metadata_displayer_cli.py +34 -0
  499. mediapipe/tasks/python/metadata/metadata_writers/__init__.py +13 -0
  500. mediapipe/tasks/python/metadata/metadata_writers/face_stylizer.py +138 -0
  501. mediapipe/tasks/python/metadata/metadata_writers/image_classifier.py +71 -0
  502. mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +170 -0
  503. mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +1166 -0
  504. mediapipe/tasks/python/metadata/metadata_writers/metadata_writer.py +845 -0
  505. mediapipe/tasks/python/metadata/metadata_writers/model_asset_bundle_utils.py +71 -0
  506. mediapipe/tasks/python/metadata/metadata_writers/object_detector.py +331 -0
  507. mediapipe/tasks/python/metadata/metadata_writers/text_classifier.py +119 -0
  508. mediapipe/tasks/python/metadata/metadata_writers/writer_utils.py +91 -0
  509. mediapipe/tasks/python/test/__init__.py +13 -0
  510. mediapipe/tasks/python/test/audio/__init__.py +13 -0
  511. mediapipe/tasks/python/test/audio/audio_classifier_test.py +387 -0
  512. mediapipe/tasks/python/test/audio/audio_embedder_test.py +297 -0
  513. mediapipe/tasks/python/test/test_utils.py +196 -0
  514. mediapipe/tasks/python/test/text/__init__.py +13 -0
  515. mediapipe/tasks/python/test/text/language_detector_test.py +228 -0
  516. mediapipe/tasks/python/test/text/text_classifier_test.py +235 -0
  517. mediapipe/tasks/python/test/text/text_embedder_test.py +326 -0
  518. mediapipe/tasks/python/test/vision/__init__.py +13 -0
  519. mediapipe/tasks/python/test/vision/face_aligner_test.py +190 -0
  520. mediapipe/tasks/python/test/vision/face_detector_test.py +523 -0
  521. mediapipe/tasks/python/test/vision/face_landmarker_test.py +565 -0
  522. mediapipe/tasks/python/test/vision/face_stylizer_test.py +191 -0
  523. mediapipe/tasks/python/test/vision/hand_landmarker_test.py +437 -0
  524. mediapipe/tasks/python/test/vision/holistic_landmarker_test.py +544 -0
  525. mediapipe/tasks/python/test/vision/image_classifier_test.py +657 -0
  526. mediapipe/tasks/python/test/vision/image_embedder_test.py +423 -0
  527. mediapipe/tasks/python/test/vision/image_segmenter_test.py +512 -0
  528. mediapipe/tasks/python/test/vision/interactive_segmenter_test.py +341 -0
  529. mediapipe/tasks/python/test/vision/object_detector_test.py +493 -0
  530. mediapipe/tasks/python/test/vision/pose_landmarker_test.py +518 -0
  531. mediapipe/tasks/python/text/__init__.py +35 -0
  532. mediapipe/tasks/python/text/core/__init__.py +16 -0
  533. mediapipe/tasks/python/text/core/base_text_task_api.py +54 -0
  534. mediapipe/tasks/python/text/language_detector.py +220 -0
  535. mediapipe/tasks/python/text/text_classifier.py +187 -0
  536. mediapipe/tasks/python/text/text_embedder.py +188 -0
  537. mediapipe/tasks/python/vision/__init__.py +90 -0
  538. mediapipe/tasks/python/vision/core/__init__.py +14 -0
  539. mediapipe/tasks/python/vision/core/base_vision_task_api.py +226 -0
  540. mediapipe/tasks/python/vision/core/image_processing_options.py +39 -0
  541. mediapipe/tasks/python/vision/core/vision_task_running_mode.py +31 -0
  542. mediapipe/tasks/python/vision/face_aligner.py +158 -0
  543. mediapipe/tasks/python/vision/face_detector.py +332 -0
  544. mediapipe/tasks/python/vision/face_landmarker.py +3244 -0
  545. mediapipe/tasks/python/vision/face_stylizer.py +158 -0
  546. mediapipe/tasks/python/vision/gesture_recognizer.py +480 -0
  547. mediapipe/tasks/python/vision/hand_landmarker.py +504 -0
  548. mediapipe/tasks/python/vision/holistic_landmarker.py +576 -0
  549. mediapipe/tasks/python/vision/image_classifier.py +358 -0
  550. mediapipe/tasks/python/vision/image_embedder.py +362 -0
  551. mediapipe/tasks/python/vision/image_segmenter.py +433 -0
  552. mediapipe/tasks/python/vision/interactive_segmenter.py +285 -0
  553. mediapipe/tasks/python/vision/object_detector.py +389 -0
  554. mediapipe/tasks/python/vision/pose_landmarker.py +455 -0
  555. mediapipe/util/__init__.py +0 -0
  556. mediapipe/util/analytics/__init__.py +0 -0
  557. mediapipe/util/analytics/mediapipe_log_extension_pb2.py +44 -0
  558. mediapipe/util/analytics/mediapipe_logging_enums_pb2.py +37 -0
  559. mediapipe/util/audio_decoder_pb2.py +33 -0
  560. mediapipe/util/color_pb2.py +33 -0
  561. mediapipe/util/label_map_pb2.py +27 -0
  562. mediapipe/util/render_data_pb2.py +58 -0
  563. mediapipe/util/sequence/__init__.py +14 -0
  564. mediapipe/util/sequence/media_sequence.py +716 -0
  565. mediapipe/util/sequence/media_sequence_test.py +290 -0
  566. mediapipe/util/sequence/media_sequence_util.py +800 -0
  567. mediapipe/util/sequence/media_sequence_util_test.py +389 -0
  568. mediapipe/util/tracking/__init__.py +0 -0
  569. mediapipe/util/tracking/box_detector_pb2.py +39 -0
  570. mediapipe/util/tracking/box_tracker_pb2.py +32 -0
  571. mediapipe/util/tracking/camera_motion_pb2.py +31 -0
  572. mediapipe/util/tracking/flow_packager_pb2.py +60 -0
  573. mediapipe/util/tracking/frame_selection_pb2.py +35 -0
  574. mediapipe/util/tracking/frame_selection_solution_evaluator_pb2.py +28 -0
  575. mediapipe/util/tracking/motion_analysis_pb2.py +35 -0
  576. mediapipe/util/tracking/motion_estimation_pb2.py +66 -0
  577. mediapipe/util/tracking/motion_models_pb2.py +42 -0
  578. mediapipe/util/tracking/motion_saliency_pb2.py +26 -0
  579. mediapipe/util/tracking/push_pull_filtering_pb2.py +26 -0
  580. mediapipe/util/tracking/region_flow_computation_pb2.py +59 -0
  581. mediapipe/util/tracking/region_flow_pb2.py +49 -0
  582. mediapipe/util/tracking/tone_estimation_pb2.py +45 -0
  583. mediapipe/util/tracking/tone_models_pb2.py +32 -0
  584. mediapipe/util/tracking/tracked_detection_manager_config_pb2.py +26 -0
  585. mediapipe/util/tracking/tracking_pb2.py +73 -0
  586. mediapipe_nightly-0.10.21.post20241223.dist-info/LICENSE +218 -0
  587. mediapipe_nightly-0.10.21.post20241223.dist-info/METADATA +199 -0
  588. mediapipe_nightly-0.10.21.post20241223.dist-info/RECORD +593 -0
  589. mediapipe_nightly-0.10.21.post20241223.dist-info/WHEEL +5 -0
  590. mediapipe_nightly-0.10.21.post20241223.dist-info/top_level.txt +4 -0
  591. mediapipe_nightly.libs/libEGL-48f73270.so.1.1.0 +0 -0
  592. mediapipe_nightly.libs/libGLESv2-ed5eda4f.so.2.1.0 +0 -0
  593. mediapipe_nightly.libs/libGLdispatch-64b28464.so.0.0.0 +0 -0
mediapipe/tasks/metadata/metadata_schema.fbs
@@ -0,0 +1,732 @@
+ // Copyright 2022 The MediaPipe Authors.
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ //     http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+
+ namespace tflite;
+
+ // TFLite metadata contains both human readable and machine readable information
+ // about what the model does and how to use the model. It can be used as a
+ // README file, which elaborates the details of the model, each input/output
+ // tensor, and each associated file.
+ //
+ // An important use case of TFLite metadata is the TFLite codegen tool, which
+ // automatically generates the model interface based on the properties of the
+ // model and the tensors. The model interface provides high-level APIs to
+ // interact with the model, such as preprocessing the input data and running
+ // inferences.
+ //
+ // Entries marked with "<Codegen usage>" are used in the TFLite codegen tool to
+ // generate the model interface. It is recommended to fill in at least those
+ // entries to boost the codegen performance.
+
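The schema below is what the wheel's own metadata tooling (see mediapipe/tasks/python/metadata/metadata.py in the file list above) parses. As a minimal sketch, assuming the bundled module mirrors the tflite-support MetadataDisplayer API of the same name, and with "model.tflite" as a placeholder path, the packed metadata can be dumped like this:

    # Hedged sketch: dump a model's metadata and its packed associated files.
    # Assumes MetadataDisplayer matches the tflite-support API of the same
    # name; "model.tflite" is a placeholder path.
    from mediapipe.tasks.python.metadata import metadata

    displayer = metadata.MetadataDisplayer.with_model_file("model.tflite")
    print(displayer.get_metadata_json())                # ModelMetadata as JSON
    print(displayer.get_packed_associated_file_list())  # packed file names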
+ // The Metadata schema is versioned by the Semantic versioning number, such as
+ // MAJOR.MINOR.PATCH. It tracks the schema changes according to the rules below:
+ //  * Bump up the MAJOR number when making potentially backwards incompatible
+ //    changes. It must be incremented if the new changes break the backwards
+ //    compatibility. It may also include minor and patch level changes as
+ //    needed. The true backwards compatibility is indicated by the file
+ //    identifier.
+ //  * Bump up the MINOR number when making backwards compatible updates for
+ //    major features, such as supporting new content types or adding new
+ //    processing units.
+ //  * Bump up the PATCH number when making small backwards compatible changes,
+ //    such as adding new fields or deprecating certain fields (not deleting
+ //    them).
+ //
+ // ModelMetadata.min_parser_version indicates the minimum necessary metadata
+ // parser version to fully understand all fields in a given metadata flatbuffer.
+ //
+ // New fields and types will have associated comments with the schema version
+ // for which they were added.
+ //
+ // Schema Semantic version: 1.5.0
+
+ // This indicates the flatbuffer compatibility. The number will bump up when a
+ // breaking change is applied to the schema, such as removing fields or adding
+ // new fields to the middle of a table.
+ file_identifier "M001";
+
+ // History:
+ // 1.0.1 - Added VOCABULARY type to AssociatedFileType.
+ // 1.1.0 - Added BertTokenizerOptions to ProcessUnitOptions.
+ //         Added SentencePieceTokenizerOptions to ProcessUnitOptions.
+ //         Added input_process_units to SubGraphMetadata.
+ //         Added output_process_units to SubGraphMetadata.
+ // 1.2.0 - Added input_tensor_group to SubGraphMetadata.
+ //         Added output_tensor_group to SubGraphMetadata.
+ // 1.2.1 - Added RegexTokenizerOptions to ProcessUnitOptions.
+ // 1.3.0 - Added AudioProperties to ContentProperties.
+ // 1.4.0 - Added SCANN_INDEX_FILE type to AssociatedFileType.
+ // 1.4.1 - Added version to AssociatedFile.
+ // 1.5.0 - Added CustomMetadata in SubGraphMetadata.
+
+ // File extension of any written files.
+ file_extension "tflitemeta";
+
+ enum AssociatedFileType : byte {
+   UNKNOWN = 0,
+
+   // Files such as readme.txt.
+   DESCRIPTIONS = 1,
+
+   // Contains a list of labels (characters separated by "\n" or in lines) that
+   // annotate a certain axis of the tensor. For example, the label file in
+   // image classification. Those labels annotate the output tensor, such that
+   // each value in the output tensor is the probability of the corresponding
+   // category specified by the label. See the example label file used in image
+   // classification [1].
+   //
+   // <Codegen usage>:
+   // If an output tensor has an associated file as TENSOR_AXIS_LABELS, return
+   // the output as a mapping between the labels and probability in the model
+   // interface.
+   // If multiple files of the same type are present, the first one is used by
+   // default; additional ones are to be distinguished from one another by their
+   // specified locale.
+   //
+   // TODO: Add github example link.
+   TENSOR_AXIS_LABELS = 2,
+
+   // Contains a list of labels (characters separated by "\n" or in lines) that
+   // tensor values correspond to. For example, in the object detection model,
+   // one of the output tensors is the detected classes. And each value in the
+   // tensor refers to the index of label in the category label file. See the
+   // example label file used in object detection [1].
+   //
+   // <Codegen usage>:
+   // If an output tensor has an associated file as TENSOR_VALUE_LABELS, convert
+   // the tensor values into labels, and return a list of string as the output.
+   // If multiple files of the same type are present, the first one is used by
+   // default; additional ones are to be distinguished from one another by their
+   // specified locale.
+   //
+   // TODO: Add github example link.
+   TENSOR_VALUE_LABELS = 3,
+
+   // Contains sigmoid-based score calibration parameters, formatted as CSV.
+   // Lines contain for each index of an output tensor the scale, slope, offset
+   // and (optional) min_score parameters to be used for sigmoid fitting (in
+   // this order and in `strtof`-compatible [1] format). Scale should be a
+   // non-negative value.
+   // A line may be left empty to default calibrated scores for this index to
+   // default_score.
+   // In summary, each line should thus contain 0, 3 or 4 comma-separated
+   // values.
+   //
+   // See the example score calibration file used in image classification [2].
+   //
+   // See documentation for ScoreCalibrationOptions for details.
+   //
+   // [1]: https://en.cppreference.com/w/c/string/byte/strtof
+   // TODO: Add github example link.
+   TENSOR_AXIS_SCORE_CALIBRATION = 4,
+
+   // Contains a list of unique words (characters separated by "\n" or in lines)
+   // that help to convert natural language words to embedding vectors.
+   //
+   // See the example vocab file used in text classification [1].
+   //
+   // TODO: Add github example link.
+   // Added in: 1.0.1
+   VOCABULARY = 5,
+
+   // TODO: introduce the ScaNN index file with links once the code
+   // is released.
+   // Contains an on-device ScaNN index file with LevelDB format.
+   // Added in: 1.4.0
+   SCANN_INDEX_FILE = 6,
+ }
+
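The TENSOR_AXIS_LABELS and TENSOR_AXIS_SCORE_CALIBRATION entries above both describe plain text files, so consuming them is easy to sketch. The following is illustrative only: the labels and parameters are placeholders, and the exact sigmoid form is an assumption (the precise transformation is governed by ScoreCalibrationOptions, which is outside this excerpt):

    # Hedged sketch: pair axis labels with scores, then apply one calibration
    # line. Labels, scores, and the exact sigmoid form are assumptions.
    import math

    # Stand-in for a TENSOR_AXIS_LABELS file, normally read one label per line.
    labels = ["cat", "dog", "bird"]
    scores = dict(zip(labels, [0.1, 0.7, 0.2]))

    def calibrate(x, scale, slope, offset, min_score=None):
        # One CSV line carries scale, slope, offset[, min_score]; a common
        # sigmoid fit is scale / (1 + exp(-(slope * x + offset))).
        y = scale / (1.0 + math.exp(-(slope * x + offset)))
        return y if min_score is None else max(y, min_score)

    print({k: calibrate(v, 1.0, 1.0, 0.0) for k, v in scores.items()})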
+ table AssociatedFile {
+   // Name of this file. Needs to be exactly the same as the name of the actual
+   // file packed into the TFLite model as a zip file.
+   //
+   // <Codegen usage>:
+   // Locates the actual file in the TFLite model.
+   name:string;
+
+   // A description of what the file is.
+   description:string;
+
+   // Type of the associated file. There may be special pre/post processing for
+   // some types. For example in image classification, a label file of the
+   // output will be used to convert the object index into a string.
+   //
+   // <Codegen usage>:
+   // Determines how to process the corresponding tensor.
+   type:AssociatedFileType;
+
+   // An optional locale for this associated file (if applicable). It is
+   // recommended to use an ISO 639-1 letter code (e.g. "en" for English),
+   // optionally completed by a two letter region code (e.g. "en-US" for US
+   // English and "en-CA" for Canadian English).
+   // Leverage this in order to specify e.g. multiple label files translated in
+   // different languages.
+   locale:string;
+
+   // Version of the file specified by model creators.
+   // Added in: 1.4.1
+   version:string;
+ }
+
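Since the table above says associated files are packed into the TFLite model as a zip file (appended after the FlatBuffer), the AssociatedFile.name entries can be listed with the standard library alone, with no TFLite tooling. A sketch, with a placeholder model path:

    # The ZIP central directory sits at the end of the file, so zipfile can
    # open a metadata-bearing .tflite directly. "model.tflite" is a
    # placeholder path.
    import zipfile

    with zipfile.ZipFile("model.tflite") as z:
        print(z.namelist())        # should match the AssociatedFile.name entries
        first = z.namelist()[0]
        print(z.read(first)[:80])  # e.g. peek at a packed label file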
+ // The basic content type for all tensors.
+ //
+ // <Codegen usage>:
+ // Input feature tensors:
+ //   1. Generates the method to load data from a TensorBuffer.
+ //   2. Creates the preprocessing logic. The default processing pipeline is:
+ //      [NormalizeOp, QuantizeOp].
+ // Output feature tensors:
+ //   1. Generates the method to return the output data to a TensorBuffer.
+ //   2. Creates the post-processing logic. The default processing pipeline is:
+ //      [DeQuantizeOp].
+ table FeatureProperties {
+ }
+
+ // The type of color space of an image.
+ enum ColorSpaceType : byte {
+   UNKNOWN = 0,
+   RGB = 1,
+   GRAYSCALE = 2,
+ }
+
+ table ImageSize {
+   width:uint;
+   height:uint;
+ }
+
+ // The properties for image tensors.
+ //
+ // <Codegen usage>:
+ // Input image tensors:
+ //   1. Generates the method to load an image from a TensorImage.
+ //   2. Creates the preprocessing logic. The default processing pipeline is:
+ //      [ResizeOp, NormalizeOp, QuantizeOp].
+ // Output image tensors:
+ //   1. Generates the method to return the output data to a TensorImage.
+ //   2. Creates the post-processing logic. The default processing pipeline is:
+ //      [DeQuantizeOp].
+ table ImageProperties {
+   // The color space of the image.
+   //
+   // <Codegen usage>:
+   // Determines how to convert the color space of a given image from users.
+   color_space:ColorSpaceType;
+
+   // Indicates the default value of image width and height if the tensor shape
+   // is dynamic. For a fixed-size tensor, this size will be consistent with the
+   // expected size.
+   default_size:ImageSize;
+ }
+
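The default input pipeline named above, [ResizeOp, NormalizeOp, QuantizeOp], is simple enough to write out by hand. A hedged NumPy sketch; the 224x224 size, the mean/std, and the quantization scale/zero-point are placeholders that a real pipeline would read from ImageProperties, NormalizationOptions, and the tensor's quantization parameters:

    # Hedged sketch of [ResizeOp, NormalizeOp, QuantizeOp] with placeholder
    # constants; "frame" stands in for an already-resized 224x224 RGB image.
    import numpy as np

    frame = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # ResizeOp elided
    x = (frame.astype(np.float32) - 127.5) / 127.5  # NormalizeOp: (x - mean) / std
    q = np.round(x / 0.0078125 + 128)               # QuantizeOp: x / scale + zero_point
    q = np.clip(q, 0, 255).astype(np.uint8)[np.newaxis]  # add the batch axis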
+ // The properties for tensors representing bounding boxes.
+ //
+ // <Codegen usage>:
+ // Input image tensors: NA.
+ // Output image tensors: parses the values into a data structure that represents
+ // bounding boxes. For example, in the generated wrapper for Android, it returns
+ // the output as android.graphics.Rect objects.
+ enum BoundingBoxType : byte {
+   UNKNOWN = 0,
+
+   // Represents the bounding box by using the combination of boundaries,
+   // {left, top, right, bottom}.
+   // The default order is {left, top, right, bottom}. Other orders can be
+   // indicated by BoundingBoxProperties.index.
+   BOUNDARIES = 1,
+
+   // Represents the bounding box by using the upper_left corner, width and
+   // height.
+   // The default order is {upper_left_x, upper_left_y, width, height}. Other
+   // orders can be indicated by BoundingBoxProperties.index.
+   UPPER_LEFT = 2,
+
+   // Represents the bounding box by using the center of the box, width and
+   // height. The default order is {center_x, center_y, width, height}. Other
+   // orders can be indicated by BoundingBoxProperties.index.
+   CENTER = 3,
+ }
+
+ // The properties for audio tensors.
+ // Added in: 1.3.0
+ table AudioProperties {
+   // The sample rate in Hz when the audio was captured.
+   sample_rate:uint;
+
+   // The channel count of the audio.
+   channels:uint;
+ }
+
+ enum CoordinateType : byte {
+   // The coordinates are float values from 0 to 1.
+   RATIO = 0,
+   // The coordinates are integers.
+   PIXEL = 1,
+ }
+
+ table BoundingBoxProperties {
+   // Denotes the order of the elements defined in each bounding box type. An
+   // empty index array represents the default order of each bounding box type.
+   // For example, to denote the default order of BOUNDARIES, {left, top, right,
+   // bottom}, the index should be {0, 1, 2, 3}. To denote the order {left,
+   // right, top, bottom}, the order should be {0, 2, 1, 3}.
+   //
+   // The index array can be applied to all bounding box types to adjust the
+   // order of their corresponding underlying elements.
+   //
+   // <Codegen usage>:
+   // Indicates how to parse the bounding box values.
+   index:[uint];
+
+   // <Codegen usage>:
+   // Indicates how to parse the bounding box values.
+   type:BoundingBoxType;
+
+   // <Codegen usage>:
+   // Indicates how to convert the bounding box back to the original image in
+   // pixels.
+   coordinate_type:CoordinateType;
+ }
+
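Taken together, index, type, and coordinate_type fully determine how to decode a box tensor. A hedged sketch for BOUNDARIES boxes with RATIO coordinates, under one reading of the index semantics (stored element k lands in canonical slot index[k], and an empty index means the default order):

    # Hedged sketch: decode one BOUNDARIES / RATIO box back to pixels.
    def decode_box(values, index, img_w, img_h):
        order = index or [0, 1, 2, 3]    # empty index => default order
        canonical = [0.0] * 4
        for k, slot in enumerate(order):
            canonical[slot] = values[k]  # stored element k -> canonical slot
        left, top, right, bottom = canonical
        # CoordinateType.RATIO: floats in [0, 1], so scale by the image size.
        return (left * img_w, top * img_h, right * img_w, bottom * img_h)

    print(decode_box([0.1, 0.2, 0.6, 0.9], [], 640, 480))  # (64.0, 96.0, 384.0, 432.0)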
301
+ union ContentProperties {
302
+ FeatureProperties,
303
+ ImageProperties,
304
+ BoundingBoxProperties,
305
+ // Added in: 1.3.0
306
+ AudioProperties,
307
+ }
308
+
309
+ table ValueRange {
310
+ min:int;
311
+ max:int;
312
+ }
313
+
+ table Content {
+   // The properties that the content may have, indicating the type of the
+   // Content.
+   //
+   // <Codegen usage>:
+   // Indicates how to process the tensor.
+   content_properties:ContentProperties;
+
+   // The range of dimensions that the content corresponds to. A NULL
+   // "range" indicates that the content uses up all dimensions,
+   // except the batch axis if applied.
+   //
+   // Here are all the possible situations of how a tensor is composed.
+   // Case 1: The tensor is a single object, such as an image.
+   // For example, the input of an image classifier
+   // (https://www.tensorflow.org/lite/models/image_classification/overview),
+   // a tensor of shape [1, 224, 224, 3]. Dimensions 1 to 3 correspond to the
+   // image. Since dimension 0 is a batch axis, which can be ignored,
+   // "range" can be left as NULL.
+   //
+   // Case 2: The tensor contains multiple instances of the same object.
+   // For example, the output tensor of detected bounding boxes of an object
+   // detection model
+   // (https://www.tensorflow.org/lite/models/object_detection/overview).
+   // The tensor shape is [1, 10, 4]. Here is what the three dimensions
+   // represent:
+   // dimension 0: the batch axis.
+   // dimension 1: the 10 objects detected with the highest confidence.
+   // dimension 2: the bounding boxes of the 10 detected objects.
+   // The tensor is essentially 10 bounding boxes. In this case,
+   // "range" should be {min=2; max=2;}.
+   //
+   // The output tensor of scores of the above object detection model has shape
+   // [1, 10], where
+   // dimension 0: the batch axis;
+   // dimension 1: the scores of the 10 detected objects.
+   // Set "range" to the number of dimensions, which is {min=2; max=2;}, to
+   // denote that every element in the tensor is an individual content object,
+   // i.e. a score in this example.
+   //
+   // Another example is the pose estimation model
+   // (https://www.tensorflow.org/lite/models/pose_estimation/overview).
+   // The output tensor of heatmaps is in the shape of [1, 9, 9, 17].
+   // Here is what the four dimensions represent:
+   // dimension 0: the batch axis.
+   // dimension 1/2: the heatmap image.
+   // dimension 3: 17 body parts of a person.
+   // Even though the last axis is body part, the real content of this tensor
+   // is the heatmap. "range" should be {min=1; max=2;}.
+   //
+   // Case 3: The tensor contains multiple different objects. (Not supported by
+   // Content at this point).
+   // Sometimes a tensor may contain multiple different objects, thus different
+   // contents. This is very common for regression models. For example, a model
+   // to predict fuel efficiency
+   // (https://www.tensorflow.org/tutorials/keras/regression).
+   // The input tensor has shape [1, 9], consisting of 9 features, such as
+   // "Cylinders", "Displacement", "Weight", etc. In this case, dimension 1
+   // contains 9 different contents. However, since these sub-dimension objects
+   // barely need to be specifically processed, their contents are not recorded
+   // in the metadata. The name of each dimension can still be set through
+   // TensorMetadata.dimension_names.
+   //
+   // Note that if it is not case 3, a tensor can only have one content type.
+   //
+   // <Codegen usage>:
+   // Case 1: return a processed single object of a certain content type.
+   // Case 2: return a list of processed objects of a certain content type. The
+   // generated model interface has APIs to randomly access those objects from
+   // the output.
+   range:ValueRange;
+ }
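+ // Editorial illustration (not part of the schema): the Content for the
+ // detection-box output in Case 2 above could be rendered in JSON roughly as
+ //   {
+ //     "content_properties_type": "BoundingBoxProperties",
+ //     "content_properties": {
+ //       "index": [1, 0, 3, 2],
+ //       "type": "BOUNDARIES",
+ //       "coordinate_type": "RATIO"
+ //     },
+ //     "range": { "min": 2, "max": 2 }
+ //   }
+ // where the stored order {top, left, bottom, right} is an assumption about
+ // the model, not something mandated by the schema.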
+
+ // Parameters that are used when normalizing the tensor.
+ table NormalizationOptions {
+   // mean and std are normalization parameters. Tensor values are normalized
+   // on a per-channel basis, by the formula
+   //   (x - mean) / std.
+   // If there is only one value in mean or std, we'll propagate the value to
+   // all channels.
+   //
+   // Quantized models share the same normalization parameters as their
+   // corresponding float models. For example, an image input tensor may have
+   // the normalization parameter of
+   //   mean = 127.5f and std = 127.5f.
+   // The image value will be normalized from [0, 255] to [-1, 1].
+   // Then, for quantized models, the image data should be further quantized
+   // according to the quantization parameters. In the case of uint8, the image
+   // data will be scaled back to [0, 255], while for int8, the image data will
+   // be scaled to [-128, 127].
+   //
+   // Both the normalization parameters and quantization parameters can be
+   // retrieved through the metadata extractor library.
+   // TODO: add link for the metadata extractor library.
+
+   // Per-channel mean of the possible values used in normalization.
+   //
+   // <Codegen usage>:
+   // Apply normalization to input tensors accordingly.
+   mean:[float];
+
+   // Per-channel standard dev. of the possible values used in normalization.
+   //
+   // <Codegen usage>:
+   // Apply normalization to input tensors accordingly.
+   std:[float];
+ }
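+ // Editorial illustration (not part of the schema): with mean = [127.5] and
+ // std = [127.5] as above, a pixel value of 0 normalizes to
+ // (0 - 127.5) / 127.5 = -1.0, and 255 normalizes to
+ // (255 - 127.5) / 127.5 = 1.0, giving the float range [-1, 1].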
+
+ // The different possible score transforms to apply to uncalibrated scores
+ // before applying score calibration.
+ enum ScoreTransformationType : byte {
+   // Identity function: g(x) = x.
+   IDENTITY = 0,
+   // Log function: g(x) = log(x).
+   LOG = 1,
+   // Inverse logistic function: g(x) = log(x) - log(1-x).
+   INVERSE_LOGISTIC = 2,
+ }
+
+ // Options to perform score calibration on an output tensor through sigmoid
+ // functions. One of the main purposes of score calibration is to make scores
+ // across classes comparable, so that a common threshold can be used for all
+ // output classes. This is meant for models producing class predictions as
+ // output, e.g. image classification or detection models.
+ //
+ // For each index in the output tensor, this applies:
+ // * `f(x) = scale / (1 + e^-(slope*g(x)+offset))` if `x > min_score` or if no
+ //   `min_score` has been specified,
+ // * `f(x) = default_score` otherwise or if no scale, slope and offset have
+ //   been specified.
+ // Where:
+ // * scale, slope, offset and (optional) min_score are index-specific
+ //   parameters,
+ // * g(x) is an index-independent transform among those defined in
+ //   ScoreTransformationType,
+ // * default_score is an index-independent parameter.
+ // An AssociatedFile with type TENSOR_AXIS_SCORE_CALIBRATION specifying the
+ // index-specific parameters must be associated with the corresponding
+ // TensorMetadata for score calibration to be applied.
+ //
+ // See the example score calibration file used in image classification [1].
+ // TODO: Add github example link.
+ table ScoreCalibrationOptions {
+   // The function to use for transforming the uncalibrated score before
+   // applying score calibration.
+   score_transformation:ScoreTransformationType;
+
+   // The default calibrated score to apply if the uncalibrated score is
+   // below min_score or if no parameters were specified for a given index.
+   default_score:float;
+ }
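+ // Editorial illustration (not part of the schema): with
+ // score_transformation = IDENTITY and hypothetical index-specific parameters
+ // scale = 1.0, slope = 1.0, offset = 0.0, an uncalibrated score x = 0.0
+ // calibrates to f(0.0) = 1.0 / (1 + e^-(1.0 * 0.0 + 0.0)) = 0.5.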
+
+ // Performs thresholding on output tensor values, in order to filter out
+ // low-confidence results.
+ table ScoreThresholdingOptions {
+   // The recommended global threshold below which results are considered
+   // low-confidence and should be filtered out.
+   global_score_threshold:float;
+ }
+
+ // Performs Bert tokenization as in tf.text.BertTokenizer
+ // (https://github.com/tensorflow/text/blob/3599f6fcd2b780a2dc413b90fb9315464f10b314/docs/api_docs/python/text/BertTokenizer.md)
+ // Added in: 1.1.0
+ table BertTokenizerOptions {
+   // The vocabulary files used in the BertTokenizer.
+   vocab_file:[AssociatedFile];
+ }
+
+ // Performs SentencePiece tokenization as in tf.text.SentencepieceTokenizer
+ // (https://github.com/tensorflow/text/blob/3599f6fcd2b780a2dc413b90fb9315464f10b314/docs/api_docs/python/text/SentencepieceTokenizer.md).
+ // Added in: 1.1.0
+ table SentencePieceTokenizerOptions {
+   // The SentencePiece model files used in the SentencePieceTokenizer.
+   sentencePiece_model:[AssociatedFile];
+
+   // The optional vocabulary model files used in the SentencePieceTokenizer.
+   vocab_file:[AssociatedFile];
+ }
+
+ // Splits strings by the occurrences of delim_regex_pattern and converts the
+ // tokens into ids. For example, given
+ //   delim_regex_pattern: "\W+",
+ //   string: "Words, words, words.",
+ // the tokens after split are: "Words", "words", "words", "".
+ // And then the tokens can be converted into ids according to the vocab_file.
+ // Added in: 1.2.1
+ table RegexTokenizerOptions {
+   delim_regex_pattern:string;
+   // The vocabulary files used to convert these tokens into ids.
+   vocab_file:[AssociatedFile];
+ }
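+ // Editorial illustration (not part of the schema): with a hypothetical vocab
+ // file mapping "Words" -> 0 and "words" -> 1, the non-empty tokens from the
+ // example above, "Words", "words", "words", would map to ids 0, 1, 1.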
+
+ // Options that are used when processing the tensor.
+ union ProcessUnitOptions {
+   NormalizationOptions,
+   ScoreCalibrationOptions,
+   ScoreThresholdingOptions,
+   // Added in: 1.1.0
+   BertTokenizerOptions,
+   // Added in: 1.1.0
+   SentencePieceTokenizerOptions,
+   // Added in: 1.2.1
+   RegexTokenizerOptions
+ }
+
+ // A process unit that is used to process the tensor out-of-graph.
+ table ProcessUnit {
+   options:ProcessUnitOptions;
+ }
+
+ // Statistics to describe a tensor.
+ table Stats {
+   // Max and min are not currently used in tflite.support codegen. They mainly
+   // serve as references for users to better understand the model. They can
+   // also be used to validate model pre/post processing results.
+   // If there is only one value in max or min, we'll propagate the value to
+   // all channels.
+
+   // Per-channel maximum value of the tensor.
+   max:[float];
+
+   // Per-channel minimum value of the tensor.
+   min:[float];
+ }
+
+ // Metadata of a group of tensors. It may contain several tensors that will be
+ // grouped together in codegen. For example, the TFLite object detection model
+ // example (https://www.tensorflow.org/lite/models/object_detection/overview)
+ // has four outputs: classes, scores, bounding boxes, and number of detections.
+ // If the four outputs are bundled together using TensorGroup (for example,
+ // named as "detection result"), the codegen tool will generate the class,
+ // `DetectionResult`, which contains the class, score, and bounding box. And the
+ // outputs of the model will be converted to a list of `DetectionResult` objects
+ // and the number of detections. Note that the number of detections is a single
+ // number, and is therefore not grouped into the list of `DetectionResult`.
+ // Added in: 1.2.0
+ table TensorGroup {
+   // Name of tensor group.
+   //
+   // <codegen usage>:
+   // Name of the joint class of the tensor group.
+   name:string;
+
+   // Names of the tensors to group together, corresponding to
+   // TensorMetadata.name.
+   //
+   // <codegen usage>:
+   // Determines which tensors will be added to this group. All tensors in the
+   // group should have the same number of elements specified by Content.range.
+   tensor_names:[string];
+ }
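+ // Editorial illustration (not part of the schema): the "detection result"
+ // group described above could be rendered in JSON as
+ //   { "name": "detection result",
+ //     "tensor_names": ["location", "category", "score"] }
+ // where the tensor names are hypothetical and must match TensorMetadata.name.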
+
+ // Detailed information of an input or output tensor.
+ table TensorMetadata {
+   // Name of the tensor.
+   //
+   // <Codegen usage>:
+   // The name of this tensor in the generated model interface.
+   name:string;
+
+   // A description of the tensor.
+   description:string;
+
+   // A list of names of the dimensions in this tensor. The length of
+   // dimension_names needs to match the number of dimensions in this tensor.
+   //
+   // <Codegen usage>:
+   // The name of each dimension in the generated model interface. See "Case 2"
+   // in the comments of Content.range.
+   dimension_names:[string];
+
+   // The content that represents this tensor.
+   //
+   // <Codegen usage>:
+   // Determines how to process this tensor. See each item in ContentProperties
+   // for the default process units that will be applied to the tensor.
+   content:Content;
+
+   // The process units that are used to process the tensor out-of-graph.
+   //
+   // <Codegen usage>:
+   // Contains the parameters of the default processing pipeline for each
+   // content type, such as the normalization parameters in all content types.
+   // See the items under ContentProperties for the details of the default
+   // processing pipeline.
+   process_units:[ProcessUnit];
+
+   // The statistics of the tensor values.
+   stats:Stats;
+
+   // A list of associated files of this tensor.
+   //
+   // <Codegen usage>:
+   // Contains processing parameters of this tensor, such as normalization.
+   associated_files:[AssociatedFile];
+ }
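+ // Editorial illustration (not part of the schema): a minimal TensorMetadata
+ // for an image input, rendered in JSON (names and values are hypothetical):
+ //   {
+ //     "name": "image",
+ //     "description": "Input image to be classified.",
+ //     "content": {
+ //       "content_properties_type": "ImageProperties",
+ //       "content_properties": { "color_space": "RGB" }
+ //     },
+ //     "process_units": [
+ //       { "options_type": "NormalizationOptions",
+ //         "options": { "mean": [127.5], "std": [127.5] } }
+ //     ],
+ //     "stats": { "max": [1.0], "min": [-1.0] }
+ //   }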
+
+ table CustomMetadata {
+   name:string;
+   data:[ubyte] (force_align: 16);
+ }
+
+ table SubGraphMetadata {
+   // Name of the subgraph.
+   //
+   // Note that, since TFLite only supports one subgraph at this moment, the
+   // Codegen tool will use the name in ModelMetadata in the generated model
+   // interface.
+   name:string;
+
+   // A description that explains details about what the subgraph does.
+   description:string;
+
+   // Metadata of all input tensors used in this subgraph. It matches exactly
+   // the input tensors specified by `SubGraph.inputs` in the TFLite
+   // schema.fbs file[2]. The number of `TensorMetadata` in the array should
+   // equal the number of indices in `SubGraph.inputs`.
+   //
+   // [2]: tensorflow/lite/schema/schema.fbs
+   // <Codegen usage>:
+   // Determines how to process the inputs.
+   input_tensor_metadata:[TensorMetadata];
+
+   // Metadata of all output tensors used in this subgraph. It matches exactly
+   // the output tensors specified by `SubGraph.outputs` in the TFLite
+   // schema.fbs file[2]. The number of `TensorMetadata` in the array should
+   // equal the number of indices in `SubGraph.outputs`.
+   //
+   // <Codegen usage>:
+   // Determines how to process the outputs.
+   output_tensor_metadata:[TensorMetadata];
+
+   // A list of associated files of this subgraph.
+   associated_files:[AssociatedFile];
+
+   // Input process units of the subgraph. Some models may have complex pre- and
+   // post-processing logic where the process units do not work on one tensor at
+   // a time, but rather in a way similar to a TFLite graph. For example, in the
+   // MobileBert model (https://www.tensorflow.org/lite/models/bert_qa/overview),
+   // the inputs are: ids / mask / segment ids;
+   // the outputs are: end logits / start logits.
+   // The preprocessing converts the query string and the context string to the
+   // model inputs, and the post-processing converts the model outputs to the
+   // answer string.
+   // Added in: 1.1.0
+   input_process_units:[ProcessUnit];
+
+   // Output process units of the subgraph.
+   // Added in: 1.1.0
+   output_process_units:[ProcessUnit];
+
+   // Metadata of all input tensor groups used in this subgraph.
+   //
+   // <codegen usage>:
+   // Bundles the corresponding elements of the underlying input tensors
+   // together into a class, and converts those individual tensors into a list
+   // of the class objects.
+   // Added in: 1.2.0
+   input_tensor_groups:[TensorGroup];
+
+   // Metadata of all output tensor groups used in this subgraph.
+   //
+   // <codegen usage>:
+   // Bundles the corresponding elements of the underlying output tensors
+   // together into a class, and converts those individual tensors into a list
+   // of the class objects.
+   // Added in: 1.2.0
+   output_tensor_groups:[TensorGroup];
+
+   // A list of custom metadata.
+   // Added in: 1.5.0.
+   custom_metadata:[CustomMetadata];
+ }
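+ // Editorial illustration (not part of the schema): a SubGraphMetadata
+ // skeleton for a detection model with one input and four outputs; the number
+ // of entries in each array must match SubGraph.inputs/outputs:
+ //   { "input_tensor_metadata": [ {...image tensor...} ],
+ //     "output_tensor_metadata": [ {...location...}, {...category...},
+ //                                 {...score...}, {...number of detections...} ] }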
+
+ table ModelMetadata {
+   // Name of the model.
+   //
+   // <Codegen usage>:
+   // The name of the model in the generated model interface.
+   name:string;
+
+   // Model description in schema.
+   description:string;
+
+   // Version of the model, as specified by the model creators.
+   version:string;
+
+   // Note that the minimum required TFLite runtime version that the model is
+   // compatible with has already been added as a metadata entry in the tflite
+   // schema. We'll decide later if we want to move it here, and keep it with
+   // other metadata entries.
+
+   // Metadata of all the subgraphs of the model. The 0th is assumed to be the
+   // main subgraph.
+   //
+   // <Codegen usage>:
+   // Determines how to process the inputs and outputs.
+   subgraph_metadata:[SubGraphMetadata];
+
+   // The person who created this model.
+   author:string;
+
+   // Licenses that may apply to this model.
+   license:string;
+
+   // A list of associated files of this model.
+   associated_files:[AssociatedFile];
+
+   // The minimum metadata parser version that can fully understand the fields
+   // in the metadata flatbuffer. The version is effectively the largest version
+   // number among the versions of all the fields populated and the smallest
+   // compatible version indicated by the file identifier.
+   //
+   // This field is automatically populated by the MetadataPopulator when
+   // the metadata is populated into a TFLite model.
+   min_parser_version:string;
+ }
+
+ root_type ModelMetadata;