mediapipe-nightly 0.0.0.post20231128__cp38-cp38-macosx_11_0_universal2.whl → 0.10.8.post20231206__cp38-cp38-macosx_11_0_universal2.whl

Sign up to get free protection for your applications and to get access to all the features.
mediapipe/__init__.py CHANGED
@@ -23,4 +23,4 @@ del modules
23
23
  del python
24
24
  del mediapipe
25
25
  del util
26
- __version__ = '0.0.0-20231128'
26
+ __version__ = '0.10.8-20231206'
@@ -125,7 +125,8 @@ def draw_landmarks(
125
125
  color=RED_COLOR),
126
126
  connection_drawing_spec: Union[DrawingSpec,
127
127
  Mapping[Tuple[int, int],
128
- DrawingSpec]] = DrawingSpec()):
128
+ DrawingSpec]] = DrawingSpec(),
129
+ is_drawing_landmarks: bool = True):
129
130
  """Draws the landmarks and the connections on the image.
130
131
 
131
132
  Args:
@@ -142,6 +143,8 @@ def draw_landmarks(
142
143
  connections to the DrawingSpecs that specifies the connections' drawing
143
144
  settings such as color and line thickness. If this argument is explicitly
144
145
  set to None, no landmark connections will be drawn.
146
+ is_drawing_landmarks: Whether to draw landmarks. If set false, skip drawing
147
+ landmarks, only contours will be drawn.
145
148
 
146
149
  Raises:
147
150
  ValueError: If one of the followings:
@@ -181,7 +184,7 @@ def draw_landmarks(
181
184
  drawing_spec.thickness)
182
185
  # Draws landmark points after finishing the connection lines, which is
183
186
  # aesthetically better.
184
- if landmark_drawing_spec:
187
+ if is_drawing_landmarks and landmark_drawing_spec:
185
188
  for idx, landmark_px in idx_to_coordinates.items():
186
189
  drawing_spec = landmark_drawing_spec[idx] if isinstance(
187
190
  landmark_drawing_spec, Mapping) else landmark_drawing_spec
@@ -0,0 +1,33 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import builder as _builder
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import symbol_database as _symbol_database
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+ from mediapipe.tasks.cc.core.proto import base_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_core_dot_proto_dot_base__options__pb2
15
+ from mediapipe.tasks.cc.vision.face_detector.proto import face_detector_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_face__detector_dot_proto_dot_face__detector__graph__options__pb2
16
+ from mediapipe.tasks.cc.vision.face_landmarker.proto import face_landmarks_detector_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_face__landmarker_dot_proto_dot_face__landmarks__detector__graph__options__pb2
17
+ from mediapipe.tasks.cc.vision.hand_landmarker.proto import hand_landmarks_detector_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_hand__landmarker_dot_proto_dot_hand__landmarks__detector__graph__options__pb2
18
+ from mediapipe.tasks.cc.vision.hand_landmarker.proto import hand_roi_refinement_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_hand__landmarker_dot_proto_dot_hand__roi__refinement__graph__options__pb2
19
+ from mediapipe.tasks.cc.vision.pose_detector.proto import pose_detector_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_pose__detector_dot_proto_dot_pose__detector__graph__options__pb2
20
+ from mediapipe.tasks.cc.vision.pose_landmarker.proto import pose_landmarks_detector_graph_options_pb2 as mediapipe_dot_tasks_dot_cc_dot_vision_dot_pose__landmarker_dot_proto_dot_pose__landmarks__detector__graph__options__pb2
21
+
22
+
23
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n[mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options.proto\x12\x30mediapipe.tasks.vision.holistic_landmarker.proto\x1a\x30mediapipe/tasks/cc/core/proto/base_options.proto\x1aOmediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options.proto\x1a[mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options.proto\x1a[mediapipe/tasks/cc/vision/hand_landmarker/proto/hand_landmarks_detector_graph_options.proto\x1aWmediapipe/tasks/cc/vision/hand_landmarker/proto/hand_roi_refinement_graph_options.proto\x1aOmediapipe/tasks/cc/vision/pose_detector/proto/pose_detector_graph_options.proto\x1a[mediapipe/tasks/cc/vision/pose_landmarker/proto/pose_landmarks_detector_graph_options.proto\"\xad\x06\n\x1eHolisticLandmarkerGraphOptions\x12=\n\x0c\x62\x61se_options\x18\x01 \x01(\x0b\x32\'.mediapipe.tasks.core.proto.BaseOptions\x12~\n%hand_landmarks_detector_graph_options\x18\x02 \x01(\x0b\x32O.mediapipe.tasks.vision.hand_landmarker.proto.HandLandmarksDetectorGraphOptions\x12v\n!hand_roi_refinement_graph_options\x18\x03 \x01(\x0b\x32K.mediapipe.tasks.vision.hand_landmarker.proto.HandRoiRefinementGraphOptions\x12i\n\x1b\x66\x61\x63\x65_detector_graph_options\x18\x04 \x01(\x0b\x32\x44.mediapipe.tasks.vision.face_detector.proto.FaceDetectorGraphOptions\x12~\n%face_landmarks_detector_graph_options\x18\x05 \x01(\x0b\x32O.mediapipe.tasks.vision.face_landmarker.proto.FaceLandmarksDetectorGraphOptions\x12i\n\x1bpose_detector_graph_options\x18\x06 \x01(\x0b\x32\x44.mediapipe.tasks.vision.pose_detector.proto.PoseDetectorGraphOptions\x12~\n%pose_landmarks_detector_graph_options\x18\x07 \x01(\x0b\x32O.mediapipe.tasks.vision.pose_landmarker.proto.PoseLandmarksDetectorGraphOptionsBa\n:com.google.mediapipe.tasks.vision.holisticlandmarker.protoB#HolisticLandmarkerGraphOptionsProtob\x06proto3')
24
+
25
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
26
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.vision.holistic_landmarker.proto.holistic_landmarker_graph_options_pb2', globals())
27
+ if _descriptor._USE_C_DESCRIPTORS == False:
28
+
29
+ DESCRIPTOR._options = None
30
+ DESCRIPTOR._serialized_options = b'\n:com.google.mediapipe.tasks.vision.holisticlandmarker.protoB#HolisticLandmarkerGraphOptionsProto'
31
+ _HOLISTICLANDMARKERGRAPHOPTIONS._serialized_start=726
32
+ _HOLISTICLANDMARKERGRAPHOPTIONS._serialized_end=1539
33
+ # @@protoc_insertion_point(module_scope)
@@ -0,0 +1,28 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import builder as _builder
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import symbol_database as _symbol_database
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+ from mediapipe.framework.formats import classification_pb2 as mediapipe_dot_framework_dot_formats_dot_classification__pb2
15
+ from mediapipe.framework.formats import landmark_pb2 as mediapipe_dot_framework_dot_formats_dot_landmark__pb2
16
+
17
+
18
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nImediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result.proto\x12\x30mediapipe.tasks.vision.holistic_landmarker.proto\x1a\x30mediapipe/framework/formats/classification.proto\x1a*mediapipe/framework/formats/landmark.proto\"\xb7\x03\n\x0eHolisticResult\x12\x39\n\x0epose_landmarks\x18\x01 \x01(\x0b\x32!.mediapipe.NormalizedLandmarkList\x12\x35\n\x14pose_world_landmarks\x18\x07 \x01(\x0b\x32\x17.mediapipe.LandmarkList\x12>\n\x13left_hand_landmarks\x18\x02 \x01(\x0b\x32!.mediapipe.NormalizedLandmarkList\x12?\n\x14right_hand_landmarks\x18\x03 \x01(\x0b\x32!.mediapipe.NormalizedLandmarkList\x12\x39\n\x0e\x66\x61\x63\x65_landmarks\x18\x04 \x01(\x0b\x32!.mediapipe.NormalizedLandmarkList\x12\x37\n\x10\x66\x61\x63\x65_blendshapes\x18\x06 \x01(\x0b\x32\x1d.mediapipe.ClassificationList\x12>\n\x13\x61uxiliary_landmarks\x18\x05 \x01(\x0b\x32!.mediapipe.NormalizedLandmarkListBK\n4com.google.mediapipe.tasks.vision.holisticlandmarkerB\x13HolisticResultProtob\x06proto3')
19
+
20
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
21
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.vision.holistic_landmarker.proto.holistic_result_pb2', globals())
22
+ if _descriptor._USE_C_DESCRIPTORS == False:
23
+
24
+ DESCRIPTOR._options = None
25
+ DESCRIPTOR._serialized_options = b'\n4com.google.mediapipe.tasks.vision.holisticlandmarkerB\023HolisticResultProto'
26
+ _HOLISTICRESULT._serialized_start=222
27
+ _HOLISTICRESULT._serialized_end=661
28
+ # @@protoc_insertion_point(module_scope)
@@ -0,0 +1,70 @@
1
+ # Copyright 2023 The MediaPipe Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Benchmark utils for MediaPipe Tasks."""
15
+
16
+ import os
17
+ import numpy as np
18
+
19
+
20
+ def nth_percentile(inference_times, percentile):
21
+ """Calculate the nth percentile of the inference times."""
22
+ return np.percentile(inference_times, percentile)
23
+
24
+
25
+ def average(inference_times):
26
+ """Calculate the average of the inference times."""
27
+ return np.mean(inference_times)
28
+
29
+
30
+ def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str:
31
+ """Determine the test data path.
32
+
33
+ Args:
34
+ test_srcdir: The path to the test source directory.
35
+ file_or_dirname_path: The path to the file or directory.
36
+
37
+ Returns:
38
+ The full test data path.
39
+ """
40
+ for directory, subdirs, files in os.walk(test_srcdir):
41
+ for f in subdirs + files:
42
+ path = os.path.join(directory, f)
43
+ if path.endswith(file_or_dirname_path):
44
+ return path
45
+ raise ValueError(
46
+ "No %s in test directory: %s." % (file_or_dirname_path, test_srcdir)
47
+ )
48
+
49
+
50
+ def get_model_path(custom_model, default_model_path):
51
+ """Determine the model path based on the existence of the custom model.
52
+
53
+ Args:
54
+ custom_model: The path to the custom model provided by the user.
55
+ default_model_path: The path to the default model.
56
+
57
+ Returns:
58
+ The path to the model to be used.
59
+ """
60
+ if custom_model is not None and os.path.exists(custom_model):
61
+ print(f"Using provided model: {custom_model}")
62
+ return custom_model
63
+ else:
64
+ if custom_model is not None:
65
+ print(
66
+ f"Warning: Provided model '{custom_model}' not found. "
67
+ "Using default model instead."
68
+ )
69
+ print(f"Using default model: {default_model_path}")
70
+ return default_model_path
@@ -0,0 +1,99 @@
1
+ # Copyright 2023 The MediaPipe Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """MediaPipe vision benchmarker."""
15
+
16
+ import argparse
17
+
18
+ from mediapipe.tasks.python.benchmark import benchmark_utils as bu
19
+ from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
20
+ from mediapipe.tasks.python.core import base_options
21
+
22
+
23
+ def benchmarker(benchmark_function, default_model_name):
24
+ """Executes a benchmarking process using a specified function and model.
25
+
26
+ Args:
27
+ benchmark_function: A callable function to be executed for benchmarking.
28
+ This function should contain the logic of the task to be benchmarked and
29
+ should be capable of utilizing a model specified by its name.
30
+ default_model_name: The name or path of the default model to be used in
31
+ the benchmarking process. This is useful when the benchmarking function
32
+ requires a model and no other model is explicitly specified.
33
+ """
34
+ parser = argparse.ArgumentParser(
35
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
36
+ )
37
+
38
+ parser.add_argument(
39
+ '--mode',
40
+ help='Benchmarking mode (e.g., "nth_percentile").',
41
+ required=False,
42
+ default='nth_percentile',
43
+ )
44
+ parser.add_argument('--model', help='Path to the model.', default=None)
45
+ parser.add_argument(
46
+ '--iterations',
47
+ help='Number of iterations for benchmarking.',
48
+ type=int,
49
+ default=100,
50
+ )
51
+ parser.add_argument(
52
+ '--percentile',
53
+ help='Percentile for benchmarking statistics.',
54
+ type=float,
55
+ default=95.0,
56
+ )
57
+
58
+ args = parser.parse_args()
59
+
60
+ # Get the model path
61
+ default_model_path = bu.get_test_data_path(
62
+ base_vision_benchmark_api.VISION_TEST_DATA_DIR, default_model_name
63
+ )
64
+ model_path = bu.get_model_path(args.model, default_model_path)
65
+
66
+ # Define a mapping of modes to their respective function argument lists
67
+ mode_args_mapping = {
68
+ 'nth_percentile': {'percentile': args.percentile},
69
+ 'average': {},
70
+ }
71
+
72
+ # Check if the mode is supported and get the argument dictionary
73
+ if args.mode not in mode_args_mapping:
74
+ raise ValueError(f'Unsupported benchmarking mode: {args.mode}')
75
+
76
+ mode_args = mode_args_mapping[args.mode]
77
+
78
+ # Run the benchmark for both CPU and GPU and calculate results based on mode
79
+ results = {}
80
+ for delegate_type in [
81
+ base_options.BaseOptions.Delegate.CPU,
82
+ base_options.BaseOptions.Delegate.GPU,
83
+ ]:
84
+ inference_times = benchmark_function(
85
+ model_path, args.iterations, delegate_type
86
+ )
87
+
88
+ # Calculate the benchmark result based on the mode
89
+ if args.mode == 'nth_percentile':
90
+ results[delegate_type] = bu.nth_percentile(inference_times, **mode_args)
91
+ elif args.mode == 'average':
92
+ results[delegate_type] = bu.average(inference_times)
93
+
94
+ # Report benchmarking results
95
+ for delegate_type, result in results.items():
96
+ print(
97
+ f'Inference time {delegate_type} {mode_args_mapping[args.mode]}: '
98
+ f'{result:.6f} milliseconds'
99
+ )
@@ -0,0 +1,14 @@
1
+ """Copyright 2023 The MediaPipe Authors.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ """
@@ -0,0 +1,40 @@
1
+ # Copyright 2023 The MediaPipe Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """MediaPipe vision benchmark base api."""
15
+ import time
16
+
17
+ VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
18
+
19
+
20
+ def benchmark_task(func, image, n_iterations):
21
+ """Collect inference times for a given task after benchmarking.
22
+
23
+ Args:
24
+ func: The task function used for benchmarking.
25
+ image: The input MediaPipe Image.
26
+ n_iterations: Number of iterations to run the benchmark.
27
+
28
+ Returns:
29
+ List of inference times in milliseconds.
30
+ """
31
+ inference_times = []
32
+
33
+ for _ in range(n_iterations):
34
+ start_time_ns = time.time_ns()
35
+ # Run the method for the task (e.g., classify)
36
+ func(image)
37
+ end_time_ns = time.time_ns()
38
+ inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
39
+
40
+ return inference_times
@@ -13,7 +13,7 @@ _sym_db = _symbol_database.Default()
13
13
 
14
14
 
15
15
 
16
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6mediapipe/util/analytics/mediapipe_logging_enums.proto\x12\x14logs.proto.mediapipe*H\n\x08Platform\x12\x14\n\x10PLATFORM_UNKNOWN\x10\x00\x12\x14\n\x10PLATFORM_ANDROID\x10\x01\x12\x10\n\x0cPLATFORM_IOS\x10\x02*\xf2\x03\n\x0cSolutionName\x12\x14\n\x10SOLUTION_UNKNOWN\x10\x00\x12\x1a\n\x16SOLUTION_FACEDETECTION\x10\x01\x12\x15\n\x11SOLUTION_FACEMESH\x10\x02\x12\x12\n\x0eSOLUTION_HANDS\x10\x03\x12\x19\n\x15TASKS_AUDIOCLASSIFIER\x10\x04\x12\x17\n\x13TASKS_AUDIOEMBEDDER\x10\x05\x12\x18\n\x14TASKS_TEXTCLASSIFIER\x10\x06\x12\x16\n\x12TASKS_TEXTEMBEDDER\x10\x07\x12\x1b\n\x17TASKS_GESTURERECOGNIZER\x10\x08\x12\x16\n\x12TASKS_HANDDETECTOR\x10\t\x12\x18\n\x14TASKS_HANDLANDMARKER\x10\n\x12\x19\n\x15TASKS_IMAGECLASSIFIER\x10\x0b\x12\x17\n\x13TASKS_IMAGEEMBEDDER\x10\x0c\x12\x18\n\x14TASKS_IMAGESEGMENTER\x10\r\x12\x18\n\x14TASKS_OBJECTDETECTOR\x10\x0e\x12\x16\n\x12TASKS_FACEDETECTOR\x10\x0f\x12\x18\n\x14TASKS_FACELANDMARKER\x10\x10\x12\x16\n\x12TASKS_FACESTYLIZER\x10\x11\x12\x1e\n\x1aTASKS_INTERACTIVESEGMENTER\x10\x12*R\n\tEventName\x12\x0f\n\x0b\x45VENT_START\x10\x00\x12\x14\n\x10\x45VENT_INVOCATONS\x10\x01\x12\r\n\tEVENT_END\x10\x02\x12\x0f\n\x0b\x45VENT_ERROR\x10\x03*\xe4\x01\n\x0cSolutionMode\x12\x10\n\x0cMODE_UNKNOWN\x10\x00\x12\x0e\n\nMODE_VIDEO\x10\x01\x12\x15\n\x11MODE_STATIC_IMAGE\x10\x02\x12\x1a\n\x16MODE_TASKS_UNSPECIFIED\x10\n\x12\x14\n\x10MODE_TASKS_IMAGE\x10\x0b\x12\x14\n\x10MODE_TASKS_VIDEO\x10\x0c\x12\x1a\n\x16MODE_TASKS_LIVE_STREAM\x10\r\x12\x1a\n\x16MODE_TASKS_AUDIO_CLIPS\x10\x0e\x12\x1b\n\x17MODE_TASKS_AUDIO_STREAM\x10\x0f*\x8f\x01\n\rInputDataType\x12\x16\n\x12INPUT_TYPE_UNKNOWN\x10\x00\x12\x18\n\x14INPUT_TYPE_CPU_IMAGE\x10\x01\x12\x18\n\x14INPUT_TYPE_GPU_IMAGE\x10\x02\x12\x18\n\x14INPUT_TYPE_TASKS_CPU\x10\x03\x12\x18\n\x14INPUT_TYPE_TASKS_GPU\x10\x04*i\n\tErrorCode\x12\x11\n\rERROR_UNKNOWN\x10\x00\x12\x1b\n\x17\x45RROR_UNSUPPORTED_INPUT\x10\x01\x12\x1c\n\x18\x45RROR_UNSUPPORTED_OUTPUT\x10\x02\x12\
x0e\n\nERROR_INIT\x10\x03\x42\x38\n\x1a\x63om.google.mediapipe.protoB\x1aMediaPipeLoggingEnumsProto')
16
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6mediapipe/util/analytics/mediapipe_logging_enums.proto\x12\x14logs.proto.mediapipe*H\n\x08Platform\x12\x14\n\x10PLATFORM_UNKNOWN\x10\x00\x12\x14\n\x10PLATFORM_ANDROID\x10\x01\x12\x10\n\x0cPLATFORM_IOS\x10\x02*\xaa\x04\n\x0cSolutionName\x12\x14\n\x10SOLUTION_UNKNOWN\x10\x00\x12\x1a\n\x16SOLUTION_FACEDETECTION\x10\x01\x12\x15\n\x11SOLUTION_FACEMESH\x10\x02\x12\x12\n\x0eSOLUTION_HANDS\x10\x03\x12\x19\n\x15TASKS_AUDIOCLASSIFIER\x10\x04\x12\x17\n\x13TASKS_AUDIOEMBEDDER\x10\x05\x12\x18\n\x14TASKS_TEXTCLASSIFIER\x10\x06\x12\x16\n\x12TASKS_TEXTEMBEDDER\x10\x07\x12\x1b\n\x17TASKS_GESTURERECOGNIZER\x10\x08\x12\x16\n\x12TASKS_HANDDETECTOR\x10\t\x12\x18\n\x14TASKS_HANDLANDMARKER\x10\n\x12\x19\n\x15TASKS_IMAGECLASSIFIER\x10\x0b\x12\x17\n\x13TASKS_IMAGEEMBEDDER\x10\x0c\x12\x18\n\x14TASKS_IMAGESEGMENTER\x10\r\x12\x18\n\x14TASKS_OBJECTDETECTOR\x10\x0e\x12\x16\n\x12TASKS_FACEDETECTOR\x10\x0f\x12\x18\n\x14TASKS_FACELANDMARKER\x10\x10\x12\x16\n\x12TASKS_FACESTYLIZER\x10\x11\x12\x1e\n\x1aTASKS_INTERACTIVESEGMENTER\x10\x12\x12\x18\n\x14TASKS_IMAGEGENERATOR\x10\x13\x12\x1c\n\x18TASKS_HOLISTICLANDMARKER\x10\x14*R\n\tEventName\x12\x0f\n\x0b\x45VENT_START\x10\x00\x12\x14\n\x10\x45VENT_INVOCATONS\x10\x01\x12\r\n\tEVENT_END\x10\x02\x12\x0f\n\x0b\x45VENT_ERROR\x10\x03*\xe4\x01\n\x0cSolutionMode\x12\x10\n\x0cMODE_UNKNOWN\x10\x00\x12\x0e\n\nMODE_VIDEO\x10\x01\x12\x15\n\x11MODE_STATIC_IMAGE\x10\x02\x12\x1a\n\x16MODE_TASKS_UNSPECIFIED\x10\n\x12\x14\n\x10MODE_TASKS_IMAGE\x10\x0b\x12\x14\n\x10MODE_TASKS_VIDEO\x10\x0c\x12\x1a\n\x16MODE_TASKS_LIVE_STREAM\x10\r\x12\x1a\n\x16MODE_TASKS_AUDIO_CLIPS\x10\x0e\x12\x1b\n\x17MODE_TASKS_AUDIO_STREAM\x10\x0f*\x8f\x01\n\rInputDataType\x12\x16\n\x12INPUT_TYPE_UNKNOWN\x10\x00\x12\x18\n\x14INPUT_TYPE_CPU_IMAGE\x10\x01\x12\x18\n\x14INPUT_TYPE_GPU_IMAGE\x10\x02\x12\x18\n\x14INPUT_TYPE_TASKS_CPU\x10\x03\x12\x18\n\x14INPUT_TYPE_TASKS_GPU\x10\x04*i\n\tErrorCode\x12\x11\n\rERROR_UNKNOWN\x10\x00\x12\x1b\n\x17
\x45RROR_UNSUPPORTED_INPUT\x10\x01\x12\x1c\n\x18\x45RROR_UNSUPPORTED_OUTPUT\x10\x02\x12\x0e\n\nERROR_INIT\x10\x03\x42\x38\n\x1a\x63om.google.mediapipe.protoB\x1aMediaPipeLoggingEnumsProto')
17
17
 
18
18
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
19
19
  _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.util.analytics.mediapipe_logging_enums_pb2', globals())
@@ -24,13 +24,13 @@ if _descriptor._USE_C_DESCRIPTORS == False:
24
24
  _PLATFORM._serialized_start=80
25
25
  _PLATFORM._serialized_end=152
26
26
  _SOLUTIONNAME._serialized_start=155
27
- _SOLUTIONNAME._serialized_end=653
28
- _EVENTNAME._serialized_start=655
29
- _EVENTNAME._serialized_end=737
30
- _SOLUTIONMODE._serialized_start=740
31
- _SOLUTIONMODE._serialized_end=968
32
- _INPUTDATATYPE._serialized_start=971
33
- _INPUTDATATYPE._serialized_end=1114
34
- _ERRORCODE._serialized_start=1116
35
- _ERRORCODE._serialized_end=1221
27
+ _SOLUTIONNAME._serialized_end=709
28
+ _EVENTNAME._serialized_start=711
29
+ _EVENTNAME._serialized_end=793
30
+ _SOLUTIONMODE._serialized_start=796
31
+ _SOLUTIONMODE._serialized_end=1024
32
+ _INPUTDATATYPE._serialized_start=1027
33
+ _INPUTDATATYPE._serialized_end=1170
34
+ _ERRORCODE._serialized_start=1172
35
+ _ERRORCODE._serialized_end=1277
36
36
  # @@protoc_insertion_point(module_scope)
mediapipe/version.txt CHANGED
@@ -1 +1 @@
1
- 0.0.0-20231128
1
+ 0.10.8-20231206
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mediapipe-nightly
3
- Version: 0.0.0.post20231128
3
+ Version: 0.10.8.post20231206
4
4
  Summary: MediaPipe is the simplest way for researchers and developers to build world-class ML solutions and applications for mobile, edge, cloud and the web.
5
5
  Home-page: https://github.com/google/mediapipe
6
6
  Author: The MediaPipe Authors
@@ -1,13 +1,17 @@
1
- mediapipe_nightly-0.0.0.post20231128.dist-info/RECORD,,
2
- mediapipe_nightly-0.0.0.post20231128.dist-info/LICENSE,sha256=hwfu8FM5h-_FsVXWR2HutuIHk_ULm9Gmja0c9HGdDtg,12331
3
- mediapipe_nightly-0.0.0.post20231128.dist-info/WHEEL,sha256=ne9oAesYhmqqzZoZWVnfbfuFiLIE1hGPUf33054jNWs,109
4
- mediapipe_nightly-0.0.0.post20231128.dist-info/top_level.txt,sha256=LG-epD1oIiiHFRqLp--7jacjB3dbx2RfMcLYjCIhmxU,175
5
- mediapipe_nightly-0.0.0.post20231128.dist-info/METADATA,sha256=uZadee-ImTHttTHGen-tFx2hoFUhN6MzvMxlA_i6gRA,9651
6
- mediapipe/__init__.py,sha256=6hDlvAZzAIUilMSwUTHAkka4CpGyz7hECDwMrjv-jhA,814
1
+ mediapipe_nightly-0.10.8.post20231206.dist-info/RECORD,,
2
+ mediapipe_nightly-0.10.8.post20231206.dist-info/LICENSE,sha256=hwfu8FM5h-_FsVXWR2HutuIHk_ULm9Gmja0c9HGdDtg,12331
3
+ mediapipe_nightly-0.10.8.post20231206.dist-info/WHEEL,sha256=ne9oAesYhmqqzZoZWVnfbfuFiLIE1hGPUf33054jNWs,109
4
+ mediapipe_nightly-0.10.8.post20231206.dist-info/top_level.txt,sha256=LG-epD1oIiiHFRqLp--7jacjB3dbx2RfMcLYjCIhmxU,175
5
+ mediapipe_nightly-0.10.8.post20231206.dist-info/METADATA,sha256=wkcIyI7QsprzKkhIHH3GH4CojIM9UmXc6dTTEpWkTfo,9652
6
+ mediapipe/__init__.py,sha256=jL_N9MEB98tYKTIo9iKpfx5Vez6okQGN6K-5P1cEIx4,815
7
7
  mediapipe/tasks/__init__.py,sha256=sVJS2p8J2PNVl8DLRPVY6KLpHenP_z3QVPRU0x_iL5g,571
8
8
  mediapipe/tasks/python/__init__.py,sha256=i-0yWpv_VgAcZeBFQZiLlgIb0FuFb1fHdT--0EqLCAU,838
9
9
  mediapipe/tasks/python/benchmark/__init__.py,sha256=epEucluzX0HinwBZoS7Tgb19j_qgfTuBf-vBkqemch8,587
10
+ mediapipe/tasks/python/benchmark/benchmark_utils.py,sha256=5qbqGxxYlJJQLJzbW0cwMCcGl4c8ZfKL3rNmm7xMVAE,2241
11
+ mediapipe/tasks/python/benchmark/vision/benchmark.py,sha256=gifRumSkesmXVU51GHRct-UYf8S9Dn2isD28Aw7BClQ,3491
10
12
  mediapipe/tasks/python/benchmark/vision/__init__.py,sha256=epEucluzX0HinwBZoS7Tgb19j_qgfTuBf-vBkqemch8,587
13
+ mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py,sha256=Yqqje22r4m0pp24m8682xc4kqtZkMlx4VF6zxOiq2b8,1331
14
+ mediapipe/tasks/python/benchmark/vision/core/__init__.py,sha256=ZxHWTuEeRH77CDVcgDbCs5H-B9OomxX7oWZ3YGxG8VM,571
11
15
  mediapipe/tasks/python/core/task_info.py,sha256=WslckfmxBAe-btun_oY8DK6brANrV11kBazyXMKTvvc,5217
12
16
  mediapipe/tasks/python/core/__init__.py,sha256=KO3IKth2FV2gOlD70fltPxtixGnxyQJksZ-gfrZDQns,587
13
17
  mediapipe/tasks/python/core/base_options.py,sha256=im2EOZB98ORuBCzf91ONKT7S2CNgcb6hM5_YymQHHMY,4025
@@ -109,6 +113,10 @@ mediapipe/tasks/cc/vision/face_detector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5J
109
113
  mediapipe/tasks/cc/vision/face_detector/proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
110
114
  mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options_pb2.py,sha256=Ak8qUR9RGr0B-7pRn8dYJScmvikFJ0m242lrGSyyDDw,2811
111
115
  mediapipe/tasks/cc/vision/custom_ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
116
+ mediapipe/tasks/cc/vision/holistic_landmarker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
117
+ mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_result_pb2.py,sha256=zLH3nakReVS0gGKIG8acWP_PQg0uSVJNmZJf24fx8rI,2309
118
+ mediapipe/tasks/cc/vision/holistic_landmarker/proto/holistic_landmarker_graph_options_pb2.py,sha256=-sw1wfITTVIxpVVFno5R-RhHZEhrZo3BDa4DXbNReVA,4468
119
+ mediapipe/tasks/cc/vision/holistic_landmarker/proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
112
120
  mediapipe/tasks/cc/vision/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
113
121
  mediapipe/tasks/cc/vision/image_classifier/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
114
122
  mediapipe/tasks/cc/vision/image_classifier/proto/image_classifier_graph_options_pb2.py,sha256=0_fn1i1oD8TzaM9_vKiMD3IIdUt97KphK92FXh65bzU,3020
@@ -281,12 +289,12 @@ mediapipe/util/sequence/__init__.py,sha256=bglKd2k2C7QGT1i-vstURXPJX2Cvq9FO9opr6
281
289
  mediapipe/util/sequence/media_sequence_util_test.py,sha256=nuN9-HW3kw2kZbraCH76qhbaSyrPYZ3Fi_lXW9PvyZE,18180
282
290
  mediapipe/util/analytics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
283
291
  mediapipe/util/analytics/mediapipe_log_extension_pb2.py,sha256=k_ZCA-9K0W-3CfN4lNTn9PbdF6y8bLVJx6FfZ-AyMUY,4051
284
- mediapipe/util/analytics/mediapipe_logging_enums_pb2.py,sha256=GyCz96ru4uRy0rOZiaZ5oV-3FVH2eF6tBJC2bPPQa2U,3431
292
+ mediapipe/util/analytics/mediapipe_logging_enums_pb2.py,sha256=NuCgKqI2OyHQFbFRFBOYcH9EAz8hRDtUFGwsU7pOxWE,3521
285
293
  mediapipe/python/solution_base.py,sha256=45tTBPz43q8r7NNn0BWTnQ5rzs8lcXu9CBUfsRpDo5E,25846
286
294
  mediapipe/python/timestamp_test.py,sha256=oWKTZMsV586jH57OBV30rihcymETyGC29VbYURNLJQQ,2528
287
295
  mediapipe/python/image_frame_test.py,sha256=ZSjdE-an2t8i6MiA4_Xri91VMH5_CCx45fjhWUQptMY,8602
288
296
  mediapipe/python/__init__.py,sha256=xTuq4e55ofMUGUpV81ZbVzYHmjMmzt9dBOfS4ugFN1c,1428
289
- mediapipe/python/_framework_bindings.cpython-38-darwin.so,sha256=RBZ7VETU0kQr36wGa0V-dl9B852W0vD3hObHqxMzepA,77031347
297
+ mediapipe/python/_framework_bindings.cpython-38-darwin.so,sha256=Acyo2co3yqEsrRmlhZrdF6p1xPpdxP3g7lMgZuGX_fk,77046755
290
298
  mediapipe/python/solution_base_test.py,sha256=1u5Lo4aEUrMKj8Ha_34XMyKnI-3A1AvpaX3MCI0b2MM,15632
291
299
  mediapipe/python/packet_creator.py,sha256=34MBIMwykbZSLV-gdVYTzK8R07yzWV5yg1HuyRbS4d0,11414
292
300
  mediapipe/python/packet_getter.py,sha256=QkBxKCjXrOC6j2dJ5zcVNGaPB6zjurKztQpW5kOYLj4,4205
@@ -297,7 +305,7 @@ mediapipe/python/solutions/drawing_styles.py,sha256=u52h0xbgNPzzqAKhX7Yt2Nrxdloq
297
305
  mediapipe/python/solutions/face_detection_test.py,sha256=qgHrbywIuw39sF9UB5DYtkHWJIEPojeVdfsxR8YRyxI,3776
298
306
  mediapipe/python/solutions/hands.py,sha256=TuoT4PY-rlzy3CFEjqeN849MkKKjb0Px242Hl_eItRo,6132
299
307
  mediapipe/python/solutions/holistic_test.py,sha256=81K7WvWBNiDjAT__DDFgC7V2i5U3spng29oBRKi28-U,6746
300
- mediapipe/python/solutions/drawing_utils.py,sha256=9ksOboTarDT4hDt6RG3CFk3fGYGoGhbn5tfqFr8DeI0,13693
308
+ mediapipe/python/solutions/drawing_utils.py,sha256=aySJbChw3TusQJIVoYWol9_CSy3_rHO5zpahTlhmgX4,13884
301
309
  mediapipe/python/solutions/selfie_segmentation.py,sha256=YAoacNZn_JD3s5VYBjAbOopR4XQhOmWjIImZfH0pKyY,2774
302
310
  mediapipe/python/solutions/objectron.py,sha256=GU9oWOp-RHXchvS833n7_ifYYqEFMDBUlx1iIE9cHcQ,11653
303
311
  mediapipe/python/solutions/pose_test.py,sha256=TKhiHbrQroAoh7N8u1ttIapp0uiGn8lP60-NGnwpGRU,11418