google-cloud-video_intelligence-v1 0.7.0 → 0.9.0
- checksums.yaml +4 -4
- data/AUTHENTICATION.md +1 -1
- data/README.md +3 -3
- data/lib/google/cloud/video_intelligence/v1/rest.rb +37 -0
- data/lib/google/cloud/video_intelligence/v1/version.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/client.rb +6 -6
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/operations.rb +14 -16
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest/client.rb +395 -0
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest/operations.rb +814 -0
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest/service_stub.rb +106 -0
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest.rb +52 -0
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service.rb +7 -1
- data/lib/google/cloud/video_intelligence/v1.rb +7 -2
- data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb +27 -260
- data/proto_docs/google/api/client.rb +381 -0
- data/proto_docs/google/api/launch_stage.rb +71 -0
- data/proto_docs/google/protobuf/any.rb +7 -4
- data/proto_docs/google/protobuf/empty.rb +0 -2
- data/proto_docs/google/protobuf/timestamp.rb +1 -3
- data/proto_docs/google/rpc/status.rb +4 -2
- metadata +17 -8
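
The file list above is the substance of the 0.7.0 → 0.9.0 bump: a new `rest/` client, operations wrapper, and service stub alongside the existing gRPC classes, plus a regenerated protobuf descriptor. As a point of reference only, a minimal Gemfile pin that would pick up this release might look like the sketch below (project setup assumed, not part of the diff):

    # Gemfile -- assumed project setup, not part of this diff.
    source "https://rubygems.org"

    # 0.9.x includes the REST transport introduced in the hunks below.
    gem "google-cloud-video_intelligence-v1", "~> 0.9"
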
data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest/service_stub.rb
@@ -0,0 +1,106 @@
+# frozen_string_literal: true
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+require "google/cloud/videointelligence/v1/video_intelligence_pb"
+
+module Google
+  module Cloud
+    module VideoIntelligence
+      module V1
+        module VideoIntelligenceService
+          module Rest
+            ##
+            # REST service stub for the VideoIntelligenceService service.
+            # Service stub contains baseline method implementations
+            # including transcoding, making the REST call, and deserialing the response.
+            #
+            class ServiceStub
+              def initialize endpoint:, credentials:
+                # These require statements are intentionally placed here to initialize
+                # the REST modules only when it's required.
+                require "gapic/rest"
+
+                @client_stub = ::Gapic::Rest::ClientStub.new endpoint: endpoint, credentials: credentials,
+                                                             numeric_enums: true,
+                                                             raise_faraday_errors: false
+              end
+
+              ##
+              # Baseline implementation for the annotate_video REST call
+              #
+              # @param request_pb [::Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest]
+              #   A request object representing the call parameters. Required.
+              # @param options [::Gapic::CallOptions]
+              #   Overrides the default settings for this call, e.g, timeout, retries etc. Optional.
+              #
+              # @yield [result, operation] Access the result along with the TransportOperation object
+              # @yieldparam result [::Google::Longrunning::Operation]
+              # @yieldparam operation [::Gapic::Rest::TransportOperation]
+              #
+              # @return [::Google::Longrunning::Operation]
+              #   A result object deserialized from the server's reply
+              def annotate_video request_pb, options = nil
+                raise ::ArgumentError, "request must be provided" if request_pb.nil?
+
+                verb, uri, query_string_params, body = ServiceStub.transcode_annotate_video_request request_pb
+                query_string_params = if query_string_params.any?
+                                        query_string_params.to_h { |p| p.split("=", 2) }
+                                      else
+                                        {}
+                                      end
+
+                response = @client_stub.make_http_request(
+                  verb,
+                  uri: uri,
+                  body: body || "",
+                  params: query_string_params,
+                  options: options
+                )
+                operation = ::Gapic::Rest::TransportOperation.new response
+                result = ::Google::Longrunning::Operation.decode_json response.body, ignore_unknown_fields: true
+
+                yield result, operation if block_given?
+                result
+              end
+
+              ##
+              # @private
+              #
+              # GRPC transcoding helper method for the annotate_video REST call
+              #
+              # @param request_pb [::Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest]
+              #   A request object representing the call parameters. Required.
+              # @return [Array(String, [String, nil], Hash{String => String})]
+              #   Uri, Body, Query string parameters
+              def self.transcode_annotate_video_request request_pb
+                transcoder = Gapic::Rest::GrpcTranscoder.new
+                                                        .with_bindings(
+                                                          uri_method: :post,
+                                                          uri_template: "/v1/videos:annotate",
+                                                          body: "*",
+                                                          matches: []
+                                                        )
+                transcoder.transcode request_pb
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
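
The stub above is the piece that turns a protobuf request into a plain HTTP call: `annotate_video` binds to `POST /v1/videos:annotate` with the whole request as the JSON body. A minimal sketch of exercising the transcoding helper directly, assuming gapic-common and the generated protos are available (the bucket URI is a placeholder):

    # Sketch only: what the GRPC transcoding helper defined above produces.
    require "google/cloud/video_intelligence/v1/video_intelligence_service/rest/service_stub"

    request = Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest.new(
      input_uri: "gs://my-bucket/my-video.mp4",   # hypothetical input
      features:  [:LABEL_DETECTION]
    )

    stub_class = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::ServiceStub
    verb, uri, query_params, body = stub_class.transcode_annotate_video_request request

    verb         # => :post
    uri          # => "/v1/videos:annotate"
    query_params # => [] -- body: "*" sends every field in the JSON body instead
    body         # => JSON encoding of the whole AnnotateVideoRequest
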
data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/rest.rb
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+require "gapic/rest"
+require "gapic/config"
+require "gapic/config/method"
+
+require "google/cloud/video_intelligence/v1/version"
+
+require "google/cloud/video_intelligence/v1/video_intelligence_service/credentials"
+require "google/cloud/video_intelligence/v1/video_intelligence_service/rest/operations"
+require "google/cloud/video_intelligence/v1/video_intelligence_service/rest/client"
+
+module Google
+  module Cloud
+    module VideoIntelligence
+      module V1
+        ##
+        # Service that implements the Video Intelligence API.
+        #
+        # To load this service and instantiate a REST client:
+        #
+        #     require "google/cloud/video_intelligence/v1/video_intelligence_service/rest"
+        #     client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::Client.new
+        #
+        module VideoIntelligenceService
+          # Client for the REST transport
+          module Rest
+          end
+        end
+      end
+    end
+  end
+end
+
+helper_path = ::File.join __dir__, "rest", "helpers.rb"
+require "google/cloud/video_intelligence/v1/video_intelligence_service/rest/helpers" if ::File.file? helper_path
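
This entry point is what applications require to get the new transport. A short usage sketch, assuming Application Default Credentials and a readable gs:// URI (the bucket below is a placeholder, not part of the diff):

    # Sketch: instantiating the REST client added in this release and starting
    # an annotation job.
    require "google/cloud/video_intelligence/v1/video_intelligence_service/rest"

    client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::Client.new

    operation = client.annotate_video(
      input_uri: "gs://my-bucket/my-video.mp4",
      features:  [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION]
    )

    # annotate_video is a long-running operation; polling goes through the REST
    # Operations client required by rest.rb above.
    operation.wait_until_done!
    response = operation.response # ::Google::Cloud::VideoIntelligence::V1::AnnotateVideoResponse
    response.annotation_results.each do |result|
      result.segment_label_annotations.each { |label| puts label.entity.description }
    end
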
data/lib/google/cloud/video_intelligence/v1/video_intelligence_service.rb
@@ -25,6 +25,7 @@ require "google/cloud/video_intelligence/v1/version"
 require "google/cloud/video_intelligence/v1/video_intelligence_service/credentials"
 require "google/cloud/video_intelligence/v1/video_intelligence_service/operations"
 require "google/cloud/video_intelligence/v1/video_intelligence_service/client"
+require "google/cloud/video_intelligence/v1/video_intelligence_service/rest"
 
 module Google
   module Cloud
@@ -33,11 +34,16 @@ module Google
         ##
         # Service that implements the Video Intelligence API.
         #
-        #
+        # @example Load this service and instantiate a gRPC client
         #
         #     require "google/cloud/video_intelligence/v1/video_intelligence_service"
         #     client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new
         #
+        # @example Load this service and instantiate a REST client
+        #
+        #     require "google/cloud/video_intelligence/v1/video_intelligence_service/rest"
+        #     client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::Client.new
+        #
         module VideoIntelligenceService
         end
       end
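
The updated docs above show the two transports side by side. A minimal sketch of the practical difference, assuming both entry points are required; the timeout value is arbitrary and only illustrates that the generated configuration surface is shared:

    # Sketch: switching transports is a matter of which client class you build.
    require "google/cloud/video_intelligence/v1/video_intelligence_service"
    require "google/cloud/video_intelligence/v1/video_intelligence_service/rest"

    grpc_client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new do |config|
      config.timeout = 120.0
    end

    rest_client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::Client.new do |config|
      config.timeout = 120.0
    end
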
data/lib/google/cloud/video_intelligence/v1.rb
@@ -23,13 +23,18 @@ module Google
   module Cloud
     module VideoIntelligence
       ##
-      #
+      # API client module.
       #
-      # @example
+      # @example Load this package, including all its services, and instantiate a gRPC client
       #
       #     require "google/cloud/video_intelligence/v1"
       #     client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new
       #
+      # @example Load this package, including all its services, and instantiate a REST client
+      #
+      #     require "google/cloud/video_intelligence/v1"
+      #     client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Rest::Client.new
+      #
       module V1
       end
     end
data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/cloud/videointelligence/v1/video_intelligence.proto
 
@@ -11,268 +12,34 @@ require 'google/protobuf/duration_pb'
 require 'google/protobuf/timestamp_pb'
 require 'google/rpc/status_pb'
 
-Google::Protobuf::DescriptorPool.generated_pool.build do
-  add_file("google/cloud/videointelligence/v1/video_intelligence.proto", :syntax => :proto3) do
-    add_message "google.cloud.videointelligence.v1.AnnotateVideoRequest" do
-      optional :input_uri, :string, 1
-      optional :input_content, :bytes, 6
-      repeated :features, :enum, 2, "google.cloud.videointelligence.v1.Feature"
-      optional :video_context, :message, 3, "google.cloud.videointelligence.v1.VideoContext"
-      optional :output_uri, :string, 4
-      optional :location_id, :string, 5
-    end
-    add_message "google.cloud.videointelligence.v1.VideoContext" do
-      repeated :segments, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
-      optional :label_detection_config, :message, 2, "google.cloud.videointelligence.v1.LabelDetectionConfig"
-      optional :shot_change_detection_config, :message, 3, "google.cloud.videointelligence.v1.ShotChangeDetectionConfig"
-      optional :explicit_content_detection_config, :message, 4, "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig"
-      optional :face_detection_config, :message, 5, "google.cloud.videointelligence.v1.FaceDetectionConfig"
-      optional :speech_transcription_config, :message, 6, "google.cloud.videointelligence.v1.SpeechTranscriptionConfig"
-      optional :text_detection_config, :message, 8, "google.cloud.videointelligence.v1.TextDetectionConfig"
-      optional :person_detection_config, :message, 11, "google.cloud.videointelligence.v1.PersonDetectionConfig"
-      optional :object_tracking_config, :message, 13, "google.cloud.videointelligence.v1.ObjectTrackingConfig"
-    end
-    add_message "google.cloud.videointelligence.v1.LabelDetectionConfig" do
-      optional :label_detection_mode, :enum, 1, "google.cloud.videointelligence.v1.LabelDetectionMode"
-      optional :stationary_camera, :bool, 2
-      optional :model, :string, 3
-      optional :frame_confidence_threshold, :float, 4
-      optional :video_confidence_threshold, :float, 5
-    end
-    add_message "google.cloud.videointelligence.v1.ShotChangeDetectionConfig" do
-      optional :model, :string, 1
-    end
-    add_message "google.cloud.videointelligence.v1.ObjectTrackingConfig" do
-      optional :model, :string, 1
-    end
-    add_message "google.cloud.videointelligence.v1.FaceDetectionConfig" do
-      optional :model, :string, 1
-      optional :include_bounding_boxes, :bool, 2
-      optional :include_attributes, :bool, 5
-    end
-    add_message "google.cloud.videointelligence.v1.PersonDetectionConfig" do
-      optional :include_bounding_boxes, :bool, 1
-      optional :include_pose_landmarks, :bool, 2
-      optional :include_attributes, :bool, 3
-    end
-    add_message "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig" do
-      optional :model, :string, 1
-    end
-    add_message "google.cloud.videointelligence.v1.TextDetectionConfig" do
-      repeated :language_hints, :string, 1
-      optional :model, :string, 2
-    end
-    add_message "google.cloud.videointelligence.v1.VideoSegment" do
-      optional :start_time_offset, :message, 1, "google.protobuf.Duration"
-      optional :end_time_offset, :message, 2, "google.protobuf.Duration"
-    end
-    add_message "google.cloud.videointelligence.v1.LabelSegment" do
-      optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
-      optional :confidence, :float, 2
-    end
-    add_message "google.cloud.videointelligence.v1.LabelFrame" do
-      optional :time_offset, :message, 1, "google.protobuf.Duration"
-      optional :confidence, :float, 2
-    end
-    add_message "google.cloud.videointelligence.v1.Entity" do
-      optional :entity_id, :string, 1
-      optional :description, :string, 2
-      optional :language_code, :string, 3
-    end
-    add_message "google.cloud.videointelligence.v1.LabelAnnotation" do
-      optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
-      repeated :category_entities, :message, 2, "google.cloud.videointelligence.v1.Entity"
-      repeated :segments, :message, 3, "google.cloud.videointelligence.v1.LabelSegment"
-      repeated :frames, :message, 4, "google.cloud.videointelligence.v1.LabelFrame"
-      optional :version, :string, 5
-    end
-    add_message "google.cloud.videointelligence.v1.ExplicitContentFrame" do
-      optional :time_offset, :message, 1, "google.protobuf.Duration"
-      optional :pornography_likelihood, :enum, 2, "google.cloud.videointelligence.v1.Likelihood"
-    end
-    add_message "google.cloud.videointelligence.v1.ExplicitContentAnnotation" do
-      repeated :frames, :message, 1, "google.cloud.videointelligence.v1.ExplicitContentFrame"
-      optional :version, :string, 2
-    end
-    add_message "google.cloud.videointelligence.v1.NormalizedBoundingBox" do
-      optional :left, :float, 1
-      optional :top, :float, 2
-      optional :right, :float, 3
-      optional :bottom, :float, 4
-    end
-    add_message "google.cloud.videointelligence.v1.FaceDetectionAnnotation" do
-      repeated :tracks, :message, 3, "google.cloud.videointelligence.v1.Track"
-      optional :thumbnail, :bytes, 4
-      optional :version, :string, 5
-    end
-    add_message "google.cloud.videointelligence.v1.PersonDetectionAnnotation" do
-      repeated :tracks, :message, 1, "google.cloud.videointelligence.v1.Track"
-      optional :version, :string, 2
-    end
-    add_message "google.cloud.videointelligence.v1.FaceSegment" do
-      optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
-    end
-    add_message "google.cloud.videointelligence.v1.FaceFrame" do
-      repeated :normalized_bounding_boxes, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
-      optional :time_offset, :message, 2, "google.protobuf.Duration"
-    end
-    add_message "google.cloud.videointelligence.v1.FaceAnnotation" do
-      optional :thumbnail, :bytes, 1
-      repeated :segments, :message, 2, "google.cloud.videointelligence.v1.FaceSegment"
-      repeated :frames, :message, 3, "google.cloud.videointelligence.v1.FaceFrame"
-    end
-    add_message "google.cloud.videointelligence.v1.TimestampedObject" do
-      optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
-      optional :time_offset, :message, 2, "google.protobuf.Duration"
-      repeated :attributes, :message, 3, "google.cloud.videointelligence.v1.DetectedAttribute"
-      repeated :landmarks, :message, 4, "google.cloud.videointelligence.v1.DetectedLandmark"
-    end
-    add_message "google.cloud.videointelligence.v1.Track" do
-      optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
-      repeated :timestamped_objects, :message, 2, "google.cloud.videointelligence.v1.TimestampedObject"
-      repeated :attributes, :message, 3, "google.cloud.videointelligence.v1.DetectedAttribute"
-      optional :confidence, :float, 4
-    end
-    add_message "google.cloud.videointelligence.v1.DetectedAttribute" do
-      optional :name, :string, 1
-      optional :confidence, :float, 2
-      optional :value, :string, 3
-    end
-    add_message "google.cloud.videointelligence.v1.DetectedLandmark" do
-      optional :name, :string, 1
-      optional :point, :message, 2, "google.cloud.videointelligence.v1.NormalizedVertex"
-      optional :confidence, :float, 3
-    end
-    add_message "google.cloud.videointelligence.v1.VideoAnnotationResults" do
-      optional :input_uri, :string, 1
-      optional :segment, :message, 10, "google.cloud.videointelligence.v1.VideoSegment"
-      repeated :segment_label_annotations, :message, 2, "google.cloud.videointelligence.v1.LabelAnnotation"
-      repeated :segment_presence_label_annotations, :message, 23, "google.cloud.videointelligence.v1.LabelAnnotation"
-      repeated :shot_label_annotations, :message, 3, "google.cloud.videointelligence.v1.LabelAnnotation"
-      repeated :shot_presence_label_annotations, :message, 24, "google.cloud.videointelligence.v1.LabelAnnotation"
-      repeated :frame_label_annotations, :message, 4, "google.cloud.videointelligence.v1.LabelAnnotation"
-      repeated :face_annotations, :message, 5, "google.cloud.videointelligence.v1.FaceAnnotation"
-      repeated :face_detection_annotations, :message, 13, "google.cloud.videointelligence.v1.FaceDetectionAnnotation"
-      repeated :shot_annotations, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
-      optional :explicit_annotation, :message, 7, "google.cloud.videointelligence.v1.ExplicitContentAnnotation"
-      repeated :speech_transcriptions, :message, 11, "google.cloud.videointelligence.v1.SpeechTranscription"
-      repeated :text_annotations, :message, 12, "google.cloud.videointelligence.v1.TextAnnotation"
-      repeated :object_annotations, :message, 14, "google.cloud.videointelligence.v1.ObjectTrackingAnnotation"
-      repeated :logo_recognition_annotations, :message, 19, "google.cloud.videointelligence.v1.LogoRecognitionAnnotation"
-      repeated :person_detection_annotations, :message, 20, "google.cloud.videointelligence.v1.PersonDetectionAnnotation"
-      optional :error, :message, 9, "google.rpc.Status"
-    end
-    add_message "google.cloud.videointelligence.v1.AnnotateVideoResponse" do
-      repeated :annotation_results, :message, 1, "google.cloud.videointelligence.v1.VideoAnnotationResults"
-    end
-    add_message "google.cloud.videointelligence.v1.VideoAnnotationProgress" do
-      optional :input_uri, :string, 1
-      optional :progress_percent, :int32, 2
-      optional :start_time, :message, 3, "google.protobuf.Timestamp"
-      optional :update_time, :message, 4, "google.protobuf.Timestamp"
-      optional :feature, :enum, 5, "google.cloud.videointelligence.v1.Feature"
-      optional :segment, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
-    end
-    add_message "google.cloud.videointelligence.v1.AnnotateVideoProgress" do
-      repeated :annotation_progress, :message, 1, "google.cloud.videointelligence.v1.VideoAnnotationProgress"
-    end
-    add_message "google.cloud.videointelligence.v1.SpeechTranscriptionConfig" do
-      optional :language_code, :string, 1
-      optional :max_alternatives, :int32, 2
-      optional :filter_profanity, :bool, 3
-      repeated :speech_contexts, :message, 4, "google.cloud.videointelligence.v1.SpeechContext"
-      optional :enable_automatic_punctuation, :bool, 5
-      repeated :audio_tracks, :int32, 6
-      optional :enable_speaker_diarization, :bool, 7
-      optional :diarization_speaker_count, :int32, 8
-      optional :enable_word_confidence, :bool, 9
-    end
-    add_message "google.cloud.videointelligence.v1.SpeechContext" do
-      repeated :phrases, :string, 1
-    end
-    add_message "google.cloud.videointelligence.v1.SpeechTranscription" do
-      repeated :alternatives, :message, 1, "google.cloud.videointelligence.v1.SpeechRecognitionAlternative"
-      optional :language_code, :string, 2
-    end
-    add_message "google.cloud.videointelligence.v1.SpeechRecognitionAlternative" do
-      optional :transcript, :string, 1
-      optional :confidence, :float, 2
-      repeated :words, :message, 3, "google.cloud.videointelligence.v1.WordInfo"
-    end
-    add_message "google.cloud.videointelligence.v1.WordInfo" do
-      optional :start_time, :message, 1, "google.protobuf.Duration"
-      optional :end_time, :message, 2, "google.protobuf.Duration"
-      optional :word, :string, 3
-      optional :confidence, :float, 4
-      optional :speaker_tag, :int32, 5
-    end
-    add_message "google.cloud.videointelligence.v1.NormalizedVertex" do
-      optional :x, :float, 1
-      optional :y, :float, 2
-    end
-    add_message "google.cloud.videointelligence.v1.NormalizedBoundingPoly" do
-      repeated :vertices, :message, 1, "google.cloud.videointelligence.v1.NormalizedVertex"
-    end
-    add_message "google.cloud.videointelligence.v1.TextSegment" do
-      optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
-      optional :confidence, :float, 2
-      repeated :frames, :message, 3, "google.cloud.videointelligence.v1.TextFrame"
-    end
-    add_message "google.cloud.videointelligence.v1.TextFrame" do
-      optional :rotated_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingPoly"
-      optional :time_offset, :message, 2, "google.protobuf.Duration"
-    end
-    add_message "google.cloud.videointelligence.v1.TextAnnotation" do
-      optional :text, :string, 1
-      repeated :segments, :message, 2, "google.cloud.videointelligence.v1.TextSegment"
-      optional :version, :string, 3
-    end
-    add_message "google.cloud.videointelligence.v1.ObjectTrackingFrame" do
-      optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
-      optional :time_offset, :message, 2, "google.protobuf.Duration"
-    end
-    add_message "google.cloud.videointelligence.v1.ObjectTrackingAnnotation" do
-      optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
-      optional :confidence, :float, 4
-      repeated :frames, :message, 2, "google.cloud.videointelligence.v1.ObjectTrackingFrame"
-      optional :version, :string, 6
-      oneof :track_info do
-        optional :segment, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
-        optional :track_id, :int64, 5
-      end
-    end
-    add_message "google.cloud.videointelligence.v1.LogoRecognitionAnnotation" do
-      optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
-      repeated :tracks, :message, 2, "google.cloud.videointelligence.v1.Track"
-      repeated :segments, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
-    end
-    add_enum "google.cloud.videointelligence.v1.Feature" do
-      value :FEATURE_UNSPECIFIED, 0
-      value :LABEL_DETECTION, 1
-      value :SHOT_CHANGE_DETECTION, 2
-      value :EXPLICIT_CONTENT_DETECTION, 3
-      value :FACE_DETECTION, 4
-      value :SPEECH_TRANSCRIPTION, 6
-      value :TEXT_DETECTION, 7
-      value :OBJECT_TRACKING, 9
-      value :LOGO_RECOGNITION, 12
-      value :PERSON_DETECTION, 14
-    end
-    add_enum "google.cloud.videointelligence.v1.LabelDetectionMode" do
-      value :LABEL_DETECTION_MODE_UNSPECIFIED, 0
-      value :SHOT_MODE, 1
-      value :FRAME_MODE, 2
-      value :SHOT_AND_FRAME_MODE, 3
-    end
-    add_enum "google.cloud.videointelligence.v1.Likelihood" do
-      value :LIKELIHOOD_UNSPECIFIED, 0
-      value :VERY_UNLIKELY, 1
-      value :UNLIKELY, 2
-      value :POSSIBLE, 3
-      value :LIKELY, 4
-      value :VERY_LIKELY, 5
+
descriptor_data = "\n:google/cloud/videointelligence/v1/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01\"\xc1\x06\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12Y\n\x17person_detection_config\x18\x0b \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.PersonDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig\"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12\"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12\"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02\"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t\"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08\"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08\"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t\"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 
\x01(\t\"\xa5\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame\x12\x0f\n\x07version\x18\x05 \x01(\t\"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood\"u\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame\x12\x0f\n\x07version\x18\x02 \x01(\t\"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02\"w\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12\x38\n\x06tracks\x18\x03 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c\x12\x0f\n\x07version\x18\x05 \x01(\t\"f\n\x19PersonDetectionAnnotation\x12\x38\n\x06tracks\x18\x01 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x0f\n\x07version\x18\x02 \x01(\t\"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\"\x9c\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration:\x02\x18\x01\"\xa7\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame:\x02\x18\x01\"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01\"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01\"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t\"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02\"\xe9\n\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n\"segment_presence_label_annotations\x18\x17 
\x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12O\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotationB\x02\x18\x01\x12^\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32:.google.cloud.videointelligence.v1.FaceDetectionAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12\x62\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32<.google.cloud.videointelligence.v1.PersonDetectionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults\"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress\"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01\"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01\"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03\"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 
\x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03\"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame\"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"q\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment\x12\x0f\n\x07version\x18\x03 \x01(\t\"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa8\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrame\x12\x0f\n\x07version\x18\x06 \x01(\tB\x0c\n\ntrack_info\"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xf5\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation\"d\x82\xd3\xe4\x93\x02\x18\"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 
videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8c\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZScloud.google.com/go/videointelligence/apiv1/videointelligencepb;videointelligencepb\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+  pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+  # Compatibility code: will be removed in the next major version.
+  require 'google/protobuf/descriptor_pb'
+  parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+  parsed.clear_dependency
+  serialized = parsed.class.encode(parsed)
+  file = pool.add_serialized_file(serialized)
+  warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+  imports = [
+    ["google.protobuf.Duration", "google/protobuf/duration.proto"],
+    ["google.rpc.Status", "google/rpc/status.proto"],
+    ["google.protobuf.Timestamp", "google/protobuf/timestamp.proto"],
+  ]
+  imports.each do |type_name, expected_filename|
+    import_file = pool.lookup(type_name).file_descriptor
+    if import_file.name != expected_filename
+      warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
     end
   end
+  warn "Each proto file must use a consistent fully-qualified name."
+  warn "This will become an error in the next major version."
 end
 
 module Google
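
The regenerated _pb file swaps the old builder DSL for a serialized descriptor, but it loads the same message and enum classes, so request construction is unchanged for callers. A minimal sketch, with illustrative values only:

    # Sketch: building a request from the generated classes described by the
    # descriptor above; the URI and options are placeholders.
    require "google/cloud/videointelligence/v1/video_intelligence_pb"

    context = Google::Cloud::VideoIntelligence::V1::VideoContext.new(
      label_detection_config: Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig.new(
        label_detection_mode: :SHOT_AND_FRAME_MODE,
        stationary_camera:    false
      )
    )

    request = Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest.new(
      input_uri:     "gs://my-bucket/my-video.mp4",
      features:      [:LABEL_DETECTION],
      video_context: context
    )
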