google-api-client 0.25.0 → 0.26.0

Files changed (265)
  1. checksums.yaml +4 -4
  2. data/{CONTRIBUTING.md → .github/CONTRIBUTING.md} +0 -0
  3. data/.github/ISSUE_TEMPLATE/bug_report.md +36 -0
  4. data/.github/ISSUE_TEMPLATE/feature_request.md +21 -0
  5. data/.github/ISSUE_TEMPLATE/support_request.md +7 -0
  6. data/.kokoro/build.bat +8 -0
  7. data/.kokoro/build.sh +36 -0
  8. data/.kokoro/common.cfg +22 -0
  9. data/.kokoro/continuous/common.cfg +20 -0
  10. data/.kokoro/continuous/linux.cfg +15 -0
  11. data/.kokoro/continuous/osx.cfg +3 -0
  12. data/.kokoro/continuous/windows.cfg +3 -0
  13. data/.kokoro/osx.sh +35 -0
  14. data/.kokoro/presubmit/common.cfg +19 -0
  15. data/.kokoro/presubmit/linux.cfg +14 -0
  16. data/.kokoro/presubmit/osx.cfg +3 -0
  17. data/.kokoro/presubmit/windows.cfg +3 -0
  18. data/.kokoro/trampoline.sh +24 -0
  19. data/.kokoro/windows.sh +32 -0
  20. data/CHANGELOG.md +83 -0
  21. data/Gemfile +2 -2
  22. data/generated/google/apis/adexchangebuyer2_v2beta1.rb +1 -1
  23. data/generated/google/apis/adexchangebuyer2_v2beta1/classes.rb +3 -1
  24. data/generated/google/apis/alertcenter_v1beta1.rb +5 -3
  25. data/generated/google/apis/alertcenter_v1beta1/classes.rb +110 -101
  26. data/generated/google/apis/alertcenter_v1beta1/representations.rb +13 -0
  27. data/generated/google/apis/alertcenter_v1beta1/service.rb +83 -76
  28. data/generated/google/apis/androiddeviceprovisioning_v1.rb +1 -1
  29. data/generated/google/apis/androiddeviceprovisioning_v1/classes.rb +7 -0
  30. data/generated/google/apis/androiddeviceprovisioning_v1/representations.rb +2 -0
  31. data/generated/google/apis/androiddeviceprovisioning_v1/service.rb +1 -1
  32. data/generated/google/apis/androidenterprise_v1.rb +1 -1
  33. data/generated/google/apis/androidenterprise_v1/classes.rb +129 -44
  34. data/generated/google/apis/androidenterprise_v1/representations.rb +48 -0
  35. data/generated/google/apis/androidenterprise_v1/service.rb +218 -0
  36. data/generated/google/apis/androidmanagement_v1.rb +1 -1
  37. data/generated/google/apis/androidmanagement_v1/classes.rb +1 -1
  38. data/generated/google/apis/androidpublisher_v3.rb +1 -1
  39. data/generated/google/apis/androidpublisher_v3/classes.rb +8 -0
  40. data/generated/google/apis/androidpublisher_v3/representations.rb +1 -0
  41. data/generated/google/apis/appengine_v1.rb +1 -1
  42. data/generated/google/apis/appengine_v1beta.rb +1 -1
  43. data/generated/google/apis/bigquery_v2.rb +1 -1
  44. data/generated/google/apis/bigquery_v2/classes.rb +87 -29
  45. data/generated/google/apis/bigquery_v2/representations.rb +21 -0
  46. data/generated/google/apis/bigquery_v2/service.rb +7 -9
  47. data/generated/google/apis/calendar_v3.rb +2 -2
  48. data/generated/google/apis/calendar_v3/classes.rb +14 -6
  49. data/generated/google/apis/classroom_v1.rb +1 -1
  50. data/generated/google/apis/classroom_v1/service.rb +1 -1
  51. data/generated/google/apis/cloudasset_v1beta1.rb +1 -1
  52. data/generated/google/apis/cloudasset_v1beta1/classes.rb +14 -10
  53. data/generated/google/apis/cloudasset_v1beta1/service.rb +4 -4
  54. data/generated/google/apis/clouddebugger_v2.rb +1 -1
  55. data/generated/google/apis/clouddebugger_v2/classes.rb +2 -2
  56. data/generated/google/apis/cloudfunctions_v1.rb +1 -1
  57. data/generated/google/apis/cloudfunctions_v1/classes.rb +2 -3
  58. data/generated/google/apis/cloudfunctions_v1beta2.rb +1 -1
  59. data/generated/google/apis/cloudfunctions_v1beta2/classes.rb +2 -3
  60. data/generated/google/apis/cloudiot_v1.rb +1 -1
  61. data/generated/google/apis/cloudiot_v1/classes.rb +127 -0
  62. data/generated/google/apis/cloudiot_v1/representations.rb +70 -0
  63. data/generated/google/apis/cloudiot_v1/service.rb +172 -2
  64. data/generated/google/apis/cloudkms_v1.rb +1 -1
  65. data/generated/google/apis/cloudkms_v1/service.rb +107 -0
  66. data/generated/google/apis/{cloudiot_v1beta1.rb → cloudscheduler_v1beta1.rb} +8 -12
  67. data/generated/google/apis/cloudscheduler_v1beta1/classes.rb +882 -0
  68. data/generated/google/apis/cloudscheduler_v1beta1/representations.rb +264 -0
  69. data/generated/google/apis/cloudscheduler_v1beta1/service.rb +445 -0
  70. data/generated/google/apis/cloudsearch_v1.rb +1 -1
  71. data/generated/google/apis/cloudsearch_v1/classes.rb +79 -1
  72. data/generated/google/apis/cloudsearch_v1/representations.rb +44 -0
  73. data/generated/google/apis/cloudsearch_v1/service.rb +88 -3
  74. data/generated/google/apis/composer_v1.rb +1 -1
  75. data/generated/google/apis/composer_v1/classes.rb +9 -0
  76. data/generated/google/apis/composer_v1/representations.rb +1 -0
  77. data/generated/google/apis/compute_alpha.rb +1 -1
  78. data/generated/google/apis/compute_alpha/classes.rb +938 -117
  79. data/generated/google/apis/compute_alpha/representations.rb +310 -2
  80. data/generated/google/apis/compute_alpha/service.rb +1203 -245
  81. data/generated/google/apis/compute_beta.rb +1 -1
  82. data/generated/google/apis/compute_beta/classes.rb +1175 -219
  83. data/generated/google/apis/compute_beta/representations.rb +375 -2
  84. data/generated/google/apis/compute_beta/service.rb +907 -139
  85. data/generated/google/apis/compute_v1.rb +1 -1
  86. data/generated/google/apis/compute_v1/classes.rb +1485 -143
  87. data/generated/google/apis/compute_v1/representations.rb +503 -0
  88. data/generated/google/apis/compute_v1/service.rb +1375 -85
  89. data/generated/google/apis/container_v1.rb +3 -3
  90. data/generated/google/apis/container_v1/classes.rb +99 -6
  91. data/generated/google/apis/container_v1/representations.rb +39 -0
  92. data/generated/google/apis/container_v1/service.rb +2 -2
  93. data/generated/google/apis/container_v1beta1.rb +3 -3
  94. data/generated/google/apis/container_v1beta1/classes.rb +162 -9
  95. data/generated/google/apis/container_v1beta1/representations.rb +80 -0
  96. data/generated/google/apis/container_v1beta1/service.rb +3 -3
  97. data/generated/google/apis/content_v2.rb +1 -1
  98. data/generated/google/apis/content_v2/classes.rb +40 -32
  99. data/generated/google/apis/content_v2/representations.rb +0 -1
  100. data/generated/google/apis/content_v2/service.rb +3 -3
  101. data/generated/google/apis/content_v2_1.rb +35 -0
  102. data/generated/google/apis/content_v2_1/classes.rb +9104 -0
  103. data/generated/google/apis/content_v2_1/representations.rb +3967 -0
  104. data/generated/google/apis/content_v2_1/service.rb +3463 -0
  105. data/generated/google/apis/dataflow_v1b3.rb +1 -1
  106. data/generated/google/apis/dataflow_v1b3/classes.rb +82 -0
  107. data/generated/google/apis/dataflow_v1b3/representations.rb +33 -0
  108. data/generated/google/apis/dataflow_v1b3/service.rb +97 -6
  109. data/generated/google/apis/dataproc_v1.rb +1 -1
  110. data/generated/google/apis/dataproc_v1/classes.rb +15 -1
  111. data/generated/google/apis/dataproc_v1/representations.rb +2 -0
  112. data/generated/google/apis/dataproc_v1beta2.rb +1 -1
  113. data/generated/google/apis/dataproc_v1beta2/classes.rb +125 -1
  114. data/generated/google/apis/dataproc_v1beta2/representations.rb +41 -0
  115. data/generated/google/apis/dialogflow_v2.rb +1 -1
  116. data/generated/google/apis/dialogflow_v2/classes.rb +13 -6
  117. data/generated/google/apis/dialogflow_v2/service.rb +2 -0
  118. data/generated/google/apis/dialogflow_v2beta1.rb +1 -1
  119. data/generated/google/apis/dialogflow_v2beta1/classes.rb +26 -6
  120. data/generated/google/apis/dialogflow_v2beta1/representations.rb +12 -0
  121. data/generated/google/apis/dialogflow_v2beta1/service.rb +261 -12
  122. data/generated/google/apis/dlp_v2.rb +1 -1
  123. data/generated/google/apis/dlp_v2/classes.rb +4 -4
  124. data/generated/google/apis/dns_v1beta2.rb +1 -1
  125. data/generated/google/apis/dns_v1beta2/classes.rb +326 -0
  126. data/generated/google/apis/dns_v1beta2/representations.rb +149 -0
  127. data/generated/google/apis/dns_v1beta2/service.rb +246 -0
  128. data/generated/google/apis/drive_v2.rb +3 -3
  129. data/generated/google/apis/drive_v3.rb +3 -3
  130. data/generated/google/apis/driveactivity_v2.rb +37 -0
  131. data/generated/google/apis/driveactivity_v2/classes.rb +1388 -0
  132. data/generated/google/apis/driveactivity_v2/representations.rb +799 -0
  133. data/generated/google/apis/driveactivity_v2/service.rb +89 -0
  134. data/generated/google/apis/firebasedynamiclinks_v1.rb +1 -1
  135. data/generated/google/apis/firebasedynamiclinks_v1/classes.rb +8 -1
  136. data/generated/google/apis/firebasedynamiclinks_v1/representations.rb +1 -0
  137. data/generated/google/apis/firebasehosting_v1beta1.rb +3 -2
  138. data/generated/google/apis/firebasehosting_v1beta1/classes.rb +43 -17
  139. data/generated/google/apis/firebasehosting_v1beta1/representations.rb +13 -0
  140. data/generated/google/apis/firebasehosting_v1beta1/service.rb +74 -1
  141. data/generated/google/apis/firestore_v1.rb +1 -1
  142. data/generated/google/apis/firestore_v1/classes.rb +2225 -520
  143. data/generated/google/apis/firestore_v1/representations.rb +800 -33
  144. data/generated/google/apis/firestore_v1/service.rb +526 -0
  145. data/generated/google/apis/fitness_v1.rb +1 -1
  146. data/generated/google/apis/fitness_v1/classes.rb +1 -2
  147. data/generated/google/apis/fitness_v1/service.rb +7 -3
  148. data/generated/google/apis/games_management_v1management.rb +1 -4
  149. data/generated/google/apis/games_v1.rb +1 -4
  150. data/generated/google/apis/genomics_v2alpha1.rb +1 -1
  151. data/generated/google/apis/genomics_v2alpha1/classes.rb +6 -1
  152. data/generated/google/apis/gmail_v1.rb +2 -2
  153. data/generated/google/apis/iap_v1.rb +34 -0
  154. data/generated/google/apis/iap_v1/classes.rb +308 -0
  155. data/generated/google/apis/iap_v1/representations.rb +126 -0
  156. data/generated/google/apis/iap_v1/service.rb +725 -0
  157. data/generated/google/apis/iap_v1beta1.rb +1 -1
  158. data/generated/google/apis/iap_v1beta1/classes.rb +0 -132
  159. data/generated/google/apis/iap_v1beta1/representations.rb +0 -32
  160. data/generated/google/apis/jobs_v2.rb +1 -1
  161. data/generated/google/apis/jobs_v2/classes.rb +8 -9
  162. data/generated/google/apis/jobs_v3.rb +1 -1
  163. data/generated/google/apis/jobs_v3/classes.rb +6 -6
  164. data/generated/google/apis/jobs_v3/service.rb +8 -8
  165. data/generated/google/apis/jobs_v3p1beta1.rb +1 -1
  166. data/generated/google/apis/jobs_v3p1beta1/classes.rb +9 -9
  167. data/generated/google/apis/jobs_v3p1beta1/service.rb +8 -8
  168. data/generated/google/apis/monitoring_v3.rb +1 -1
  169. data/generated/google/apis/monitoring_v3/classes.rb +20 -7
  170. data/generated/google/apis/monitoring_v3/representations.rb +1 -0
  171. data/generated/google/apis/monitoring_v3/service.rb +0 -365
  172. data/generated/google/apis/pagespeedonline_v5.rb +32 -0
  173. data/generated/google/apis/pagespeedonline_v5/classes.rb +724 -0
  174. data/generated/google/apis/pagespeedonline_v5/representations.rb +315 -0
  175. data/generated/google/apis/pagespeedonline_v5/service.rb +116 -0
  176. data/generated/google/apis/people_v1.rb +3 -3
  177. data/generated/google/apis/pubsub_v1.rb +1 -1
  178. data/generated/google/apis/pubsub_v1/classes.rb +63 -12
  179. data/generated/google/apis/pubsub_v1/representations.rb +15 -0
  180. data/generated/google/apis/pubsub_v1/service.rb +26 -18
  181. data/generated/google/apis/redis_v1.rb +2 -3
  182. data/generated/google/apis/redis_v1/service.rb +1 -2
  183. data/generated/google/apis/redis_v1beta1.rb +2 -3
  184. data/generated/google/apis/redis_v1beta1/service.rb +1 -2
  185. data/generated/google/apis/runtimeconfig_v1beta1.rb +1 -1
  186. data/generated/google/apis/runtimeconfig_v1beta1/classes.rb +2 -2
  187. data/generated/google/apis/script_v1.rb +24 -6
  188. data/generated/google/apis/serviceconsumermanagement_v1.rb +1 -1
  189. data/generated/google/apis/serviceconsumermanagement_v1/classes.rb +66 -1
  190. data/generated/google/apis/serviceconsumermanagement_v1/representations.rb +30 -0
  191. data/generated/google/apis/serviceconsumermanagement_v1/service.rb +89 -0
  192. data/generated/google/apis/servicecontrol_v1.rb +1 -1
  193. data/generated/google/apis/servicemanagement_v1.rb +1 -1
  194. data/generated/google/apis/servicemanagement_v1/classes.rb +7 -0
  195. data/generated/google/apis/servicemanagement_v1/representations.rb +1 -0
  196. data/generated/google/apis/servicenetworking_v1beta.rb +1 -1
  197. data/generated/google/apis/servicenetworking_v1beta/classes.rb +84 -37
  198. data/generated/google/apis/servicenetworking_v1beta/representations.rb +15 -0
  199. data/generated/google/apis/servicenetworking_v1beta/service.rb +132 -43
  200. data/generated/google/apis/serviceusage_v1.rb +1 -1
  201. data/generated/google/apis/serviceusage_v1/classes.rb +1 -1
  202. data/generated/google/apis/serviceusage_v1beta1.rb +1 -1
  203. data/generated/google/apis/serviceusage_v1beta1/classes.rb +1 -1
  204. data/generated/google/apis/sheets_v4.rb +4 -4
  205. data/generated/google/apis/slides_v1.rb +4 -4
  206. data/generated/google/apis/slides_v1/classes.rb +187 -2
  207. data/generated/google/apis/slides_v1/representations.rb +67 -0
  208. data/generated/google/apis/slides_v1/service.rb +4 -3
  209. data/generated/google/apis/sourcerepo_v1.rb +2 -2
  210. data/generated/google/apis/sourcerepo_v1/service.rb +1 -1
  211. data/generated/google/apis/speech_v1.rb +1 -1
  212. data/generated/google/apis/speech_v1/classes.rb +58 -10
  213. data/generated/google/apis/speech_v1/representations.rb +29 -1
  214. data/generated/google/apis/{speech_v1beta1.rb → speech_v1p1beta1.rb} +6 -6
  215. data/generated/google/apis/speech_v1p1beta1/classes.rb +922 -0
  216. data/generated/google/apis/speech_v1p1beta1/representations.rb +294 -0
  217. data/generated/google/apis/{speech_v1beta1 → speech_v1p1beta1}/service.rb +33 -35
  218. data/generated/google/apis/sqladmin_v1beta4.rb +1 -1
  219. data/generated/google/apis/sqladmin_v1beta4/classes.rb +64 -3
  220. data/generated/google/apis/sqladmin_v1beta4/representations.rb +31 -0
  221. data/generated/google/apis/storage_v1.rb +1 -1
  222. data/generated/google/apis/storage_v1/classes.rb +54 -0
  223. data/generated/google/apis/storage_v1/representations.rb +31 -0
  224. data/generated/google/apis/streetviewpublish_v1.rb +1 -1
  225. data/generated/google/apis/streetviewpublish_v1/classes.rb +26 -0
  226. data/generated/google/apis/streetviewpublish_v1/representations.rb +4 -0
  227. data/generated/google/apis/streetviewpublish_v1/service.rb +29 -3
  228. data/generated/google/apis/tasks_v1.rb +1 -1
  229. data/generated/google/apis/testing_v1.rb +1 -1
  230. data/generated/google/apis/testing_v1/classes.rb +49 -19
  231. data/generated/google/apis/testing_v1/representations.rb +20 -2
  232. data/generated/google/apis/toolresults_v1beta3.rb +1 -1
  233. data/generated/google/apis/toolresults_v1beta3/classes.rb +77 -70
  234. data/generated/google/apis/vault_v1.rb +1 -1
  235. data/generated/google/apis/vault_v1/classes.rb +78 -7
  236. data/generated/google/apis/vault_v1/representations.rb +34 -1
  237. data/generated/google/apis/vault_v1/service.rb +141 -0
  238. data/generated/google/apis/vision_v1.rb +1 -1
  239. data/generated/google/apis/vision_v1/classes.rb +33 -0
  240. data/generated/google/apis/vision_v1/representations.rb +16 -0
  241. data/generated/google/apis/vision_v1/service.rb +2 -2
  242. data/generated/google/apis/vision_v1p1beta1.rb +1 -1
  243. data/generated/google/apis/vision_v1p1beta1/classes.rb +33 -0
  244. data/generated/google/apis/vision_v1p1beta1/representations.rb +16 -0
  245. data/generated/google/apis/vision_v1p2beta1.rb +1 -1
  246. data/generated/google/apis/vision_v1p2beta1/classes.rb +33 -0
  247. data/generated/google/apis/vision_v1p2beta1/representations.rb +16 -0
  248. data/generated/google/apis/youtube_partner_v1.rb +1 -1
  249. data/google-api-client.gemspec +1 -1
  250. data/lib/google/api_client/auth/installed_app.rb +17 -2
  251. data/lib/google/api_client/client_secrets.rb +1 -1
  252. data/lib/google/apis/core/download.rb +2 -2
  253. data/lib/google/apis/errors.rb +9 -0
  254. data/lib/google/apis/generator/annotator.rb +2 -2
  255. data/lib/google/apis/version.rb +1 -1
  256. metadata +46 -17
  257. data/generated/google/apis/cloudiot_v1beta1/classes.rb +0 -959
  258. data/generated/google/apis/cloudiot_v1beta1/representations.rb +0 -375
  259. data/generated/google/apis/cloudiot_v1beta1/service.rb +0 -618
  260. data/generated/google/apis/dfareporting_v3_0.rb +0 -40
  261. data/generated/google/apis/dfareporting_v3_0/classes.rb +0 -12119
  262. data/generated/google/apis/dfareporting_v3_0/representations.rb +0 -4336
  263. data/generated/google/apis/dfareporting_v3_0/service.rb +0 -8701
  264. data/generated/google/apis/speech_v1beta1/classes.rb +0 -480
  265. data/generated/google/apis/speech_v1beta1/representations.rb +0 -194
@@ -280,6 +280,12 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class LineConnection
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class LineFill
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -472,6 +478,12 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class RerouteLineRequest
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class Response
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -682,6 +694,12 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class UpdateLineCategoryRequest
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class UpdateLinePropertiesRequest
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -700,6 +718,12 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class UpdatePageElementsZOrderRequest
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class UpdatePagePropertiesRequest
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -883,6 +907,7 @@ module Google
  class CreateLineRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
+ property :category, as: 'category'
  property :element_properties, as: 'elementProperties', class: Google::Apis::SlidesV1::PageElementProperties, decorator: Google::Apis::SlidesV1::PageElementProperties::Representation
 
  property :line_category, as: 'lineCategory'
@@ -1196,12 +1221,21 @@ module Google
  class Line
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
+ property :line_category, as: 'lineCategory'
  property :line_properties, as: 'lineProperties', class: Google::Apis::SlidesV1::LineProperties, decorator: Google::Apis::SlidesV1::LineProperties::Representation
 
  property :line_type, as: 'lineType'
  end
  end
 
+ class LineConnection
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :connected_object_id, as: 'connectedObjectId'
+ property :connection_site_index, as: 'connectionSiteIndex'
+ end
+ end
+
  class LineFill
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -1215,11 +1249,15 @@ module Google
  class Representation < Google::Apis::Core::JsonRepresentation
  property :dash_style, as: 'dashStyle'
  property :end_arrow, as: 'endArrow'
+ property :end_connection, as: 'endConnection', class: Google::Apis::SlidesV1::LineConnection, decorator: Google::Apis::SlidesV1::LineConnection::Representation
+
  property :line_fill, as: 'lineFill', class: Google::Apis::SlidesV1::LineFill, decorator: Google::Apis::SlidesV1::LineFill::Representation
 
  property :link, as: 'link', class: Google::Apis::SlidesV1::Link, decorator: Google::Apis::SlidesV1::Link::Representation
 
  property :start_arrow, as: 'startArrow'
+ property :start_connection, as: 'startConnection', class: Google::Apis::SlidesV1::LineConnection, decorator: Google::Apis::SlidesV1::LineConnection::Representation
+
  property :weight, as: 'weight', class: Google::Apis::SlidesV1::Dimension, decorator: Google::Apis::SlidesV1::Dimension::Representation
 
  end
@@ -1593,18 +1631,24 @@ module Google
 
  property :replace_image, as: 'replaceImage', class: Google::Apis::SlidesV1::ReplaceImageRequest, decorator: Google::Apis::SlidesV1::ReplaceImageRequest::Representation
 
+ property :reroute_line, as: 'rerouteLine', class: Google::Apis::SlidesV1::RerouteLineRequest, decorator: Google::Apis::SlidesV1::RerouteLineRequest::Representation
+
  property :ungroup_objects, as: 'ungroupObjects', class: Google::Apis::SlidesV1::UngroupObjectsRequest, decorator: Google::Apis::SlidesV1::UngroupObjectsRequest::Representation
 
  property :unmerge_table_cells, as: 'unmergeTableCells', class: Google::Apis::SlidesV1::UnmergeTableCellsRequest, decorator: Google::Apis::SlidesV1::UnmergeTableCellsRequest::Representation
 
  property :update_image_properties, as: 'updateImageProperties', class: Google::Apis::SlidesV1::UpdateImagePropertiesRequest, decorator: Google::Apis::SlidesV1::UpdateImagePropertiesRequest::Representation
 
+ property :update_line_category, as: 'updateLineCategory', class: Google::Apis::SlidesV1::UpdateLineCategoryRequest, decorator: Google::Apis::SlidesV1::UpdateLineCategoryRequest::Representation
+
  property :update_line_properties, as: 'updateLineProperties', class: Google::Apis::SlidesV1::UpdateLinePropertiesRequest, decorator: Google::Apis::SlidesV1::UpdateLinePropertiesRequest::Representation
 
  property :update_page_element_alt_text, as: 'updatePageElementAltText', class: Google::Apis::SlidesV1::UpdatePageElementAltTextRequest, decorator: Google::Apis::SlidesV1::UpdatePageElementAltTextRequest::Representation
 
  property :update_page_element_transform, as: 'updatePageElementTransform', class: Google::Apis::SlidesV1::UpdatePageElementTransformRequest, decorator: Google::Apis::SlidesV1::UpdatePageElementTransformRequest::Representation
 
+ property :update_page_elements_z_order, as: 'updatePageElementsZOrder', class: Google::Apis::SlidesV1::UpdatePageElementsZOrderRequest, decorator: Google::Apis::SlidesV1::UpdatePageElementsZOrderRequest::Representation
+
  property :update_page_properties, as: 'updatePageProperties', class: Google::Apis::SlidesV1::UpdatePagePropertiesRequest, decorator: Google::Apis::SlidesV1::UpdatePagePropertiesRequest::Representation
 
  property :update_paragraph_style, as: 'updateParagraphStyle', class: Google::Apis::SlidesV1::UpdateParagraphStyleRequest, decorator: Google::Apis::SlidesV1::UpdateParagraphStyleRequest::Representation
@@ -1628,6 +1672,13 @@ module Google
  end
  end
 
+ class RerouteLineRequest
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :object_id_prop, as: 'objectId'
+ end
+ end
+
  class Response
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -2017,6 +2068,14 @@ module Google
  end
  end
 
+ class UpdateLineCategoryRequest
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :line_category, as: 'lineCategory'
+ property :object_id_prop, as: 'objectId'
+ end
+ end
+
  class UpdateLinePropertiesRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -2046,6 +2105,14 @@ module Google
  end
  end
 
+ class UpdatePageElementsZOrderRequest
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :operation, as: 'operation'
+ collection :page_element_object_ids, as: 'pageElementObjectIds'
+ end
+ end
+
  class UpdatePagePropertiesRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
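
Note: the new Slides request types registered above (RerouteLineRequest, UpdateLineCategoryRequest, UpdatePageElementsZOrderRequest) hang off the batch-update Request union, as the representation changes in this file show. Below is a rough, hedged sketch of how they might be exercised through this gem; the presentation and element object IDs are placeholders, and the 'BENT' / 'BRING_TO_FRONT' enum strings are assumptions taken from the Slides API reference rather than from this diff.

    require 'google/apis/slides_v1'
    require 'googleauth'

    slides = Google::Apis::SlidesV1::SlidesService.new
    slides.authorization = Google::Auth.get_application_default(
      ['https://www.googleapis.com/auth/presentations'])

    batch = Google::Apis::SlidesV1::BatchUpdatePresentationRequest.new(requests: [
      # Switch an existing connector to another category ('BENT' is an assumed value).
      Google::Apis::SlidesV1::Request.new(
        update_line_category: Google::Apis::SlidesV1::UpdateLineCategoryRequest.new(
          object_id_prop: 'connector_1', line_category: 'BENT')),
      # Re-route the connector between the shapes it is connected to.
      Google::Apis::SlidesV1::Request.new(
        reroute_line: Google::Apis::SlidesV1::RerouteLineRequest.new(
          object_id_prop: 'connector_1')),
      # Reorder two page elements ('BRING_TO_FRONT' is an assumed operation value).
      Google::Apis::SlidesV1::Request.new(
        update_page_elements_z_order: Google::Apis::SlidesV1::UpdatePageElementsZOrderRequest.new(
          page_element_object_ids: ['shape_1', 'shape_2'], operation: 'BRING_TO_FRONT'))
    ])

    slides.batch_update_presentation('my-presentation-id', batch)
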
@@ -96,9 +96,10 @@ module Google
  execute_or_queue_command(command, &block)
  end
 
- # Creates a new presentation using the title given in the request. If a
- # presentationId is provided, uses it as the ID of the new presentation.
- # Otherwise, a new presentationId is generated.
+ # Creates a blank presentation using the title given in the request. If a
+ # `presentationId` is provided, it is used as the ID of the new presentation.
+ # Otherwise, a new ID is generated. Other fields in the request, including
+ # any provided content, are ignored.
  # Returns the created presentation.
  # @param [Google::Apis::SlidesV1::Presentation] presentation_object
  # @param [String] fields
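
As the clarified comment above notes, presentations.create uses only the title (and an optional presentationId) and ignores any other content in the request body. A minimal, hedged sketch, reusing the slides service object from the previous example:

    # Only the title is honored; any slides supplied here would be ignored.
    presentation = slides.create_presentation(
      Google::Apis::SlidesV1::Presentation.new(title: 'Quarterly review'))
    puts presentation.presentation_id
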
@@ -20,12 +20,12 @@ module Google
  module Apis
  # Cloud Source Repositories API
  #
- # Access source code repositories hosted by Google.
+ # Accesses source code repositories hosted by Google.
  #
  # @see https://cloud.google.com/source-repositories/docs/apis
  module SourcerepoV1
  VERSION = 'V1'
- REVISION = '20180718'
+ REVISION = '20181106'
 
  # View and manage your data across Google Cloud Platform services
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -22,7 +22,7 @@ module Google
  module SourcerepoV1
  # Cloud Source Repositories API
  #
- # Access source code repositories hosted by Google.
+ # Accesses source code repositories hosted by Google.
  #
  # @example
  # require 'google/apis/sourcerepo_v1'
@@ -25,7 +25,7 @@ module Google
  # @see https://cloud.google.com/speech-to-text/docs/quickstart-protocol
  module SpeechV1
  VERSION = 'V1'
- REVISION = '20180914'
+ REVISION = '20181127'
 
  # View and manage your data across Google Cloud Platform services
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
@@ -47,6 +47,40 @@ module Google
  end
  end
 
+ # Describes the progress of a long-running `LongRunningRecognize` call. It is
+ # included in the `metadata` field of the `Operation` returned by the
+ # `GetOperation` call of the `google::longrunning::Operations` service.
+ class LongRunningRecognizeMetadata
+ include Google::Apis::Core::Hashable
+
+ # Time of the most recent processing update.
+ # Corresponds to the JSON property `lastUpdateTime`
+ # @return [String]
+ attr_accessor :last_update_time
+
+ # Approximate percentage of audio processed thus far. Guaranteed to be 100
+ # when the audio is fully processed and the results are available.
+ # Corresponds to the JSON property `progressPercent`
+ # @return [Fixnum]
+ attr_accessor :progress_percent
+
+ # Time when the request was received.
+ # Corresponds to the JSON property `startTime`
+ # @return [String]
+ attr_accessor :start_time
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @last_update_time = args[:last_update_time] if args.key?(:last_update_time)
+ @progress_percent = args[:progress_percent] if args.key?(:progress_percent)
+ @start_time = args[:start_time] if args.key?(:start_time)
+ end
+ end
+
  # The top-level message sent by the client for the `LongRunningRecognize`
  # method.
  class LongRunningRecognizeRequest
@@ -77,6 +111,30 @@ module Google
  end
  end
 
+ # The only message returned to the client by the `LongRunningRecognize` method.
+ # It contains the result as zero or more sequential `SpeechRecognitionResult`
+ # messages. It is included in the `result.response` field of the `Operation`
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
+ # service.
+ class LongRunningRecognizeResponse
+ include Google::Apis::Core::Hashable
+
+ # Output only. Sequential list of transcription results corresponding to
+ # sequential portions of audio.
+ # Corresponds to the JSON property `results`
+ # @return [Array<Google::Apis::SpeechV1::SpeechRecognitionResult>]
+ attr_accessor :results
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @results = args[:results] if args.key?(:results)
+ end
+ end
+
  # This resource represents a long-running operation that is the result of a
  # network API call.
  class Operation
@@ -586,15 +644,6 @@ module Google
  # @return [String]
  attr_accessor :end_time
 
- # Output only. A distinct integer value is assigned for every speaker within
- # the audio. This field specifies which one of those speakers was detected to
- # have spoken this word. Value ranges from '1' to diarization_speaker_count.
- # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
- # top alternative.
- # Corresponds to the JSON property `speakerTag`
- # @return [Fixnum]
- attr_accessor :speaker_tag
-
  # Output only. Time offset relative to the beginning of the audio,
  # and corresponding to the start of the spoken word.
  # This field is only set if `enable_word_time_offsets=true` and only
@@ -617,7 +666,6 @@ module Google
  # Update properties of this object
  def update!(**args)
  @end_time = args[:end_time] if args.key?(:end_time)
- @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
  @start_time = args[:start_time] if args.key?(:start_time)
  @word = args[:word] if args.key?(:word)
  end
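
With LongRunningRecognizeMetadata and LongRunningRecognizeResponse now part of the v1 surface, an asynchronous transcription's progress can be read from the operation returned by operations.get. A hedged sketch follows; the service class and generated method names (SpeechService, longrunningrecognize_speech, get_operation) are assumptions based on this gem's usual naming and should be checked against the installed version, and the GCS URI is a placeholder.

    require 'google/apis/speech_v1'
    require 'googleauth'

    speech = Google::Apis::SpeechV1::SpeechService.new  # class name assumed
    speech.authorization = Google::Auth.get_application_default(
      ['https://www.googleapis.com/auth/cloud-platform'])

    request = Google::Apis::SpeechV1::LongRunningRecognizeRequest.new(
      config: Google::Apis::SpeechV1::RecognitionConfig.new(
        language_code: 'en-US', encoding: 'FLAC', sample_rate_hertz: 16_000),
      audio: Google::Apis::SpeechV1::RecognitionAudio.new(uri: 'gs://my-bucket/audio.flac'))

    op = speech.longrunningrecognize_speech(request)   # method name assumed

    until op.done?
      sleep 5
      op = speech.get_operation(op.name)               # method name assumed
      # Operation#metadata is a plain hash, so the new metadata fields are read
      # by their JSON names ('progressPercent', 'startTime', 'lastUpdateTime').
      puts "progress: #{op.metadata['progressPercent']}%" if op.metadata
    end

    # On success, Operation#response carries the LongRunningRecognizeResponse payload.
    (op.response['results'] || []).each do |result|
      puts result['alternatives'].first['transcript']
    end
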
@@ -28,12 +28,24 @@ module Google
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class LongRunningRecognizeMetadata
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class LongRunningRecognizeRequest
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
  include Google::Apis::Core::JsonObjectSupport
  end
 
+ class LongRunningRecognizeResponse
+ class Representation < Google::Apis::Core::JsonRepresentation; end
+
+ include Google::Apis::Core::JsonObjectSupport
+ end
+
  class Operation
  class Representation < Google::Apis::Core::JsonRepresentation; end
 
@@ -103,6 +115,15 @@ module Google
  end
  end
 
+ class LongRunningRecognizeMetadata
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ property :last_update_time, as: 'lastUpdateTime'
+ property :progress_percent, as: 'progressPercent'
+ property :start_time, as: 'startTime'
+ end
+ end
+
  class LongRunningRecognizeRequest
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -113,6 +134,14 @@ module Google
  end
  end
 
+ class LongRunningRecognizeResponse
+ # @private
+ class Representation < Google::Apis::Core::JsonRepresentation
+ collection :results, as: 'results', class: Google::Apis::SpeechV1::SpeechRecognitionResult, decorator: Google::Apis::SpeechV1::SpeechRecognitionResult::Representation
+
+ end
+ end
+
  class Operation
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
@@ -206,7 +235,6 @@ module Google
  # @private
  class Representation < Google::Apis::Core::JsonRepresentation
  property :end_time, as: 'endTime'
- property :speaker_tag, as: 'speakerTag'
  property :start_time, as: 'startTime'
  property :word, as: 'word'
  end
@@ -12,9 +12,9 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- require 'google/apis/speech_v1beta1/service.rb'
- require 'google/apis/speech_v1beta1/classes.rb'
- require 'google/apis/speech_v1beta1/representations.rb'
+ require 'google/apis/speech_v1p1beta1/service.rb'
+ require 'google/apis/speech_v1p1beta1/classes.rb'
+ require 'google/apis/speech_v1p1beta1/representations.rb'
 
  module Google
  module Apis
@@ -23,9 +23,9 @@ module Google
  # Converts audio to text by applying powerful neural network models.
  #
  # @see https://cloud.google.com/speech-to-text/docs/quickstart-protocol
- module SpeechV1beta1
- VERSION = 'V1beta1'
- REVISION = '20180911'
+ module SpeechV1p1beta1
+ VERSION = 'V1p1beta1'
+ REVISION = '20181127'
 
  # View and manage your data across Google Cloud Platform services
  AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform'
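
The speech_v1beta1 surface is replaced by speech_v1p1beta1 (its generated classes follow below), so callers pinned to the old beta have to update both the require path and the module constant. A hedged before/after sketch, using only fields visible in the new classes:

    # Before (0.25.x):
    #   require 'google/apis/speech_v1beta1'
    #   config = Google::Apis::SpeechV1beta1::RecognitionConfig.new

    # After (0.26.0):
    require 'google/apis/speech_v1p1beta1'

    config = Google::Apis::SpeechV1p1beta1::RecognitionConfig.new(
      language_code: 'en-US',
      alternative_language_codes: ['es-US', 'fr-FR'],  # up to 3 alternative languages
      enable_automatic_punctuation: true)
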
@@ -0,0 +1,922 @@
1
+ # Copyright 2015 Google Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ require 'date'
16
+ require 'google/apis/core/base_service'
17
+ require 'google/apis/core/json_representation'
18
+ require 'google/apis/core/hashable'
19
+ require 'google/apis/errors'
20
+
21
+ module Google
22
+ module Apis
23
+ module SpeechV1p1beta1
24
+
25
+ # The response message for Operations.ListOperations.
26
+ class ListOperationsResponse
27
+ include Google::Apis::Core::Hashable
28
+
29
+ # The standard List next-page token.
30
+ # Corresponds to the JSON property `nextPageToken`
31
+ # @return [String]
32
+ attr_accessor :next_page_token
33
+
34
+ # A list of operations that matches the specified filter in the request.
35
+ # Corresponds to the JSON property `operations`
36
+ # @return [Array<Google::Apis::SpeechV1p1beta1::Operation>]
37
+ attr_accessor :operations
38
+
39
+ def initialize(**args)
40
+ update!(**args)
41
+ end
42
+
43
+ # Update properties of this object
44
+ def update!(**args)
45
+ @next_page_token = args[:next_page_token] if args.key?(:next_page_token)
46
+ @operations = args[:operations] if args.key?(:operations)
47
+ end
48
+ end
49
+
50
+ # Describes the progress of a long-running `LongRunningRecognize` call. It is
51
+ # included in the `metadata` field of the `Operation` returned by the
52
+ # `GetOperation` call of the `google::longrunning::Operations` service.
53
+ class LongRunningRecognizeMetadata
54
+ include Google::Apis::Core::Hashable
55
+
56
+ # Time of the most recent processing update.
57
+ # Corresponds to the JSON property `lastUpdateTime`
58
+ # @return [String]
59
+ attr_accessor :last_update_time
60
+
61
+ # Approximate percentage of audio processed thus far. Guaranteed to be 100
62
+ # when the audio is fully processed and the results are available.
63
+ # Corresponds to the JSON property `progressPercent`
64
+ # @return [Fixnum]
65
+ attr_accessor :progress_percent
66
+
67
+ # Time when the request was received.
68
+ # Corresponds to the JSON property `startTime`
69
+ # @return [String]
70
+ attr_accessor :start_time
71
+
72
+ def initialize(**args)
73
+ update!(**args)
74
+ end
75
+
76
+ # Update properties of this object
77
+ def update!(**args)
78
+ @last_update_time = args[:last_update_time] if args.key?(:last_update_time)
79
+ @progress_percent = args[:progress_percent] if args.key?(:progress_percent)
80
+ @start_time = args[:start_time] if args.key?(:start_time)
81
+ end
82
+ end
83
+
84
+ # The top-level message sent by the client for the `LongRunningRecognize`
85
+ # method.
86
+ class LongRunningRecognizeRequest
87
+ include Google::Apis::Core::Hashable
88
+
89
+ # Contains audio data in the encoding specified in the `RecognitionConfig`.
90
+ # Either `content` or `uri` must be supplied. Supplying both or neither
91
+ # returns google.rpc.Code.INVALID_ARGUMENT. See
92
+ # [content limits](/speech-to-text/quotas#content).
93
+ # Corresponds to the JSON property `audio`
94
+ # @return [Google::Apis::SpeechV1p1beta1::RecognitionAudio]
95
+ attr_accessor :audio
96
+
97
+ # Provides information to the recognizer that specifies how to process the
98
+ # request.
99
+ # Corresponds to the JSON property `config`
100
+ # @return [Google::Apis::SpeechV1p1beta1::RecognitionConfig]
101
+ attr_accessor :config
102
+
103
+ def initialize(**args)
104
+ update!(**args)
105
+ end
106
+
107
+ # Update properties of this object
108
+ def update!(**args)
109
+ @audio = args[:audio] if args.key?(:audio)
110
+ @config = args[:config] if args.key?(:config)
111
+ end
112
+ end
113
+
114
+ # The only message returned to the client by the `LongRunningRecognize` method.
115
+ # It contains the result as zero or more sequential `SpeechRecognitionResult`
116
+ # messages. It is included in the `result.response` field of the `Operation`
117
+ # returned by the `GetOperation` call of the `google::longrunning::Operations`
118
+ # service.
119
+ class LongRunningRecognizeResponse
120
+ include Google::Apis::Core::Hashable
121
+
122
+ # Output only. Sequential list of transcription results corresponding to
123
+ # sequential portions of audio.
124
+ # Corresponds to the JSON property `results`
125
+ # @return [Array<Google::Apis::SpeechV1p1beta1::SpeechRecognitionResult>]
126
+ attr_accessor :results
127
+
128
+ def initialize(**args)
129
+ update!(**args)
130
+ end
131
+
132
+ # Update properties of this object
133
+ def update!(**args)
134
+ @results = args[:results] if args.key?(:results)
135
+ end
136
+ end
137
+
138
+ # This resource represents a long-running operation that is the result of a
139
+ # network API call.
140
+ class Operation
141
+ include Google::Apis::Core::Hashable
142
+
143
+ # If the value is `false`, it means the operation is still in progress.
144
+ # If `true`, the operation is completed, and either `error` or `response` is
145
+ # available.
146
+ # Corresponds to the JSON property `done`
147
+ # @return [Boolean]
148
+ attr_accessor :done
149
+ alias_method :done?, :done
150
+
151
+ # The `Status` type defines a logical error model that is suitable for different
152
+ # programming environments, including REST APIs and RPC APIs. It is used by
153
+ # [gRPC](https://github.com/grpc). The error model is designed to be:
154
+ # - Simple to use and understand for most users
155
+ # - Flexible enough to meet unexpected needs
156
+ # # Overview
157
+ # The `Status` message contains three pieces of data: error code, error message,
158
+ # and error details. The error code should be an enum value of
159
+ # google.rpc.Code, but it may accept additional error codes if needed. The
160
+ # error message should be a developer-facing English message that helps
161
+ # developers *understand* and *resolve* the error. If a localized user-facing
162
+ # error message is needed, put the localized message in the error details or
163
+ # localize it in the client. The optional error details may contain arbitrary
164
+ # information about the error. There is a predefined set of error detail types
165
+ # in the package `google.rpc` that can be used for common error conditions.
166
+ # # Language mapping
167
+ # The `Status` message is the logical representation of the error model, but it
168
+ # is not necessarily the actual wire format. When the `Status` message is
169
+ # exposed in different client libraries and different wire protocols, it can be
170
+ # mapped differently. For example, it will likely be mapped to some exceptions
171
+ # in Java, but more likely mapped to some error codes in C.
172
+ # # Other uses
173
+ # The error model and the `Status` message can be used in a variety of
174
+ # environments, either with or without APIs, to provide a
175
+ # consistent developer experience across different environments.
176
+ # Example uses of this error model include:
177
+ # - Partial errors. If a service needs to return partial errors to the client,
178
+ # it may embed the `Status` in the normal response to indicate the partial
179
+ # errors.
180
+ # - Workflow errors. A typical workflow has multiple steps. Each step may
181
+ # have a `Status` message for error reporting.
182
+ # - Batch operations. If a client uses batch request and batch response, the
183
+ # `Status` message should be used directly inside batch response, one for
184
+ # each error sub-response.
185
+ # - Asynchronous operations. If an API call embeds asynchronous operation
186
+ # results in its response, the status of those operations should be
187
+ # represented directly using the `Status` message.
188
+ # - Logging. If some API errors are stored in logs, the message `Status` could
189
+ # be used directly after any stripping needed for security/privacy reasons.
190
+ # Corresponds to the JSON property `error`
191
+ # @return [Google::Apis::SpeechV1p1beta1::Status]
192
+ attr_accessor :error
193
+
194
+ # Service-specific metadata associated with the operation. It typically
195
+ # contains progress information and common metadata such as create time.
196
+ # Some services might not provide such metadata. Any method that returns a
197
+ # long-running operation should document the metadata type, if any.
198
+ # Corresponds to the JSON property `metadata`
199
+ # @return [Hash<String,Object>]
200
+ attr_accessor :metadata
201
+
202
+ # The server-assigned name, which is only unique within the same service that
203
+ # originally returns it. If you use the default HTTP mapping, the
204
+ # `name` should have the format of `operations/some/unique/name`.
205
+ # Corresponds to the JSON property `name`
206
+ # @return [String]
207
+ attr_accessor :name
208
+
209
+ # The normal response of the operation in case of success. If the original
210
+ # method returns no data on success, such as `Delete`, the response is
211
+ # `google.protobuf.Empty`. If the original method is standard
212
+ # `Get`/`Create`/`Update`, the response should be the resource. For other
213
+ # methods, the response should have the type `XxxResponse`, where `Xxx`
214
+ # is the original method name. For example, if the original method name
215
+ # is `TakeSnapshot()`, the inferred response type is
216
+ # `TakeSnapshotResponse`.
217
+ # Corresponds to the JSON property `response`
218
+ # @return [Hash<String,Object>]
219
+ attr_accessor :response
220
+
221
+ def initialize(**args)
222
+ update!(**args)
223
+ end
224
+
225
+ # Update properties of this object
226
+ def update!(**args)
227
+ @done = args[:done] if args.key?(:done)
228
+ @error = args[:error] if args.key?(:error)
229
+ @metadata = args[:metadata] if args.key?(:metadata)
230
+ @name = args[:name] if args.key?(:name)
231
+ @response = args[:response] if args.key?(:response)
232
+ end
233
+ end
234
+
235
+ # Contains audio data in the encoding specified in the `RecognitionConfig`.
236
+ # Either `content` or `uri` must be supplied. Supplying both or neither
237
+ # returns google.rpc.Code.INVALID_ARGUMENT. See
238
+ # [content limits](/speech-to-text/quotas#content).
239
+ class RecognitionAudio
240
+ include Google::Apis::Core::Hashable
241
+
242
+ # The audio data bytes encoded as specified in
243
+ # `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
244
+ # pure binary representation, whereas JSON representations use base64.
245
+ # Corresponds to the JSON property `content`
246
+ # NOTE: Values are automatically base64 encoded/decoded in the client library.
247
+ # @return [String]
248
+ attr_accessor :content
249
+
250
+ # URI that points to a file that contains audio data bytes as specified in
251
+ # `RecognitionConfig`. The file must not be compressed (for example, gzip).
252
+ # Currently, only Google Cloud Storage URIs are
253
+ # supported, which must be specified in the following format:
254
+ # `gs://bucket_name/object_name` (other URI formats return
255
+ # google.rpc.Code.INVALID_ARGUMENT). For more information, see
256
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
257
+ # Corresponds to the JSON property `uri`
258
+ # @return [String]
259
+ attr_accessor :uri
260
+
261
+ def initialize(**args)
262
+ update!(**args)
263
+ end
264
+
265
+ # Update properties of this object
266
+ def update!(**args)
267
+ @content = args[:content] if args.key?(:content)
268
+ @uri = args[:uri] if args.key?(:uri)
269
+ end
270
+ end
271
+
272
+ # Provides information to the recognizer that specifies how to process the
273
+ # request.
274
+ class RecognitionConfig
275
+ include Google::Apis::Core::Hashable
276
+
277
+ # *Optional* A list of up to 3 additional
278
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
279
+ # listing possible alternative languages of the supplied audio.
280
+ # See [Language Support](/speech-to-text/docs/languages)
281
+ # for a list of the currently supported language codes.
282
+ # If alternative languages are listed, recognition result will contain
283
+ # recognition in the most likely language detected including the main
284
+ # language_code. The recognition result will include the language tag
285
+ # of the language detected in the audio.
286
+ # Note: This feature is only supported for Voice Command and Voice Search
287
+ # use cases and performance may vary for other use cases (e.g., phone call
288
+ # transcription).
289
+ # Corresponds to the JSON property `alternativeLanguageCodes`
290
+ # @return [Array<String>]
291
+ attr_accessor :alternative_language_codes
292
+
293
+ # *Optional* The number of channels in the input audio data.
294
+ # ONLY set this for MULTI-CHANNEL recognition.
295
+ # Valid values for LINEAR16 and FLAC are `1`-`8`.
296
+ # Valid values for OGG_OPUS are '1'-'254'.
297
+ # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
298
+ # If `0` or omitted, defaults to one channel (mono).
299
+ # Note: We only recognize the first channel by default.
300
+ # To perform independent recognition on each channel set
301
+ # `enable_separate_recognition_per_channel` to 'true'.
302
+ # Corresponds to the JSON property `audioChannelCount`
303
+ # @return [Fixnum]
304
+ attr_accessor :audio_channel_count
305
+
306
+ # *Optional* Config to enable speaker diarization and set additional
307
+ # parameters to make diarization better suited for your application.
308
+ # Note: When this is enabled, we send all the words from the beginning of the
309
+ # audio for the top alternative in every consecutive STREAMING responses.
310
+ # This is done in order to improve our speaker tags as our models learn to
311
+ # identify the speakers in the conversation over time.
312
+ # For non-streaming requests, the diarization results will be provided only
313
+ # in the top alternative of the FINAL SpeechRecognitionResult.
314
+ # Corresponds to the JSON property `diarizationConfig`
315
+ # @return [Google::Apis::SpeechV1p1beta1::SpeakerDiarizationConfig]
316
+ attr_accessor :diarization_config
317
+
318
+ # *Optional*
319
+ # If set, specifies the estimated number of speakers in the conversation.
320
+ # If not set, defaults to '2'.
321
+ # Ignored unless enable_speaker_diarization is set to true."
322
+ # Note: Use diarization_config instead. This field will be DEPRECATED soon.
323
+ # Corresponds to the JSON property `diarizationSpeakerCount`
324
+ # @return [Fixnum]
325
+ attr_accessor :diarization_speaker_count
326
+
327
+ # *Optional* If 'true', adds punctuation to recognition result hypotheses.
328
+ # This feature is only available in select languages. Setting this for
329
+ # requests in other languages has no effect at all.
330
+ # The default 'false' value does not add punctuation to result hypotheses.
331
+ # Note: This is currently offered as an experimental service, complimentary
332
+ # to all users. In the future this may be exclusively available as a
333
+ # premium feature.
334
+ # Corresponds to the JSON property `enableAutomaticPunctuation`
335
+ # @return [Boolean]
336
+ attr_accessor :enable_automatic_punctuation
337
+ alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation
338
+
339
+ # This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
340
+ # to get each channel recognized separately. The recognition result will
341
+ # contain a `channel_tag` field to state which channel that result belongs
342
+ # to. If this is not true, we will only recognize the first channel. The
343
+ # request is billed cumulatively for all channels recognized:
344
+ # `audio_channel_count` multiplied by the length of the audio.
345
+ # Corresponds to the JSON property `enableSeparateRecognitionPerChannel`
346
+ # @return [Boolean]
347
+ attr_accessor :enable_separate_recognition_per_channel
348
+ alias_method :enable_separate_recognition_per_channel?, :enable_separate_recognition_per_channel
349
+
350
+ # *Optional* If 'true', enables speaker detection for each recognized word in
351
+ # the top alternative of the recognition result using a speaker_tag provided
352
+ # in the WordInfo.
353
+ # Note: Use diarization_config instead. This field will be DEPRECATED soon.
354
+ # Corresponds to the JSON property `enableSpeakerDiarization`
355
+ # @return [Boolean]
356
+ attr_accessor :enable_speaker_diarization
357
+ alias_method :enable_speaker_diarization?, :enable_speaker_diarization
358
+
359
+ # *Optional* If `true`, the top result includes a list of words and the
360
+ # confidence for those words. If `false`, no word-level confidence
361
+ # information is returned. The default is `false`.
362
+ # Corresponds to the JSON property `enableWordConfidence`
363
+ # @return [Boolean]
364
+ attr_accessor :enable_word_confidence
365
+ alias_method :enable_word_confidence?, :enable_word_confidence
366
+
367
+ # *Optional* If `true`, the top result includes a list of words and
368
+ # the start and end time offsets (timestamps) for those words. If
369
+ # `false`, no word-level time offset information is returned. The default is
370
+ # `false`.
371
+ # Corresponds to the JSON property `enableWordTimeOffsets`
372
+ # @return [Boolean]
373
+ attr_accessor :enable_word_time_offsets
374
+ alias_method :enable_word_time_offsets?, :enable_word_time_offsets
375
+
376
+ # Encoding of audio data sent in all `RecognitionAudio` messages.
377
+ # This field is optional for `FLAC` and `WAV` audio files and required
378
+ # for all other audio formats. For details, see AudioEncoding.
379
+ # Corresponds to the JSON property `encoding`
380
+ # @return [String]
381
+ attr_accessor :encoding
382
+
383
+ # *Required* The language of the supplied audio as a
384
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
385
+ # Example: "en-US".
386
+ # See [Language Support](/speech-to-text/docs/languages)
387
+ # for a list of the currently supported language codes.
388
+ # Corresponds to the JSON property `languageCode`
389
+ # @return [String]
390
+ attr_accessor :language_code
391
+
392
+ # *Optional* Maximum number of recognition hypotheses to be returned.
393
+ # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
394
+ # within each `SpeechRecognitionResult`.
395
+ # The server may return fewer than `max_alternatives`.
396
+ # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
397
+ # one. If omitted, will return a maximum of one.
398
+ # Corresponds to the JSON property `maxAlternatives`
399
+ # @return [Fixnum]
400
+ attr_accessor :max_alternatives
401
+
402
+ # Description of audio data to be recognized.
403
+ # Corresponds to the JSON property `metadata`
404
+ # @return [Google::Apis::SpeechV1p1beta1::RecognitionMetadata]
405
+ attr_accessor :metadata
406
+
407
+ # *Optional* Which model to select for the given request. Select the model
408
+ # best suited to your domain to get best results. If a model is not
409
+ # explicitly specified, then we auto-select a model based on the parameters
410
+ # in the RecognitionConfig.
411
+ # <table>
412
+ # <tr>
413
+ # <td><b>Model</b></td>
414
+ # <td><b>Description</b></td>
415
+ # </tr>
416
+ # <tr>
417
+ # <td><code>command_and_search</code></td>
418
+ # <td>Best for short queries such as voice commands or voice search.</td>
419
+ # </tr>
420
+ # <tr>
421
+ # <td><code>phone_call</code></td>
422
+ # <td>Best for audio that originated from a phone call (typically
423
+ # recorded at an 8khz sampling rate).</td>
424
+ # </tr>
425
+ # <tr>
426
+ # <td><code>video</code></td>
427
+ # <td>Best for audio that originated from from video or includes multiple
428
+ # speakers. Ideally the audio is recorded at a 16khz or greater
429
+ # sampling rate. This is a premium model that costs more than the
430
+ # standard rate.</td>
431
+ # </tr>
432
+ # <tr>
433
+ # <td><code>default</code></td>
434
+ # <td>Best for audio that is not one of the specific audio models.
435
+ # For example, long-form audio. Ideally the audio is high-fidelity,
436
+ # recorded at a 16khz or greater sampling rate.</td>
437
+ # </tr>
438
+ # </table>
439
+ # Corresponds to the JSON property `model`
440
+ # @return [String]
441
+ attr_accessor :model
442
+
443
+ # *Optional* If set to `true`, the server will attempt to filter out
444
+ # profanities, replacing all but the initial character in each filtered word
445
+ # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
446
+ # won't be filtered out.
447
+ # Corresponds to the JSON property `profanityFilter`
448
+ # @return [Boolean]
449
+ attr_accessor :profanity_filter
450
+ alias_method :profanity_filter?, :profanity_filter
451
+
452
+ # Sample rate in Hertz of the audio data sent in all
453
+ # `RecognitionAudio` messages. Valid values are: 8000-48000.
454
+ # 16000 is optimal. For best results, set the sampling rate of the audio
455
+ # source to 16000 Hz. If that's not possible, use the native sample rate of
456
+ # the audio source (instead of re-sampling).
457
+ # This field is optional for `FLAC` and `WAV` audio files and required
458
+ # for all other audio formats. For details, see AudioEncoding.
459
+ # Corresponds to the JSON property `sampleRateHertz`
460
+ # @return [Fixnum]
461
+ attr_accessor :sample_rate_hertz
462
+
463
+ # *Optional* array of SpeechContext.
464
+ # A means to provide context to assist the speech recognition. For more
465
+ # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
466
+ # Corresponds to the JSON property `speechContexts`
467
+ # @return [Array<Google::Apis::SpeechV1p1beta1::SpeechContext>]
468
+ attr_accessor :speech_contexts
469
+
470
+ # *Optional* Set to true to use an enhanced model for speech recognition.
471
+ # If `use_enhanced` is set to true and the `model` field is not set, then
472
+ # an appropriate enhanced model is chosen if:
473
+ # 1. project is eligible for requesting enhanced models
474
+ # 2. an enhanced model exists for the audio
475
+ # If `use_enhanced` is true and an enhanced version of the specified model
476
+ # does not exist, then the speech is recognized using the standard version
477
+ # of the specified model.
478
+ # Enhanced speech models require that you opt-in to data logging using
479
+ # instructions in the
480
+ # [documentation](/speech-to-text/docs/enable-data-logging). If you set
481
+ # `use_enhanced` to true and you have not enabled audio logging, then you
482
+ # will receive an error.
483
+ # Corresponds to the JSON property `useEnhanced`
484
+ # @return [Boolean]
485
+ attr_accessor :use_enhanced
486
+ alias_method :use_enhanced?, :use_enhanced
487
+
488
+ def initialize(**args)
489
+ update!(**args)
490
+ end
491
+
492
+ # Update properties of this object
493
+ def update!(**args)
494
+ @alternative_language_codes = args[:alternative_language_codes] if args.key?(:alternative_language_codes)
495
+ @audio_channel_count = args[:audio_channel_count] if args.key?(:audio_channel_count)
496
+ @diarization_config = args[:diarization_config] if args.key?(:diarization_config)
497
+ @diarization_speaker_count = args[:diarization_speaker_count] if args.key?(:diarization_speaker_count)
498
+ @enable_automatic_punctuation = args[:enable_automatic_punctuation] if args.key?(:enable_automatic_punctuation)
499
+ @enable_separate_recognition_per_channel = args[:enable_separate_recognition_per_channel] if args.key?(:enable_separate_recognition_per_channel)
500
+ @enable_speaker_diarization = args[:enable_speaker_diarization] if args.key?(:enable_speaker_diarization)
501
+ @enable_word_confidence = args[:enable_word_confidence] if args.key?(:enable_word_confidence)
502
+ @enable_word_time_offsets = args[:enable_word_time_offsets] if args.key?(:enable_word_time_offsets)
503
+ @encoding = args[:encoding] if args.key?(:encoding)
504
+ @language_code = args[:language_code] if args.key?(:language_code)
505
+ @max_alternatives = args[:max_alternatives] if args.key?(:max_alternatives)
506
+ @metadata = args[:metadata] if args.key?(:metadata)
507
+ @model = args[:model] if args.key?(:model)
508
+ @profanity_filter = args[:profanity_filter] if args.key?(:profanity_filter)
509
+ @sample_rate_hertz = args[:sample_rate_hertz] if args.key?(:sample_rate_hertz)
510
+ @speech_contexts = args[:speech_contexts] if args.key?(:speech_contexts)
511
+ @use_enhanced = args[:use_enhanced] if args.key?(:use_enhanced)
512
+ end
513
+ end
514
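
For orientation, here is a minimal sketch of assembling one of these `RecognitionConfig` objects; `initialize` forwards keyword arguments to `update!`, so the generated accessors above can be set at construction time. Every field value below is a placeholder, not a recommendation.

```ruby
require 'google/apis/speech_v1p1beta1'

# A minimal, illustrative config: all values here are placeholders.
config = Google::Apis::SpeechV1p1beta1::RecognitionConfig.new(
  encoding:          'LINEAR16',
  sample_rate_hertz: 8000,         # native rate of the source; 16000 Hz is optimal when available
  language_code:     'en-US',
  model:             'phone_call', # see the model table above
  profanity_filter:  true,
  use_enhanced:      true          # requires the data-logging opt-in described above
)
```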
+
515
+ # Description of audio data to be recognized.
516
+ class RecognitionMetadata
517
+ include Google::Apis::Core::Hashable
518
+
519
+ # Description of the content. E.g. "Recordings of federal supreme court
520
+ # hearings from 2012".
521
+ # Corresponds to the JSON property `audioTopic`
522
+ # @return [String]
523
+ attr_accessor :audio_topic
524
+
525
+ # The industry vertical to which this speech recognition request most
526
+ # closely applies. This is most indicative of the topics contained
527
+ # in the audio. Use the 6-digit NAICS code to identify the industry
528
+ # vertical - see https://www.naics.com/search/.
529
+ # Corresponds to the JSON property `industryNaicsCodeOfAudio`
530
+ # @return [Fixnum]
531
+ attr_accessor :industry_naics_code_of_audio
532
+
533
+ # The use case most closely describing the audio content to be recognized.
534
+ # Corresponds to the JSON property `interactionType`
535
+ # @return [String]
536
+ attr_accessor :interaction_type
537
+
538
+ # The audio type that most closely describes the audio being recognized.
539
+ # Corresponds to the JSON property `microphoneDistance`
540
+ # @return [String]
541
+ attr_accessor :microphone_distance
542
+
543
+ # Obfuscated (privacy-protected) ID of the user, to identify the number of
544
+ # unique users using the service.
545
+ # Corresponds to the JSON property `obfuscatedId`
546
+ # @return [Fixnum]
547
+ attr_accessor :obfuscated_id
548
+
549
+ # The original media the speech was recorded on.
550
+ # Corresponds to the JSON property `originalMediaType`
551
+ # @return [String]
552
+ attr_accessor :original_media_type
553
+
554
+ # Mime type of the original audio file. For example `audio/m4a`,
555
+ # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
556
+ # A list of possible audio mime types is maintained at
557
+ # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
558
+ # Corresponds to the JSON property `originalMimeType`
559
+ # @return [String]
560
+ attr_accessor :original_mime_type
561
+
562
+ # The device used to make the recording. Examples: 'Nexus 5X' or
563
+ # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
564
+ # 'Cardioid Microphone'.
565
+ # Corresponds to the JSON property `recordingDeviceName`
566
+ # @return [String]
567
+ attr_accessor :recording_device_name
568
+
569
+ # The type of device the speech was recorded with.
570
+ # Corresponds to the JSON property `recordingDeviceType`
571
+ # @return [String]
572
+ attr_accessor :recording_device_type
573
+
574
+ def initialize(**args)
575
+ update!(**args)
576
+ end
577
+
578
+ # Update properties of this object
579
+ def update!(**args)
580
+ @audio_topic = args[:audio_topic] if args.key?(:audio_topic)
581
+ @industry_naics_code_of_audio = args[:industry_naics_code_of_audio] if args.key?(:industry_naics_code_of_audio)
582
+ @interaction_type = args[:interaction_type] if args.key?(:interaction_type)
583
+ @microphone_distance = args[:microphone_distance] if args.key?(:microphone_distance)
584
+ @obfuscated_id = args[:obfuscated_id] if args.key?(:obfuscated_id)
585
+ @original_media_type = args[:original_media_type] if args.key?(:original_media_type)
586
+ @original_mime_type = args[:original_mime_type] if args.key?(:original_mime_type)
587
+ @recording_device_name = args[:recording_device_name] if args.key?(:recording_device_name)
588
+ @recording_device_type = args[:recording_device_type] if args.key?(:recording_device_type)
589
+ end
590
+ end
591
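
A `RecognitionMetadata` sketch with purely illustrative values, attached through the `metadata` field documented above; the enum-style strings are examples of the documented categories, not requirements.

```ruby
require 'google/apis/speech_v1p1beta1'

# Illustrative metadata describing the audio being recognized.
metadata = Google::Apis::SpeechV1p1beta1::RecognitionMetadata.new(
  interaction_type:      'PHONE_CALL',
  microphone_distance:   'NEARFIELD',
  original_media_type:   'AUDIO',
  recording_device_type: 'PHONE_LINE',
  recording_device_name: 'POTS',
  original_mime_type:    'audio/x-alaw-basic',
  audio_topic:           'customer support calls'
)

# The metadata rides along on the RecognitionConfig via its `metadata` field.
config = Google::Apis::SpeechV1p1beta1::RecognitionConfig.new(
  language_code: 'en-US',
  metadata:      metadata
)
```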
+
592
+ # The top-level message sent by the client for the `Recognize` method.
593
+ class RecognizeRequest
594
+ include Google::Apis::Core::Hashable
595
+
596
+ # Contains audio data in the encoding specified in the `RecognitionConfig`.
597
+ # Either `content` or `uri` must be supplied. Supplying both or neither
598
+ # returns google.rpc.Code.INVALID_ARGUMENT. See
599
+ # [content limits](/speech-to-text/quotas#content).
600
+ # Corresponds to the JSON property `audio`
601
+ # @return [Google::Apis::SpeechV1p1beta1::RecognitionAudio]
602
+ attr_accessor :audio
603
+
604
+ # Provides information to the recognizer that specifies how to process the
605
+ # request.
606
+ # Corresponds to the JSON property `config`
607
+ # @return [Google::Apis::SpeechV1p1beta1::RecognitionConfig]
608
+ attr_accessor :config
609
+
610
+ def initialize(**args)
611
+ update!(**args)
612
+ end
613
+
614
+ # Update properties of this object
615
+ def update!(**args)
616
+ @audio = args[:audio] if args.key?(:audio)
617
+ @config = args[:config] if args.key?(:config)
618
+ end
619
+ end
620
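
Assembling the top-level request might look like the sketch below. The Cloud Storage URI is a placeholder, and `RecognitionAudio` (defined elsewhere in this file) is assumed to expose the `content`/`uri` accessors that the comment above describes.

```ruby
require 'google/apis/speech_v1p1beta1'

speech = Google::Apis::SpeechV1p1beta1

# Exactly one of `content` or `uri` may be set on the audio; the URI is a placeholder.
audio = speech::RecognitionAudio.new(uri: 'gs://example-bucket/call.wav')

request = speech::RecognizeRequest.new(
  config: speech::RecognitionConfig.new(language_code: 'en-US',
                                        sample_rate_hertz: 16_000,
                                        encoding: 'LINEAR16'),
  audio:  audio
)
```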
+
621
+ # The only message returned to the client by the `Recognize` method. It
622
+ # contains the result as zero or more sequential `SpeechRecognitionResult`
623
+ # messages.
624
+ class RecognizeResponse
625
+ include Google::Apis::Core::Hashable
626
+
627
+ # Output only. Sequential list of transcription results corresponding to
628
+ # sequential portions of audio.
629
+ # Corresponds to the JSON property `results`
630
+ # @return [Array<Google::Apis::SpeechV1p1beta1::SpeechRecognitionResult>]
631
+ attr_accessor :results
632
+
633
+ def initialize(**args)
634
+ update!(**args)
635
+ end
636
+
637
+ # Update properties of this object
638
+ def update!(**args)
639
+ @results = args[:results] if args.key?(:results)
640
+ end
641
+ end
642
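
Reading the results back could look like this sketch; `response` is assumed to be a `RecognizeResponse` obtained from the generated service class for this API, which is not shown in this excerpt.

```ruby
require 'google/apis/speech_v1p1beta1'

# Print the top hypothesis for each sequential portion of the audio.
def print_transcripts(response)
  response.results.to_a.each do |result|
    best = result.alternatives.to_a.first
    next unless best
    puts "channel #{result.channel_tag}: #{best.transcript} (confidence #{best.confidence})"
  end
end
```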
+
643
+ #
644
+ class SpeakerDiarizationConfig
645
+ include Google::Apis::Core::Hashable
646
+
647
+ # *Optional* If 'true', enables speaker detection for each recognized word in
648
+ # the top alternative of the recognition result using a speaker_tag provided
649
+ # in the WordInfo.
650
+ # Corresponds to the JSON property `enableSpeakerDiarization`
651
+ # @return [Boolean]
652
+ attr_accessor :enable_speaker_diarization
653
+ alias_method :enable_speaker_diarization?, :enable_speaker_diarization
654
+
655
+ # *Optional* Only used if diarization_speaker_count is not set.
656
+ # Maximum number of speakers in the conversation. This range gives you more
657
+ # flexibility by allowing the system to automatically determine the correct
658
+ # number of speakers. If not set, the default value is 6.
659
+ # Corresponds to the JSON property `maxSpeakerCount`
660
+ # @return [Fixnum]
661
+ attr_accessor :max_speaker_count
662
+
663
+ # *Optional* Only used if diarization_speaker_count is not set.
664
+ # Minimum number of speakers in the conversation. This range gives you more
665
+ # flexibility by allowing the system to automatically determine the correct
666
+ # number of speakers. If not set, the default value is 2.
667
+ # Corresponds to the JSON property `minSpeakerCount`
668
+ # @return [Fixnum]
669
+ attr_accessor :min_speaker_count
670
+
671
+ def initialize(**args)
672
+ update!(**args)
673
+ end
674
+
675
+ # Update properties of this object
676
+ def update!(**args)
677
+ @enable_speaker_diarization = args[:enable_speaker_diarization] if args.key?(:enable_speaker_diarization)
678
+ @max_speaker_count = args[:max_speaker_count] if args.key?(:max_speaker_count)
679
+ @min_speaker_count = args[:min_speaker_count] if args.key?(:min_speaker_count)
680
+ end
681
+ end
682
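
A short sketch of requesting diarization through this config; the speaker bounds are illustrative, and the object is attached to a `RecognitionConfig` via the `diarization_config` field referenced above.

```ruby
require 'google/apis/speech_v1p1beta1'

# Let the service settle on a speaker count within an illustrative range.
diarization = Google::Apis::SpeechV1p1beta1::SpeakerDiarizationConfig.new(
  enable_speaker_diarization: true,
  min_speaker_count: 2,  # defaults to 2 when unset
  max_speaker_count: 4   # defaults to 6 when unset
)

config = Google::Apis::SpeechV1p1beta1::RecognitionConfig.new(
  language_code:      'en-US',
  diarization_config: diarization
)
```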
+
683
+ # Provides "hints" to the speech recognizer to favor specific words and phrases
684
+ # in the results.
685
+ class SpeechContext
686
+ include Google::Apis::Core::Hashable
687
+
688
+ # *Optional* A list of strings containing words and phrases "hints" so that
689
+ # the speech recognition is more likely to recognize them. This can be used
690
+ # to improve the accuracy for specific words and phrases, for example, if
691
+ # specific commands are typically spoken by the user. This can also be used
692
+ # to add additional words to the vocabulary of the recognizer. See
693
+ # [usage limits](/speech-to-text/quotas#content).
694
+ # Corresponds to the JSON property `phrases`
695
+ # @return [Array<String>]
696
+ attr_accessor :phrases
697
+
698
+ def initialize(**args)
699
+ update!(**args)
700
+ end
701
+
702
+ # Update properties of this object
703
+ def update!(**args)
704
+ @phrases = args[:phrases] if args.key?(:phrases)
705
+ end
706
+ end
707
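
A small sketch of supplying phrase hints; the phrases are made up, and the contexts are passed as an array on the config's `speech_contexts` field.

```ruby
require 'google/apis/speech_v1p1beta1'

# Made-up phrase hints biasing recognition toward domain vocabulary.
hints = Google::Apis::SpeechV1p1beta1::SpeechContext.new(
  phrases: ['add to basket', 'proceed to checkout', 'SKU 4711']
)

config = Google::Apis::SpeechV1p1beta1::RecognitionConfig.new(
  language_code:   'en-US',
  speech_contexts: [hints]
)
```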
+
708
+ # Alternative hypotheses (a.k.a. n-best list).
709
+ class SpeechRecognitionAlternative
710
+ include Google::Apis::Core::Hashable
711
+
712
+ # Output only. The confidence estimate between 0.0 and 1.0. A higher number
713
+ # indicates an estimated greater likelihood that the recognized words are
714
+ # correct. This field is set only for the top alternative of a non-streaming
715
+ # result or of a streaming result where `is_final=true`.
716
+ # This field is not guaranteed to be accurate and users should not rely on it
717
+ # to be always provided.
718
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
719
+ # Corresponds to the JSON property `confidence`
720
+ # @return [Float]
721
+ attr_accessor :confidence
722
+
723
+ # Output only. Transcript text representing the words that the user spoke.
724
+ # Corresponds to the JSON property `transcript`
725
+ # @return [String]
726
+ attr_accessor :transcript
727
+
728
+ # Output only. A list of word-specific information for each recognized word.
729
+ # Note: When `enable_speaker_diarization` is true, you will see all the words
730
+ # from the beginning of the audio.
731
+ # Corresponds to the JSON property `words`
732
+ # @return [Array<Google::Apis::SpeechV1p1beta1::WordInfo>]
733
+ attr_accessor :words
734
+
735
+ def initialize(**args)
736
+ update!(**args)
737
+ end
738
+
739
+ # Update properties of this object
740
+ def update!(**args)
741
+ @confidence = args[:confidence] if args.key?(:confidence)
742
+ @transcript = args[:transcript] if args.key?(:transcript)
743
+ @words = args[:words] if args.key?(:words)
744
+ end
745
+ end
746
+
747
+ # A speech recognition result corresponding to a portion of the audio.
748
+ class SpeechRecognitionResult
749
+ include Google::Apis::Core::Hashable
750
+
751
+ # Output only. May contain one or more recognition hypotheses (up to the
752
+ # maximum specified in `max_alternatives`).
753
+ # These alternatives are ordered in terms of accuracy, with the top (first)
754
+ # alternative being the most probable, as ranked by the recognizer.
755
+ # Corresponds to the JSON property `alternatives`
756
+ # @return [Array<Google::Apis::SpeechV1p1beta1::SpeechRecognitionAlternative>]
757
+ attr_accessor :alternatives
758
+
759
+ # For multi-channel audio, this is the channel number corresponding to the
760
+ # recognized result for the audio from that channel.
761
+ # For audio_channel_count = N, its output values can range from '1' to 'N'.
762
+ # Corresponds to the JSON property `channelTag`
763
+ # @return [Fixnum]
764
+ attr_accessor :channel_tag
765
+
766
+ # Output only. The
767
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
768
+ # language in this result. This language code was detected to have the most
769
+ # likelihood of being spoken in the audio.
770
+ # Corresponds to the JSON property `languageCode`
771
+ # @return [String]
772
+ attr_accessor :language_code
773
+
774
+ def initialize(**args)
775
+ update!(**args)
776
+ end
777
+
778
+ # Update properties of this object
779
+ def update!(**args)
780
+ @alternatives = args[:alternatives] if args.key?(:alternatives)
781
+ @channel_tag = args[:channel_tag] if args.key?(:channel_tag)
782
+ @language_code = args[:language_code] if args.key?(:language_code)
783
+ end
784
+ end
785
+
786
+ # The `Status` type defines a logical error model that is suitable for different
787
+ # programming environments, including REST APIs and RPC APIs. It is used by
788
+ # [gRPC](https://github.com/grpc). The error model is designed to be:
789
+ # - Simple to use and understand for most users
790
+ # - Flexible enough to meet unexpected needs
791
+ # # Overview
792
+ # The `Status` message contains three pieces of data: error code, error message,
793
+ # and error details. The error code should be an enum value of
794
+ # google.rpc.Code, but it may accept additional error codes if needed. The
795
+ # error message should be a developer-facing English message that helps
796
+ # developers *understand* and *resolve* the error. If a localized user-facing
797
+ # error message is needed, put the localized message in the error details or
798
+ # localize it in the client. The optional error details may contain arbitrary
799
+ # information about the error. There is a predefined set of error detail types
800
+ # in the package `google.rpc` that can be used for common error conditions.
801
+ # # Language mapping
802
+ # The `Status` message is the logical representation of the error model, but it
803
+ # is not necessarily the actual wire format. When the `Status` message is
804
+ # exposed in different client libraries and different wire protocols, it can be
805
+ # mapped differently. For example, it will likely be mapped to some exceptions
806
+ # in Java, but more likely mapped to some error codes in C.
807
+ # # Other uses
808
+ # The error model and the `Status` message can be used in a variety of
809
+ # environments, either with or without APIs, to provide a
810
+ # consistent developer experience across different environments.
811
+ # Example uses of this error model include:
812
+ # - Partial errors. If a service needs to return partial errors to the client,
813
+ # it may embed the `Status` in the normal response to indicate the partial
814
+ # errors.
815
+ # - Workflow errors. A typical workflow has multiple steps. Each step may
816
+ # have a `Status` message for error reporting.
817
+ # - Batch operations. If a client uses batch request and batch response, the
818
+ # `Status` message should be used directly inside batch response, one for
819
+ # each error sub-response.
820
+ # - Asynchronous operations. If an API call embeds asynchronous operation
821
+ # results in its response, the status of those operations should be
822
+ # represented directly using the `Status` message.
823
+ # - Logging. If some API errors are stored in logs, the message `Status` could
824
+ # be used directly after any stripping needed for security/privacy reasons.
825
+ class Status
826
+ include Google::Apis::Core::Hashable
827
+
828
+ # The status code, which should be an enum value of google.rpc.Code.
829
+ # Corresponds to the JSON property `code`
830
+ # @return [Fixnum]
831
+ attr_accessor :code
832
+
833
+ # A list of messages that carry the error details. There is a common set of
834
+ # message types for APIs to use.
835
+ # Corresponds to the JSON property `details`
836
+ # @return [Array<Hash<String,Object>>]
837
+ attr_accessor :details
838
+
839
+ # A developer-facing error message, which should be in English. Any
840
+ # user-facing error message should be localized and sent in the
841
+ # google.rpc.Status.details field, or localized by the client.
842
+ # Corresponds to the JSON property `message`
843
+ # @return [String]
844
+ attr_accessor :message
845
+
846
+ def initialize(**args)
847
+ update!(**args)
848
+ end
849
+
850
+ # Update properties of this object
851
+ def update!(**args)
852
+ @code = args[:code] if args.key?(:code)
853
+ @details = args[:details] if args.key?(:details)
854
+ @message = args[:message] if args.key?(:message)
855
+ end
856
+ end
857
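
A hypothetical helper for surfacing such a `Status`, for example the `error` of a failed long-running operation (that wrapper type is defined elsewhere in this file); the method name is made up.

```ruby
# Log a google.rpc-style Status carried by the generated Status class above.
def report_status(status)
  return if status.nil?
  warn "RPC failed with code #{status.code}: #{status.message}"
  Array(status.details).each { |detail| warn "  detail: #{detail.inspect}" }
end
```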
+
858
+ # Word-specific information for recognized words.
859
+ class WordInfo
860
+ include Google::Apis::Core::Hashable
861
+
862
+ # Output only. The confidence estimate between 0.0 and 1.0. A higher number
863
+ # indicates an estimated greater likelihood that the recognized words are
864
+ # correct. This field is set only for the top alternative of a non-streaming
865
+ # result or of a streaming result where `is_final=true`.
866
+ # This field is not guaranteed to be accurate and users should not rely on it
867
+ # to be always provided.
868
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
869
+ # Corresponds to the JSON property `confidence`
870
+ # @return [Float]
871
+ attr_accessor :confidence
872
+
873
+ # Output only. Time offset relative to the beginning of the audio,
874
+ # and corresponding to the end of the spoken word.
875
+ # This field is only set if `enable_word_time_offsets=true` and only
876
+ # in the top hypothesis.
877
+ # This is an experimental feature and the accuracy of the time offset can
878
+ # vary.
879
+ # Corresponds to the JSON property `endTime`
880
+ # @return [String]
881
+ attr_accessor :end_time
882
+
883
+ # Output only. A distinct integer value is assigned for every speaker within
884
+ # the audio. This field specifies which one of those speakers was detected to
885
+ # have spoken this word. Value ranges from '1' to diarization_speaker_count.
886
+ # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
887
+ # top alternative.
888
+ # Corresponds to the JSON property `speakerTag`
889
+ # @return [Fixnum]
890
+ attr_accessor :speaker_tag
891
+
892
+ # Output only. Time offset relative to the beginning of the audio,
893
+ # and corresponding to the start of the spoken word.
894
+ # This field is only set if `enable_word_time_offsets=true` and only
895
+ # in the top hypothesis.
896
+ # This is an experimental feature and the accuracy of the time offset can
897
+ # vary.
898
+ # Corresponds to the JSON property `startTime`
899
+ # @return [String]
900
+ attr_accessor :start_time
901
+
902
+ # Output only. The word corresponding to this set of information.
903
+ # Corresponds to the JSON property `word`
904
+ # @return [String]
905
+ attr_accessor :word
906
+
907
+ def initialize(**args)
908
+ update!(**args)
909
+ end
910
+
911
+ # Update properties of this object
912
+ def update!(**args)
913
+ @confidence = args[:confidence] if args.key?(:confidence)
914
+ @end_time = args[:end_time] if args.key?(:end_time)
915
+ @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
916
+ @start_time = args[:start_time] if args.key?(:start_time)
917
+ @word = args[:word] if args.key?(:word)
918
+ end
919
+ end
920
+ end
921
+ end
922
+ end
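
Finally, a sketch of printing the word-level detail carried by `WordInfo`; the helper name is made up, and data is only present when the corresponding `enable_word_time_offsets` and diarization options were set on the config. `response` is a `RecognizeResponse` as in the earlier sketch.

```ruby
# Print word, timing offsets (duration strings such as "1.300s"), and speaker tag.
def print_word_timings(response)
  response.results.to_a.each do |result|
    best = result.alternatives.to_a.first
    next unless best
    best.words.to_a.each do |info|
      puts format('%-15s %s - %s  speaker %s',
                  info.word, info.start_time, info.end_time, info.speaker_tag)
    end
  end
end
```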