gcloud 0.6.3 → 0.7.0

Files changed (175)
  1. checksums.yaml +8 -8
  2. data/AUTHENTICATION.md +13 -9
  3. data/CHANGELOG.md +8 -3
  4. data/OVERVIEW.md +46 -8
  5. data/lib/gcloud.rb +123 -117
  6. data/lib/gcloud/backoff.rb +43 -15
  7. data/lib/gcloud/bigquery.rb +211 -195
  8. data/lib/gcloud/bigquery/connection.rb +9 -9
  9. data/lib/gcloud/bigquery/copy_job.rb +15 -16
  10. data/lib/gcloud/bigquery/credentials.rb +3 -3
  11. data/lib/gcloud/bigquery/data.rb +12 -11
  12. data/lib/gcloud/bigquery/dataset.rb +162 -216
  13. data/lib/gcloud/bigquery/dataset/access.rb +59 -43
  14. data/lib/gcloud/bigquery/dataset/list.rb +3 -3
  15. data/lib/gcloud/bigquery/errors.rb +9 -5
  16. data/lib/gcloud/bigquery/extract_job.rb +18 -18
  17. data/lib/gcloud/bigquery/insert_response.rb +7 -4
  18. data/lib/gcloud/bigquery/job.rb +48 -44
  19. data/lib/gcloud/bigquery/job/list.rb +3 -3
  20. data/lib/gcloud/bigquery/load_job.rb +24 -25
  21. data/lib/gcloud/bigquery/project.rb +145 -204
  22. data/lib/gcloud/bigquery/query_data.rb +10 -9
  23. data/lib/gcloud/bigquery/query_job.rb +23 -32
  24. data/lib/gcloud/bigquery/table.rb +238 -280
  25. data/lib/gcloud/bigquery/table/list.rb +3 -3
  26. data/lib/gcloud/bigquery/table/schema.rb +79 -87
  27. data/lib/gcloud/bigquery/view.rb +69 -82
  28. data/lib/gcloud/credentials.rb +3 -9
  29. data/lib/gcloud/datastore.rb +194 -170
  30. data/lib/gcloud/datastore/connection.rb +12 -8
  31. data/lib/gcloud/datastore/credentials.rb +6 -4
  32. data/lib/gcloud/datastore/dataset.rb +74 -141
  33. data/lib/gcloud/datastore/dataset/lookup_results.rb +6 -4
  34. data/lib/gcloud/datastore/dataset/query_results.rb +6 -4
  35. data/lib/gcloud/datastore/entity.rb +81 -76
  36. data/lib/gcloud/datastore/errors.rb +10 -8
  37. data/lib/gcloud/datastore/key.rb +41 -77
  38. data/lib/gcloud/datastore/properties.rb +3 -3
  39. data/lib/gcloud/datastore/proto.rb +7 -4
  40. data/lib/gcloud/datastore/query.rb +26 -3
  41. data/lib/gcloud/datastore/transaction.rb +12 -8
  42. data/lib/gcloud/dns.rb +180 -152
  43. data/lib/gcloud/dns/change.rb +16 -16
  44. data/lib/gcloud/dns/change/list.rb +3 -3
  45. data/lib/gcloud/dns/connection.rb +9 -10
  46. data/lib/gcloud/dns/credentials.rb +3 -3
  47. data/lib/gcloud/dns/errors.rb +9 -5
  48. data/lib/gcloud/dns/importer.rb +17 -23
  49. data/lib/gcloud/dns/project.rb +42 -64
  50. data/lib/gcloud/dns/record.rb +58 -46
  51. data/lib/gcloud/dns/record/list.rb +6 -7
  52. data/lib/gcloud/dns/zone.rb +198 -289
  53. data/lib/gcloud/dns/zone/list.rb +3 -3
  54. data/lib/gcloud/dns/zone/transaction.rb +56 -72
  55. data/lib/gcloud/errors.rb +174 -3
  56. data/lib/gcloud/gce.rb +3 -4
  57. data/lib/gcloud/grpc_utils.rb +76 -0
  58. data/lib/gcloud/logging.rb +308 -0
  59. data/lib/gcloud/logging/credentials.rb +29 -0
  60. data/lib/gcloud/logging/entry.rb +303 -0
  61. data/lib/gcloud/logging/entry/http_request.rb +141 -0
  62. data/lib/gcloud/logging/entry/list.rb +111 -0
  63. data/lib/gcloud/logging/entry/operation.rb +90 -0
  64. data/lib/gcloud/logging/logger.rb +307 -0
  65. data/lib/gcloud/logging/metric.rb +175 -0
  66. data/lib/gcloud/logging/metric/list.rb +98 -0
  67. data/lib/gcloud/logging/project.rb +650 -0
  68. data/lib/gcloud/logging/resource.rb +95 -0
  69. data/lib/gcloud/logging/resource_descriptor.rb +140 -0
  70. data/lib/gcloud/logging/resource_descriptor/list.rb +78 -0
  71. data/lib/gcloud/logging/service.rb +258 -0
  72. data/lib/gcloud/logging/sink.rb +233 -0
  73. data/lib/gcloud/logging/sink/list.rb +97 -0
  74. data/lib/gcloud/pubsub.rb +241 -199
  75. data/lib/gcloud/pubsub/credentials.rb +3 -3
  76. data/lib/gcloud/pubsub/message.rb +26 -20
  77. data/lib/gcloud/pubsub/project.rb +166 -233
  78. data/lib/gcloud/pubsub/received_message.rb +28 -38
  79. data/lib/gcloud/pubsub/service.rb +323 -0
  80. data/lib/gcloud/pubsub/subscription.rb +172 -242
  81. data/lib/gcloud/pubsub/subscription/list.rb +11 -9
  82. data/lib/gcloud/pubsub/topic.rb +152 -271
  83. data/lib/gcloud/pubsub/topic/batch.rb +66 -0
  84. data/lib/gcloud/pubsub/topic/list.rb +9 -7
  85. data/lib/gcloud/resource_manager.rb +158 -138
  86. data/lib/gcloud/resource_manager/connection.rb +6 -5
  87. data/lib/gcloud/resource_manager/credentials.rb +3 -3
  88. data/lib/gcloud/resource_manager/errors.rb +9 -5
  89. data/lib/gcloud/resource_manager/manager.rb +54 -86
  90. data/lib/gcloud/resource_manager/project.rb +69 -88
  91. data/lib/gcloud/resource_manager/project/list.rb +4 -5
  92. data/lib/gcloud/resource_manager/project/updater.rb +12 -14
  93. data/lib/gcloud/search.rb +158 -135
  94. data/lib/gcloud/search/api_client.rb +7 -7
  95. data/lib/gcloud/search/connection.rb +8 -8
  96. data/lib/gcloud/search/credentials.rb +3 -3
  97. data/lib/gcloud/search/document.rb +64 -87
  98. data/lib/gcloud/search/document/list.rb +5 -5
  99. data/lib/gcloud/search/errors.rb +9 -5
  100. data/lib/gcloud/search/field_value.rb +32 -38
  101. data/lib/gcloud/search/field_values.rb +50 -80
  102. data/lib/gcloud/search/fields.rb +44 -65
  103. data/lib/gcloud/search/index.rb +163 -204
  104. data/lib/gcloud/search/index/list.rb +5 -5
  105. data/lib/gcloud/search/project.rb +31 -47
  106. data/lib/gcloud/search/result.rb +27 -31
  107. data/lib/gcloud/search/result/list.rb +6 -6
  108. data/lib/gcloud/storage.rb +224 -190
  109. data/lib/gcloud/storage/bucket.rb +202 -227
  110. data/lib/gcloud/storage/bucket/acl.rb +83 -170
  111. data/lib/gcloud/storage/bucket/cors.rb +31 -34
  112. data/lib/gcloud/storage/bucket/list.rb +3 -3
  113. data/lib/gcloud/storage/connection.rb +11 -7
  114. data/lib/gcloud/storage/credentials.rb +3 -3
  115. data/lib/gcloud/storage/errors.rb +11 -8
  116. data/lib/gcloud/storage/file.rb +129 -171
  117. data/lib/gcloud/storage/file/acl.rb +51 -99
  118. data/lib/gcloud/storage/file/list.rb +3 -3
  119. data/lib/gcloud/storage/file/verifier.rb +3 -2
  120. data/lib/gcloud/storage/project.rb +111 -132
  121. data/lib/gcloud/upload.rb +4 -7
  122. data/lib/gcloud/version.rb +2 -4
  123. data/lib/google/api/annotations.rb +14 -0
  124. data/lib/google/api/http.rb +30 -0
  125. data/lib/google/api/label.rb +24 -0
  126. data/lib/google/api/monitored_resource.rb +25 -0
  127. data/lib/google/datastore/v1beta3/datastore.rb +115 -0
  128. data/lib/google/datastore/v1beta3/datastore_services.rb +33 -0
  129. data/lib/google/datastore/v1beta3/entity.rb +63 -0
  130. data/lib/google/datastore/v1beta3/query.rb +128 -0
  131. data/lib/google/devtools/cloudtrace/v1/trace.rb +78 -0
  132. data/lib/google/devtools/cloudtrace/v1/trace_services.rb +32 -0
  133. data/lib/google/example/library/v1/library.rb +91 -0
  134. data/lib/google/example/library/v1/library_services.rb +40 -0
  135. data/lib/google/iam/v1/iam_policy.rb +33 -0
  136. data/lib/google/iam/v1/iam_policy_services.rb +30 -0
  137. data/lib/google/iam/v1/policy.rb +25 -0
  138. data/lib/google/logging/type/http_request.rb +28 -0
  139. data/lib/google/logging/type/log_severity.rb +27 -0
  140. data/lib/google/logging/v2/log_entry.rb +44 -0
  141. data/lib/google/logging/v2/logging.rb +56 -0
  142. data/lib/google/logging/v2/logging_config.rb +59 -0
  143. data/lib/google/logging/v2/logging_config_services.rb +32 -0
  144. data/lib/google/logging/v2/logging_metrics.rb +51 -0
  145. data/lib/google/logging/v2/logging_metrics_services.rb +32 -0
  146. data/lib/google/logging/v2/logging_services.rb +31 -0
  147. data/lib/google/longrunning/operations.rb +50 -0
  148. data/lib/google/longrunning/operations_services.rb +29 -0
  149. data/lib/google/protobuf/any.rb +17 -0
  150. data/lib/google/protobuf/api.rb +31 -0
  151. data/lib/google/protobuf/descriptor.rb +0 -0
  152. data/lib/google/protobuf/duration.rb +17 -0
  153. data/lib/google/protobuf/empty.rb +15 -0
  154. data/lib/google/protobuf/field_mask.rb +16 -0
  155. data/lib/google/protobuf/source_context.rb +16 -0
  156. data/lib/google/protobuf/struct.rb +35 -0
  157. data/lib/google/protobuf/timestamp.rb +17 -0
  158. data/lib/google/protobuf/type.rb +79 -0
  159. data/lib/google/protobuf/wrappers.rb +48 -0
  160. data/lib/google/pubsub/v1/pubsub.rb +129 -0
  161. data/lib/google/pubsub/v1/pubsub_services.rb +56 -0
  162. data/lib/google/pubsub/v1beta2/pubsub.rb +126 -0
  163. data/lib/google/pubsub/v1beta2/pubsub_services.rb +56 -0
  164. data/lib/google/rpc/code.rb +32 -0
  165. data/lib/google/rpc/error_details.rb +61 -0
  166. data/lib/google/rpc/status.rb +19 -0
  167. data/lib/google/type/color.rb +20 -0
  168. data/lib/google/type/date.rb +18 -0
  169. data/lib/google/type/dayofweek.rb +23 -0
  170. data/lib/google/type/latlng.rb +17 -0
  171. data/lib/google/type/money.rb +18 -0
  172. data/lib/google/type/timeofday.rb +19 -0
  173. metadata +101 -4
  174. data/lib/gcloud/pubsub/connection.rb +0 -295
  175. data/lib/gcloud/pubsub/errors.rb +0 -93
data/lib/gcloud/backoff.rb

@@ -1,4 +1,3 @@
-#--
 # Copyright 2014 Google Inc. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,8 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#--
-# Google Cloud Backoff
+
 module Gcloud
   ##
   # Backoff allows users to control how Google API calls are retried.
@@ -25,9 +23,11 @@ module Gcloud
   #   retry will be delayed one second, the second retry will be delayed
   #   two seconds, and so on.
   #
+  # @example
   #   require "gcloud/backoff"
   #
   #   Gcloud::Backoff.retries = 5 # Set a maximum of five retries per call
+  #
   class Backoff
     class << self
       ##
@@ -36,6 +36,12 @@ module Gcloud
       # The default value is 3.
       attr_accessor :retries
 
+      ##
+      # The GRPC Status Codes that should be retried.
+      #
+      # The default values are 14.
+      attr_accessor :grpc_codes
+
       ##
       # The HTTP Status Codes that should be retried.
       #
@@ -59,52 +65,74 @@ module Gcloud
     end
     # Set the default values
     self.retries = 3
+    self.grpc_codes = [14]
     self.http_codes = [500, 503]
     self.reasons = %w(rateLimitExceeded userRateLimitExceeded)
     self.backoff = ->(retries) { sleep retries.to_i }
 
     ##
+    # @private
     # Creates a new Backoff object to catch common errors when calling
     # the Google API and handle the error by retrying the call.
     #
-    #   Gcloud::Backoff.new(options).execute do
+    #   Gcloud::Backoff.new(options).execute_gapi do
     #     client.execute api_method: service.things.insert,
     #                    parameters: { thing: @thing },
     #                    body_object: { name: thing_name }
     #   end
-    def initialize options = {} #:nodoc:
-      @max_retries = (options[:retries] || Backoff.retries).to_i
-      @http_codes = (options[:http_codes] || Backoff.http_codes).to_a
-      @reasons = (options[:reasons] || Backoff.reasons).to_a
-      @backoff = options[:backoff] || Backoff.backoff
+    def initialize options = {}
+      @retries = (options[:retries] || Backoff.retries).to_i
+      @grpc_codes = (options[:grpc_codes] || Backoff.grpc_codes).to_a
+      @http_codes = (options[:http_codes] || Backoff.http_codes).to_a
+      @reasons = (options[:reasons] || Backoff.reasons).to_a
+      @backoff = options[:backoff] || Backoff.backoff
     end
 
-    def execute #:nodoc:
+    # @private
+    def execute_gapi
      current_retries = 0
      loop do
-        result = yield # Expecting Google::APIClient::Result
-        return result if result.success?
-        break result unless retry? result, current_retries
+        result = yield
+        return result unless result.is_a? Google::APIClient::Result
+        break result if result.success? || !retry?(result, current_retries)
        current_retries += 1
        @backoff.call current_retries
      end
    end
 
+    # @private
+    def execute_grpc
+      current_retries = 0
+      loop do
+        begin
+          return yield
+        rescue GRPC::BadStatus => e
+          raise e unless @grpc_codes.include?(e.code) &&
+                         (current_retries < @retries)
+          current_retries += 1
+          @backoff.call current_retries
+        end
+      end
+    end
+
     protected
 
+    # @private
     def retry? result, current_retries #:nodoc:
-      if current_retries < @max_retries
+      if current_retries < @retries
         return true if retry_http_code? result
         return true if retry_error_reason? result
       end
       false
     end
 
+    # @private
     def retry_http_code? result #:nodoc:
       @http_codes.include? result.response.status
     end
 
-    def retry_error_reason? result #:nodoc:
+    # @private
+    def retry_error_reason? result
       if result.data &&
          result.data["error"] &&
          result.data["error"]["errors"]
data/lib/gcloud/bigquery.rb

@@ -12,40 +12,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 require "gcloud"
 require "gcloud/bigquery/project"
 
-#--
-# Google Cloud BigQuery
 module Gcloud
   ##
-  # Creates a new +Project+ instance connected to the BigQuery service.
+  # Creates a new `Project` instance connected to the BigQuery service.
   # Each call creates a new connection.
   #
-  # === Parameters
+  # For more information on connecting to Google Cloud see the [Authentication
+  # Guide](https://googlecloudplatform.github.io/gcloud-ruby/#/docs/guides/authentication).
   #
-  # +project+::
-  #   Identifier for a BigQuery project. If not present, the default project for
-  #   the credentials is used. (+String+)
-  # +keyfile+::
-  #   Keyfile downloaded from Google Cloud. If file path the file must be
-  #   readable. (+String+ or +Hash+)
-  # +scope+::
-  #   The OAuth 2.0 scopes controlling the set of resources and operations that
-  #   the connection can access. See {Using OAuth 2.0 to Access Google
-  #   APIs}[https://developers.google.com/identity/protocols/OAuth2]. (+String+
-  #   or +Array+)
+  # @param [String] project Identifier for a BigQuery project. If not present,
+  #   the default project for the credentials is used.
+  # @param [String, Hash] keyfile Keyfile downloaded from Google Cloud. If file
+  #   path the file must be readable.
+  # @param [String, Array<String>] scope The OAuth 2.0 scopes controlling the
+  #   set of resources and operations that the connection can access. See [Using
+  #   OAuth 2.0 to Access Google
+  #   APIs](https://developers.google.com/identity/protocols/OAuth2).
   #
   # The default scope is:
   #
-  # * +https://www.googleapis.com/auth/bigquery+
-  #
-  # === Returns
+  # * `https://www.googleapis.com/auth/bigquery`
   #
-  # Gcloud::Bigquery::Project
-  #
-  # === Example
+  # @return [Gcloud::Bigquery::Project]
   #
+  # @example
   #   require "gcloud/bigquery"
   #
   #   bigquery = Gcloud.bigquery
@@ -63,311 +57,333 @@ module Gcloud
   end
 
   ##
-  # = Google Cloud BigQuery
+  # # Google Cloud BigQuery
   #
   # Google Cloud BigQuery enables super-fast, SQL-like queries against massive
   # datasets, using the processing power of Google's infrastructure. To learn
-  # more, read {What is
-  # BigQuery?}[https://cloud.google.com/bigquery/what-is-bigquery].
+  # more, read [What is
+  # BigQuery?](https://cloud.google.com/bigquery/what-is-bigquery).
   #
   # Gcloud's goal is to provide an API that is familiar and comfortable to
-  # Rubyists. Authentication is handled by Gcloud#bigquery. You can provide
+  # Rubyists. Authentication is handled by {Gcloud#bigquery}. You can provide
   # the project and credential information to connect to the BigQuery service,
   # or if you are running on Google Compute Engine this configuration is taken
   # care of for you. You can read more about the options for connecting in the
-  # {Authentication Guide}[link:AUTHENTICATION.md].
+  # [Authentication
+  # Guide](https://googlecloudplatform.github.io/gcloud-ruby/#/docs/guides/authentication).
   #
   # To help you get started quickly, the first few examples below use a public
-  # dataset provided by Google. As soon as you have {signed
-  # up}[https://cloud.google.com/bigquery/sign-up] to use BigQuery, and provided
+  # dataset provided by Google. As soon as you have [signed
+  # up](https://cloud.google.com/bigquery/sign-up) to use BigQuery, and provided
   # that you stay in the free tier for queries, you should be able to run these
   # first examples without the need to set up billing or to load data (although
   # we'll show you how to do that too.)
   #
-  # == Listing Datasets and Tables
+  # ## Listing Datasets and Tables
   #
   # A BigQuery project holds datasets, which in turn hold tables. Assuming that
   # you have not yet created datasets or tables in your own project, let's
-  # connect to Google's +publicdata+ project, and see what you find.
+  # connect to Google's `publicdata` project, and see what you find.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new "publicdata"
-  #   bigquery = gcloud.bigquery
+  # gcloud = Gcloud.new "publicdata"
+  # bigquery = gcloud.bigquery
   #
-  #   bigquery.datasets.count #=> 1
-  #   bigquery.datasets.first.dataset_id #=> "samples"
+  # bigquery.datasets.count #=> 1
+  # bigquery.datasets.first.dataset_id #=> "samples"
   #
-  #   dataset = bigquery.datasets.first
-  #   tables = dataset.tables
+  # dataset = bigquery.datasets.first
+  # tables = dataset.tables
   #
-  #   tables.count #=> 7
-  #   tables.map &:table_id #=> [..., "shakespeare", "trigrams", "wikipedia"]
+  # tables.count #=> 7
+  # tables.map &:table_id #=> [..., "shakespeare", "trigrams", "wikipedia"]
+  # ```
   #
   # In addition listing all datasets and tables in the project, you can also
   # retrieve individual datasets and tables by ID. Let's look at the structure
-  # of the +shakespeare+ table, which contains an entry for every word in every
+  # of the `shakespeare` table, which contains an entry for every word in every
   # play written by Shakespeare.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new "publicdata"
-  #   bigquery = gcloud.bigquery
+  # gcloud = Gcloud.new "publicdata"
+  # bigquery = gcloud.bigquery
   #
-  #   dataset = bigquery.dataset "samples"
-  #   table = dataset.table "shakespeare"
+  # dataset = bigquery.dataset "samples"
+  # table = dataset.table "shakespeare"
   #
-  #   table.headers #=> ["word", "word_count", "corpus", "corpus_date"]
-  #   table.rows_count #=> 164656
+  # table.headers #=> ["word", "word_count", "corpus", "corpus_date"]
+  # table.rows_count #=> 164656
+  # ```
   #
   # Now that you know the column names for the Shakespeare table, you can write
   # and run a query.
   #
-  # == Running queries
+  # ## Running queries
   #
   # BigQuery offers both synchronous and asynchronous methods, as explained in
-  # {Querying Data}[https://cloud.google.com/bigquery/querying-data].
+  # [Querying Data](https://cloud.google.com/bigquery/querying-data).
   #
-  # === Synchronous queries
+  # ### Synchronous queries
   #
   # Let's start with the simpler synchronous approach. Notice that this time you
   # are connecting using your own default project. This is necessary for running
   # a query, since queries need to be able to create tables to hold results.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
   #
-  #   sql = "SELECT TOP(word, 50) as word, COUNT(*) as count " +
-  #         "FROM publicdata:samples.shakespeare"
-  #   data = bigquery.query sql
+  # sql = "SELECT TOP(word, 50) as word, COUNT(*) as count " +
+  #       "FROM publicdata:samples.shakespeare"
+  # data = bigquery.query sql
   #
-  #   data.count #=> 50
-  #   data.next? #=> false
-  #   data.first #=> {"word"=>"you", "count"=>42}
+  # data.count #=> 50
+  # data.next? #=> false
+  # data.first #=> {"word"=>"you", "count"=>42}
+  # ```
   #
-  # The +TOP+ function shown above is just one of a variety of functions
-  # offered by BigQuery. See the {Query
-  # Reference}[https://cloud.google.com/bigquery/query-reference] for a full
+  # The `TOP` function shown above is just one of a variety of functions
+  # offered by BigQuery. See the [Query
+  # Reference](https://cloud.google.com/bigquery/query-reference) for a full
   # listing.
   #
-  # === Asynchronous queries
+  # ### Asynchronous queries
   #
   # Because you probably should not block for most BigQuery operations,
   # including querying as well as importing, exporting, and copying data, the
   # BigQuery API enables you to manage longer-running jobs. In the asynchronous
-  # approach to running a query, an instance of Gcloud::Bigquery::QueryJob is
-  # returned, rather than an instance of Gcloud::Bigquery::QueryData.
+  # approach to running a query, an instance of {Gcloud::Bigquery::QueryJob} is
+  # returned, rather than an instance of {Gcloud::Bigquery::QueryData}.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
   #
-  #   sql = "SELECT TOP(word, 50) as word, COUNT(*) as count " +
-  #         "FROM publicdata:samples.shakespeare"
-  #   job = bigquery.query_job sql
+  # sql = "SELECT TOP(word, 50) as word, COUNT(*) as count " +
+  #       "FROM publicdata:samples.shakespeare"
+  # job = bigquery.query_job sql
   #
-  #   job.wait_until_done!
-  #   if !job.failed?
-  #     job.query_results.each do |row|
-  #       puts row["word"]
-  #     end
+  # job.wait_until_done!
+  # if !job.failed?
+  #   job.query_results.each do |row|
+  #     puts row["word"]
   #   end
+  # end
+  # ```
   #
   # Once you have determined that the job is done and has not failed, you can
-  # obtain an instance of Gcloud::Bigquery::QueryData by calling
-  # Gcloud::Bigquery::QueryJob#query_results. The query results for both of
+  # obtain an instance of {Gcloud::Bigquery::QueryData} by calling
+  # {Gcloud::Bigquery::QueryJob#query_results}. The query results for both of
   # the above examples are stored in temporary tables with a lifetime of about
   # 24 hours. See the final example below for a demonstration of how to store
   # query results in a permanent table.
   #
-  # == Creating Datasets and Tables
+  # ## Creating Datasets and Tables
   #
   # The first thing you need to do in a new BigQuery project is to create a
-  # Gcloud::Bigquery::Dataset. Datasets hold tables and control access to them.
+  # {Gcloud::Bigquery::Dataset}. Datasets hold tables and control access to
+  # them.
   #
-  #   require "gcloud/bigquery"
+  # ```ruby
+  # require "gcloud/bigquery"
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
-  #   dataset = bigquery.create_dataset "my_dataset"
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # dataset = bigquery.create_dataset "my_dataset"
+  # ```
   #
   # Now that you have a dataset, you can use it to create a table. Every table
   # is defined by a schema that may contain nested and repeated fields. The
   # example below shows a schema with a repeated record field named
-  # +cities_lived+. (For more information about nested and repeated fields, see
-  # {Preparing Data for
-  # BigQuery}[https://cloud.google.com/bigquery/preparing-data-for-bigquery].)
-  #
-  #   require "gcloud"
-  #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
-  #   dataset = bigquery.dataset "my_dataset"
-  #
-  #   table = dataset.create_table "people" do |schema|
-  #     schema.string "first_name", mode: :required
-  #     schema.record "cities_lived", mode: :repeated do |nested_schema|
-  #       nested_schema.string "place", mode: :required
-  #       nested_schema.integer "number_of_years", mode: :required
-  #     end
+  # `cities_lived`. (For more information about nested and repeated fields, see
+  # [Preparing Data for
+  # BigQuery](https://cloud.google.com/bigquery/preparing-data-for-bigquery).)
+  #
+  # ```ruby
+  # require "gcloud"
+  #
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # dataset = bigquery.dataset "my_dataset"
+  #
+  # table = dataset.create_table "people" do |schema|
+  #   schema.string "first_name", mode: :required
+  #   schema.record "cities_lived", mode: :repeated do |nested_schema|
+  #     nested_schema.string "place", mode: :required
+  #     nested_schema.integer "number_of_years", mode: :required
   #   end
+  # end
+  # ```
   #
   # Because of the repeated field in this schema, we cannot use the CSV format
   # to load data into the table.
   #
-  # == Loading records
+  # ## Loading records
   #
   # In addition to CSV, data can be imported from files that are formatted as
-  # {Newline-delimited JSON}[http://jsonlines.org/] or
-  # {Avro}[http://avro.apache.org/], or from a Google Cloud Datastore backup. It
+  # [Newline-delimited JSON](http://jsonlines.org/) or
+  # [Avro](http://avro.apache.org/), or from a Google Cloud Datastore backup. It
   # can also be "streamed" into BigQuery.
   #
   # To follow along with these examples, you will need to set up billing on the
-  # {Google Developers Console}[https://console.developers.google.com].
+  # [Google Developers Console](https://console.developers.google.com).
   #
-  # === Streaming records
+  # ### Streaming records
   #
   # For situations in which you want new data to be available for querying as
   # soon as possible, inserting individual records directly from your Ruby
   # application is a great approach.
   #
-  #   require "gcloud"
-  #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
-  #   dataset = bigquery.dataset "my_dataset"
-  #   table = dataset.table "people"
-  #
-  #   rows = [
-  #     {
-  #       "first_name" => "Anna",
-  #       "cities_lived" => [
-  #         {
-  #           "place" => "Stockholm",
-  #           "number_of_years" => 2
-  #         }
-  #       ]
-  #     },
-  #     {
-  #       "first_name" => "Bob",
-  #       "cities_lived" => [
-  #         {
-  #           "place" => "Seattle",
-  #           "number_of_years" => 5
-  #         },
-  #         {
-  #           "place" => "Austin",
-  #           "number_of_years" => 6
-  #         }
-  #       ]
-  #     }
-  #   ]
-  #   table.insert rows
+  # ```ruby
+  # require "gcloud"
+  #
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # dataset = bigquery.dataset "my_dataset"
+  # table = dataset.table "people"
+  #
+  # rows = [
+  #   {
+  #     "first_name" => "Anna",
+  #     "cities_lived" => [
+  #       {
+  #         "place" => "Stockholm",
+  #         "number_of_years" => 2
+  #       }
+  #     ]
+  #   },
+  #   {
+  #     "first_name" => "Bob",
+  #     "cities_lived" => [
+  #       {
+  #         "place" => "Seattle",
+  #         "number_of_years" => 5
+  #       },
+  #       {
+  #         "place" => "Austin",
+  #         "number_of_years" => 6
+  #       }
+  #     ]
+  #   }
+  # ]
+  # table.insert rows
+  # ```
   #
   # There are some trade-offs involved with streaming, so be sure to read the
-  # discussion of data consistency in {Streaming Data Into
-  # BigQuery}[https://cloud.google.com/bigquery/streaming-data-into-bigquery].
+  # discussion of data consistency in [Streaming Data Into
+  # BigQuery](https://cloud.google.com/bigquery/streaming-data-into-bigquery).
   #
-  # === Uploading a file
+  # ### Uploading a file
   #
   # To follow along with this example, please download the
-  # {names.zip}[http://www.ssa.gov/OACT/babynames/names.zip] archive from the
+  # [names.zip](http://www.ssa.gov/OACT/babynames/names.zip) archive from the
   # U.S. Social Security Administration. Inside the archive you will find over
   # 100 files containing baby name records since the year 1880. A PDF file also
   # contained in the archive specifies the schema used below.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
-  #   dataset = bigquery.dataset "my_dataset"
-  #   table = dataset.create_table "baby_names" do |schema|
-  #     schema.string "name", mode: :required
-  #     schema.string "sex", mode: :required
-  #     schema.integer "number", mode: :required
-  #   end
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # dataset = bigquery.dataset "my_dataset"
+  # table = dataset.create_table "baby_names" do |schema|
+  #   schema.string "name", mode: :required
+  #   schema.string "sex", mode: :required
+  #   schema.integer "number", mode: :required
+  # end
   #
-  #   file = File.open "names/yob2014.txt"
-  #   load_job = table.load file, format: "csv"
+  # file = File.open "names/yob2014.txt"
+  # load_job = table.load file, format: "csv"
+  # ```
   #
   # Because the names data, although formatted as CSV, is distributed in files
-  # with a +.txt+ extension, this example explicitly passes the +format+ option
+  # with a `.txt` extension, this example explicitly passes the `format` option
   # in order to demonstrate how to handle such situations. Because CSV is the
   # default format for load operations, the option is not actually necessary.
-  # For JSON saved with a +.txt+ extension, however, it would be.
+  # For JSON saved with a `.txt` extension, however, it would be.
   #
-  # === A note about large uploads
+  # ### A note about large uploads
   #
-  # You may encounter a Broken pipe (Errno::EPIPE) error when attempting to
+  # You may encounter a Broken pipe (`Errno::EPIPE`) error when attempting to
   # upload large files. To avoid this problem, add the
-  # {httpclient}[https://rubygems.org/gems/httpclient] gem to your project, and
+  # [httpclient](https://rubygems.org/gems/httpclient) gem to your project, and
   # the line (or lines) of configuration shown below. These lines must execute
   # after you require gcloud but before you make your first gcloud connection.
-  # The first statement configures {Faraday}[https://rubygems.org/gems/faraday]
+  # The first statement configures [Faraday](https://rubygems.org/gems/faraday)
   # to use httpclient. The second statement, which should only be added if you
-  # are using a version of Faraday at or above 0.9.2, is a workaround for {this
-  # gzip issue}[https://github.com/GoogleCloudPlatform/gcloud-ruby/issues/367].
+  # are using a version of Faraday at or above 0.9.2, is a workaround for [this
+  # gzip issue](https://github.com/GoogleCloudPlatform/gcloud-ruby/issues/367).
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   # Use httpclient to avoid broken pipe errors with large uploads
-  #   Faraday.default_adapter = :httpclient
+  # # Use httpclient to avoid broken pipe errors with large uploads
+  # Faraday.default_adapter = :httpclient
   #
-  #   # Only add the following statement if using Faraday >= 0.9.2
-  #   # Override gzip middleware with no-op for httpclient
-  #   Faraday::Response.register_middleware :gzip =>
-  #     Faraday::Response::Middleware
+  # # Only add the following statement if using Faraday >= 0.9.2
+  # # Override gzip middleware with no-op for httpclient
+  # Faraday::Response.register_middleware :gzip =>
+  #   Faraday::Response::Middleware
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # ```
   #
-  # == Exporting query results to Google Cloud Storage
+  # ## Exporting query results to Google Cloud Storage
   #
-  # The example below shows how to pass the +table+ option with a query in order
+  # The example below shows how to pass the `table` option with a query in order
   # to store results in a permanent table. It also shows how to export the
   # result data to a Google Cloud Storage file. In order to follow along, you
   # will need to enable the Google Cloud Storage API in addition to setting up
   # billing.
   #
-  #   require "gcloud"
+  # ```ruby
+  # require "gcloud"
   #
-  #   gcloud = Gcloud.new
-  #   bigquery = gcloud.bigquery
-  #   dataset = bigquery.dataset "my_dataset"
-  #   source_table = dataset.table "baby_names"
-  #   result_table = dataset.create_table "baby_names_results"
+  # gcloud = Gcloud.new
+  # bigquery = gcloud.bigquery
+  # dataset = bigquery.dataset "my_dataset"
+  # source_table = dataset.table "baby_names"
+  # result_table = dataset.create_table "baby_names_results"
   #
-  #   sql = "SELECT name, number as count " +
-  #         "FROM baby_names " +
-  #         "WHERE name CONTAINS 'Sam' " +
-  #         "ORDER BY count DESC"
-  #   query_job = dataset.query_job sql, table: result_table
+  # sql = "SELECT name, number as count " +
+  #       "FROM baby_names " +
+  #       "WHERE name CONTAINS 'Sam' " +
+  #       "ORDER BY count DESC"
+  # query_job = dataset.query_job sql, table: result_table
   #
-  #   query_job.wait_until_done!
+  # query_job.wait_until_done!
   #
-  #   if !query_job.failed?
+  # if !query_job.failed?
   #
-  #     storage = gcloud.storage
-  #     bucket_id = "bigquery-exports-#{SecureRandom.uuid}"
-  #     bucket = storage.create_bucket bucket_id
-  #     extract_url = "gs://#{bucket.id}/baby-names-sam.csv"
+  #   storage = gcloud.storage
+  #   bucket_id = "bigquery-exports-#{SecureRandom.uuid}"
+  #   bucket = storage.create_bucket bucket_id
+  #   extract_url = "gs://#{bucket.id}/baby-names-sam.csv"
   #
-  #     extract_job = result_table.extract extract_url
+  #   extract_job = result_table.extract extract_url
   #
-  #     extract_job.wait_until_done!
+  #   extract_job.wait_until_done!
   #
-  #     # Download to local filesystem
-  #     bucket.files.first.download "baby-names-sam.csv"
+  #   # Download to local filesystem
+  #   bucket.files.first.download "baby-names-sam.csv"
   #
-  #   end
+  # end
+  # ```
   #
   # If a table you wish to export contains a large amount of data, you can pass
   # a wildcard URI to export to multiple files (for sharding), or an array of
-  # URIs (for partitioning), or both. See {Exporting Data From
-  # BigQuery}[https://cloud.google.com/bigquery/exporting-data-from-bigquery]
+  # URIs (for partitioning), or both. See [Exporting Data From
+  # BigQuery](https://cloud.google.com/bigquery/exporting-data-from-bigquery)
   # for details.
   #
   module Bigquery
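Beyond the RDoc-to-Markdown conversion, the connection documentation above now declares `project`, `keyfile`, and `scope` as YARD `@param` tags. A minimal sketch of a connection made with explicit arguments rather than environment defaults (the project ID and keyfile path below are placeholders):

```ruby
require "gcloud/bigquery"

# Project ID and keyfile path are placeholders; when omitted, the
# credential defaults are used, and scope falls back to
# https://www.googleapis.com/auth/bigquery as documented above.
bigquery = Gcloud.bigquery "my-todo-project",
                           "/path/to/keyfile.json"

bigquery.datasets.each { |dataset| puts dataset.dataset_id }
```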