presto-client 0.3.3 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,15 +1,15 @@
 ---
 !binary "U0hBMQ==":
   metadata.gz: !binary |-
-    NDc4YmQ2ZDQ1OTg4ZWJiYTJlMDgzMDIzNThmMDJmMzUxZjE2YzlhZg==
+    YjkzMDA1ZjE0N2U5NjQ2ODUwOTg4NDhkMTk3YTEwODcwZDNlYzE3Ng==
   data.tar.gz: !binary |-
-    MmIzYmIzOWVmNzRjOTc5ZDRiYTFiZjY5NTNlNjgwMjc1YWRlODBhMA==
+    ZDhkMzczNzM0YWE2OGFmZWYwZjAyZjM2Yzk0NGUzZDdlMzRlZGU1Yw==
 SHA512:
   metadata.gz: !binary |-
-    MjAyYTBlNzYzODMwMzJlNWU2OTI2ZjQ4MzM5ZDhlNWRhNjNmMWY0YWFiYWU1
-    YzkyMzg5NTUwNjlkZDY3Nzg0MTdlMGEwZTE1YzIxZDNiZmE4MjI5NzkxMmZj
-    ZDRhYThjNDgwMDk5YzMyNDA5ODQ1MjFjMTYzNjY2MjUxM2Y5YWQ=
+    ODczY2ZmNmU2YjU4ZjhiNzBhYWE1MDIxMTNhYjIyMDFlYTkxMzYwYWQwOTRj
+    NzI1YmE4MDRjNDVkMjIzN2FkMzVkYTFhZTdjZDU5YWQwMzEzZjA0ZTgwOGY4
+    MmYyNzdmODY5ZmNjZGIzY2Y2ZTIzOTgxZDk1ZTliMjc1YWE4OGQ=
  data.tar.gz: !binary |-
-    N2Q5NmI2N2ExOTEwMTI0OWFjM2U5Njc0NjlmNjRkNDhkOWYyYTY4OWU5YWFm
-    MDJmMTc4MzMwNmFjNDQ2YWMyYjI5YTI2NDVhZTc5ODFkYWEzNzYyNzE3NTE1
-    YjkyYjVmNTczNWM0OWFjYWQ0ZGY0NDVhM2QzM2QwMGVmMWJmMTk=
+    YzM1MmIwODUyOWIyZTU1MzM1MmUyNGNhMGE4NzYwZTQzYzY5MjBlNDAxYjdh
+    MWRjMWQyODBhZTRmZWIwN2I0NjUzOTI3MjAxYmMxZjk2YjA1YmJiMDgzOTY1
+    Zjk3NGMwZjRiNzg3NTY5NmRiOWRhYmQ5N2UwMTc5MDYwNzUzMmI=
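For reference, each value above is a base64 encoding of the hex digest of the named file inside the gem, so the old and new digests can be compared directly once decoded. A quick sketch of decoding one (the string is the 0.3.3 SHA1 entry for metadata.gz from the hunk above):

```ruby
require 'base64'

# Decodes to the 40-character SHA1 hex digest recorded for the 0.3.3 metadata.gz.
Base64.decode64("NDc4YmQ2ZDQ1OTg4ZWJiYTJlMDgzMDIzNThmMDJmMzUxZjE2YzlhZg==")
# => "478bd6d45988ebba2e08302358f02f351f16c9af"
```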
data/README.md CHANGED
@@ -44,9 +44,11 @@ end
 
 * **server** sets address[:port] to a Presto coordinator
 * **catalog** sets catalog (connector) name of Presto such as `hive-cdh4`, `hive-hadoop1`, etc.
-* **schema** sets default schema name of Presto. You can read other schemas by qualified name like `FROM myschema.table1`
+* **schema** sets default schema name of Presto. You can read other schemas by qualified name like `FROM myschema.table1`.
 * **source** sets source name of the connection. This name is shown on the Presto web interface.
 * **user** sets user name to connect to Presto.
+* **time_zone** sets time zone of the query. Time zone affects some functions such as `format_datetime`.
+* **language** sets language of the query. Language affects some functions such as `format_datetime`.
 * **http_debug** enables debug messages to STDOUT for each HTTP request
 * **http_open_timeout** sets timeout in seconds to open a new HTTP connection
 * **http_timeout** sets timeout in seconds to read data from a server
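For context, the two options added here sit alongside the existing ones in the client constructor. A minimal sketch of passing them, using placeholder connection values (option names are from the list above; the constructor is `Presto::Client.new` as in the README's setup example):

```ruby
require 'presto-client'

# Placeholder coordinator address and catalog; time_zone and language
# are the two options added in 0.4.0.
client = Presto::Client.new(
  server:    "localhost:8880",
  catalog:   "hive-cdh4",
  schema:    "default",
  user:      "presto",
  time_zone: "US/Pacific",   # affects e.g. format_datetime
  language:  "English"       # affects e.g. format_datetime
)
```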
data/Rakefile CHANGED
@@ -11,3 +11,16 @@ RSpec::Core::RakeTask.new(:spec) do |t|
 end
 
 task :default => [:spec, :build]
+
+GEN_MODELS_VERSION = "0.69"
+
+task :modelgen do
+  unless Dir.exists?("presto-#{GEN_MODELS_VERSION}")
+    sh "curl -L -o presto-#{GEN_MODELS_VERSION}.tar.gz https://github.com/facebook/presto/archive/#{GEN_MODELS_VERSION}.tar.gz"
+    sh "tar zxvf presto-#{GEN_MODELS_VERSION}.tar.gz"
+  end
+
+  sh "#{RbConfig.ruby} modelgen/modelgen.rb presto-#{GEN_MODELS_VERSION} modelgen/models.rb lib/presto/client/models.rb"
+  puts "Generated lib/presto/client/models.rb."
+end
+
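The new task pins the Presto source used for code generation to `GEN_MODELS_VERSION` and downloads it only when the extracted directory is missing. A sketch of the same steps as plain Ruby, for running outside Rake (same URL and paths as the task above; the error handling is added here):

```ruby
require 'rbconfig'

version = "0.69"  # GEN_MODELS_VERSION in the Rakefile

# Fetch and unpack the pinned Presto source only once, as the task does.
unless Dir.exist?("presto-#{version}")
  system("curl -L -o presto-#{version}.tar.gz https://github.com/facebook/presto/archive/#{version}.tar.gz") or abort "download failed"
  system("tar zxvf presto-#{version}.tar.gz") or abort "extract failed"
end

# Regenerate the model classes from the downloaded source tree.
system("#{RbConfig.ruby} modelgen/modelgen.rb presto-#{version} modelgen/models.rb lib/presto/client/models.rb")
```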
data/lib/presto/client/models.rb CHANGED
@@ -15,231 +15,934 @@
 #
 module Presto::Client
 
-  class Column
-    attr_reader :name
-    attr_reader :type
+  ####
+  ## lib/presto/client/models.rb is automatically generated using "rake modelgen" command.
+  ## You should not edit this file directly. To modify the class definitions, edit
+  ## modelgen/models.rb file and run "rake modelgen".
+  ##
 
-    def initialize(options={})
-      @name = options[:name]
-      @type = options[:type]
+  module Models
+    class Base < Struct
+      class << self
+        alias_method :new_struct, :new
+
+        def new(*args)
+          new_struct(*args) do
+            # make it immutable
+            undef_method :"[]="
+            members.each do |m|
+              undef_method :"#{m}="
+            end
+
+            # replace constructor to receive hash instead of array
+            alias_method :initialize_struct, :initialize
+
+            def initialize(params={})
+              initialize_struct(*members.map {|m| params[m] })
+            end
+          end
+        end
+      end
     end
 
-    def self.decode_hash(hash)
-      new(
-        name: hash["name"],
-        type: hash["type"],
-      )
+    class QueryId < String
     end
-  end
 
-  class StageStats
-    attr_reader :stage_id
-    attr_reader :state
-    attr_reader :done
-    attr_reader :nodes
-    attr_reader :total_splits
-    attr_reader :queued_splits
-    attr_reader :running_splits
-    attr_reader :completed_splits
-    attr_reader :user_time_millis
-    attr_reader :cpu_time_millis
-    attr_reader :wall_time_millis
-    attr_reader :processed_rows
-    attr_reader :processed_bytes
-    attr_reader :sub_stages
-
-    def initialize(options={})
-      @stage_id = options[:stage_id]
-      @state = options[:state]
-      @done = options[:done]
-      @nodes = options[:nodes]
-      @total_splits = options[:total_splits]
-      @queued_splits = options[:queued_splits]
-      @running_splits = options[:running_splits]
-      @completed_splits = options[:completed_splits]
-      @user_time_millis = options[:user_time_millis]
-      @cpu_time_millis = options[:cpu_time_millis]
-      @wall_time_millis = options[:wall_time_millis]
-      @processed_rows = options[:processed_rows]
-      @processed_bytes = options[:processed_bytes]
-      @sub_stages = options[:sub_stages]
-    end
-
-    def self.decode_hash(hash)
-      new(
-        stage_id: hash["stageId"],
-        state: hash["state"],
-        done: hash["done"],
-        nodes: hash["nodes"],
-        total_splits: hash["totalSplits"],
-        queued_splits: hash["queuedSplits"],
-        running_splits: hash["runningSplits"],
-        completed_splits: hash["completedSplits"],
-        user_time_millis: hash["userTimeMillis"],
-        cpu_time_millis: hash["cpuTimeMillis"],
-        wall_time_millis: hash["wallTimeMillis"],
-        processed_rows: hash["processedRows"],
-        processed_bytes: hash["processedBytes"],
-        sub_stages: hash["subStages"] && hash["subStages"].map {|h| StageStats.decode_hash(h) },
-      )
+    class StageId < String
     end
-  end
 
-  class StatementStats
-    attr_reader :state
-    attr_reader :scheduled
-    attr_reader :nodes
-    attr_reader :total_splits
-    attr_reader :queued_splits
-    attr_reader :running_splits
-    attr_reader :completed_splits
-    attr_reader :user_time_millis
-    attr_reader :cpu_time_millis
-    attr_reader :wall_time_millis
-    attr_reader :processed_rows
-    attr_reader :processed_bytes
-    attr_reader :root_stage
-
-    def initialize(options={})
-      @state = options[:state]
-      @scheduled = options[:scheduled]
-      @nodes = options[:nodes]
-      @total_splits = options[:total_splits]
-      @queued_splits = options[:queued_splits]
-      @running_splits = options[:running_splits]
-      @completed_splits = options[:completed_splits]
-      @user_time_millis = options[:user_time_millis]
-      @cpu_time_millis = options[:cpu_time_millis]
-      @wall_time_millis = options[:wall_time_millis]
-      @processed_rows = options[:processed_rows]
-      @processed_bytes = options[:processed_bytes]
-      @root_stage = options[:root_stage]
-    end
-
-    def self.decode_hash(hash)
-      new(
-        state: hash["state"],
-        scheduled: hash["scheduled"],
-        nodes: hash["nodes"],
-        total_splits: hash["totalSplits"],
-        queued_splits: hash["queuedSplits"],
-        running_splits: hash["runningSplits"],
-        completed_splits: hash["completedSplits"],
-        user_time_millis: hash["userTimeMillis"],
-        cpu_time_millis: hash["cpuTimeMillis"],
-        wall_time_millis: hash["wallTimeMillis"],
-        processed_rows: hash["processedRows"],
-        processed_bytes: hash["processedBytes"],
-        root_stage: hash["rootStage"] && StageStats.decode_hash(hash["rootStage"]),
-      )
+    class TaskId < String
     end
-  end
 
-  class ErrorLocation
-    attr_reader :line_number
-    attr_reader :column_number
+    class PlanNodeId < String
+    end
 
-    def initialize(options={})
-      @line_number = options[:line_number]
-      @column_number = options[:column_number]
+    class PlanFragmentId < String
     end
 
-    def self.decode_hash(hash)
-      new(
-        line_number: hash["lineNumber"],
-        column_number: hash["columnNumber"],
-      )
+    class ConnectorSession < Hash
+      def initialize(hash)
+        super()
+        merge!(hash)
+      end
     end
-  end
 
-  class FailureInfo
-    attr_reader :type
-    attr_reader :message
-    attr_reader :cause
-    attr_reader :suppressed
-    attr_reader :stack
-    attr_reader :error_location
-
-    def initialize(options={})
-      @type = options[:type]
-      @message = options[:message]
-      @cause = options[:cause]
-      @suppressed = options[:suppressed]
-      @stack = options[:stack]
-      @error_location = options[:error_location]
-    end
-
-    def self.decode_hash(hash)
-      new(
-        type: hash["type"],
-        message: hash["message"],
-        cause: hash["cause"],
-        suppressed: hash["suppressed"] && hash["suppressed"].map {|h| FailureInfo.decode_hash(h) },
-        stack: hash["stack"],
-        error_location: hash["errorLocation"] && ErrorLocation.decode_hash(hash["errorLocation"]),
-      )
+    module PlanNode
+      def self.decode(hash)
+        model_class = case hash["type"]
+          when "output" then OutputNode
+          when "project" then ProjectNode
+          when "tablescan" then TableScanNode
+          when "values" then ValuesNode
+          when "aggregation" then AggregationNode
+          when "markDistinct" then MarkDistinctNode
+          when "materializeSample" then MaterializeSampleNode
+          when "filter" then FilterNode
+          when "window" then WindowNode
+          when "limit" then LimitNode
+          when "distinctlimit" then DistinctLimitNode
+          when "topn" then TopNNode
+          when "sample" then SampleNode
+          when "sort" then SortNode
+          when "exchange" then ExchangeNode
+          when "sink" then SinkNode
+          when "join" then JoinNode
+          when "INNER" then JoinNode
+          when "LEFT" then JoinNode
+          when "RIGHT" then JoinNode
+          when "CROSS" then JoinNode
+          when "semijoin" then SemiJoinNode
+          when "indexjoin" then IndexJoinNode
+          when "indexsource" then IndexSourceNode
+          when "tablewriter" then TableWriterNode
+          when "tablecommit" then TableCommitNode
+          else
+        end
+        model_class.decode(hash) if model_class
+      end
     end
-  end
 
-  class QueryError
-    attr_reader :message
-    attr_reader :sql_state
-    attr_reader :error_code
-    attr_reader :error_location
-    attr_reader :failure_info
-
-    def initialize(options={})
-      @message = options[:message]
-      @sql_state = options[:sql_state]
-      @error_code = options[:error_code]
-      @error_location = options[:error_location]
-      @failure_info = options[:failure_info]
-    end
-
-    def self.decode_hash(hash)
-      new(
-        message: hash["message"],
-        sql_state: hash["sqlState"],
-        error_code: hash["errorCode"],
-        error_location: hash["errorLocation"] && ErrorLocation.decode_hash(hash["errorLocation"]),
-        failure_info: hash["failureInfo"] && FailureInfo.decode_hash(hash["failureInfo"]),
-      )
+    # io.airlift.stats.Distribution.DistributionSnapshot
+    class << DistributionSnapshot =
+        Base.new(:max_error, :count, :total, :p01, :p05, :p10, :p25, :p50, :p75, :p90, :p95, :p99, :min, :max)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["maxError"],
+          hash["count"],
+          hash["total"],
+          hash["p01"],
+          hash["p05"],
+          hash["p10"],
+          hash["p25"],
+          hash["p50"],
+          hash["p75"],
+          hash["p90"],
+          hash["p95"],
+          hash["p99"],
+          hash["min"],
+          hash["max"],
+        )
+        obj
+      end
     end
-  end
 
-  class QueryResults
-    attr_reader :id
-    attr_reader :info_uri
-    attr_reader :partial_cache_uri
-    attr_reader :next_uri
-    attr_reader :columns
-    attr_reader :data
-    attr_reader :stats
-    attr_reader :error
-
-    def initialize(options={})
-      @id = options[:id]
-      @info_uri = options[:info_uri]
-      @partial_cache_uri = options[:partial_cache_uri]
-      @next_uri = options[:next_uri]
-      @columns = options[:columns]
-      @data = options[:data]
-      @stats = options[:stats]
-      @error = options[:error]
-    end
-
-    def self.decode_hash(hash)
-      new(
-        id: hash["id"],
-        info_uri: hash["infoUri"],
-        partial_cache_uri: hash["partialCancelUri"],
-        next_uri: hash["nextUri"],
-        columns: hash["columns"] && hash["columns"].map {|h| Column.decode_hash(h) },
-        data: hash["data"],
-        stats: hash["stats"] && StatementStats.decode_hash(hash["stats"]),
-        error: hash["error"] && QueryError.decode_hash(hash["error"]),
-      )
+
+    ##
+    # Those model classes are automatically generated
+    #
+
+    class << AggregationNode =
+        Base.new(:id, :source, :group_by, :aggregations, :functions, :masks, :step, :sample_weight, :confidence)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["groupBy"],
+          hash["aggregations"],
+          hash["functions"] && Hash[hash["functions"].to_a.map! {|k,v| [k, Signature.decode(v)] }],
+          hash["masks"],
+          hash["step"] && hash["step"].downcase.to_sym,
+          hash["sampleWeight"],
+          hash["confidence"],
+        )
+        obj
+      end
+    end
+
+    class << BufferInfo =
+        Base.new(:buffer_id, :finished, :buffered_pages, :pages_sent)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["bufferId"],
+          hash["finished"],
+          hash["bufferedPages"],
+          hash["pagesSent"],
+        )
+        obj
+      end
+    end
+
+    class << Column =
+        Base.new(:name, :type)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["name"],
+          hash["type"],
+        )
+        obj
+      end
+    end
+
+    class << ColumnHandle =
+        Base.new(:connector_id, :connector_handle)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["connectorId"],
+          hash["connectorHandle"],
+        )
+        obj
+      end
+    end
+
+    class << DistinctLimitNode =
+        Base.new(:id, :source, :limit)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["limit"],
+        )
+        obj
+      end
+    end
+
+    class << DriverStats =
+        Base.new(:create_time, :start_time, :end_time, :queued_time, :elapsed_time, :memory_reservation, :total_scheduled_time, :total_cpu_time, :total_user_time, :total_blocked_time, :raw_input_data_size, :raw_input_positions, :raw_input_read_time, :processed_input_data_size, :processed_input_positions, :output_data_size, :output_positions, :operator_stats)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["createTime"],
+          hash["startTime"],
+          hash["endTime"],
+          hash["queuedTime"],
+          hash["elapsedTime"],
+          hash["memoryReservation"],
+          hash["totalScheduledTime"],
+          hash["totalCpuTime"],
+          hash["totalUserTime"],
+          hash["totalBlockedTime"],
+          hash["rawInputDataSize"],
+          hash["rawInputPositions"],
+          hash["rawInputReadTime"],
+          hash["processedInputDataSize"],
+          hash["processedInputPositions"],
+          hash["outputDataSize"],
+          hash["outputPositions"],
+          hash["operatorStats"] && hash["operatorStats"].map {|h| OperatorStats.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << ErrorCode =
+        Base.new(:code, :name)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["code"],
+          hash["name"],
+        )
+        obj
+      end
+    end
+
+    class << ErrorLocation =
+        Base.new(:line_number, :column_number)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["lineNumber"],
+          hash["columnNumber"],
+        )
+        obj
+      end
+    end
+
+    class << ExchangeNode =
+        Base.new(:id, :source_fragment_ids, :outputs)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["sourceFragmentIds"] && hash["sourceFragmentIds"].map {|h| PlanFragmentId.new(h) },
+          hash["outputs"],
+        )
+        obj
+      end
+    end
+
+    class << ExecutionFailureInfo =
+        Base.new(:type, :message, :cause, :suppressed, :stack, :error_location, :error_code)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["type"],
+          hash["message"],
+          hash["cause"] && ExecutionFailureInfo.decode(hash["cause"]),
+          hash["suppressed"] && hash["suppressed"].map {|h| ExecutionFailureInfo.decode(h) },
+          hash["stack"],
+          hash["errorLocation"] && ErrorLocation.decode(hash["errorLocation"]),
+          hash["errorCode"] && ErrorCode.decode(hash["errorCode"]),
+        )
+        obj
+      end
+    end
+
+    class << FailureInfo =
+        Base.new(:type, :message, :cause, :suppressed, :stack, :error_location)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["type"],
+          hash["message"],
+          hash["cause"] && FailureInfo.decode(hash["cause"]),
+          hash["suppressed"] && hash["suppressed"].map {|h| FailureInfo.decode(h) },
+          hash["stack"],
+          hash["errorLocation"] && ErrorLocation.decode(hash["errorLocation"]),
+        )
+        obj
+      end
+    end
+
+    class << FilterNode =
+        Base.new(:id, :source, :predicate)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["predicate"],
+        )
+        obj
+      end
+    end
+
+    class << IndexHandle =
+        Base.new(:connector_id, :connector_handle)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["connectorId"],
+          hash["connectorHandle"],
+        )
+        obj
+      end
+    end
+
+    class << IndexJoinNode =
+        Base.new(:id, :type, :probe_source, :index_source, :criteria)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["type"],
+          hash["probeSource"] && PlanNode.decode(hash["probeSource"]),
+          hash["indexSource"] && PlanNode.decode(hash["indexSource"]),
+        )
+        obj
+      end
+    end
+
+    class << IndexSourceNode =
+        Base.new(:id, :index_handle, :table_handle, :lookup_symbols, :output_symbols, :assignments, :effective_tuple_domain)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["indexHandle"] && IndexHandle.decode(hash["indexHandle"]),
+          hash["tableHandle"] && TableHandle.decode(hash["tableHandle"]),
+          hash["lookupSymbols"],
+          hash["outputSymbols"],
+          hash["assignments"] && Hash[hash["assignments"].to_a.map! {|k,v| [k, ColumnHandle.decode(v)] }],
+        )
+        obj
+      end
+    end
+
+    class << Input =
+        Base.new(:connector_id, :schema, :table, :columns)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["connectorId"],
+          hash["schema"],
+          hash["table"],
+          hash["columns"] && hash["columns"].map {|h| Column.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << JoinNode =
+        Base.new(:id, :type, :left, :right, :criteria)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["type"],
+          hash["left"] && PlanNode.decode(hash["left"]),
+          hash["right"] && PlanNode.decode(hash["right"]),
+        )
+        obj
+      end
+    end
+
+    class << LimitNode =
+        Base.new(:id, :source, :count, :sample_weight)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["count"],
+          hash["sampleWeight"],
+        )
+        obj
+      end
+    end
+
+    class << MarkDistinctNode =
+        Base.new(:id, :source, :marker_symbol, :distinct_symbols, :sample_weight_symbol)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["markerSymbol"],
+          hash["distinctSymbols"],
+          hash["sampleWeightSymbol"],
+        )
+        obj
+      end
+    end
+
+    class << MaterializeSampleNode =
+        Base.new(:id, :source, :sample_weight_symbol)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["sampleWeightSymbol"],
+        )
+        obj
+      end
+    end
+
+    class << OperatorStats =
+        Base.new(:operator_id, :operator_type, :add_input_calls, :add_input_wall, :add_input_cpu, :add_input_user, :input_data_size, :input_positions, :get_output_calls, :get_output_wall, :get_output_cpu, :get_output_user, :output_data_size, :output_positions, :blocked_wall, :finish_calls, :finish_wall, :finish_cpu, :finish_user, :memory_reservation, :info)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["operatorId"],
+          hash["operatorType"],
+          hash["addInputCalls"],
+          hash["addInputWall"],
+          hash["addInputCpu"],
+          hash["addInputUser"],
+          hash["inputDataSize"],
+          hash["inputPositions"],
+          hash["getOutputCalls"],
+          hash["getOutputWall"],
+          hash["getOutputCpu"],
+          hash["getOutputUser"],
+          hash["outputDataSize"],
+          hash["outputPositions"],
+          hash["blockedWall"],
+          hash["finishCalls"],
+          hash["finishWall"],
+          hash["finishCpu"],
+          hash["finishUser"],
+          hash["memoryReservation"],
+          hash["info"],
+        )
+        obj
+      end
+    end
+
+    class << OutputNode =
+        Base.new(:id, :source, :columns, :outputs)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["columns"],
+          hash["outputs"],
+        )
+        obj
+      end
     end
-  end
 
+    class << OutputTableHandle =
+        Base.new(:connector_id, :connector_handle)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["connectorId"],
+          hash["connectorHandle"],
+        )
+        obj
+      end
+    end
+
+    class << PipelineStats =
+        Base.new(:input_pipeline, :output_pipeline, :total_drivers, :queued_drivers, :running_drivers, :completed_drivers, :memory_reservation, :queued_time, :elapsed_time, :total_scheduled_time, :total_cpu_time, :total_user_time, :total_blocked_time, :raw_input_data_size, :raw_input_positions, :processed_input_data_size, :processed_input_positions, :output_data_size, :output_positions, :operator_summaries, :drivers)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["inputPipeline"],
+          hash["outputPipeline"],
+          hash["totalDrivers"],
+          hash["queuedDrivers"],
+          hash["runningDrivers"],
+          hash["completedDrivers"],
+          hash["memoryReservation"],
+          hash["queuedTime"] && DistributionSnapshot.decode(hash["queuedTime"]),
+          hash["elapsedTime"] && DistributionSnapshot.decode(hash["elapsedTime"]),
+          hash["totalScheduledTime"],
+          hash["totalCpuTime"],
+          hash["totalUserTime"],
+          hash["totalBlockedTime"],
+          hash["rawInputDataSize"],
+          hash["rawInputPositions"],
+          hash["processedInputDataSize"],
+          hash["processedInputPositions"],
+          hash["outputDataSize"],
+          hash["outputPositions"],
+          hash["operatorSummaries"] && hash["operatorSummaries"].map {|h| OperatorStats.decode(h) },
+          hash["drivers"] && hash["drivers"].map {|h| DriverStats.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << PlanFragment =
+        Base.new(:id, :root, :symbols, :distribution, :partitioned_source, :output_partitioning, :partition_by)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanFragmentId.new(hash["id"]),
+          hash["root"] && PlanNode.decode(hash["root"]),
+          hash["symbols"],
+          hash["distribution"] && hash["distribution"].downcase.to_sym,
+          hash["partitionedSource"] && PlanNodeId.new(hash["partitionedSource"]),
+          hash["outputPartitioning"] && hash["outputPartitioning"].downcase.to_sym,
+          hash["partitionBy"],
+        )
+        obj
+      end
+    end
+
+    class << ProjectNode =
+        Base.new(:id, :source, :assignments)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["assignments"],
+        )
+        obj
+      end
+    end
+
+    class << QueryError =
+        Base.new(:message, :sql_state, :error_code, :error_location, :failure_info)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["message"],
+          hash["sqlState"],
+          hash["errorCode"],
+          hash["errorLocation"] && ErrorLocation.decode(hash["errorLocation"]),
+          hash["failureInfo"] && FailureInfo.decode(hash["failureInfo"]),
+        )
+        obj
+      end
+    end
+
+    class << QueryInfo =
+        Base.new(:query_id, :session, :state, :self, :field_names, :query, :query_stats, :output_stage, :failure_info, :error_code, :inputs)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["queryId"] && QueryId.new(hash["queryId"]),
+          hash["session"] && ConnectorSession.new(hash["session"]),
+          hash["state"] && hash["state"].downcase.to_sym,
+          hash["self"],
+          hash["fieldNames"],
+          hash["query"],
+          hash["queryStats"] && QueryStats.decode(hash["queryStats"]),
+          hash["outputStage"] && StageInfo.decode(hash["outputStage"]),
+          hash["failureInfo"] && FailureInfo.decode(hash["failureInfo"]),
+          hash["errorCode"] && ErrorCode.decode(hash["errorCode"]),
+          hash["inputs"] && hash["inputs"].map {|h| Input.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << QueryResults =
+        Base.new(:id, :info_uri, :partial_cancel_uri, :next_uri, :columns, :data, :stats, :error)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"],
+          hash["infoUri"],
+          hash["partialCancelUri"],
+          hash["nextUri"],
+          hash["columns"] && hash["columns"].map {|h| Column.decode(h) },
+          hash["data"],
+          hash["stats"] && StatementStats.decode(hash["stats"]),
+          hash["error"] && QueryError.decode(hash["error"]),
+        )
+        obj
+      end
+    end
+
+    class << QueryStats =
+        Base.new(:create_time, :execution_start_time, :last_heartbeat, :end_time, :elapsed_time, :queued_time, :analysis_time, :distributed_planning_time, :total_planning_time, :total_tasks, :running_tasks, :completed_tasks, :total_drivers, :queued_drivers, :running_drivers, :completed_drivers, :total_memory_reservation, :total_scheduled_time, :total_cpu_time, :total_user_time, :total_blocked_time, :raw_input_data_size, :raw_input_positions, :processed_input_data_size, :processed_input_positions, :output_data_size, :output_positions)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["createTime"],
+          hash["executionStartTime"],
+          hash["lastHeartbeat"],
+          hash["endTime"],
+          hash["elapsedTime"],
+          hash["queuedTime"],
+          hash["analysisTime"],
+          hash["distributedPlanningTime"],
+          hash["totalPlanningTime"],
+          hash["totalTasks"],
+          hash["runningTasks"],
+          hash["completedTasks"],
+          hash["totalDrivers"],
+          hash["queuedDrivers"],
+          hash["runningDrivers"],
+          hash["completedDrivers"],
+          hash["totalMemoryReservation"],
+          hash["totalScheduledTime"],
+          hash["totalCpuTime"],
+          hash["totalUserTime"],
+          hash["totalBlockedTime"],
+          hash["rawInputDataSize"],
+          hash["rawInputPositions"],
+          hash["processedInputDataSize"],
+          hash["processedInputPositions"],
+          hash["outputDataSize"],
+          hash["outputPositions"],
+        )
+        obj
+      end
+    end
+
+    class << SampleNode =
+        Base.new(:id, :source, :sample_ratio, :sample_type, :rescaled, :sample_weight_symbol)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["sampleRatio"],
+          hash["sampleType"],
+          hash["rescaled"],
+          hash["sampleWeightSymbol"],
+        )
+        obj
+      end
+    end
+
+    class << SemiJoinNode =
+        Base.new(:id, :source, :filtering_source, :source_join_symbol, :filtering_source_join_symbol, :semi_join_output)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["filteringSource"] && PlanNode.decode(hash["filteringSource"]),
+          hash["sourceJoinSymbol"],
+          hash["filteringSourceJoinSymbol"],
+          hash["semiJoinOutput"],
+        )
+        obj
+      end
+    end
+
+    class << SharedBufferInfo =
+        Base.new(:state, :master_sequence_id, :pages_added, :buffers)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["state"] && hash["state"].downcase.to_sym,
+          hash["masterSequenceId"],
+          hash["pagesAdded"],
+          hash["buffers"] && hash["buffers"].map {|h| BufferInfo.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << Signature =
+        Base.new(:name, :return_type, :argument_types, :approximate)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["name"],
+          hash["returnType"],
+          hash["argumentTypes"],
+          hash["approximate"],
+        )
+        obj
+      end
+    end
+
+    class << SinkNode =
+        Base.new(:id, :source, :output_symbols)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["outputSymbols"],
+        )
+        obj
+      end
+    end
+
+    class << SortNode =
+        Base.new(:id, :source, :order_by, :orderings)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["orderBy"],
+          hash["orderings"] && Hash[hash["orderings"].to_a.map! {|k,v| [k, v.downcase.to_sym] }],
+        )
+        obj
+      end
+    end
+
+    class << StageInfo =
+        Base.new(:stage_id, :state, :self, :plan, :types, :stage_stats, :tasks, :sub_stages, :failures)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["stageId"] && StageId.new(hash["stageId"]),
+          hash["state"] && hash["state"].downcase.to_sym,
+          hash["self"],
+          hash["plan"] && PlanFragment.decode(hash["plan"]),
+          hash["types"],
+          hash["stageStats"] && StageStats.decode(hash["stageStats"]),
+          hash["tasks"] && hash["tasks"].map {|h| TaskInfo.decode(h) },
+          hash["subStages"] && hash["subStages"].map {|h| StageInfo.decode(h) },
+          hash["failures"] && hash["failures"].map {|h| ExecutionFailureInfo.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << StageStats =
+        Base.new(:stage_id, :state, :done, :nodes, :total_splits, :queued_splits, :running_splits, :completed_splits, :user_time_millis, :cpu_time_millis, :wall_time_millis, :processed_rows, :processed_bytes, :sub_stages)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["stageId"],
+          hash["state"],
+          hash["done"],
+          hash["nodes"],
+          hash["totalSplits"],
+          hash["queuedSplits"],
+          hash["runningSplits"],
+          hash["completedSplits"],
+          hash["userTimeMillis"],
+          hash["cpuTimeMillis"],
+          hash["wallTimeMillis"],
+          hash["processedRows"],
+          hash["processedBytes"],
+          hash["subStages"] && hash["subStages"].map {|h| StageStats.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << StatementStats =
+        Base.new(:state, :scheduled, :nodes, :total_splits, :queued_splits, :running_splits, :completed_splits, :user_time_millis, :cpu_time_millis, :wall_time_millis, :processed_rows, :processed_bytes, :root_stage)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["state"],
+          hash["scheduled"],
+          hash["nodes"],
+          hash["totalSplits"],
+          hash["queuedSplits"],
+          hash["runningSplits"],
+          hash["completedSplits"],
+          hash["userTimeMillis"],
+          hash["cpuTimeMillis"],
+          hash["wallTimeMillis"],
+          hash["processedRows"],
+          hash["processedBytes"],
+          hash["rootStage"] && StageStats.decode(hash["rootStage"]),
+        )
+        obj
+      end
+    end
+
+    class << TableCommitNode =
+        Base.new(:id, :source, :target, :outputs)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["target"] && OutputTableHandle.decode(hash["target"]),
+          hash["outputs"],
+        )
+        obj
+      end
+    end
+
+    class << TableHandle =
+        Base.new(:connector_id, :connector_handle)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["connectorId"],
+          hash["connectorHandle"],
+        )
+        obj
+      end
+    end
+
+    class << TableScanNode =
+        Base.new(:id, :table, :output_symbols, :assignments, :original_constraint)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["table"] && TableHandle.decode(hash["table"]),
+          hash["outputSymbols"],
+          hash["assignments"] && Hash[hash["assignments"].to_a.map! {|k,v| [k, ColumnHandle.decode(v)] }],
+          hash["originalConstraint"],
+        )
+        obj
+      end
+    end
+
+    class << TableWriterNode =
+        Base.new(:id, :source, :target, :columns, :column_names, :outputs, :sample_weight_symbol)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["target"] && OutputTableHandle.decode(hash["target"]),
+          hash["columns"],
+          hash["columnNames"],
+          hash["outputs"],
+          hash["sampleWeightSymbol"],
+        )
+        obj
+      end
+    end
+
+    class << TaskInfo =
+        Base.new(:task_id, :version, :state, :self, :last_heartbeat, :output_buffers, :no_more_splits, :stats, :failures)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["taskId"] && TaskId.new(hash["taskId"]),
+          hash["version"],
+          hash["state"] && hash["state"].downcase.to_sym,
+          hash["self"],
+          hash["lastHeartbeat"],
+          hash["outputBuffers"] && SharedBufferInfo.decode(hash["outputBuffers"]),
+          hash["noMoreSplits"] && hash["noMoreSplits"].map {|h| PlanNodeId.new(h) },
+          hash["stats"] && TaskStats.decode(hash["stats"]),
+          hash["failures"] && hash["failures"].map {|h| ExecutionFailureInfo.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << TaskStats =
+        Base.new(:create_time, :first_start_time, :last_start_time, :end_time, :elapsed_time, :queued_time, :total_drivers, :queued_drivers, :running_drivers, :completed_drivers, :memory_reservation, :total_scheduled_time, :total_cpu_time, :total_user_time, :total_blocked_time, :raw_input_data_size, :raw_input_positions, :processed_input_data_size, :processed_input_positions, :output_data_size, :output_positions, :pipelines)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["createTime"],
+          hash["firstStartTime"],
+          hash["lastStartTime"],
+          hash["endTime"],
+          hash["elapsedTime"],
+          hash["queuedTime"],
+          hash["totalDrivers"],
+          hash["queuedDrivers"],
+          hash["runningDrivers"],
+          hash["completedDrivers"],
+          hash["memoryReservation"],
+          hash["totalScheduledTime"],
+          hash["totalCpuTime"],
+          hash["totalUserTime"],
+          hash["totalBlockedTime"],
+          hash["rawInputDataSize"],
+          hash["rawInputPositions"],
+          hash["processedInputDataSize"],
+          hash["processedInputPositions"],
+          hash["outputDataSize"],
+          hash["outputPositions"],
+          hash["pipelines"] && hash["pipelines"].map {|h| PipelineStats.decode(h) },
+        )
+        obj
+      end
+    end
+
+    class << TopNNode =
+        Base.new(:id, :source, :count, :order_by, :orderings, :partial, :sample_weight)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["count"],
+          hash["orderBy"],
+          hash["orderings"] && Hash[hash["orderings"].to_a.map! {|k,v| [k, v.downcase.to_sym] }],
+          hash["partial"],
+          hash["sampleWeight"],
+        )
+        obj
+      end
+    end
+
+    class << ValuesNode =
+        Base.new(:id, :output_symbols, :rows)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["outputSymbols"],
+          hash["rows"],
+        )
+        obj
+      end
+    end
+
+    class << WindowNode =
+        Base.new(:id, :source, :partition_by, :order_by, :orderings, :window_functions, :signatures)
+      def decode(hash)
+        obj = allocate
+        obj.send(:initialize_struct,
+          hash["id"] && PlanNodeId.new(hash["id"]),
+          hash["source"] && PlanNode.decode(hash["source"]),
+          hash["partitionBy"],
+          hash["orderBy"],
+          hash["orderings"] && Hash[hash["orderings"].to_a.map! {|k,v| [k, v.downcase.to_sym] }],
+          hash["windowFunctions"],
+          hash["signatures"] && Hash[hash["signatures"].to_a.map! {|k,v| [k, Signature.decode(v)] }],
+        )
+        obj
+      end
+    end
+
+
+  end
 end
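To make the new pattern concrete: every model is an immutable Struct subclass built by `Models::Base.new`, and its `decode` singleton method maps the server's camelCase JSON keys onto snake_case members. A small sketch against `Column` (the JSON fragment is made up for illustration):

```ruby
require 'json'

doc = JSON.parse('{"name": "user_id", "type": "bigint"}')  # hypothetical response fragment

col = Presto::Client::Models::Column.decode(doc)
col.name                 # => "user_id"
col.type                 # => "bigint"

# Setters were undefined by Models::Base, so instances are immutable.
col.respond_to?(:name=)  # => false

# Direct construction takes a hash of snake_case members instead of
# the positional arguments a plain Struct would require.
col2 = Presto::Client::Models::Column.new(name: "ts", type: "timestamp")
```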