impala 0.1.0

Files changed (86)
  1. data/.gitignore +17 -0
  2. data/Gemfile +2 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +28 -0
  5. data/Rakefile +15 -0
  6. data/impala.gemspec +25 -0
  7. data/lib/impala.rb +33 -0
  8. data/lib/impala/connection.rb +93 -0
  9. data/lib/impala/cursor.rb +86 -0
  10. data/lib/impala/protocol.rb +6 -0
  11. data/lib/impala/protocol/beeswax_constants.rb +14 -0
  12. data/lib/impala/protocol/beeswax_service.rb +747 -0
  13. data/lib/impala/protocol/beeswax_types.rb +192 -0
  14. data/lib/impala/protocol/data_constants.rb +12 -0
  15. data/lib/impala/protocol/data_sinks_constants.rb +12 -0
  16. data/lib/impala/protocol/data_sinks_types.rb +107 -0
  17. data/lib/impala/protocol/data_types.rb +77 -0
  18. data/lib/impala/protocol/descriptors_constants.rb +12 -0
  19. data/lib/impala/protocol/descriptors_types.rb +266 -0
  20. data/lib/impala/protocol/exprs_constants.rb +12 -0
  21. data/lib/impala/protocol/exprs_types.rb +345 -0
  22. data/lib/impala/protocol/facebook_service.rb +706 -0
  23. data/lib/impala/protocol/fb303_constants.rb +14 -0
  24. data/lib/impala/protocol/fb303_types.rb +24 -0
  25. data/lib/impala/protocol/frontend_constants.rb +12 -0
  26. data/lib/impala/protocol/frontend_types.rb +347 -0
  27. data/lib/impala/protocol/hive_metastore_constants.rb +52 -0
  28. data/lib/impala/protocol/hive_metastore_types.rb +697 -0
  29. data/lib/impala/protocol/impala_internal_service.rb +244 -0
  30. data/lib/impala/protocol/impala_internal_service_constants.rb +12 -0
  31. data/lib/impala/protocol/impala_internal_service_types.rb +362 -0
  32. data/lib/impala/protocol/impala_plan_service.rb +310 -0
  33. data/lib/impala/protocol/impala_plan_service_constants.rb +12 -0
  34. data/lib/impala/protocol/impala_plan_service_types.rb +36 -0
  35. data/lib/impala/protocol/impala_service.rb +260 -0
  36. data/lib/impala/protocol/impala_service_constants.rb +12 -0
  37. data/lib/impala/protocol/impala_service_types.rb +46 -0
  38. data/lib/impala/protocol/java_constants_constants.rb +42 -0
  39. data/lib/impala/protocol/java_constants_types.rb +14 -0
  40. data/lib/impala/protocol/opcodes_constants.rb +12 -0
  41. data/lib/impala/protocol/opcodes_types.rb +309 -0
  42. data/lib/impala/protocol/partitions_constants.rb +12 -0
  43. data/lib/impala/protocol/partitions_types.rb +44 -0
  44. data/lib/impala/protocol/plan_nodes_constants.rb +12 -0
  45. data/lib/impala/protocol/plan_nodes_types.rb +345 -0
  46. data/lib/impala/protocol/planner_constants.rb +12 -0
  47. data/lib/impala/protocol/planner_types.rb +78 -0
  48. data/lib/impala/protocol/runtime_profile_constants.rb +12 -0
  49. data/lib/impala/protocol/runtime_profile_types.rb +97 -0
  50. data/lib/impala/protocol/state_store_service.rb +244 -0
  51. data/lib/impala/protocol/state_store_service_constants.rb +12 -0
  52. data/lib/impala/protocol/state_store_service_types.rb +185 -0
  53. data/lib/impala/protocol/state_store_subscriber_service.rb +82 -0
  54. data/lib/impala/protocol/state_store_subscriber_service_constants.rb +12 -0
  55. data/lib/impala/protocol/state_store_subscriber_service_types.rb +67 -0
  56. data/lib/impala/protocol/statestore_types_constants.rb +12 -0
  57. data/lib/impala/protocol/statestore_types_types.rb +77 -0
  58. data/lib/impala/protocol/status_constants.rb +12 -0
  59. data/lib/impala/protocol/status_types.rb +44 -0
  60. data/lib/impala/protocol/thrift_hive_metastore.rb +4707 -0
  61. data/lib/impala/protocol/types_constants.rb +12 -0
  62. data/lib/impala/protocol/types_types.rb +86 -0
  63. data/lib/impala/version.rb +3 -0
  64. data/thrift/Data.thrift +52 -0
  65. data/thrift/DataSinks.thrift +61 -0
  66. data/thrift/Descriptors.thrift +115 -0
  67. data/thrift/Exprs.thrift +134 -0
  68. data/thrift/Frontend.thrift +193 -0
  69. data/thrift/ImpalaInternalService.thrift +265 -0
  70. data/thrift/ImpalaPlanService.thrift +44 -0
  71. data/thrift/ImpalaService.thrift +105 -0
  72. data/thrift/JavaConstants.thrift +60 -0
  73. data/thrift/Opcodes.thrift +317 -0
  74. data/thrift/Partitions.thrift +41 -0
  75. data/thrift/PlanNodes.thrift +184 -0
  76. data/thrift/Planner.thrift +72 -0
  77. data/thrift/RuntimeProfile.thrift +58 -0
  78. data/thrift/StateStoreService.thrift +121 -0
  79. data/thrift/StateStoreSubscriberService.thrift +64 -0
  80. data/thrift/StatestoreTypes.thrift +50 -0
  81. data/thrift/Status.thrift +31 -0
  82. data/thrift/Types.thrift +71 -0
  83. data/thrift/beeswax.thrift +175 -0
  84. data/thrift/fb303.thrift +112 -0
  85. data/thrift/hive_metastore.thrift +528 -0
  86. metadata +206 -0
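
Most of these files are Thrift-generated protocol bindings (`lib/impala/protocol/*`) compiled from the IDL under `data/thrift/`; the hand-written core is just `lib/impala.rb`, `lib/impala/connection.rb`, and `lib/impala/cursor.rb`. As a rough sketch of how the gem is meant to be driven (the host and query are placeholders, and the exact method names are an assumption based on the file layout above rather than a verified API):

```ruby
require 'impala'

# Hypothetical host; 21000 is the Beeswax port impalad conventionally listens on.
connection = Impala.connect('impala-host.example.com', 21000)

# Run a statement through the Beeswax service and buffer the result as an
# array of row hashes (Connection and Cursor come from the files listed above).
rows = connection.query('SELECT zip, income FROM census LIMIT 5')
rows.each { |row| puts row.inspect }
```

The largest of the bundled IDL files, `data/thrift/hive_metastore.thrift`, is shown in full below.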
data/thrift/hive_metastore.thrift
@@ -0,0 +1,528 @@
+ #!/usr/local/bin/thrift -java
+
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements. See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership. The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License. You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #
+ # Thrift Service that the MetaStore is built on
+ #
+
+ include "fb303.thrift"
+
+ namespace java org.apache.hadoop.hive.metastore.api
+ namespace php metastore
+ namespace cpp Apache.Hadoop.Hive
+ namespace rb Impala.Protocol.HiveMetastore
+
+ const string DDL_TIME = "transient_lastDdlTime"
+
+ struct Version {
+   1: string version,
+   2: string comments
+ }
+
+ struct FieldSchema {
+   1: string name,    // name of the field
+   2: string type,    // type of the field. primitive types defined above, specify list<TYPE_NAME>, map<TYPE_NAME, TYPE_NAME> for lists & maps
+   3: string comment
+ }
+
+ struct Type {
+   1: string name,            // one of the types in PrimitiveTypes or CollectionTypes or User defined types
+   2: optional string type1,  // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
+   3: optional string type2,  // val type if the name is 'map' (MAP_TYPE)
+   //4: optional list<FieldSchema> fields // if the name is one of the user defined types
+ }
+
+ enum HiveObjectType {
+   GLOBAL = 1,
+   DATABASE = 2,
+   TABLE = 3,
+   PARTITION = 4,
+   COLUMN = 5,
+ }
+
+ enum PrincipalType {
+   USER = 1,
+   ROLE = 2,
+   GROUP = 3,
+ }
+
+ const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
+ const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
+ const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"
+
+ enum PartitionEventType {
+   LOAD_DONE = 1,
+ }
+
+ struct HiveObjectRef {
+   1: HiveObjectType objectType,
+   2: string dbName,
+   3: string objectName,
+   4: list<string> partValues,
+   5: string columnName,
+ }
+
+ struct PrivilegeGrantInfo {
+   1: string privilege,
+   2: i32 createTime,
+   3: string grantor,
+   4: PrincipalType grantorType,
+   5: bool grantOption,
+ }
+
+ struct HiveObjectPrivilege {
+   1: HiveObjectRef hiveObject,
+   2: string principalName,
+   3: PrincipalType principalType,
+   4: PrivilegeGrantInfo grantInfo,
+ }
+
+ struct PrivilegeBag {
+   1: list<HiveObjectPrivilege> privileges,
+ }
+
+ struct PrincipalPrivilegeSet {
+   1: map<string, list<PrivilegeGrantInfo>> userPrivileges,  // user name -> privilege grant info
+   2: map<string, list<PrivilegeGrantInfo>> groupPrivileges, // group name -> privilege grant info
+   3: map<string, list<PrivilegeGrantInfo>> rolePrivileges,  // role name -> privilege grant info
+ }
+
+ struct Role {
+   1: string roleName,
+   2: i32 createTime,
+   3: string ownerName,
+ }
+
+ // namespace for tables
+ struct Database {
+   1: string name,
+   2: string description,
+   3: string locationUri,
+   4: map<string, string> parameters, // properties associated with the database
+   5: optional PrincipalPrivilegeSet privileges
+ }
+
+ // This object holds the information needed by SerDes
+ struct SerDeInfo {
+   1: string name,                    // name of the serde, table name by default
+   2: string serializationLib,        // usually the class that implements the extractor & loader
+   3: map<string, string> parameters  // initialization parameters
+ }
+
+ // sort order of a column (column name along with asc(1)/desc(0))
+ struct Order {
+   1: string col,  // sort column name
+   2: i32 order    // asc(1) or desc(0)
+ }
+
+ // this object holds all the information about physical storage of the data belonging to a table
+ struct StorageDescriptor {
+   1: list<FieldSchema> cols,  // required (refer to types defined above)
+   2: string location,         // defaults to <warehouse loc>/<db loc>/tablename
+   3: string inputFormat,      // SequenceFileInputFormat (binary) or TextInputFormat or custom format
+   4: string outputFormat,     // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format
+   5: bool compressed,         // compressed or not
+   6: i32 numBuckets,          // this must be specified if there are any dimension columns
+   7: SerDeInfo serdeInfo,     // serialization and deserialization information
+   8: list<string> bucketCols, // reducer grouping columns, clustering columns and bucketing columns
+   9: list<Order> sortCols,    // sort order of the data in each bucket
+   10: map<string, string> parameters // any user supplied key value hash
+ }
+
+ // table information
+ struct Table {
+   1: string tableName,                // name of the table
+   2: string dbName,                   // database name ('default')
+   3: string owner,                    // owner of this table
+   4: i32 createTime,                  // creation time of the table
+   5: i32 lastAccessTime,              // last access time (usually this will be filled from HDFS and shouldn't be relied on)
+   6: i32 retention,                   // retention time
+   7: StorageDescriptor sd,            // storage descriptor of the table
+   8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
+   9: map<string, string> parameters,  // to store comments or any other user level parameters
+   10: string viewOriginalText,        // original view text, null for non-view
+   11: string viewExpandedText,        // expanded view text, null for non-view
+   12: string tableType,               // table type enum, e.g. EXTERNAL_TABLE
+   13: optional PrincipalPrivilegeSet privileges,
+ }
+
+ struct Partition {
+   1: list<string> values, // string value is converted to appropriate partition key type
+   2: string dbName,
+   3: string tableName,
+   4: i32 createTime,
+   5: i32 lastAccessTime,
+   6: StorageDescriptor sd,
+   7: map<string, string> parameters,
+   8: optional PrincipalPrivilegeSet privileges
+ }
+
+ struct Index {
+   1: string indexName,         // unique within the whole database namespace
+   2: string indexHandlerClass, // reserved
+   3: string dbName,
+   4: string origTableName,
+   5: i32 createTime,
+   6: i32 lastAccessTime,
+   7: string indexTableName,
+   8: StorageDescriptor sd,
+   9: map<string, string> parameters,
+   10: bool deferredRebuild
+ }
+
+ // schema of the table/query results etc.
+ struct Schema {
+   // column names, types, comments
+   1: list<FieldSchema> fieldSchemas, // delimiters etc
+   2: map<string, string> properties
+ }
+
+ // Key-value store to be used with selected
+ // Metastore APIs (create, alter methods).
+ // The client can pass environment properties / configs that can be
+ // accessed in hooks.
+ struct EnvironmentContext {
+   1: map<string, string> properties
+ }
+
+ exception MetaException {
+   1: string message
+ }
+
+ exception UnknownTableException {
+   1: string message
+ }
+
+ exception UnknownDBException {
+   1: string message
+ }
+
+ exception AlreadyExistsException {
+   1: string message
+ }
+
+ exception InvalidPartitionException {
+   1: string message
+ }
+
+ exception UnknownPartitionException {
+   1: string message
+ }
+
+ exception InvalidObjectException {
+   1: string message
+ }
+
+ exception NoSuchObjectException {
+   1: string message
+ }
+
+ exception IndexAlreadyExistsException {
+   1: string message
+ }
+
+ exception InvalidOperationException {
+   1: string message
+ }
+
+ exception ConfigValSecurityException {
+   1: string message
+ }
+
+ /**
+  * This interface is live.
+  */
+ service ThriftHiveMetastore extends fb303.FacebookService
+ {
+   void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+   Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+   list<string> get_databases(1:string pattern) throws(1:MetaException o1)
+   list<string> get_all_databases() throws(1:MetaException o1)
+   void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   // returns the type with given name (make separate calls for the dependent types if needed)
+   Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+   bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   map<string, Type> get_type_all(1:string name)
+     throws(1:MetaException o2)
+
+   // Gets a list of FieldSchemas describing the columns of a particular table
+   list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
+
+   // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
+   list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+
+   // create a Hive table. Following fields must be set
+   // tableName
+   // database (only 'default' for now until Hive QL supports databases)
+   // owner (not needed, but good to have for tracking purposes)
+   // sd.cols (list of field schemas)
+   // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
+   // sd.outputFormat (SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat)
+   // sd.serdeInfo.serializationLib (SerDe class name, e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
+   // * See notes on DDL_TIME
+   void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
+   void create_table_with_environment_context(1:Table tbl,
+       2:EnvironmentContext environment_context)
+     throws (1:AlreadyExistsException o1,
+       2:InvalidObjectException o2, 3:MetaException o3,
+       4:NoSuchObjectException o4)
+   // drops the table and all the partitions associated with it if the table has partitions
+   // delete data (including partitions) if deleteData is set to true
+   void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
+     throws(1:NoSuchObjectException o1, 2:MetaException o3)
+   list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
+   list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
+
+   Table get_table(1:string dbname, 2:string tbl_name)
+     throws (1:MetaException o1, 2:NoSuchObjectException o2)
+   list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
+     throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+
+   // Get a list of table names that match a filter.
+   // The filter operators are LIKE, <, <=, >, >=, =, <>
+   //
+   // In the filter statement, values interpreted as strings must be enclosed in quotes,
+   // while values interpreted as integers should not be. Strings and integers are the only
+   // supported value types.
+   //
+   // The currently supported key names in the filter are:
+   // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+   //   and supports all filter operators
+   // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+   //   and supports all filter operators except LIKE
+   // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+   //   and only supports the filter operators = and <>.
+   //   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+   //   For example, to filter on parameter keys called "retention", the key name in the filter
+   //   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+   //   Also, = and <> only work for keys that exist
+   //   in the tables. E.g., if you are looking for tables where key1 <> value, it will only
+   //   look at tables that have a value for the parameter key1.
+   // Some example filter statements include:
+   // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+   //   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+   // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+   //   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
+   // @param dbName
+   //   The name of the database from which you will retrieve the table names
+   // @param filterType
+   //   The type of filter
+   // @param filter
+   //   The filter string
+   // @param max_tables
+   //   The maximum number of tables returned
+   // @return A list of table names that match the desired filter
+   list<string> get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
+     throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+
+   // alter table applies only to future partitions, not to existing partitions
+   // * See notes on DDL_TIME
+   void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+   void alter_table_with_environment_context(1:string dbname, 2:string tbl_name,
+       3:Table new_tbl, 4:EnvironmentContext environment_context)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+   // the following applies only to tables that have partitions
+   // * See notes on DDL_TIME
+   Partition add_partition(1:Partition new_part)
+     throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition add_partition_with_environment_context(1:Partition new_part,
+       2:EnvironmentContext environment_context)
+     throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2,
+       3:MetaException o3)
+   i32 add_partitions(1:list<Partition> new_parts)
+     throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+     throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
+     throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
+     throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
+     throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
+       4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   // returns all the partitions for this table in reverse chronological order.
+   // If max_parts is given then it will return only that many.
+   list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+     throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
+       4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+   list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+     throws(1:MetaException o2)
+
+   // get_partition*_ps methods allow filtering by a partial partition specification,
+   // as needed for dynamic partitions. The values that are not restricted should
+   // be empty strings. Nulls were considered (instead of "") but caused errors in
+   // generated Python code. The size of part_vals may be smaller than the
+   // number of partition columns - the unspecified values are considered the same
+   // as "".
+   list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
+       3:list<string> part_vals, 4:i16 max_parts=-1)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
+       5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+   list<string> get_partition_names_ps(1:string db_name,
+       2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   // get the partitions matching the given partition filter
+   list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
+       3:string filter, 4:i16 max_parts=-1)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   // get partitions given a list of partition names
+   list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   // changes the partition to the new partition object. partition is identified from the part values
+   // in the new_part
+   // * See notes on DDL_TIME
+   void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+   void alter_partition_with_environment_context(1:string db_name,
+       2:string tbl_name, 3:Partition new_part,
+       4:EnvironmentContext environment_context)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+   // rename the old partition to the new partition object by changing old part values to the part values
+   // in the new_part. old partition is identified from part_vals.
+   // partition keys in new_part should be the same as those in old partition.
+   void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+   // gets the value of the configuration key in the metastore server. returns
+   // defaultValue if the key does not exist. if the configuration key does not
+   // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
+   // thrown.
+   string get_config_value(1:string name, 2:string defaultValue)
+     throws(1:ConfigValSecurityException o1)
+
+   // converts a partition name into a partition values array
+   list<string> partition_name_to_vals(1: string part_name)
+     throws(1: MetaException o1)
+   // converts a partition name into a partition specification (a mapping from
+   // the partition cols to the values)
+   map<string, string> partition_name_to_spec(1: string part_name)
+     throws(1: MetaException o1)
+
+   void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+       4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
+       3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+       6: InvalidPartitionException o6)
+   bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+       4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
+       3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+       6: InvalidPartitionException o6)
+
+   // index
+   Index add_index(1:Index new_index, 2: Table index_table)
+     throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
+     throws (1:InvalidOperationException o1, 2:MetaException o2)
+   bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData)
+     throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+   list<Index> get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+     throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+     throws(1:MetaException o2)
+
+   // authorization privileges
+
+   bool create_role(1:Role role) throws(1:MetaException o1)
+   bool drop_role(1:string role_name) throws(1:MetaException o1)
+   list<string> get_role_names() throws(1:MetaException o1)
+   bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
+       4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
+   bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
+     throws(1:MetaException o1)
+   list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
+
+   PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
+       3: list<string> group_names) throws(1:MetaException o1)
+   list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
+       3: HiveObjectRef hiveObject) throws(1:MetaException o1)
+
+   bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+   bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+
+   // this is used by the metastore client to send UGI information to the metastore server
+   // immediately after setting up a connection.
+   list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)
+
+   // Authentication (delegation token) interfaces
+
+   // get metastore server delegation token for use from the map/reduce tasks to authenticate
+   // to metastore server
+   string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
+     throws (1:MetaException o1)
+
+   // method to renew delegation token obtained from metastore server
+   i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+
+   // method to cancel delegation token obtained from metastore server
+   void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+ }
+
+ // * Note about the DDL_TIME: When creating or altering a table or a partition,
+ // if the DDL_TIME is not set, the current time will be used.
+
+ // For storing info about archived partitions in parameters
+
+ // Whether the partition is archived
+ const string IS_ARCHIVED = "is_archived",
+ // The original location of the partition, before archiving. After archiving,
+ // this directory will contain the archive. When the partition
+ // is dropped, this directory will be deleted
+ const string ORIGINAL_LOCATION = "original_location",
+
+ // these should be needed only for backward compatibility with filestore
+ const string META_TABLE_COLUMNS = "columns",
+ const string META_TABLE_COLUMN_TYPES = "columns.types",
+ const string BUCKET_FIELD_NAME = "bucket_field_name",
+ const string BUCKET_COUNT = "bucket_count",
+ const string FIELD_TO_DIMENSION = "field_to_dimension",
+ const string META_TABLE_NAME = "name",
+ const string META_TABLE_DB = "db",
+ const string META_TABLE_LOCATION = "location",
+ const string META_TABLE_SERDE = "serde",
+ const string META_TABLE_PARTITION_COLUMNS = "partition_columns",
+ const string FILE_INPUT_FORMAT = "file.inputformat",
+ const string FILE_OUTPUT_FORMAT = "file.outputformat",
+ const string META_TABLE_STORAGE = "storage_handler",
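
Per the `namespace rb Impala.Protocol.HiveMetastore` declaration above, the Thrift compiler places the generated service under the `Impala::Protocol::HiveMetastore` module (`lib/impala/protocol/thrift_hive_metastore.rb` in the file list), so it can also be driven directly with the `thrift` runtime gem. A minimal sketch, assuming a metastore on the conventional port 9083 and the standard generated `Client` class:

```ruby
require 'thrift'
require 'impala'  # loads the generated Impala::Protocol modules

# Assumed host; 9083 is the conventional Hive metastore port.
socket    = Thrift::Socket.new('metastore-host.example.com', 9083)
transport = Thrift::BufferedTransport.new(socket)
protocol  = Thrift::BinaryProtocol.new(transport)
client    = Impala::Protocol::HiveMetastore::ThriftHiveMetastore::Client.new(protocol)

transport.open
begin
  # Walk the catalog with two of the calls defined in the service above.
  client.get_all_databases.each do |db|
    puts db
    client.get_all_tables(db).each { |tbl| puts "  #{tbl}" }
  end
ensure
  transport.close
end
```

Each call can raise the exception types declared in its `throws` clause (here, `MetaException`), so real code would rescue those around the calls.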