python-datastore-sqlalchemy 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,287 @@
+ # Copyright (c) 2017 The sqlalchemy-bigquery Authors
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy of
+ # this software and associated documentation files (the "Software"), to deal in
+ # the Software without restriction, including without limitation the rights to
+ # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ # the Software, and to permit persons to whom the Software is furnished to do so,
+ # subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ import re
+
+ from google.cloud.bigquery.dataset import DatasetReference
+ from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
+ from google.cloud.bigquery.job import (
+     CreateDisposition,
+     QueryJobConfig,
+     QueryPriority,
+     SchemaUpdateOption,
+     WriteDisposition,
+ )
+ from google.cloud.bigquery.table import TableReference
+
+ GROUP_DELIMITER = re.compile(r"\s*\,\s*")
+ KEY_VALUE_DELIMITER = re.compile(r"\s*\:\s*")
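+ # e.g. GROUP_DELIMITER.split("a, b,c") -> ["a", "b", "c"]
+ # and KEY_VALUE_DELIMITER.split("key : value") -> ["key", "value"]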
+
+
+ def parse_boolean(bool_string):
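+     # Case-insensitive: "true"/"True"/"TRUE" -> True, "false" -> False;
+     # any other value raises ValueError.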
+     bool_string = bool_string.lower()
+     if bool_string == "true":
+         return True
+     elif bool_string == "false":
+         return False
+     else:
+         raise ValueError()
+
+
+ def parse_url(url):  # noqa: C901
+     query = dict(url.query)  # need mutable query.
+
+     # use_legacy_sql (legacy)
+     if "use_legacy_sql" in query:
+         raise ValueError("legacy sql is not supported by this dialect")
+     # allow_large_results (legacy)
+     if "allow_large_results" in query:
+         raise ValueError(
+             "allow_large_results is only allowed for legacy sql, which is not supported by this dialect"
+         )
+     # flatten_results (legacy)
+     if "flatten_results" in query:
+         raise ValueError(
+             "flatten_results is only allowed for legacy sql, which is not supported by this dialect"
+         )
+     # maximum_billing_tier (deprecated)
+     if "maximum_billing_tier" in query:
+         raise ValueError("maximum_billing_tier is a deprecated argument")
+
+     project_id = url.host
+     location = None
+     dataset_id = url.database or None
+     arraysize = None
+     credentials_path = None
+     credentials_base64 = None
+     list_tables_page_size = None
+     user_supplied_client = False
+
+     # location
+     if "location" in query:
+         location = query.pop("location")
+
+     # credentials_path
+     if "credentials_path" in query:
+         credentials_path = query.pop("credentials_path")
+
+     # credentials_base64
+     if "credentials_base64" in query:
+         credentials_base64 = query.pop("credentials_base64")
+
+     # arraysize
+     if "arraysize" in query:
+         str_arraysize = query.pop("arraysize")
+         try:
+             arraysize = int(str_arraysize)
+         except ValueError:
+             raise ValueError("invalid int in url query arraysize: " + str_arraysize)
+
+     if "list_tables_page_size" in query:
+         str_list_tables_page_size = query.pop("list_tables_page_size")
+         try:
+             list_tables_page_size = int(str_list_tables_page_size)
+         except ValueError:
+             raise ValueError(
+                 "invalid int in url query list_tables_page_size: "
+                 + str_list_tables_page_size
+             )
+
+     # user_supplied_client
+     if "user_supplied_client" in query:
+         user_supplied_client = query.pop("user_supplied_client").lower() == "true"
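+     # Everything consumed so far is a connection-level option; any keys still
+     # left in `query` are interpreted below as QueryJobConfig attributes.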
+
+     # if only these "non-config" values were present, the dict will now be empty
+     if not query:
+         # if a dataset_id exists, we need to return a job_config that isn't None
+         # so it can be updated with a dataset reference from the client
+         if dataset_id:
+             return (
+                 project_id,
+                 location,
+                 dataset_id,
+                 arraysize,
+                 credentials_path,
+                 credentials_base64,
+                 QueryJobConfig(),
+                 list_tables_page_size,
+                 user_supplied_client,
+             )
+         else:
+             return (
+                 project_id,
+                 location,
+                 dataset_id,
+                 arraysize,
+                 credentials_path,
+                 credentials_base64,
+                 None,
+                 list_tables_page_size,
+                 user_supplied_client,
+             )
+
+     job_config = QueryJobConfig()
+
+     # clustering_fields list(str)
+     if "clustering_fields" in query:
+         clustering_fields = GROUP_DELIMITER.split(query["clustering_fields"])
+         job_config.clustering_fields = list(clustering_fields)
+
+     # create_disposition
+     if "create_disposition" in query:
+         create_disposition = query["create_disposition"]
+         try:
+             job_config.create_disposition = getattr(
+                 CreateDisposition, create_disposition
+             )
+         except AttributeError:
+             raise ValueError(
+                 "invalid create_disposition in url query: " + create_disposition
+             )
+
+     # default_dataset
+     if "default_dataset" in query or "dataset_id" in query or "project_id" in query:
+         raise ValueError(
+             "don't pass default_dataset, dataset_id, project_id in url query, instead use the url host and database"
+         )
+
+     # destination
+     if "destination" in query:
+         dest_project = None
+         dest_dataset = None
+         dest_table = None
+
+         try:
+             dest_project, dest_dataset, dest_table = query["destination"].split(".")
+         except ValueError:
+             raise ValueError(
+                 "url query destination parameter should be fully qualified with project, dataset, and table"
+             )
+
+         job_config.destination = TableReference(
+             DatasetReference(dest_project, dest_dataset), dest_table
+         )
+
+     # destination_encryption_configuration
+     if "destination_encryption_configuration" in query:
+         job_config.destination_encryption_configuration = EncryptionConfiguration(
+             query["destination_encryption_configuration"]
+         )
+
+     # dry_run
+     if "dry_run" in query:
+         try:
+             job_config.dry_run = parse_boolean(query["dry_run"])
+         except ValueError:
+             raise ValueError(
+                 "invalid boolean in url query for dry_run: " + query["dry_run"]
+             )
+
+     # labels
+     if "labels" in query:
+         label_groups = GROUP_DELIMITER.split(query["labels"])
+         labels = {}
+         for label_group in label_groups:
+             try:
+                 key, value = KEY_VALUE_DELIMITER.split(label_group)
+             except ValueError:
+                 raise ValueError("malformed url query in labels: " + label_group)
+             labels[key] = value
+
+         job_config.labels = labels
+
+     # maximum_bytes_billed
+     if "maximum_bytes_billed" in query:
+         try:
+             job_config.maximum_bytes_billed = int(query["maximum_bytes_billed"])
+         except ValueError:
+             raise ValueError(
+                 "invalid int in url query maximum_bytes_billed: "
+                 + query["maximum_bytes_billed"]
+             )
+
+     # priority
+     if "priority" in query:
+         try:
+             job_config.priority = getattr(QueryPriority, query["priority"])
+         except AttributeError:
+             raise ValueError("invalid priority in url query: " + query["priority"])
+
+     # query_parameters
+     if "query_parameters" in query:
+         raise NotImplementedError("url query query_parameters not implemented")
+
+     # schema_update_options
+     if "schema_update_options" in query:
+         schema_update_options = GROUP_DELIMITER.split(query["schema_update_options"])
+         try:
+             job_config.schema_update_options = [
+                 getattr(SchemaUpdateOption, schema_update_option)
+                 for schema_update_option in schema_update_options
+             ]
+         except AttributeError:
+             raise ValueError(
+                 "invalid schema_update_options in url query: "
+                 + query["schema_update_options"]
+             )
+
+     # table_definitions
+     if "table_definitions" in query:
+         raise NotImplementedError("url query table_definitions not implemented")
+
+     # time_partitioning
+     if "time_partitioning" in query:
+         raise NotImplementedError("url query time_partitioning not implemented")
+
+     # udf_resources
+     if "udf_resources" in query:
+         raise NotImplementedError("url query udf_resources not implemented")
+
+     # use_query_cache
+     if "use_query_cache" in query:
+         try:
+             job_config.use_query_cache = parse_boolean(query["use_query_cache"])
+         except ValueError:
+             raise ValueError(
+                 "invalid boolean in url query for use_query_cache: "
+                 + query["use_query_cache"]
+             )
+
+     # write_disposition
+     if "write_disposition" in query:
+         try:
+             job_config.write_disposition = getattr(
+                 WriteDisposition, query["write_disposition"]
+             )
+         except AttributeError:
+             raise ValueError(
+                 "invalid write_disposition in url query: " + query["write_disposition"]
+             )
+
+     return (
+         project_id,
+         location,
+         dataset_id,
+         arraysize,
+         credentials_path,
+         credentials_base64,
+         job_config,
+         list_tables_page_size,
+         user_supplied_client,
+     )
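
For reference, here is a minimal sketch of how parse_url is driven, assuming only that sqlalchemy is installed (its public make_url helper builds the URL object whose .host, .database, and .query attributes parse_url reads). The project, dataset, and label names below are made up.

    from sqlalchemy.engine.url import make_url

    # parse_url as defined in the module above
    url = make_url(
        "bigquery://some-project/some-dataset"
        "?maximum_bytes_billed=1000&priority=BATCH&labels=team:data"
    )
    (
        project_id,             # "some-project", taken from the URL host
        location,               # None, since no location= was given
        dataset_id,             # "some-dataset", taken from the URL database
        arraysize,              # None
        credentials_path,       # None
        credentials_base64,     # None
        job_config,             # QueryJobConfig with the three query options applied
        list_tables_page_size,  # None
        user_supplied_client,   # False
    ) = parse_url(url)

    assert job_config.maximum_bytes_billed == 1000
    assert job_config.labels == {"team": "data"}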