bqseine-0.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of bqseine might be problematic.

bqseine-0.0.1/.gitignore ADDED
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
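The rules above appear to be the stock GitHub Python .gitignore template, unchanged. As a quick sanity check that a given path is covered, `git check-ignore -v` names the matching rule; a minimal sketch in Python (assumes `git` is on PATH and the script runs inside the repository):

```
import subprocess

# Exit code 0 means the path is ignored; -v prints the matching rule.
result = subprocess.run(
    ["git", "check-ignore", "-v", "__pycache__/mod.cpython-312.pyc"],
    capture_output=True, text=True,
)
print(result.stdout or "not ignored")
```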
bqseine-0.0.1/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) [year] [fullname]
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
bqseine-0.0.1/PKG-INFO ADDED
@@ -0,0 +1,65 @@
+ Metadata-Version: 2.4
+ Name: bqseine
+ Version: 0.0.1
+ Summary: ETL for BigQuery
+ Project-URL: Homepage, https://github.com/shaafiee/seine
+ Project-URL: Issues, https://github.com/shaafiee/seine/issues
+ Author-email: Shaafiee <shaafiee@gmail.com>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+
+ # BQSeine
+ ## Python dict to BigQuery data loader
+ Seine is a data loader that pushes the data in a Python dict (or a list of dicts) to BigQuery in normalized relational form.
+
+ ## Usage
+ ```
+ from bqseine import sync
+ sourceData = [
+     {
+         'item': 'Juice',
+         'price': 20.0,
+         'stock': [
+             {
+                 'batch': '2025-01-20',
+                 'qty': 300
+             },
+             {
+                 'batch': '2025-02-02',
+                 'qty': 50
+             }
+         ]
+     },
+     {
+         'item': 'Burger',
+         'price': 30.0,
+         'stock': [
+             {
+                 'batch': '2025-02-10',
+                 'qty': 200
+             }
+         ]
+     }
+ ]
+ sync('someGoogleProject', sourceData, 'catalog', 'US')
+ ### The arguments above are:
+ ### sync(<Google project name>, <source data: dict or list of dicts>, <main table name>, <BigQuery region>)
+ ```
+
+ The above example will generate the following tables in BigQuery:
+ ### catalog
+ | seine_id | seine_parent_id | item | price | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 0 | 'Juice' | 20.0 | now() |
+ | 2 | 0 | 'Burger' | 30.0 | now() |
+
+ ### catalog_stock
+ | seine_id | seine_parent_id | batch | qty | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 1 | '2025-01-20' | 300 | now() |
+ | 2 | 1 | '2025-02-02' | 50 | now() |
+ | 3 | 2 | '2025-02-10' | 200 | now() |
bqseine-0.0.1/README.md ADDED
@@ -0,0 +1,51 @@
+ # BQSeine
+ ## Python dict to BigQuery data loader
+ Seine is a data loader that pushes the data in a Python dict (or a list of dicts) to BigQuery in normalized relational form.
+
+ ## Usage
+ ```
+ from bqseine import sync
+ sourceData = [
+     {
+         'item': 'Juice',
+         'price': 20.0,
+         'stock': [
+             {
+                 'batch': '2025-01-20',
+                 'qty': 300
+             },
+             {
+                 'batch': '2025-02-02',
+                 'qty': 50
+             }
+         ]
+     },
+     {
+         'item': 'Burger',
+         'price': 30.0,
+         'stock': [
+             {
+                 'batch': '2025-02-10',
+                 'qty': 200
+             }
+         ]
+     }
+ ]
+ sync('someGoogleProject', sourceData, 'catalog', 'US')
+ ### The arguments above are:
+ ### sync(<Google project name>, <source data: dict or list of dicts>, <main table name>, <BigQuery region>)
+ ```
+
+ The above example will generate the following tables in BigQuery:
+ ### catalog
+ | seine_id | seine_parent_id | item | price | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 0 | 'Juice' | 20.0 | now() |
+ | 2 | 0 | 'Burger' | 30.0 | now() |
+
+ ### catalog_stock
+ | seine_id | seine_parent_id | batch | qty | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 1 | '2025-01-20' | 300 | now() |
+ | 2 | 1 | '2025-02-02' | 50 | now() |
+ | 3 | 2 | '2025-02-10' | 200 | now() |
bqseine-0.0.1/pyproject.toml ADDED
@@ -0,0 +1,23 @@
+ [build-system]
+ requires = ["hatchling >= 1.26"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "bqseine"
+ version = "0.0.1"
+ authors = [
+     { name="Shaafiee", email="shaafiee@gmail.com" },
+ ]
+ description = "ETL for BigQuery"
+ readme = "README.md"
+ requires-python = ">=3.9"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+ ]
+ license = "MIT"
+ license-files = ["LICEN[CS]E*"]
+
+ [project.urls]
+ Homepage = "https://github.com/shaafiee/seine"
+ Issues = "https://github.com/shaafiee/seine/issues"
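The `[build-system]` table selects hatchling as the build backend, so the sdist and wheel can be produced with the standard PEP 517 `build` frontend. A minimal sketch, assuming `pip install build` has been run and the working directory is the project root:

```
import subprocess, sys

# Build the sdist and wheel; artifacts land in dist/.
subprocess.run([sys.executable, "-m", "build"], check=True)
```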
@@ -0,0 +1,381 @@
+ from gcp_secrets.secrets import *
+ from google.cloud import bigquery
+ from google.cloud.exceptions import NotFound
+ from datetime import date, datetime
+ from db_lib import *
+ import json
+
+
+ # Module-level state shared across sync() calls: per-table ID counters and
+ # cached schemas, so repeated calls keep seine_id numbering consistent.
+ lastSeineId = {}
+ tableSchema = {}
+ tableCurrentSchema = {}
+ tableCurrentSchemaType = {}
+ tableReset = {}
+
+ # * EXCEPT(is_generated, generation_expression, is_stored, is_updatable)
+ tableColumnsQuery = """
+ SELECT
+     column_name
+ FROM
+     `__dataset__`.INFORMATION_SCHEMA.COLUMNS
+ WHERE
+     table_name = '__table__';
+ """
+
+
+ def resolveType(value):
+     # Map a Python value to a BigQuery column type; ISO-8601 strings
+     # ("%Y-%m-%dT%H:%M:%SZ") are promoted to DATETIME.
+     try:
+         value = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
+     except (ValueError, TypeError):
+         pass
+
+     if isinstance(value, bool):
+         return "BOOL"
+     elif isinstance(value, int):
+         return "INT64"
+     elif isinstance(value, float):
+         return "FLOAT64"
+     elif isinstance(value, datetime):
+         return "DATETIME"
+     else:
+         return "STRING"
+
+
+ def testValue(value, tableKey, fieldKey):
+     # Coerce value to the column type already recorded for this field,
+     # caching the resolved type the first time the field is seen.
+     global tableCurrentSchemaType
+     try:
+         timeDatetime = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
+         return timeDatetime.strftime('%Y-%m-%dT%H:%M:%S')
+     except (ValueError, TypeError):
+         pass
+
+     if fieldKey in tableCurrentSchemaType[tableKey]:
+         fieldType = tableCurrentSchemaType[tableKey][fieldKey]
+     else:
+         fieldType = resolveType(value)
+         tableCurrentSchemaType[tableKey][fieldKey] = fieldType
+
+     if fieldType == "BOOL":
+         return value if isinstance(value, bool) else False
+     elif fieldType == "INT64":
+         return value if isinstance(value, int) else 0
+     elif fieldType == "FLOAT64":
+         return value if isinstance(value, float) else 0.0
+     elif fieldType == "STRING":
+         return value if isinstance(value, str) else ""
+     else:
+         return value if isinstance(value, str) else json.dumps(value)
+
+
+ def incrementId(curKey):
+     # Bump (or initialise) the per-table seine_id counter.
+     global lastSeineId
+     if curKey in lastSeineId:
+         lastSeineId[curKey] += 1
+     else:
+         lastSeineId[curKey] = 1
+
+
+ def sync(myGoogleProject, blob, curKey, bqRegion='US', firstReset=False, idField=None):
+     global lastSeineId
+     global tableSchema
+     global tableReset
+     global tableCurrentSchema
+     global tableCurrentSchemaType
+
+     if len(curKey) < 1:
+         raise ValueError("A default current key (third arg) should be provided")
+
+     # Depth-first stack of (table key, node, parent table key, parent seine_id).
+     stack = []
+     if isinstance(blob, list):
+         for part in blob:
+             stack.insert(0, (curKey, part, curKey, 0))
+     else:
+         stack = [(curKey, blob, curKey, 0)]
+
+     try:
+         client = bigquery.Client(project=myGoogleProject)
+     except Exception:
+         raise RuntimeError(f"Could not connect to {myGoogleProject}")
+
+     if firstReset:
+         lastSeineId = {}
+         tableSchema = {}
+         tableReset = {}
+         tableCurrentSchema = {}
+         tableCurrentSchemaType = {}
+
+     keyNotInSchema = {}
+     dataToLoad = {}
+     datasetName = "seine_" + curKey
+     # Fetch the per-root dataset, creating it on first use.
+     try:
+         seineDataset = client.get_dataset(myGoogleProject + f".{datasetName}")
+     except NotFound:
+         seineDataset = bigquery.Dataset(myGoogleProject + f".{datasetName}")
+         seineDataset.location = bqRegion
+         seineDataset = client.create_dataset(seineDataset, timeout=30)
+         print("Created dataset {}".format(seineDataset.dataset_id))
+
+     while stack:
+         curKey, curDict, lastKey, parentId = stack.pop()
+         fieldTypes = {}
+         fields = []
+         valuePlaceholders = {}
+
+         print("-------------------------")
+         print(curDict)
+         noUpdateNeeded = False
+
+         if curKey not in lastSeineId:
+             lastSeineId[curKey] = 1
+
+         # If an idField was given, skip integer root IDs already in the table.
+         if parentId == 0 and curKey == idField and isinstance(curDict, int):
+             try:
+                 curTableName = myGoogleProject + f".{datasetName}." + curKey
+                 queryJob = client.query(f"select {idField} from `{curTableName}` where {idField} = {curDict}")
+                 returned = queryJob.result()
+                 foundId = False
+                 for row in returned:
+                     foundId = True
+                 if foundId:
+                     print(f"ID exists {idField} = {curDict}")
+                     print(returned)
+                     continue
+             except Exception:
+                 print("================== QUERY FAILED =======================")
+
+         keyNotInSchema[curKey] = []
+         if curKey not in tableCurrentSchema:
+             # Resume seine_id numbering from an existing table, if any.
+             try:
+                 curTableName = myGoogleProject + f".{datasetName}." + curKey
+                 queryJob = client.query(f"select max(seine_id) as max_id from `{curTableName}`")
+                 returned = queryJob.result()
+                 print(returned)
+                 for row in returned:
+                     if row.max_id is not None and isinstance(row.max_id, int):
+                         lastSeineId[curKey] = int(row.max_id) + 1
+             except Exception:
+                 pass
+             tableCurrentSchema[curKey] = []
+             tableCurrentSchemaType[curKey] = {}
+
+         if isinstance(curDict, list):
+             if len(curDict) > 0 and not isinstance(curDict[0], dict):
+                 # A list of scalars is stored as a single JSON-string column.
+                 fields.append(curKey)
+                 fieldTypes[curKey] = resolveType(json.dumps(curDict))
+                 valuePlaceholders[curKey] = testValue(json.dumps(curDict), curKey, curKey)
+                 if curKey not in tableCurrentSchema[curKey]:
+                     tableCurrentSchema[curKey].append(curKey)
+                     keyNotInSchema[curKey].append(curKey)
+             else:
+                 # A list of dicts becomes one child row per element.
+                 noUpdateNeeded = True
+                 for tempDict in curDict:
+                     stack.insert(0, (curKey, tempDict, lastKey, lastSeineId[lastKey]))
+                 continue
+
+         elif isinstance(curDict, dict):
+             for key, value in curDict.items():
+                 if isinstance(value, list):
+                     if len(value) > 0:
+                         if not isinstance(value[0], dict) and key not in ["edges"]:
+                             fields.append(key)
+                             fieldTypes[key] = resolveType(json.dumps(value))
+                             valuePlaceholders[key] = testValue(json.dumps(value), curKey, key)
+                             if key not in tableCurrentSchema[curKey]:
+                                 tableCurrentSchema[curKey].append(key)
+                                 keyNotInSchema[curKey].append(key)
+                         elif key in ["edges"]:
+                             # GraphQL-style "edges" lists collapse into the current table.
+                             noUpdateNeeded = True
+                             for part in value:
+                                 stack.insert(0, (curKey, part, lastKey, lastSeineId[lastKey]))
+                         else:
+                             # A nested list of dicts becomes child table <table>_<key>.
+                             for part in value:
+                                 stack.insert(0, (curKey + "_" + key, part, curKey, lastSeineId[lastKey]))
+                 elif isinstance(value, dict):
+                     if key in ["node"]:
+                         # GraphQL-style "node" wrappers are unwrapped in place.
+                         noUpdateNeeded = True
+                         stack.insert(0, (curKey, value, lastKey, lastSeineId[lastKey]))
+                     else:
+                         # A nested dict becomes child table <table>_<key>, plus a local
+                         # column holding the current seine_id for cross-reference.
+                         stack.insert(0, (curKey + "_" + key, value, curKey, lastSeineId[curKey]))
+                         fields.append(key)
+                         fieldTypes[key] = resolveType(lastSeineId[curKey])
+                         valuePlaceholders[key] = testValue(lastSeineId[curKey], curKey, key)
+                         if key not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(key)
+                             keyNotInSchema[curKey].append(key)
+                 else:
+                     # Scalar field: record column, type, and coerced value.
+                     if key not in fields:
+                         fields.append(key)
+                         fieldTypes[key] = resolveType(value)
+                         valuePlaceholders[key] = testValue(value, curKey, key)
+                         if key not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(key)
+                             keyNotInSchema[curKey].append(key)
+
+         else:
+             # Bare scalar (or other) node: a single column named after the table.
+             fields.append(curKey)
+             fieldTypes[curKey] = resolveType(curDict)
+             valuePlaceholders[curKey] = testValue(curDict, curKey, curKey)
+             if curKey not in tableCurrentSchema[curKey]:
+                 tableCurrentSchema[curKey].append(curKey)
+                 keyNotInSchema[curKey].append(curKey)
+
+         if noUpdateNeeded:
+             continue
+
+         # Create the table, or extend its schema when new columns appeared.
+         if len(keyNotInSchema[curKey]) > 0 or curKey not in tableReset:
+             tableReset[curKey] = True
+             curTableName = myGoogleProject + f".{datasetName}." + curKey
+             tableSchema[curKey] = [
+                 bigquery.SchemaField("seine_id", "INT64"),
+                 bigquery.SchemaField("seine_parent_id", "INT64"),
+                 bigquery.SchemaField("injected", "DATETIME"),
+             ]
+             tableCurrentSchema[curKey].append("seine_id")
+             tableCurrentSchema[curKey].append("seine_parent_id")
+             tableCurrentSchema[curKey].append("injected")
+             try:
+                 curTable = client.get_table(curTableName)
+                 existingSchema = list(curTable.schema)
+                 tableSchema[curKey] = existingSchema
+                 existingColumns = [schemaElement.name for schemaElement in existingSchema]
+                 schemaAdjusted = False
+                 for field in fields:
+                     if field not in existingColumns:
+                         if field not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(field)
+                         existingSchema.append(bigquery.SchemaField(field, fieldTypes[field]))
+                         schemaAdjusted = True
+                 if schemaAdjusted:
+                     # Append-only schema change, per ALLOW_FIELD_ADDITION semantics.
+                     curTable.schema = existingSchema
+                     try:
+                         curTable = client.update_table(curTable, ["schema"])
+                     except Exception:
+                         pass
+             except NotFound:
+                 for field in fields:
+                     tableSchema[curKey].append(bigquery.SchemaField(field, fieldTypes[field]))
+                     if field not in tableCurrentSchema[curKey]:
+                         tableCurrentSchema[curKey].append(field)
+                 curTable = bigquery.Table(curTableName, schema=tableSchema[curKey])
+                 curTable = client.create_table(curTable)
+                 lastSeineId[curKey] = 1
+             keyNotInSchema[curKey] = []
+
+         # Queue the row for insertion.
+         if curKey not in dataToLoad:
+             dataToLoad[curKey] = []
+         tempRow = {
+             "seine_id": lastSeineId[curKey],
+             "seine_parent_id": parentId,
+             "injected": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
+         }
+         for field in fields:
+             tempRow[field] = valuePlaceholders[field]
+         print(tempRow)
+         dataToLoad[curKey].append(tempRow)
+         lastSeineId[curKey] += 1
+
+     # Stream all queued rows into their tables.
+     conn, cur = dbConnect()  # provided by db_lib
+     for tableName in dataToLoad.keys():
+         curTable = client.get_table(myGoogleProject + f".{datasetName}." + tableName)
+         errors = client.insert_rows_json(
+             curTable, dataToLoad[tableName], row_ids=[None] * len(dataToLoad[tableName])
+         )
+         if errors == []:
+             print("Loaded " + str(len(dataToLoad[tableName])) + " rows into " + tableName)
+         else:
+             print("FAILED: loading " + str(len(dataToLoad[tableName])) + " rows into " + tableName)
+             print(errors)
+
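Note that `sync` accepts two parameters beyond the four shown in the README: `firstReset`, which clears the module-level seine_id counters and cached schemas before loading, and `idField`, which is compared against the current table key to skip integer IDs that already exist in the target table. A minimal sketch of a repeat load under those assumptions (hypothetical project and data):

```
from bqseine import sync

# Re-run with a clean slate: firstReset=True drops the cached counters
# and schemas so numbering is re-derived from the existing tables.
rows = [{'item': 'Tea', 'price': 12.5}]
sync('someGoogleProject', rows, 'catalog', 'US', firstReset=True)
```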