bqseine-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of bqseine might be problematic.

bqseine-0.1.0/.gitignore ADDED
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
bqseine-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) [year] [fullname]
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
bqseine-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,75 @@
+ Metadata-Version: 2.4
+ Name: bqseine
+ Version: 0.1.0
+ Summary: ETL for BigQuery
+ Project-URL: Homepage, https://github.com/shaafiee/seine
+ Project-URL: Issues, https://github.com/shaafiee/seine/issues
+ Author-email: Shaafiee <shaafiee@gmail.com>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+
+ # BQSeine
+ ## Python dict to BigQuery data loader
+ Seine is a data loader that pushes the data in a Python dictionary to BigQuery in relational, normalized form. Seine can also use Gemini to surface data insights from BigQuery.
+
+ ## Usage
+
+ ### AI insights
+ ```
+ from bqseine.agent import chat
+ response = chat([question, context])
+ ```
+
+ When calling chat([<question>, <context>]), the <context> element explains to the LLM how the tables in BigQuery and their fields relate to each other and what they mean.
+
+ ### Data loading
+ ```
+ from bqseine.polyp import sync
+ sourceData = [
+     {
+         'item': 'Juice',
+         'price': 20.0,
+         'stock': [
+             {
+                 'batch': '2025-01-20',
+                 'qty': 300
+             },
+             {
+                 'batch': '2025-02-02',
+                 'qty': 50
+             }
+         ]
+     },
+     {
+         'item': 'Burger',
+         'price': 30.0,
+         'stock': [
+             {
+                 'batch': '2025-02-10',
+                 'qty': 200
+             }
+         ]
+     }
+ ]
+ sync('someGoogleProject', sourceData, 'catalog', 'US')
+ # The arguments are:
+ # sync(<Google project name>, <data: list or dict>, <main table name>, <BigQuery region>)
+ ```
+
+ The above example will generate the following tables in BigQuery:
+ #### catalog
+ | seine_id | seine_parent_id | item | price | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 0 | 'Juice' | 20.0 | now() |
+ | 2 | 0 | 'Burger' | 30.0 | now() |
+
+ #### catalog_stock
+ | seine_id | seine_parent_id | batch | qty | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 1 | '2025-01-20' | 300 | now() |
+ | 2 | 1 | '2025-02-02' | 50 | now() |
+ | 3 | 2 | '2025-02-10' | 200 | now() |
bqseine-0.1.0/README.md ADDED
@@ -0,0 +1,61 @@
+ # BQSeine
+ ## Python dict to BigQuery data loader
+ Seine is a data loader that pushes the data in a Python dictionary to BigQuery in relational, normalized form. Seine can also use Gemini to surface data insights from BigQuery.
+
+ ## Usage
+
+ ### AI insights
+ ```
+ from bqseine.agent import chat
+ response = chat([question, context])
+ ```
+
+ When calling chat([<question>, <context>]), the <context> element explains to the LLM how the tables in BigQuery and their fields relate to each other and what they mean.
+
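+ As a minimal sketch, assuming the catalog/catalog_stock tables from the data-loading example below (the wording of the string is illustrative, not shipped with the package), a context might look like:
+ ```
+ from bqseine.agent import chat
+
+ context = (
+     "Table catalog holds one row per product (columns: seine_id, item, price). "
+     "Table catalog_stock holds one row per stock batch (columns: seine_id, "
+     "seine_parent_id, batch, qty); catalog_stock.seine_parent_id references "
+     "catalog.seine_id."
+ )
+ response = chat(["Which item has the most stock?", context])
+ print(response.text)
+ ```
+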
+ ### Data loading
+ ```
+ from bqseine.polyp import sync
+ sourceData = [
+     {
+         'item': 'Juice',
+         'price': 20.0,
+         'stock': [
+             {
+                 'batch': '2025-01-20',
+                 'qty': 300
+             },
+             {
+                 'batch': '2025-02-02',
+                 'qty': 50
+             }
+         ]
+     },
+     {
+         'item': 'Burger',
+         'price': 30.0,
+         'stock': [
+             {
+                 'batch': '2025-02-10',
+                 'qty': 200
+             }
+         ]
+     }
+ ]
+ sync('someGoogleProject', sourceData, 'catalog', 'US')
+ # The arguments are:
+ # sync(<Google project name>, <data: list or dict>, <main table name>, <BigQuery region>)
+ ```
+
+ The above example will generate the following tables in BigQuery:
+ #### catalog
+ | seine_id | seine_parent_id | item | price | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 0 | 'Juice' | 20.0 | now() |
+ | 2 | 0 | 'Burger' | 30.0 | now() |
+
+ #### catalog_stock
+ | seine_id | seine_parent_id | batch | qty | injected |
+ | --- | --- | --- | --- | --- |
+ | 1 | 1 | '2025-01-20' | 300 | now() |
+ | 2 | 1 | '2025-02-02' | 50 | now() |
+ | 3 | 2 | '2025-02-10' | 200 | now() |
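+
+ Since catalog_stock.seine_parent_id references catalog.seine_id, the tables can be re-joined in BigQuery. A minimal sketch (the sync() source shown later in this diff creates the tables inside a dataset named seine_<main table>, here seine_catalog; the project name is the placeholder from the example above):
+ ```
+ from google.cloud import bigquery
+
+ client = bigquery.Client(project='someGoogleProject')
+ rows = client.query(
+     "SELECT c.item, s.batch, s.qty "
+     "FROM `someGoogleProject.seine_catalog.catalog` c "
+     "JOIN `someGoogleProject.seine_catalog.catalog_stock` s "
+     "ON s.seine_parent_id = c.seine_id "
+     "ORDER BY c.item, s.batch"
+ ).result()
+ for row in rows:
+     print(row.item, row.batch, row.qty)
+ ```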
bqseine-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,23 @@
+ [build-system]
+ requires = ["hatchling >= 1.26"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "bqseine"
+ version = "0.1.0"
+ authors = [
+     { name="Shaafiee", email="shaafiee@gmail.com" },
+ ]
+ description = "ETL for BigQuery"
+ readme = "README.md"
+ requires-python = ">=3.9"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+ ]
+ license = "MIT"
+ license-files = ["LICEN[CS]E*"]
+
+ [project.urls]
+ Homepage = "https://github.com/shaafiee/seine"
+ Issues = "https://github.com/shaafiee/seine/issues"
File without changes
bqseine-0.1.0/bqseine/agent.py ADDED
@@ -0,0 +1,281 @@
+ from __future__ import annotations
+
+ import os
+ import json
+ from typing import Dict, Any, List, Optional
+
+ from google import genai
+ from google.genai import types as gtypes
+ from google.cloud import bigquery
+
+ from context import sales, stocr, traffic
+
+ from decimal import Decimal
+ from datetime import date, datetime
+
+ # --- BigQuery setup (ADC: env, WI, or SA on the host) ---
+ bq = bigquery.Client()
+
+ # --- Gemini (AI Studio) ---
+ # Uses GOOGLE_API_KEY if set, otherwise falls back to application-default creds when supported.
+ gclient = genai.Client()
+
+ # ---------- Tool implementations ----------
+
+ def json_safe(obj):
+     """Recursively convert BigQuery / Python types to JSON-serializable types."""
+     if isinstance(obj, Decimal):
+         # choose float or str depending on how much precision you need
+         return float(obj)
+     if isinstance(obj, (datetime, date)):
+         return obj.isoformat()
+     if isinstance(obj, dict):
+         return {k: json_safe(v) for k, v in obj.items()}
+     if isinstance(obj, (list, tuple)):
+         return [json_safe(v) for v in obj]
+     # BigQuery Row and similar should already be converted before calling this
+     return obj
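+
+ # Illustrative example (not part of the original source):
+ #   json_safe({"price": Decimal("20.0"), "batch": date(2025, 1, 20)})
+ #   returns {"price": 20.0, "batch": "2025-01-20"}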
+
+ def list_datasets(project_id: Optional[str] = None) -> List[str]:
+     """List accessible BigQuery datasets in a project (defaults to client's project)."""
+     proj = project_id or bq.project
+     return [d.dataset_id for d in bq.list_datasets(project=proj)]
+
+ def list_tables(dataset: str, project_id: Optional[str] = None) -> List[str]:
+     """List tables in a dataset."""
+     proj = project_id or bq.project
+     return [t.table_id for t in bq.list_tables(f"{proj}.{dataset}")]
+
+ def get_table_schema(table: str, dataset: str, project_id: Optional[str] = None) -> List[Dict[str, Any]]:
+     """Return table schema (name, type, mode) as JSON."""
+     proj = project_id or bq.project
+     t = bq.get_table(f"{proj}.{dataset}.{table}")
+     return [{"name": f.name, "type": f.field_type, "mode": f.mode} for f in t.schema]
+
+ def run_query(sql: str, params: Optional[Dict[str, Any]] = None, dry_run: bool = False) -> Dict[str, Any]:
+     lowered = sql.strip().lower()
+     if lowered.startswith(("insert", "update", "delete", "merge", "create", "drop", "alter")):
+         return {
+             "error": "Refusing to run DDL/DML in this agent. SELECT-only allowed.",
+             "sql": sql,
+         }
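+     # Note: this is a prefix check only; a statement that begins with WITH or a
+     # comment could still carry DML, so least-privilege IAM on the BigQuery
+     # credentials remains the real safety boundary.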
+
+     qparams = []
+     if params:
+         for k, v in params.items():
+             if isinstance(v, bool):
+                 typ = "BOOL"
+             elif isinstance(v, int):
+                 typ = "INT64"
+             elif isinstance(v, float):
+                 typ = "FLOAT64"
+             else:
+                 typ = "STRING"
+             qparams.append(bigquery.ScalarQueryParameter(k, typ, v))
+
+     job_config = bigquery.QueryJobConfig(
+         dry_run=dry_run,
+         use_query_cache=True,
+     )
+     if qparams:
+         job_config.query_parameters = qparams
+
+     try:
+         job = bq.query(sql, job_config=job_config)
+
+         if dry_run:
+             return json_safe({
+                 "rows": [],
+                 "total_rows": 0,
+                 "slot_ms": job.slot_millis,
+                 "job_id": job.job_id,
+                 "cache_hit": getattr(job, "cache_hit", None),
+                 "sql": sql,
+             })
+
+         raw_rows = [dict(r) for r in job.result()]
+         return json_safe({
+             "rows": raw_rows,
+             "total_rows": len(raw_rows),
+             "slot_ms": job.slot_millis,
+             "job_id": job.job_id,
+             "cache_hit": getattr(job, "cache_hit", None),
+             "sql": sql,
+         })
+
+     except Exception as e:
+         return json_safe({
+             "error": str(e),
+             "sql": sql,
+         })
+
+ # ---------- Tool declarations for Gemini ----------
+
+ bq_tools = gtypes.Tool(function_declarations=[
+     {
+         "name": "list_datasets",
+         "description": "List accessible BigQuery datasets for a project.",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "project_id": {"type": "string", "description": "GCP project ID"},
+             }
+         },
+     },
+     {
+         "name": "list_tables",
+         "description": "List tables in a dataset.",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "dataset": {"type": "string"},
+                 "project_id": {"type": "string"},
+             },
+             "required": ["dataset"]
+         },
+     },
+     {
+         "name": "get_table_schema",
+         "description": "Get schema for a table as JSON.",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "table": {"type": "string"},
+                 "dataset": {"type": "string"},
+                 "project_id": {"type": "string"},
+             },
+             "required": ["table", "dataset"]
+         },
+     },
+     {
+         "name": "run_query",
+         "description": "Run a SELECT-only BigQuery query with optional named parameters.",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "sql": {"type": "string"},
+                 "params": {"type": "object"},
+                 "dry_run": {"type": "boolean"}
+             },
+             "required": ["sql"]
+         },
+     },
+ ])
+
+ # Optional: let the model decide when to call tools (AUTO); switch to ANY to force tool use.
+ tool_config_any = gtypes.ToolConfig(
+     function_calling_config=gtypes.FunctionCallingConfig(mode="AUTO")
+ )
+ gen_config = gtypes.GenerateContentConfig(
+     tools=[bq_tools],
+     tool_config=tool_config_any,
+     temperature=0.1,
+ )
+
+ SYSTEM_PROMPT = """You are a data analyst assistant for BigQuery.
+ - Prefer SELECT-only SQL.
+ - When missing a column/table name, use list_datasets/list_tables/get_table_schema.
+ - For final answers, provide a brief natural-language summary, include the SQL you ran in a fenced code block, and include the output from the SQL, styled as an HTML <table>, in a fenced code block.
+ """
+
+ def dispatch_tool(name: str, args: Dict[str, Any]) -> Dict[str, Any]:
+     try:
+         if name == "list_datasets":
+             return json_safe({"datasets": list_datasets(**args)})
+         if name == "list_tables":
+             return json_safe({"tables": list_tables(**args)})
+         if name == "get_table_schema":
+             return json_safe({"schema": get_table_schema(**args)})
+         if name == "run_query":
+             return json_safe(run_query(**args))
+         return {"error": f"Unknown tool: {name}"}
+     except Exception as e:
+         return {"error": str(e)}
+
+ def chat(user_data: list[str],
+          history: list[gtypes.Content] | None = None,
+          model: str = "gemini-2.5-pro") -> gtypes.GenerateContentResponse:
+     user_prompt = user_data[0]
+     additional_instructions = user_data[1]
+     if history is None:
+         history = []
+
+     if not history:
+         history.append(
+             gtypes.Content(
+                 role="user",
+                 parts=[gtypes.Part(text=f"{SYSTEM_PROMPT}\n{additional_instructions}")]
+             )
+         )
+
+     # Add the new user message
+     history.append(
+         gtypes.Content(
+             role="user",
+             parts=[gtypes.Part(text=user_prompt)]
+         )
+     )
+
+     # First call: model may respond with text + function_call parts
+     resp = gclient.models.generate_content(
+         model=model,
+         contents=history,
+         config=gen_config,
+     )
+
+     # Collect any tool calls from parts
+     tool_calls = []
+     for part in resp.candidates[0].content.parts:
+         if part.function_call:
+             tool_calls.append(part.function_call)
+
+     if not tool_calls:
+         # No tools requested → just return; caller can use resp.text safely
+         return resp
+
+     # Execute each requested tool
+     tool_response_contents: list[gtypes.Content] = []
+     for fc in tool_calls:
+         result = dispatch_tool(fc.name, dict(fc.args))
+         tool_response_contents.append(
+             gtypes.Content(
+                 role="tool",
+                 parts=[
+                     gtypes.Part(
+                         function_response=gtypes.FunctionResponse(
+                             name=fc.name,
+                             response=result,
+                         )
+                     )
+                 ],
+             )
+         )
+
+     # Extend history with the model's function_call turn + tool responses
+     history.append(resp.candidates[0].content)
+     history.extend(tool_response_contents)
+
+     # Second call: model now sees tool outputs and should produce final answer text
+     final_resp = gclient.models.generate_content(
+         model=model,
+         contents=history,
+         config=gen_config,
+     )
+
+     return final_resp
+
+ if __name__ == "__main__":
+     #"Generate a table of all the fabrics for which a slipcover was sold in October of 2025, with first column showing the fabric name, second the volume sold and third the total earnings in USD."
+     #"Generate weekly STOCR 28 the second quarter of 2025."
+     # Example usage
+     """
+     question = (
+         "Find the top 5 product categories by revenue in 2025 Q3 from "
+         "`myproj.analytics.sales` and show revenue and order_count. "
+         "If you don't know columns, inspect the schema first."
+     )
+     """
+     question = (
+         "Generate a table of all the fabrics for which a slipcover was sold in October of 2025, with first column showing the fabric name, second the volume sold and third the total earnings in USD."
+     )
+     answer = chat([question, f"{sales}\n{stocr}\n{traffic}"])
+     print(answer.text)
bqseine-0.1.0/bqseine/polyp.py ADDED
@@ -0,0 +1,381 @@
+ from gcp_secrets.secrets import *
+ from google.cloud import bigquery
+ from google.cloud.exceptions import NotFound
+ from datetime import date, datetime
+ from db_lib import *
+ import json
+
+
+ lastSeineId = {}
+ tableSchema = {}
+ tableCurrentSchema = {}
+ tableCurrentSchemaType = {}
+ tableReset = {}
+ #client = bigquery.Client()
+
+ # * EXCEPT(is_generated, generation_expression, is_stored, is_updatable)
+ tableColumnsQuery = """
+ SELECT
+     column_name
+ FROM
+     `__dataset__`.INFORMATION_SCHEMA.COLUMNS
+ WHERE
+     table_name = '__table__';
+ """
+
+
+ def resolveType(value):
+     """Map a Python value to a BigQuery column type name."""
+     isDatetime = False
+     try:
+         timeDatetime = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
+         value = timeDatetime
+         isDatetime = True
+     except (TypeError, ValueError):
+         pass
+
+     if isinstance(value, bool):
+         return "BOOL"
+     elif isinstance(value, int):
+         return "INT64"
+     elif isinstance(value, float):
+         return "FLOAT64"
+     elif isinstance(value, datetime):
+         return "DATETIME"
+     else:
+         return "STRING"
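+
+ # Illustrative behavior (not in the original source):
+ #   resolveType(True)                   -> "BOOL"
+ #   resolveType(300)                    -> "INT64"
+ #   resolveType(20.0)                   -> "FLOAT64"
+ #   resolveType("2025-01-20T00:00:00Z") -> "DATETIME"
+ #   resolveType("Juice")                -> "STRING"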
+
+ def testValue(value, tableKey, fieldKey):
+     """Coerce value to the type already recorded for tableKey.fieldKey."""
+     global tableCurrentSchemaType
+     try:
+         timeDatetime = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
+         return timeDatetime.strftime('%Y-%m-%dT%H:%M:%S')
+     except (TypeError, ValueError):
+         pass
+
+     fieldType = None
+     if fieldKey in tableCurrentSchemaType[tableKey].keys():
+         fieldType = tableCurrentSchemaType[tableKey][fieldKey]
+     else:
+         fieldType = resolveType(value)
+         tableCurrentSchemaType[tableKey][fieldKey] = fieldType
+
+     if fieldType == "BOOL":
+         if isinstance(value, bool):
+             return value
+         else:
+             return False
+     elif fieldType == "INT64":
+         if isinstance(value, int):
+             return value
+         else:
+             return 0
+     elif fieldType == "FLOAT64":
+         if isinstance(value, float):
+             return value
+         else:
+             return 0
+     elif fieldType == "STRING":
+         if isinstance(value, str):
+             return value
+         else:
+             return ""
+     else:
+         if isinstance(value, str):
+             return value
+         else:
+             return json.dumps(value)
+
+
+ def incrementId(curKey):
+     global lastSeineId
+     if curKey in lastSeineId.keys():
+         lastSeineId[curKey] += 1
+     else:
+         lastSeineId[curKey] = 1
+
+
+ def sync(myGoogleProject, blob, curKey, bqRegion='US', firstReset=False, idField=None):
+     global lastSeineId
+     global tableSchema
+     global tableReset
+     global tableCurrentSchema
+     global tableCurrentSchemaType
+
+     if len(curKey) < 1:
+         raise ValueError("A default current key (third arg) should be provided")
+
+     stack = []
+     if isinstance(blob, list):
+         for part in blob:
+             stack.insert(0, (curKey, part, curKey, 0))
+     else:
+         stack = [(curKey, blob, curKey, 0)]
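+     # Each stack entry is (table name, payload, parent table name, parent row id);
+     # sync() walks the nested structure depth-first, so nested dicts/lists become
+     # child tables keyed back to their parent through seine_parent_id.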
+
+     tableChecked = False
+
+     client = None
+     try:
+         client = bigquery.Client(project=myGoogleProject)
+     except Exception:
+         raise RuntimeError(f"Could not connect to {myGoogleProject}")
+
+     if firstReset:
+         lastSeineId = {}
+         tableSchema = {}
+         tableReset = {}
+         tableCurrentSchema = {}
+         tableCurrentSchemaType = {}
+
+     keyNotInSchema = {}
+     dataToLoad = {}
+     seineDataset = False
+     dataset = ""
+     datasetName = "seine_" + curKey
+     try:
+         seineDataset = client.get_dataset(myGoogleProject + f".{datasetName}")
+     except NotFound:
+         seineDataset = bigquery.Dataset(myGoogleProject + f".{datasetName}")
+         seineDataset.location = bqRegion
+         seineDataset = client.create_dataset(seineDataset, timeout=30)
+         print("Created dataset {}".format(seineDataset.dataset_id))
+
+     valuesArray = []
+     currentDepth = 0
+     while stack:
+         curKey, curDict, lastKey, parentId = stack.pop()
+         currentDepth += 1
+         fieldTypes = {}
+         fields = []
+         fieldsJson = {}
+         values = []
+         fieldType = {}
+         valuePlaceholders = {}
+         counter = 0
+
+         #seineDataset = client.get_dataset(myGoogleProject + f".{datasetName}")
+         print("-------------------------")
+         print(curDict)
+         noUpdateNeeded = False
+
+         if curKey not in lastSeineId.keys():
+             lastSeineId[curKey] = 1
+
+         if parentId == 0 and curKey == idField and isinstance(curDict, int):
+             try:
+                 curTableName = myGoogleProject + f".{datasetName}." + curKey
+                 queryJob = client.query(f"select {idField} from {curTableName} where {idField} = {curDict}")
+                 returned = queryJob.result()
+                 foundId = False
+                 for row in returned:
+                     foundId = True
+                 if foundId:
+                     print(f"ID exists {idField} = {curDict}")
+                     print(returned)
+                     continue
+             except Exception:
+                 print("================== QUERY FAILED =======================")
+
+         #if curKey not in keyNotInSchema.keys():
+         keyNotInSchema[curKey] = []
+         if curKey not in tableCurrentSchema.keys():
+             try:
+                 curTableName = myGoogleProject + f".{datasetName}." + curKey
+                 queryJob = client.query(f"select max(seine_id) as max_id from {curTableName}")
+                 returned = queryJob.result()
+                 print(returned)
+                 for row in returned:
+                     if row.max_id is not None and isinstance(row.max_id, int):
+                         lastSeineId[curKey] = int(row.max_id) + 1
+             except Exception:
+                 pass
+             tableCurrentSchema[curKey] = []
+             tableCurrentSchemaType[curKey] = {}
+
+         if isinstance(curDict, list):
+             if not isinstance(curDict[0], dict):
+                 fields.append(curKey)
+                 fieldTypes[curKey] = resolveType(json.dumps(curDict))
+                 valuePlaceholders[curKey] = testValue(json.dumps(curDict), curKey, curKey)
+                 if curKey not in tableCurrentSchema[curKey]:
+                     tableCurrentSchema[curKey].append(curKey)
+                     keyNotInSchema[curKey].append(curKey)
+                 #tableCurrentSchemaType[curKey][curKey] = resolveType(json.dumps(curDict))
+             else:
+                 noUpdateNeeded = True
+                 for tempDict in curDict:
+                     stack.insert(0, (curKey, tempDict, lastKey, lastSeineId[lastKey]))
+                 continue
+
+         elif isinstance(curDict, dict):
+             for key, value in curDict.items():
+                 if isinstance(value, list):
+                     if len(value) > 0:
+                         if not isinstance(value[0], dict) and key not in ["edges"]:
+                             fields.append(key)
+                             fieldTypes[key] = resolveType(json.dumps(value))
+                             valuePlaceholders[key] = testValue(json.dumps(value), curKey, key)
+                             if key not in tableCurrentSchema[curKey]:
+                                 tableCurrentSchema[curKey].append(key)
+                                 keyNotInSchema[curKey].append(key)
+                             #tableCurrentSchemaType[curKey][key] = resolveType(json.dumps(value))
+                         elif key in ["edges"]:
+                             noUpdateNeeded = True
+                             for part in value:
+                                 stack.insert(0, (curKey, part, lastKey, lastSeineId[lastKey]))
+                         else:
+                             for part in value:
+                                 stack.insert(0, (curKey + "_" + key, part, curKey, lastSeineId[lastKey]))
+                             #fields.append(key)
+                             #fieldTypes[key] = resolveType(json.dumps(value))
+                             #valuePlaceholders[key] = testValue(json.dumps(value), curKey, key)
+                             #if key not in tableCurrentSchema[curKey]:
+                             #    tableCurrentSchema[curKey].append(key)
+                             #    keyNotInSchema[curKey].append(key)
+                 elif isinstance(value, dict):
+                     if key in ["node"]:
+                         noUpdateNeeded = True
+                         stack.insert(0, (curKey, value, lastKey, lastSeineId[lastKey]))
+                     else:
+                         stack.insert(0, (curKey + "_" + key, value, curKey, lastSeineId[lastKey]))
+                         fields.append(key)
+                         fieldTypes[key] = resolveType(lastSeineId[curKey])
+                         valuePlaceholders[key] = testValue(lastSeineId[curKey], curKey, key)
+                         if key not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(key)
+                             keyNotInSchema[curKey].append(key)
+                         #tableCurrentSchemaType[curKey][key] = resolveType(json.dumps(value))
+                 else:
+                     # Add schema
+                     if key not in fields:
+                         fields.append(key)
+                         fieldTypes[key] = resolveType(value)
+                         valuePlaceholders[key] = testValue(value, curKey, key)
+                         if key not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(key)
+                             keyNotInSchema[curKey].append(key)
+                         #tableCurrentSchemaType[curKey][key] = resolveType(value)
+
+         elif isinstance(curDict, (str, int, float)):
+             fields.append(curKey)
+             fieldTypes[curKey] = resolveType(curDict)
+             valuePlaceholders[curKey] = testValue(curDict, curKey, curKey)
+             if curKey not in tableCurrentSchema[curKey]:
+                 tableCurrentSchema[curKey].append(curKey)
+                 keyNotInSchema[curKey].append(curKey)
+             #tableCurrentSchemaType[curKey][curKey] = resolveType(curDict)
+
+         else:
+             fields.append(curKey)
+             fieldTypes[curKey] = resolveType(curDict)
+             valuePlaceholders[curKey] = testValue(curDict, curKey, curKey)
+             if curKey not in tableCurrentSchema[curKey]:
+                 tableCurrentSchema[curKey].append(curKey)
+                 keyNotInSchema[curKey].append(curKey)
+             #tableCurrentSchemaType[curKey][curKey] = resolveType(curDict)
+
+         #if len(fields) < 1 and parentId is None:
+         #    continue
+
+         if noUpdateNeeded:
+             continue
+
+         if len(keyNotInSchema[curKey]) > 0 or curKey not in tableReset.keys():
+             tableReset[curKey] = True
+             curTableName = myGoogleProject + f".{datasetName}." + curKey
+             curTable = False
+             tableSchema[curKey] = []
+             tableSchema[curKey].append(bigquery.SchemaField("seine_id", "INT64"))
+             tableSchema[curKey].append(bigquery.SchemaField("seine_parent_id", "INT64"))
+             tableSchema[curKey].append(bigquery.SchemaField("injected", "DATETIME"))
+             tableCurrentSchema[curKey].append("seine_id")
+             tableCurrentSchema[curKey].append("seine_parent_id")
+             tableCurrentSchema[curKey].append("injected")
+             try:
+                 curTable = client.get_table(curTableName)
+                 #colQuery = tableColumnsQuery.replace("__dataset__", seineDataset).replace("__table__", key)
+                 #queryJob = client.query(colQuery)
+                 #returned = queryJob.result()
+                 existingSchema = curTable.schema
+                 tableSchema[curKey] = existingSchema
+                 existingColumns = []
+                 jobConfig = bigquery.QueryJobConfig(
+                     destination=curTableName,
+                     schema_update_options=[bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION],
+                     write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
+                 )
+                 for schemaElement in existingSchema:
+                     existingColumns.append(schemaElement.name)
+                 schemaAdjusted = False
+                 for fidx, field in enumerate(fields):
+                     if field not in existingColumns:
+                         if field not in tableCurrentSchema[curKey]:
+                             tableCurrentSchema[curKey].append(field)
+                         tableSchema[curKey].append(bigquery.SchemaField(field, fieldTypes[field]))
+                         existingSchema.append(bigquery.SchemaField(field, fieldTypes[field]))
+                         if not schemaAdjusted:
+                             schemaAdjusted = True
+                 if schemaAdjusted:
+                     curTable.schema = existingSchema
+                     try:
+                         curTable = client.update_table(curTable, ["schema"])
+                     except Exception:
+                         pass
+
+                 #if returned.total_rows > 0:
+                 #    counter = 0
+                 #    for row in returned:
+                 #        print(row)
+                 #        counter += 1
+                 #        if counter < 2:
+                 #            continue
+                 #        if row.max_id + 1 > lastSeineId[curKey]:
+                 #            lastSeineId[curKey] = int(row.max_id) + 1
+                 #            print(f"{curKey}" + str(lastSeineId[curKey]))
+             except NotFound:
+                 for fidx, field in enumerate(fields):
+                     tableSchema[curKey].append(bigquery.SchemaField(field, fieldTypes[field]))
+                     if field not in tableCurrentSchema[curKey]:
+                         tableCurrentSchema[curKey].append(field)
+                 curTable = bigquery.Table(curTableName, schema=tableSchema[curKey])
+                 curTable = client.create_table(curTable)
+                 lastSeineId[curKey] = 1
+             keyNotInSchema[curKey] = []
+
+         if curKey not in dataToLoad.keys():
+             dataToLoad[curKey] = []
+         tempRow = {
+             "seine_id": lastSeineId[curKey],
+             "seine_parent_id": parentId,
+             "injected": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
+         }
+         for idx, field in enumerate(fields):
+             tempRow[field] = valuePlaceholders[field]
+         print(tempRow)
+         dataToLoad[curKey].append(tempRow)
+         lastSeineId[curKey] += 1
+
+     conn, cur = dbConnect()
+     for tableName in dataToLoad.keys():
+         curTable = client.get_table(myGoogleProject + f".{datasetName}." + tableName)
+         errors = client.insert_rows_json(
+             curTable, dataToLoad[tableName], row_ids=[None] * len(dataToLoad[tableName])
+         )
+         if errors == []:
+             print("Loaded " + str(len(dataToLoad[tableName])) + " rows into " + tableName)
+         else:
+             print("FAILED: loading " + str(len(dataToLoad[tableName])) + " rows into " + tableName)
+             print(errors)