quillsql 2.2.0__tar.gz → 2.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. {quillsql-2.2.0 → quillsql-2.2.1}/PKG-INFO +1 -1
  2. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/core.py +170 -22
  3. quillsql-2.2.1/quillsql/utils/pivot_template.py +485 -0
  4. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/PKG-INFO +1 -1
  5. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/SOURCES.txt +1 -0
  6. {quillsql-2.2.0 → quillsql-2.2.1}/setup.py +1 -1
  7. {quillsql-2.2.0 → quillsql-2.2.1}/README.md +0 -0
  8. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/__init__.py +0 -0
  9. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/assets/__init__.py +0 -0
  10. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/assets/pgtypes.py +0 -0
  11. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/db/__init__.py +0 -0
  12. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/db/bigquery.py +0 -0
  13. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/db/cached_connection.py +0 -0
  14. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/db/db_helper.py +0 -0
  15. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/db/postgres.py +0 -0
  16. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/error.py +0 -0
  17. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/utils/__init__.py +0 -0
  18. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/utils/filters.py +0 -0
  19. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/utils/run_query_processes.py +0 -0
  20. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/utils/schema_conversion.py +0 -0
  21. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql/utils/tenants.py +0 -0
  22. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/dependency_links.txt +0 -0
  23. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/requires.txt +0 -0
  24. {quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/top_level.txt +0 -0
  25. {quillsql-2.2.0 → quillsql-2.2.1}/setup.cfg +0 -0
{quillsql-2.2.0 → quillsql-2.2.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quillsql
-Version: 2.2.0
+Version: 2.2.1
 Summary: Quill SDK for Python.
 Home-page: https://github.com/quill-sql/quill-python
 Author: Quill
{quillsql-2.2.0 → quillsql-2.2.1}/quillsql/core.py
@@ -20,6 +20,10 @@ from quillsql.utils.run_query_processes import (
     remove_fields,
 )
 from quillsql.utils.tenants import extract_tenant_ids
+from quillsql.utils.pivot_template import (
+    parse_distinct_values,
+    hydrate_pivot_template,
+)
 
 load_dotenv()
 
@@ -77,7 +81,14 @@ class Quill:
         )
         return None
 
-    def query(self, tenants, metadata, flags = None, filters: list[Filter] = None):
+    def query(
+        self,
+        tenants,
+        metadata,
+        flags=None,
+        filters: list[Filter] = None,
+        admin_enabled: bool = None,
+    ):
         if not tenants:
             raise ValueError("You may not pass an empty tenants array.")
         if flags and not flags:
@@ -95,6 +106,105 @@ class Quill:
         # Set tenant IDs in the connection
         self.target_connection.tenant_ids = extract_tenant_ids(tenants)
 
+        # Handle pivot-template task
+        if task == "pivot-template":
+            # Step 1: Get pivot template and queries from server
+            pivot_payload = {
+                **metadata,
+                "tenants": tenants,
+                "flags": flags,
+                "adminEnabled": admin_enabled,
+            }
+            if filters is not None:
+                pivot_payload["sdkFilters"] = [
+                    convert_custom_filter(f) for f in filters
+                ]
+            pivot_template_response = self.post_quill(
+                "pivot-template",
+                pivot_payload,
+            )
+
+            if pivot_template_response.get("error"):
+                return {
+                    "status": "error",
+                    "error": pivot_template_response.get("error"),
+                    "data": pivot_template_response.get("metadata") or {},
+                }
+
+            template = pivot_template_response.get("metadata", {}).get("template")
+            config = pivot_template_response.get("metadata", {}).get("config")
+            distinct_values_query = pivot_template_response.get("metadata", {}).get("distinctValuesQuery")
+            row_count_query = pivot_template_response.get("metadata", {}).get("rowCountQuery")
+
+            # Step 2: Run the distinct values query to get unique values
+            distinct_values = []
+            if distinct_values_query:
+                distinct_value_results = self.run_queries(
+                    [distinct_values_query],
+                    self.target_connection.database_type,
+                    metadata.get("databaseType"),
+                    metadata,
+                    None
+                )
+
+                # Parse distinct values from database results
+                distinct_values = parse_distinct_values(
+                    distinct_value_results["queryResults"][0],
+                    config.get("databaseType")
+                )
+
+            # Step 3: Hydrate the template with the distinct values
+            try:
+                final_query = hydrate_pivot_template(template, distinct_values, config)
+            except Exception as err:
+                return {
+                    "status": "error",
+                    "error": f"Failed to hydrate pivot template: {str(err)}",
+                    "data": {},
+                }
+
+            # Step 4: Run queries - pivot query and optional row count query
+            queries_to_run = [final_query]
+            if row_count_query:
+                # Hydrate the rowCountQuery with the same distinct values
+                hydrated_row_count_query = hydrate_pivot_template(
+                    row_count_query,
+                    distinct_values,
+                    config
+                )
+                queries_to_run.append(hydrated_row_count_query)
+
+            final_results = self.run_queries(
+                queries_to_run,
+                self.target_connection.database_type,
+                metadata.get("databaseType"),
+                metadata,
+                pivot_template_response.get("metadata", {}).get("runQueryConfig")
+            )
+
+            responseMetadata = pivot_template_response.get("metadata") or {}
+            # Set rows and fields from first query result (the pivot query)
+            if final_results.get("queryResults") and len(final_results["queryResults"]) >= 1:
+                query_results = final_results["queryResults"][0]
+                if query_results.get("rows"):
+                    responseMetadata["rows"] = query_results["rows"]
+                if query_results.get("fields"):
+                    responseMetadata["fields"] = query_results["fields"]
+
+            # Remove internal SDK fields before returning to frontend
+            if "template" in responseMetadata:
+                del responseMetadata["template"]
+            if "distinctValuesQuery" in responseMetadata:
+                del responseMetadata["distinctValuesQuery"]
+            if "rowCountQuery" in responseMetadata:
+                del responseMetadata["rowCountQuery"]
+
+            return {
+                "data": responseMetadata,
+                "queries": final_results,
+                "status": "success",
+            }
+
         # Handle tenant flags synthesis
         tenant_flags = None
         if (task in FLAG_TASKS and
@@ -102,13 +212,17 @@ class Quill:
             tenants[0] != SINGLE_TENANT
         ):
 
-            response = self.post_quill('tenant-mapped-flags', {
-                'reportId': metadata.get('reportId') or metadata.get('dashboardItemId'),
-                'clientId': metadata.get('clientId'),
-                'dashboardName': metadata.get('name'),
-                'tenants': tenants,
-                'flags': flags,
-            })
+            response = self.post_quill(
+                'tenant-mapped-flags',
+                {
+                    'reportId': metadata.get('reportId') or metadata.get('dashboardItemId'),
+                    'clientId': metadata.get('clientId'),
+                    'dashboardName': metadata.get('name'),
+                    'tenants': tenants,
+                    'flags': flags,
+                    'adminEnabled': admin_enabled,
+                },
+            )
 
             if response.get('error'):
                 return {
@@ -157,12 +271,19 @@ class Quill:
         view_query = None
         if metadata.get("preQueries"):
             view_query = metadata.get("preQueries")[0]
+        pre_query_columns = (
+            pre_query_results.get("columns")
+            if metadata.get("runQueryConfig")
+            and metadata.get("runQueryConfig").get("getColumns")
+            else None
+        )
         payload = {
             **metadata,
-            **pre_query_results,
             "tenants": tenants,
             "flags": tenant_flags,
             "viewQuery": view_query,
+            "preQueryResultsColumns": pre_query_columns,
+            "adminEnabled": admin_enabled,
         }
         if filters is not None:
             payload["sdkFilters"] = [convert_custom_filter(f) for f in filters]
@@ -183,7 +304,7 @@ class Quill:
                 quill_results["metadata"] = {}
             metadata = quill_results.get("metadata")
             responseMetadata = metadata
-            final_query_results = self.run_queries(
+            results = self.run_queries(
                 quill_results.get("queries"),
                 self.target_connection.database_type,
                 metadata.get("databaseType"),
@@ -191,30 +312,57 @@ class Quill:
                 metadata.get("runQueryConfig"),
             )
 
-            if final_query_results.get("mapped_array") and metadata.get("runQueryConfig", {}).get("arrayToMap"):
+            should_wrap_results = isinstance(results, list) or not results
+            if should_wrap_results:
+                normalized_results = {
+                    "queryResults": results if isinstance(results, list) else []
+                }
+            else:
+                normalized_results = results
+
+            if (
+                should_wrap_results
+                and not normalized_results.get("queryResults")
+                and quill_results.get("queries")
+            ):
+                normalized_results["queryResults"] = (
+                    normalized_results.get("queryResults") or []
+                )
+
+            if (
+                normalized_results.get("mapped_array")
+                and metadata.get("runQueryConfig", {}).get("arrayToMap")
+            ):
                 array_to_map = metadata["runQueryConfig"]["arrayToMap"]
-                for array, index in zip(final_query_results["mapped_array"], range(len(final_query_results["mapped_array"]))):
+                for array, index in zip(
+                    normalized_results["mapped_array"],
+                    range(len(normalized_results["mapped_array"])),
+                ):
                     responseMetadata[array_to_map["arrayName"]][index][array_to_map["field"]] = array
-                del final_query_results["mapped_array"]
+                del normalized_results["mapped_array"]
 
-            # Quick hack to make the sdk work with the Frontend
-            if len(final_query_results.get("queryResults")) == 1:
-                query_result = final_query_results.get("queryResults")[0]
+            query_results_list = normalized_results.get("queryResults") or []
+            if len(query_results_list) == 1:
+                query_result = query_results_list[0]
                 quill_results["metadata"]["rows"] = query_result.get("rows")
                 quill_results["metadata"]["fields"] = query_result.get("fields")
             return {
                 "data": quill_results.get("metadata"),
-                "queries": final_query_results,
+                "queries": normalized_results,
                 "status": "success",
             }
 
        except Exception as err:
            if task == "update-view":
-                self.post_quill("set-broken-view", {
-                    "table": metadata.get("name"),
-                    "clientId": metadata.get("clientId"),
-                    "error": str(err),
-                })
+                self.post_quill(
+                    "set-broken-view",
+                    {
+                        "table": metadata.get("name"),
+                        "clientId": metadata.get("clientId"),
+                        "error": str(err),
+                        "adminEnabled": admin_enabled,
+                    },
+                )
            return {
                "error": str(err).splitlines()[0],
                "status": "error",
quillsql-2.2.1/quillsql/utils/pivot_template.py (new file)
@@ -0,0 +1,485 @@
+"""
+Pivot Template System - SDK SIDE
+
+This file contains all the logic needed on the SDK to:
+1. Hydrate pivot query templates with actual distinct values
+2. Parse distinct values from different database result formats
+3. Validate templates before hydration
+
+This runs on the customer's Python SDK where customer data is accessible.
+Takes templates from server and populates them with actual data.
+"""
+
+import json
+import re
+from typing import List, Dict, Any, Optional, TypedDict
+
+# Constants
+MAX_PIVOT_UNIQUE_VALUES = 250
+PIVOT_COLUMN_MARKER = "{{QUILL_PIVOT_COLUMNS}}"
+PIVOT_COLUMN_ALIAS_MARKER = "{{QUILL_PIVOT_COLUMN_ALIASES}}"
+
+
+# Types
+class PivotAggregation(TypedDict, total=False):
+    aggregationType: str
+    valueField: Optional[str]
+    valueFieldType: Optional[str]
+    valueField2: Optional[str]
+    valueField2Type: Optional[str]
+
+
+class PivotConfig(TypedDict, total=False):
+    requiresDistinctValues: bool
+    columnField: Optional[str]
+    rowField: Optional[str]
+    rowFieldType: Optional[str]
+    aggregations: List[PivotAggregation]
+    databaseType: str
+    dateBucket: Optional[str]
+    pivotType: str
+    sort: Optional[bool]
+    sortField: Optional[str]
+    sortDirection: Optional[str]
+    rowLimit: Optional[int]
+
+
+# ============================================================================
+# HELPER FUNCTIONS
+# ============================================================================
+
+
+def process_single_quotes(value: str, database_type: str) -> str:
+    """Process single quotes based on database type."""
+    if database_type.lower() in ["postgresql", "snowflake", "clickhouse"]:
+        return value.replace("'", "''")
+    return value.replace("'", "\\'")
+
+
+def process_agg_type(agg_type: str, has_column_field: bool = False) -> str:
+    """Process aggregation type."""
+    if agg_type == "count" and has_column_field:
+        return "SUM"
+    return "AVG" if agg_type and agg_type.lower() == "average" else (agg_type.lower() if agg_type else "")
+
+
+def replace_bigquery_special_characters(column: str) -> str:
+    """Replace BigQuery special characters."""
+    return column.replace("/", "quill_forward_slash")
+
+
+def process_column_reference(
+    column: str,
+    database_type: str,
+    fallback_on_null: Optional[str] = None,
+    is_column_field_alias: bool = False,
+    is_value_field_alias: bool = False
+) -> str:
+    """Process column reference based on database type."""
+    db = database_type.lower()
+
+    if db in ["postgresql", "clickhouse"]:
+        if column == "":
+            return f'"{fallback_on_null}"' if fallback_on_null else '"_"'
+        if is_column_field_alias:
+            return f'"{column.replace(chr(34), "")}"'
+        column_parts = column.split(".")
+        if len(column_parts) > 1:
+            return '"' + '"."'.join([part.replace('"', '') for part in column_parts]) + '"'
+        return f'"{column.replace(chr(34), "")}"'
+
+    elif db == "mysql":
+        if column == "":
+            return fallback_on_null if fallback_on_null else "_"
+        if is_column_field_alias:
+            return f"`{column.replace('`', '').replace(chr(34), '')}`"
+        column_parts = column.split(".")
+        if len(column_parts) > 1:
+            return "`" + "`.`".join([part.replace("`", "") for part in column_parts]) + "`"
+        return f"`{column.replace('`', '')}`"
+
+    elif db == "snowflake":
+        if column == "":
+            return fallback_on_null if fallback_on_null else "_"
+        if is_column_field_alias:
+            return f'"{column.replace(chr(34), "")}"'
+        if is_value_field_alias:
+            cleaned_column = column.replace(")", "").replace("(", "_")
+            return cleaned_column
+        return column
+
+    elif db == "bigquery":
+        if column == "":
+            return f"`{fallback_on_null}`" if fallback_on_null else "`_`"
+        if is_column_field_alias:
+            return f"`{replace_bigquery_special_characters(column)}`"
+        column_parts = column.split(".")
+        if len(column_parts) > 1:
+            return "`" + "`.`".join([part for part in column_parts]) + "`"
+        return f"`{column}`"
+
+    elif db == "mssql":
+        if column == "":
+            return f"[{fallback_on_null}]" if fallback_on_null else "[_]"
+        if is_column_field_alias:
+            return f"[{column}]"
+        column_parts = column.split(".")
+        if len(column_parts) > 1:
+            return "[" + "].[".join([part for part in column_parts]) + "]"
+        return f"[{column}]"
+
+    elif db == "databricks":
+        if column == "":
+            return f"`{fallback_on_null}`" if fallback_on_null else "`_`"
+        if is_column_field_alias:
+            return f"`{column}`"
+        column_parts = column.split(".")
+        if len(column_parts) > 1:
+            return "`" + "`.`".join([part for part in column_parts]) + "`"
+        return f"`{column}`"
+
+    else:
+        return column
+
+
+def process_value_field(agg_type: str, database_type: str, value_field: str) -> str:
+    """Process value field based on aggregation type."""
+    if agg_type in ["min", "max"] or (agg_type and agg_type.lower() == "average"):
+        return f"{process_column_reference(value_field, database_type)} ELSE null"
+    if agg_type == "count":
+        return "1 ELSE 0"
+    return f"{process_column_reference(value_field, database_type)} ELSE 0" if value_field else "1 ELSE 0"
+
+
+# ============================================================================
+# DISTINCT VALUES PARSING
+# ============================================================================
+
+
+def parse_distinct_values(query_result: Dict[str, Any], database_type: str) -> List[str]:
+    """
+    Parses distinct values from database query results.
+    Different databases return different formats.
+    """
+    if not query_result or not query_result.get("rows") or len(query_result["rows"]) == 0:
+        return []
+
+    row = query_result["rows"][0]
+    distinct_values = []
+
+    db = database_type.lower()
+
+    if db in ["postgresql", "bigquery", "snowflake", "databricks", "clickhouse"]:
+        # These return arrays in string_values field
+        if "string_values" in row:
+            if isinstance(row["string_values"], list):
+                distinct_values = row["string_values"]
+            elif isinstance(row["string_values"], str):
+                # Handle JSON string arrays
+                try:
+                    distinct_values = json.loads(row["string_values"])
+                except:
+                    distinct_values = []
+
+    elif db == "mysql":
+        # MySQL returns JSON_ARRAYAGG which should be an array
+        if "string_values" in row:
+            if isinstance(row["string_values"], list):
+                distinct_values = row["string_values"]
+            elif isinstance(row["string_values"], str):
+                try:
+                    distinct_values = json.loads(row["string_values"])
+                except:
+                    distinct_values = []
+
+    elif db == "mssql":
+        # MS SQL returns comma-separated string
+        if "string_values" in row and isinstance(row["string_values"], str):
+            distinct_values = [v.strip() for v in row["string_values"].split(",")]
+
+    else:
+        print(f"Warning: Unknown database type: {database_type}")
+        distinct_values = []
+
+    # Filter out null/undefined/empty values
+    return [value for value in distinct_values if value is not None and value != ""]
+
+
+# ============================================================================
+# MATCH CASING FUNCTION
+# ============================================================================
+
+
+def match_casing(text: Optional[str], template: Optional[str]) -> str:
+    """Matches the casing of text to template."""
+    if not text or not template:
+        return text or ""
+
+    # Detect patterns
+    def is_title_case(s: str) -> bool:
+        return bool(re.match(r'^[A-Z][a-z]*([A-Z][a-z]*)*$', s))
+
+    def is_camel_case(s: str) -> bool:
+        return bool(re.match(r'^[a-z]+([A-Z][a-z]*)*$', s))
+
+    def is_snake_case(s: str) -> bool:
+        return bool(re.match(r'^[a-z0-9]+(_[a-z0-9]+)*$', s))
+
+    def is_all_lower_case(s: str) -> bool:
+        return bool(re.match(r'^[a-z]+$', s))
+
+    def is_all_upper_case(s: str) -> bool:
+        return bool(re.match(r'^[A-Z]+$', s))
+
+    def is_capitalized(s: str) -> bool:
+        return bool(re.match(r'^[A-Z][a-z]*$', s))
+
+    def is_screaming_snake_case(s: str) -> bool:
+        return bool(re.match(r'^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$', s))
+
+    # Convert functions
+    def to_title_case(s: str) -> str:
+        return ''.join(word.capitalize() for word in re.split(r'[_\s]+', s.lower()))
+
+    def to_camel_case(s: str) -> str:
+        return re.sub(r'_(.)', lambda m: m.group(1).upper(), s.lower())
+
+    def to_snake_case(s: str) -> str:
+        return re.sub(r'[A-Z]', lambda m: f'_{m.group(0).lower()}', s)
+
+    def to_screaming_snake_case(s: str) -> str:
+        result = re.sub(r'([A-Z])', r'_\1', s)
+        result = result.lstrip('_')
+        return result.upper()
+
+    # Match casing
+    if is_title_case(template):
+        return to_title_case(text)
+    elif is_camel_case(template):
+        return to_camel_case(text)
+    elif is_snake_case(template):
+        return to_snake_case(text)
+    elif is_all_lower_case(template):
+        return text.lower()
+    elif is_all_upper_case(template):
+        return text.upper()
+    elif is_capitalized(template):
+        return text.capitalize()
+    elif is_screaming_snake_case(template):
+        return to_screaming_snake_case(text)
+    else:
+        return text  # Default case if no specific pattern is detected
+
+
+# ============================================================================
+# TEMPLATE HYDRATION
+# ============================================================================
+
+
+def hydrate_pivot_template(
+    template: str,
+    distinct_values: List[str],
+    config: PivotConfig
+) -> str:
+    """
+    Hydrates a pivot query template with actual distinct values.
+    This function should be called in the Python SDK after fetching distinct values.
+
+    Args:
+        template: The SQL template string containing markers
+        distinct_values: Array of distinct values fetched from the database
+        config: config about the pivot configuration
+
+    Returns:
+        Hydrated SQL query string ready to execute
+    """
+    column_field = config.get("columnField")
+    row_field = config.get("rowField")
+    aggregations = config.get("aggregations", [])
+    database_type = config.get("databaseType", "postgresql")
+
+    # If this pivot doesn't require distinct values, return as-is
+    if not config.get("requiresDistinctValues") or not column_field or not row_field:
+        return template
+
+    # Filter and limit distinct values
+    filtered_values = [
+        value for value in distinct_values
+        if value is not None and value != ""
+    ][:MAX_PIVOT_UNIQUE_VALUES]
+
+    # Get properly quoted column references
+    column_field_alias = process_column_reference(
+        column_field,
+        database_type,
+        None,
+        False,
+        True
+    )
+
+    row_field_alias = process_column_reference(
+        row_field,
+        database_type,
+        None,
+        False,
+        True
+    )
+
+    # Generate column aliases for SELECT in quill_alias CTE
+    column_aliases = []
+    column_aliases.append(
+        f"{process_column_reference(row_field, database_type, None, True)} AS {row_field_alias}"
+    )
+
+    # Generate CASE WHEN columns for each aggregation
+    case_when_columns = []
+    seen_aggs: Dict[str, Dict[str, int]] = {}
+
+    for current_agg in aggregations:
+        agg_type = current_agg.get("aggregationType", "")
+        value_field = current_agg.get("valueField", "")
+
+        # Track duplicate aggregation combos for disambiguation
+        if agg_type in seen_aggs and value_field in seen_aggs[agg_type]:
+            seen_aggs[agg_type][value_field] += 1
+        else:
+            if agg_type not in seen_aggs:
+                seen_aggs[agg_type] = {}
+            seen_aggs[agg_type][value_field] = 1
+
+        disambiguation_index = str(seen_aggs[agg_type][value_field])
+        if disambiguation_index == "1":
+            disambiguation_index = ""
+
+        value_field_alias = process_column_reference(
+            current_agg.get("valueField") or row_field or "count",
+            database_type,
+            None,
+            False,
+            True
+        )
+
+        value_alias_substring = ""
+        if current_agg.get("valueField"):
+            value_alias_substring = f"{process_column_reference(current_agg['valueField'], database_type, None, True)} AS {value_field_alias}"
+
+        # Handle disambiguation for multiple aggregations
+        total_seen = sum(seen_aggs[agg_type].values())
+        disambiguation_field = ""
+        if total_seen > 1:
+            disambiguation_field = f"_{current_agg.get('valueField', '')}{disambiguation_index}"
+
+        disambiguation = ""
+        if len(aggregations) > 1:
+            if disambiguation_field:
+                disambiguation = f"{disambiguation_field}_{match_casing(agg_type, current_agg.get('valueField'))}"
+            else:
+                disambiguation = f"_{agg_type}"
+
+        # Wrap boolean fields in CASE WHEN
+        value_expr = ""
+        if current_agg.get("valueFieldType") == "bool":
+            value_expr = f"CASE WHEN {value_field_alias} THEN 1 ELSE 0 END"
+        else:
+            value_expr = process_value_field(
+                agg_type,
+                database_type,
+                value_field_alias
+            )
+
+        # Handle percentage aggregations specially
+        if agg_type == "percentage":
+            value_field2 = current_agg.get("valueField2") or current_agg.get("valueField") or "count"
+            value_field2_alias = process_column_reference(
+                value_field2,
+                database_type,
+                None,
+                False,
+                True
+            )
+
+            value_field2_type = current_agg.get("valueField2Type") or current_agg.get("valueFieldType")
+            value2_expr = ""
+            if value_field2_type == "bool":
+                value2_expr = f"CASE WHEN {value_field2_alias} THEN 1 ELSE 0 END"
+            else:
+                value2_expr = value_field2_alias
+
+            value2_alias_substring = ""
+            if current_agg.get("valueField2") and current_agg.get("valueField") != current_agg.get("valueField2"):
+                value2_alias_substring = f"{process_column_reference(current_agg['valueField2'], database_type, None, True)} AS {value_field2_alias}"
+
+            # Percentage with same field for numerator and denominator
+            if current_agg.get("valueField") == current_agg.get("valueField2") or not current_agg.get("valueField2"):
+                for column in filtered_values:
+                    case_when_columns.append(
+                        f"CAST(sum(CASE WHEN {column_field_alias} = '{process_single_quotes(column, database_type)}' THEN {value_expr} END) AS FLOAT) / GREATEST(sum({value2_expr}), 1) AS {process_column_reference(column + disambiguation, database_type, '_', True)}"
+                    )
+            else:
+                # Percentage with different fields
+                for column in filtered_values:
+                    case_when_columns.append(
+                        f"CAST(sum(CASE WHEN {column_field_alias} = '{process_single_quotes(column, database_type)}' THEN {value_expr} END) AS FLOAT) / GREATEST(sum(CASE WHEN {column_field_alias} = '{process_single_quotes(column, database_type)}' THEN {value2_expr} END), 1) AS {process_column_reference(column + disambiguation, database_type, '_', True)}"
                    )
+            if value2_alias_substring:
+                column_aliases.append(value2_alias_substring)
+        else:
+            # Standard aggregations (sum, count, avg, min, max)
+            for column in filtered_values:
+                case_when_columns.append(
+                    f"{process_agg_type(agg_type, True)}(CASE WHEN {column_field_alias} = '{process_single_quotes(column, database_type)}' THEN {value_expr} END) AS {process_column_reference(column + disambiguation, database_type, '_', True)}"
+                )
+
+        if value_alias_substring:
+            column_aliases.append(value_alias_substring)
+
+    # Add the column field to the aliases
+    column_aliases.append(
+        f"{process_column_reference(column_field, database_type, None, True)} AS {column_field_alias}"
+    )
+
+    # Remove duplicates
+    unique_column_aliases = list(dict.fromkeys(column_aliases))
+
+    # Replace markers with actual SQL
+    hydrated_template = template.replace(
+        PIVOT_COLUMN_ALIAS_MARKER,
+        ", ".join(unique_column_aliases)
+    ).replace(
+        PIVOT_COLUMN_MARKER,
+        ", ".join(case_when_columns)
+    )
+
+    return hydrated_template
+
+
+# ============================================================================
+# VALIDATION
+# ============================================================================
+
+
+def validate_template(template: str, config: PivotConfig) -> Dict[str, Any]:
+    """Validates that a template can be hydrated with the given config."""
+    errors = []
+
+    if not template:
+        errors.append("Template is empty")
+
+    if config.get("requiresDistinctValues"):
+        if PIVOT_COLUMN_MARKER not in template:
+            errors.append(f"Template is missing {PIVOT_COLUMN_MARKER} marker")
+        if PIVOT_COLUMN_ALIAS_MARKER not in template:
+            errors.append(f"Template is missing {PIVOT_COLUMN_ALIAS_MARKER} marker")
+        if not config.get("columnField"):
+            errors.append("config is missing columnField")
+        if not config.get("rowField"):
+            errors.append("config is missing rowField")
+
+    if not config.get("aggregations") or len(config.get("aggregations", [])) == 0:
+        errors.append("config is missing aggregations")
+
+    return {
+        "valid": len(errors) == 0,
+        "errors": errors
+    }
+
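
To show how the three pieces of pivot_template.py fit together, here is a small, self-contained sketch: validate a template, parse distinct values from a mock Postgres-style result, then hydrate the markers. The template string and the mock query result are hypothetical fixtures invented for this example; only the imported helpers and marker constants come from the file above.

from quillsql.utils.pivot_template import (
    PIVOT_COLUMN_MARKER,
    PIVOT_COLUMN_ALIAS_MARKER,
    parse_distinct_values,
    hydrate_pivot_template,
    validate_template,
)

# Hypothetical pivot config: count rows per region, one column per status.
config = {
    "requiresDistinctValues": True,
    "columnField": "status",
    "rowField": "region",
    "aggregations": [{"aggregationType": "count", "valueField": ""}],
    "databaseType": "postgresql",
    "pivotType": "table",
}

# Made-up template containing both markers (the real one comes from the
# pivot-template endpoint, as shown in the core.py changes above).
template = (
    f'WITH quill_alias AS (SELECT {PIVOT_COLUMN_ALIAS_MARKER} FROM orders) '
    f'SELECT "region", {PIVOT_COLUMN_MARKER} FROM quill_alias GROUP BY "region"'
)

check = validate_template(template, config)
assert check["valid"], check["errors"]

# Postgres-style distinct-values result: one row with a string_values array.
mock_result = {"rows": [{"string_values": ["paid", "refunded", ""]}]}
values = parse_distinct_values(mock_result, "postgresql")
# -> ["paid", "refunded"]  (empty values are filtered out)

sql = hydrate_pivot_template(template, values, config)
# Both markers are now replaced; the pivot marker expands to one column per
# distinct value, e.g. SUM(CASE WHEN "status" = 'paid' THEN 1 ELSE 0 END) AS "paid"
print(sql)

Keeping hydration in the SDK matches the module docstring: the distinct values come from the customer's database, so the template is populated where that data is accessible rather than on Quill's servers.
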
{quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quillsql
-Version: 2.2.0
+Version: 2.2.1
 Summary: Quill SDK for Python.
 Home-page: https://github.com/quill-sql/quill-python
 Author: Quill
{quillsql-2.2.0 → quillsql-2.2.1}/quillsql.egg-info/SOURCES.txt
@@ -17,6 +17,7 @@ quillsql/db/db_helper.py
 quillsql/db/postgres.py
 quillsql/utils/__init__.py
 quillsql/utils/filters.py
+quillsql/utils/pivot_template.py
 quillsql/utils/run_query_processes.py
 quillsql/utils/schema_conversion.py
 quillsql/utils/tenants.py
{quillsql-2.2.0 → quillsql-2.2.1}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="quillsql",
-    version="2.2.0",
+    version="2.2.1",
     packages=find_packages(),
     install_requires=[
         "psycopg2-binary",