tellaro-query-language 0.1.9__tar.gz → 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/PKG-INFO +23 -1
  2. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/README.md +22 -0
  3. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/pyproject.toml +1 -1
  4. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core.py +230 -36
  5. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/opensearch_operations.py +413 -90
  6. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/stats_operations.py +11 -1
  7. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator.py +39 -2
  8. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator_components/special_expressions.py +25 -6
  9. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator_components/value_comparison.py +31 -3
  10. tellaro_query_language-0.2.1/src/tql/mutator_analyzer.py +1228 -0
  11. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/__init__.py +5 -1
  12. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/dns.py +76 -53
  13. tellaro_query_language-0.2.1/src/tql/mutators/security.py +226 -0
  14. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/string.py +74 -0
  15. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_components/field_mapping.py +9 -3
  16. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_components/lucene_converter.py +12 -0
  17. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_components/query_converter.py +134 -25
  18. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_stats.py +170 -39
  19. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser.py +92 -37
  20. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/ast_builder.py +37 -1
  21. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/field_extractor.py +9 -1
  22. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/grammar.py +32 -8
  23. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/post_processor.py +489 -31
  24. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/stats_evaluator.py +170 -12
  25. tellaro_query_language-0.1.9/src/tql/mutator_analyzer.py +0 -830
  26. tellaro_query_language-0.1.9/src/tql/mutators/security.py +0 -225
  27. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/LICENSE +0 -0
  28. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/__init__.py +0 -0
  29. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/analyzer.py +0 -0
  30. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/cache/__init__.py +0 -0
  31. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/cache/base.py +0 -0
  32. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/cache/memory.py +0 -0
  33. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/cache/redis.py +0 -0
  34. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/README.md +0 -0
  35. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/__init__.py +0 -0
  36. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/file_operations.py +0 -0
  37. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/core_components/validation_operations.py +0 -0
  38. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator_components/README.md +0 -0
  39. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator_components/__init__.py +0 -0
  40. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/evaluator_components/field_access.py +0 -0
  41. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/exceptions.py +0 -0
  42. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/geoip_normalizer.py +0 -0
  43. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/base.py +0 -0
  44. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/encoding.py +0 -0
  45. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/geo.py +0 -0
  46. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/list.py +0 -0
  47. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/mutators/network.py +0 -0
  48. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch.py +0 -0
  49. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_components/README.md +0 -0
  50. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_components/__init__.py +0 -0
  51. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/opensearch_mappings.py +0 -0
  52. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/README.md +0 -0
  53. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/__init__.py +0 -0
  54. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/parser_components/error_analyzer.py +0 -0
  55. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/scripts.py +0 -0
  56. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/stats_transformer.py +0 -0
  57. {tellaro_query_language-0.1.9 → tellaro_query_language-0.2.1}/src/tql/validators.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tellaro-query-language
3
- Version: 0.1.9
3
+ Version: 0.2.1
4
4
  Summary: A flexible, human-friendly query language for searching and filtering structured data
5
5
  Home-page: https://github.com/tellaro/tellaro-query-language
6
6
  License: MIT
@@ -217,6 +217,28 @@ poetry run tests
217
217
 
218
218
  **Note**: The development setup uses `python-dotenv` to load OpenSearch credentials from `.env` files for integration testing. This is NOT required when using TQL as a package - see the [Package Usage Guide](docs/package-usage-guide.md) for production configuration patterns.
219
219
 
220
+ ### TQL Playground
221
+
222
+ The repository includes an interactive web playground for testing TQL queries:
223
+
224
+ ```bash
225
+ # Navigate to the playground directory
226
+ cd playground
227
+
228
+ # Start with Docker (recommended)
229
+ docker-compose up
230
+
231
+ # Or start with OpenSearch included
232
+ docker-compose --profile opensearch up
233
+ ```
234
+
235
+ Access the playground at:
236
+ - Frontend: http://localhost:5173
237
+ - API: http://localhost:8000
238
+ - API Docs: http://localhost:8000/docs
239
+
240
+ The playground uses your local TQL source code, so any changes you make are immediately reflected. See [playground/README.md](playground/README.md) for more details.
241
+
220
242
  ### File Operations
221
243
 
222
244
  ```python
@@ -187,6 +187,28 @@ poetry run tests
187
187
 
188
188
  **Note**: The development setup uses `python-dotenv` to load OpenSearch credentials from `.env` files for integration testing. This is NOT required when using TQL as a package - see the [Package Usage Guide](docs/package-usage-guide.md) for production configuration patterns.
189
189
 
190
+ ### TQL Playground
191
+
192
+ The repository includes an interactive web playground for testing TQL queries:
193
+
194
+ ```bash
195
+ # Navigate to the playground directory
196
+ cd playground
197
+
198
+ # Start with Docker (recommended)
199
+ docker-compose up
200
+
201
+ # Or start with OpenSearch included
202
+ docker-compose --profile opensearch up
203
+ ```
204
+
205
+ Access the playground at:
206
+ - Frontend: http://localhost:5173
207
+ - API: http://localhost:8000
208
+ - API Docs: http://localhost:8000/docs
209
+
210
+ The playground uses your local TQL source code, so any changes you make are immediately reflected. See [playground/README.md](playground/README.md) for more details.
211
+
190
212
  ### File Operations
191
213
 
192
214
  ```python
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "tellaro-query-language"
3
- version = "0.1.9"
3
+ version = "0.2.1"
4
4
  description = "A flexible, human-friendly query language for searching and filtering structured data"
5
5
  authors = ["Justin Henderson <justin@tellaro.io>"]
6
6
  license = "MIT"
@@ -212,8 +212,10 @@ class TQL:
212
212
  if query_type == "stats_expr":
213
213
  # This is a pure stats query like "| stats count()"
214
214
  stats_result = self.stats(data, query)
215
+ # Get viz_hint from the parsed AST
216
+ viz_hint = ast.get("viz_hint")
215
217
  # Convert to execute_opensearch format
216
- result["stats"] = self._convert_stats_result(stats_result)
218
+ result["stats"] = self._convert_stats_result(stats_result, viz_hint)
217
219
  result["total"] = len(records)
218
220
  return result
219
221
 
@@ -232,7 +234,9 @@ class TQL:
232
234
 
233
235
  # Apply stats to filtered data
234
236
  stats_result = self.stats_evaluator.evaluate_stats(filtered_records, stats_ast)
235
- result["stats"] = self._convert_stats_result(stats_result)
237
+ # Get viz_hint from the stats AST
238
+ viz_hint = stats_ast.get("viz_hint")
239
+ result["stats"] = self._convert_stats_result(stats_result, viz_hint)
236
240
  result["total"] = len(filtered_records)
237
241
 
238
242
  # Include filtered documents if size > 0
@@ -241,40 +245,63 @@ class TQL:
241
245
 
242
246
  return result
243
247
 
244
- # Handle regular filter queries
245
- matched_records = []
246
- has_enrichments = False
248
+ # Handle regular filter queries with mutator analysis
249
+ # Analyze the query for mutators
250
+ from .mutator_analyzer import MutatorAnalyzer
251
+ from .post_processor import QueryPostProcessor
252
+
253
+ analyzer = MutatorAnalyzer(field_mappings=self.field_mappings)
254
+ analysis_result = analyzer.analyze_ast(ast, context="in_memory")
247
255
 
256
+ # Use the optimized AST for evaluation
257
+ optimized_ast = analysis_result.optimized_ast
258
+
259
+ # First pass: collect all matching records using optimized AST
260
+ matched_records = []
248
261
  for record in records:
249
- # Check if record matches
250
- if self.evaluator._evaluate_node(ast, record, self._simple_mappings):
251
- # Apply any mutators to enrich the record
252
- enriched_record = self._apply_mutators_to_record(ast, record)
253
- matched_records.append(enriched_record)
254
- # Check if enrichments were added
255
- if not has_enrichments and enriched_record is not record:
256
- has_enrichments = True
262
+ # Check if record matches using the optimized AST (without array operators)
263
+ if self.evaluator._evaluate_node(optimized_ast, record, self._simple_mappings):
264
+ matched_records.append(record)
265
+
266
+ # Apply post-processing if needed
267
+ if analysis_result.post_processing_requirements:
268
+ processor = QueryPostProcessor()
269
+
270
+ # Apply mutators/enrichments
271
+ processed_records = processor.process_results(
272
+ matched_records, analysis_result.post_processing_requirements, track_enrichments=save_enrichment
273
+ )
274
+
275
+ # Apply filters (for array operators like any/all/none)
276
+ filtered_records = processor.filter_results(processed_records, analysis_result.post_processing_requirements)
277
+
278
+ matched_records = filtered_records
279
+ result["post_processing_applied"] = True
280
+
281
+ # Add post-processing stats
282
+ result["post_processing_stats"] = {
283
+ "documents_retrieved": len(processed_records),
284
+ "documents_returned": len(filtered_records),
285
+ "documents_filtered": len(processed_records) - len(filtered_records),
286
+ }
257
287
 
258
288
  # Set result data
259
289
  result["total"] = len(matched_records)
260
290
  if size > 0:
261
291
  result["results"] = matched_records[:size]
262
292
 
263
- # Check if post-processing (mutators) were applied
264
- if has_enrichments:
265
- result["post_processing_applied"] = True
293
+ # Update health status based on analysis
294
+ result["health_status"] = analysis_result.health_status
295
+ result["health_reasons"] = [
296
+ reason.get("reason", reason.get("description", "")) for reason in analysis_result.health_reasons
297
+ ]
266
298
 
267
299
  # Save enrichments if requested
268
- if save_enrichment and has_enrichments and source_file:
269
- # For file sources, update all records (not just matches)
270
- all_enriched = []
271
- for record in records:
272
- enriched_record = self._apply_mutators_to_record(ast, record)
273
- all_enriched.append(enriched_record)
274
-
275
- # Save based on file type
276
- if source_file.lower().endswith(".json"):
277
- self.file_ops.save_enrichments_to_json(source_file, all_enriched)
300
+ if save_enrichment and result.get("post_processing_applied") and source_file:
301
+ # For file sources, we would need to re-process all records with enrichments
302
+ # This is a complex operation that would require applying mutators to all records
303
+ # For now, we'll skip this functionality in the in-memory implementation
304
+ pass
278
305
 
279
306
  return result
280
307
 
@@ -370,6 +397,38 @@ class TQL:
370
397
  elif node_type == "unary_op":
371
398
  stats["logical_operators"].add("not")
372
399
  traverse_ast(node.get("operand"), depth + 1)
400
+ elif node_type == "query_with_stats":
401
+ # Traverse into the filter part to find mutators and fields
402
+ filter_node = node.get("filter")
403
+ if filter_node:
404
+ traverse_ast(filter_node, depth + 1)
405
+ # Also traverse the stats part
406
+ stats_node = node.get("stats")
407
+ if stats_node:
408
+ traverse_ast(stats_node, depth + 1)
409
+ elif node_type == "stats_expr":
410
+ # Check aggregations for any fields or mutators
411
+ aggregations = node.get("aggregations", [])
412
+ for agg in aggregations:
413
+ if isinstance(agg, dict):
414
+ field = agg.get("field")
415
+ if field and field != "*":
416
+ stats["fields"].add(field)
417
+ # Check for field mutators in aggregations
418
+ if agg.get("field_mutators"):
419
+ stats["has_mutators"] = True
420
+ elif node_type == "geo_expr":
421
+ # Geo expressions always have mutators
422
+ field = node.get("field")
423
+ if field:
424
+ stats["fields"].add(field)
425
+ stats["has_mutators"] = True
426
+ elif node_type == "nslookup_expr":
427
+ # NSLookup expressions always have mutators
428
+ field = node.get("field")
429
+ if field:
430
+ stats["fields"].add(field)
431
+ stats["has_mutators"] = True
373
432
 
374
433
  traverse_ast(ast)
375
434
 
@@ -451,6 +510,7 @@ class TQL:
451
510
  query: Optional[str] = None,
452
511
  size: int = 500,
453
512
  from_: int = 0,
513
+ sort: Optional[List[Dict[str, Any]]] = None,
454
514
  timestamp_field: str = "@timestamp",
455
515
  time_range: Optional[Dict[str, str]] = None,
456
516
  scan_all: bool = False,
@@ -468,7 +528,8 @@ class TQL:
468
528
  index: Index name to search
469
529
  query: The TQL query string
470
530
  size: Number of results to return (default: 500)
471
- from_: Starting offset for pagination (default: 0)
531
+ search_after: Values from previous result's sort field for pagination
532
+ sort: Sort order specification (e.g., [{"@timestamp": "desc"}, {"_id": "asc"}])
472
533
  timestamp_field: Field name for timestamp filtering (default: "@timestamp")
473
534
  time_range: Optional time range dict with 'gte' and/or 'lte' keys
474
535
  scan_all: If True, use scroll API to retrieve all matching documents
@@ -480,6 +541,7 @@ class TQL:
480
541
  Dictionary containing:
481
542
  - results: List of processed results
482
543
  - total: Total number of matching documents
544
+ - sort_values: Sort values of the last document (for search_after pagination)
483
545
  - post_processing_applied: Whether post-processing was applied
484
546
  - health_status: Query health status
485
547
  - health_reasons: List of health issues
@@ -511,6 +573,8 @@ class TQL:
511
573
  "scan_all": scan_all,
512
574
  "scroll_size": scroll_size,
513
575
  "scroll_timeout": scroll_timeout,
576
+ "from_": from_,
577
+ "sort": sort,
514
578
  }
515
579
  )
516
580
 
@@ -519,7 +583,7 @@ class TQL:
519
583
  filtered_kwargs["client"] = opensearch_client
520
584
 
521
585
  # Execute using new implementation
522
- results = self.opensearch_ops.execute_opensearch(query, index=index, size=size, from_=from_, **filtered_kwargs)
586
+ results = self.opensearch_ops.execute_opensearch(query, index=index, size=size, **filtered_kwargs)
523
587
 
524
588
  # Convert to old format if needed
525
589
  if isinstance(results, list):
@@ -978,22 +1042,140 @@ class TQL:
978
1042
  Returns:
979
1043
  Enriched record (may be same as input if no enrichments)
980
1044
  """
981
- # For now, return the original record
982
- # TODO: Implement mutator application for enrichment # noqa: W0511
983
- return record
1045
+ # Check if we need to apply mutators
1046
+ if not self._has_output_mutators(ast):
1047
+ return record
1048
+
1049
+ # Deep copy to avoid modifying original
1050
+ import copy
1051
+
1052
+ enriched_record = copy.deepcopy(record)
1053
+
1054
+ # Apply mutators from AST nodes
1055
+ self._apply_node_mutators(ast, enriched_record)
1056
+
1057
+ return enriched_record
1058
+
1059
+ def _has_output_mutators(self, ast: Dict[str, Any]) -> bool:
1060
+ """Check if AST contains mutators that should transform output.
1061
+
1062
+ Args:
1063
+ ast: Query AST
1064
+
1065
+ Returns:
1066
+ True if output mutators are present
1067
+ """
1068
+ if isinstance(ast, dict):
1069
+ node_type = ast.get("type")
1070
+
1071
+ # Check for field mutators with exists operator (output transformation)
1072
+ if node_type == "comparison" and ast.get("operator") == "exists" and ast.get("field_mutators"):
1073
+ return True
1074
+
1075
+ # Recursively check child nodes
1076
+ if node_type == "logical_op":
1077
+ left = ast.get("left", {})
1078
+ right = ast.get("right", {})
1079
+ return self._has_output_mutators(left) or self._has_output_mutators(right)
1080
+ elif node_type == "unary_op":
1081
+ operand = ast.get("operand", {})
1082
+ return self._has_output_mutators(operand)
1083
+
1084
+ return False
1085
+
1086
+ def _apply_node_mutators(self, ast: Dict[str, Any], record: Dict[str, Any]) -> None:
1087
+ """Apply mutators from AST nodes to the record.
984
1088
 
985
- def _convert_stats_result(self, stats_result: Dict[str, Any]) -> Dict[str, Any]:
1089
+ Args:
1090
+ ast: Query AST
1091
+ record: Record to modify (in-place)
1092
+ """
1093
+ if not isinstance(ast, dict):
1094
+ return
1095
+
1096
+ node_type = ast.get("type")
1097
+
1098
+ # Apply mutators for exists operator (output transformation)
1099
+ if node_type == "comparison" and ast.get("operator") == "exists" and ast.get("field_mutators"):
1100
+ field_name = ast["field"]
1101
+ field_mutators = ast["field_mutators"]
1102
+
1103
+ # Get field value
1104
+ field_value = self._get_nested_field(record, field_name)
1105
+
1106
+ if field_value is not None:
1107
+ # Apply mutators
1108
+ from .mutators import apply_mutators
1109
+
1110
+ mutated_value = apply_mutators(field_value, field_mutators, field_name, record)
1111
+
1112
+ # Update record with mutated value
1113
+ self._set_nested_field(record, field_name, mutated_value)
1114
+
1115
+ # Recursively process child nodes
1116
+ elif node_type == "logical_op":
1117
+ self._apply_node_mutators(ast.get("left", {}), record)
1118
+ self._apply_node_mutators(ast.get("right", {}), record)
1119
+ elif node_type == "unary_op":
1120
+ self._apply_node_mutators(ast.get("operand", {}), record)
1121
+
1122
+ def _get_nested_field(self, record: Dict[str, Any], field_path: str) -> Any:
1123
+ """Get value from nested field path.
1124
+
1125
+ Args:
1126
+ record: Record dictionary
1127
+ field_path: Dot-separated field path
1128
+
1129
+ Returns:
1130
+ Field value or None if not found
1131
+ """
1132
+ parts = field_path.split(".")
1133
+ current = record
1134
+
1135
+ for part in parts:
1136
+ if isinstance(current, dict) and part in current:
1137
+ current = current[part]
1138
+ else:
1139
+ return None
1140
+
1141
+ return current
1142
+
1143
+ def _set_nested_field(self, record: Dict[str, Any], field_path: str, value: Any) -> None:
1144
+ """Set value in nested field path.
1145
+
1146
+ Args:
1147
+ record: Record dictionary to modify
1148
+ field_path: Dot-separated field path
1149
+ value: Value to set
1150
+ """
1151
+ parts = field_path.split(".")
1152
+ current = record
1153
+
1154
+ # Navigate to parent of target field
1155
+ for part in parts[:-1]:
1156
+ if part not in current:
1157
+ current[part] = {}
1158
+ current = current[part]
1159
+
1160
+ # Set the value
1161
+ if len(parts) > 0:
1162
+ current[parts[-1]] = value
1163
+
1164
+ def _convert_stats_result(self, stats_result: Dict[str, Any], viz_hint: Optional[str] = None) -> Dict[str, Any]:
986
1165
  """Convert stats result from query() format to execute_opensearch format.
987
1166
 
988
1167
  Args:
989
1168
  stats_result: Result from stats() or query_stats() method
1169
+ viz_hint: Optional visualization hint from the query
990
1170
 
991
1171
  Returns:
992
1172
  Stats result in execute_opensearch format
993
1173
  """
994
1174
  # Map the stats evaluator format to execute_opensearch format
1175
+ result = {}
1176
+
995
1177
  if stats_result.get("type") == "simple_aggregation":
996
- return {
1178
+ result = {
997
1179
  "type": "stats",
998
1180
  "operation": stats_result["function"],
999
1181
  "field": stats_result["field"],
@@ -1001,10 +1183,22 @@ class TQL:
1001
1183
  }
1002
1184
  elif stats_result.get("type") == "multiple_aggregations":
1003
1185
  # For multiple aggregations, return the results dict
1004
- return {"type": "stats_multiple", "results": stats_result["results"]}
1186
+ result = {"type": "stats_multiple", "results": stats_result["results"]}
1005
1187
  elif stats_result.get("type") == "grouped_aggregation":
1006
1188
  # For grouped aggregations
1007
- return {"type": "stats_grouped", "group_by": stats_result["group_by"], "results": stats_result["results"]}
1189
+ result = {
1190
+ "type": "stats_grouped",
1191
+ "group_by": stats_result["group_by"],
1192
+ "results": stats_result["results"],
1193
+ "operation": stats_result.get("function", "count"),
1194
+ "field": stats_result.get("field", "*"),
1195
+ }
1008
1196
  else:
1009
1197
  # Return as-is if format is unknown
1010
- return stats_result
1198
+ result = stats_result
1199
+
1200
+ # Add visualization hint if provided
1201
+ if viz_hint:
1202
+ result["viz_hint"] = viz_hint
1203
+
1204
+ return result