relationalai 0.12.0__py3-none-any.whl → 0.12.1__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -20,7 +20,16 @@ class SnowbookEnvironment(NotebookRuntimeEnvironment, SessionEnvironment):
 
     def __init__(self):
         super().__init__()
-        self.runner = "container" if "snowflake.connector.auth" in sys.modules else "warehouse"
+        # Detect runner type based on module presence:
+        # - Warehouse runtime has '_snowflake' module
+        # - Container runtime has 'snowflake._legacy' module
+        if "_snowflake" in sys.modules:
+            self.runner = "warehouse"
+        elif "snowflake._legacy" in sys.modules:
+            self.runner = "container"
+        else:
+            # Fallback to original check
+            self.runner = "container" if "snowflake.connector.auth" in sys.modules else "warehouse"
 
     @classmethod
     def detect(cls):
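
Note: the runner detection above switches from a single heuristic to checking which Snowflake runtime modules have been imported, keeping the old check as a fallback. A minimal standalone sketch of the same pattern, using only the module names stated in the diff (detect_runner is a hypothetical helper, not part of the package):

    import sys

    def detect_runner() -> str:
        # Per the comments in the diff: warehouse runtime exposes '_snowflake',
        # container runtime exposes 'snowflake._legacy'.
        if "_snowflake" in sys.modules:
            return "warehouse"
        if "snowflake._legacy" in sys.modules:
            return "container"
        # Fallback mirrors the original 0.12.0 check.
        return "container" if "snowflake.connector.auth" in sys.modules else "warehouse"
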
@@ -2338,6 +2338,7 @@ class Fragment():
         self._define.extend(parent._define)
         self._order_by.extend(parent._order_by)
         self._limit = parent._limit
+        self._meta.update(parent._meta)
 
     def _add_items(self, items:PySequence[Any], to_attr:list[Any]):
         # TODO: ensure that you are _either_ a select, require, or then
@@ -2416,9 +2417,26 @@ class Fragment():
         return f
 
     def meta(self, **kwargs: Any) -> Fragment:
+        """Add metadata to the query.
+
+        Metadata can be used for debugging and observability purposes.
+
+        Args:
+            **kwargs: Metadata key-value pairs
+
+        Returns:
+            Fragment: Returns self for method chaining
+
+        Example:
+            select(Person.name).meta(workload_name="test", priority=1, enabled=True)
+        """
+        if not kwargs:
+            raise ValueError("meta() requires at least one argument")
+
         self._meta.update(kwargs)
         return self
 
+
     def annotate(self, *annos:Expression|Relationship|ir.Annotation) -> Fragment:
         self._annotations.extend(annos)
         return self
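
Note: meta() now rejects empty calls and merges keyword arguments into the fragment's metadata. A hedged sketch of that contract using a minimal stand-in class (FragmentSketch is illustrative only, not the real Fragment):

    from typing import Any

    class FragmentSketch:
        """Illustrative stand-in that stores metadata the way meta() does above."""
        def __init__(self) -> None:
            self._meta: dict[str, Any] = {}

        def meta(self, **kwargs: Any) -> "FragmentSketch":
            if not kwargs:
                raise ValueError("meta() requires at least one argument")
            self._meta.update(kwargs)
            return self

    f = FragmentSketch().meta(workload_name="test", priority=1, enabled=True)
    assert f._meta == {"workload_name": "test", "priority": 1, "enabled": True}
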
@@ -2497,7 +2515,7 @@ class Fragment():
         # @TODO for now we set tag to None but we need to work out how to properly propagate user-provided tag here
         with debugging.span("query", tag=None, dsl=str(self), **with_source(self), meta=self._meta) as query_span:
             query_task = qb_model._compiler.fragment(self)
-            results = qb_model._to_executor().execute(ir_model, query_task)
+            results = qb_model._to_executor().execute(ir_model, query_task, meta=self._meta)
             query_span["results"] = results
             # For local debugging mostly
             dry_run = qb_model._dry_run or bool(qb_model._config.get("compiler.dry_run", False))
@@ -2524,7 +2542,7 @@ class Fragment():
         # @TODO for now we set tag to None but we need to work out how to properly propagate user-provided tag here
         with debugging.span("query", tag=None, dsl=str(clone), **with_source(clone), meta=clone._meta) as query_span:
             query_task = qb_model._compiler.fragment(clone)
-            results = qb_model._to_executor().execute(ir_model, query_task, format="snowpark")
+            results = qb_model._to_executor().execute(ir_model, query_task, format="snowpark", meta=clone._meta)
             query_span["alt_format_results"] = results
             return results
 
@@ -2541,7 +2559,8 @@ class Fragment():
         clone._source = runtime_env.get_source_pos()
         with debugging.span("query", dsl=str(clone), **with_source(clone), meta=clone._meta):
             query_task = qb_model._compiler.fragment(clone)
-            qb_model._to_executor().execute(ir_model, query_task, result_cols=result_cols, export_to=table._fqn, update=update)
+            qb_model._to_executor().execute(ir_model, query_task, result_cols=result_cols, export_to=table._fqn, update=update, meta=clone._meta)
+
 
     #--------------------------------------------------
     # Select / Where
@@ -21,8 +21,8 @@ from relationalai.clients.config import Config
 from relationalai.clients.snowflake import APP_NAME
 from relationalai.clients.types import TransactionAsyncResponse
 from relationalai.clients.util import IdentityParser
-from relationalai.tools.constants import USE_DIRECT_ACCESS
-
+from relationalai.tools.constants import USE_DIRECT_ACCESS, QUERY_ATTRIBUTES_HEADER
+from relationalai.tools.query_utils import prepare_metadata_for_headers
 
 class LQPExecutor(e.Executor):
     """Executes LQP using the RAI client."""
@@ -267,7 +267,7 @@ class LQPExecutor(e.Executor):
         if ivm_flag:
             config_dict['ivm.maintenance_level'] = lqp_ir.Value(value=ivm_flag, meta=None)
         return construct_configure(config_dict, None)
-
+
     def _compile_intrinsics(self) -> lqp_ir.Epoch:
         """Construct an epoch that defines a number of built-in definitions used by the
         emitter."""
@@ -344,6 +344,10 @@ class LQPExecutor(e.Executor):
         df, errs = result_helpers.format_results(raw_results, cols)
         self.report_errors(errs)
 
+        # Rename columns if wide outputs is enabled
+        if self.wide_outputs and len(cols) - len(extra_cols) == len(df.columns):
+            df.columns = cols[: len(df.columns)]
+
         # Process exports
         if export_to and not self.dry_run:
             assert cols, "No columns found in the output"
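
Note: the rename only fires when the result frame has exactly as many columns as the non-extra output columns, so a mismatch leaves the frame untouched. A small pandas sketch of that guard (cols, extra_cols, and wide_outputs are stand-ins for the executor's fields):

    import pandas as pd

    cols = ["person", "name", "age", "__extra"]  # hypothetical output column names
    extra_cols = ["__extra"]                     # hypothetical bookkeeping columns
    df = pd.DataFrame([[1, "Ada", 36]])          # results arrive with default column labels

    wide_outputs = True
    if wide_outputs and len(cols) - len(extra_cols) == len(df.columns):
        df.columns = cols[: len(df.columns)]
    print(list(df.columns))  # ['person', 'name', 'age']
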
@@ -362,7 +366,7 @@ class LQPExecutor(e.Executor):
 
     def execute(self, model: ir.Model, task: ir.Task, format: Literal["pandas", "snowpark"] = "pandas",
                 result_cols: Optional[list[str]] = None, export_to: Optional[str] = None,
-                update: bool = False) -> DataFrame:
+                update: bool = False, meta: dict[str, Any] | None = None) -> DataFrame:
         self.prepare_data()
         previous_model = self._last_model
 
@@ -374,6 +378,9 @@ class LQPExecutor(e.Executor):
         if format != "pandas":
             raise ValueError(f"Unsupported format: {format}")
 
+        # Format meta as headers
+        json_meta = prepare_metadata_for_headers(meta)
+        headers = {QUERY_ATTRIBUTES_HEADER: json_meta} if json_meta else {}
         raw_results = self.resources.exec_lqp(
             self.database,
             self.engine,
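
Note: prepare_metadata_for_headers is new in 0.12.1 and its body is not part of this diff. The sketch below only illustrates the shape of the header construction, assuming the helper JSON-encodes non-empty metadata and returns a falsy value otherwise; the header name here is a placeholder, not the real QUERY_ATTRIBUTES_HEADER constant:

    import json
    from typing import Any, Optional

    QUERY_ATTRIBUTES_HEADER = "x-query-attributes"  # placeholder value

    def prepare_metadata_for_headers_sketch(meta: Optional[dict[str, Any]]) -> Optional[str]:
        # Assumed behavior: serialize non-empty metadata to JSON, else return None.
        return json.dumps(meta) if meta else None

    json_meta = prepare_metadata_for_headers_sketch({"workload_name": "test", "priority": 1})
    headers = {QUERY_ATTRIBUTES_HEADER: json_meta} if json_meta else {}
    print(headers)
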
@@ -383,6 +390,7 @@ class LQPExecutor(e.Executor):
             # transactions are serialized.
             readonly=False,
             nowait_durable=True,
+            headers=headers,
         )
         assert isinstance(raw_results, TransactionAsyncResponse)
 
@@ -102,6 +102,7 @@ def _get_export_reads(export_ids: list[tuple[lqp.RelationId, int, lqp.Type]]) ->
         data_columns=csv_columns,
         compression="gzip",
         partition_size=200,
+        syntax_escapechar='"', # To follow Snowflake's expected format
         meta=None,
     )
     reads.append(lqp.Read(read_type=lqp.Export(config=export_csv_config, meta=None), meta=None))
@@ -56,5 +56,6 @@ class Executor():
         rich.print(f"[black]{df}[/black]")
         if not df.empty:
             for col in extra_cols:
-                df = df.drop(col, axis=1)
+                if col in df.columns:
+                    df = df.drop(col, axis=1)
         return df
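
Note: the added membership check means a bookkeeping column that is missing from the frame is simply skipped instead of raising a KeyError. The same behavior in isolation (column names are illustrative):

    import pandas as pd

    df = pd.DataFrame({"name": ["Ada"], "age": [36]})
    extra_cols = ["age", "not_present"]  # hypothetical extra columns

    for col in extra_cols:
        if col in df.columns:
            df = df.drop(col, axis=1)
    print(list(df.columns))  # ['name']
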
@@ -558,22 +558,23 @@ class Flatten(Pass):
     def rewrite_wide_output(self, output: ir.Output):
         assert(output.keys)
 
-        # only prefix keys that are not already in the output
-        prefix_keys = []
+        # only append keys that are not already in the output
+        suffix_keys = []
         for key in output.keys:
             if all([val is not key for _, val in output.aliases]):
-                prefix_keys.append(key)
+                suffix_keys.append(key)
 
         aliases: OrderedSet[Tuple[str, ir.Value]] = ordered_set()
-        # add the keys to the output
-        for key in prefix_keys:
-            aliases.add((key.name, key))
 
         # add the remaining args, unless it is already a key
         for name, val in output.aliases:
-            if not isinstance(val, ir.Var) or val not in prefix_keys:
+            if not isinstance(val, ir.Var) or val not in suffix_keys:
                 aliases.add((name, val))
 
+        # add the keys to the output
+        for key in suffix_keys:
+            aliases.add((key.name, key))
+
         # TODO - we are assuming that the Rel compiler will translate nullable lookups
         # properly, returning a `Missing` if necessary, like this:
         # (nested_192(_adult, _adult_name) or (not nested_192(_adult, _) and _adult_name = Missing)) and
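
Note: renaming prefix_keys to suffix_keys changes where un-aliased key columns land in a wide output: they are now appended after the aliased values instead of prepended. A toy sketch of the resulting ordering (plain strings stand in for IR values; the real pass compares object identity rather than equality):

    keys = ["person"]
    aliases = [("name", "name_val"), ("age", "age_val")]

    suffix_keys = [k for k in keys if all(val != k for _, val in aliases)]

    ordered = []
    for name, val in aliases:        # aliased values first, unless the value is itself a key
        if val not in suffix_keys:
            ordered.append((name, val))
    for key in suffix_keys:          # keys now go at the end (previously at the front)
        ordered.append((key, key))

    print(ordered)  # [('name', 'name_val'), ('age', 'age_val'), ('person', 'person')]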