chalkpy 2.95.8__py3-none-any.whl → 2.95.9__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
chalk/_version.py CHANGED
@@ -1 +1 @@
- __version__ = "2.95.8"
+ __version__ = "2.95.9"
chalk/sql/_internal/integrations/redshift.py CHANGED
@@ -4,11 +4,26 @@ import contextlib
  import io
  import logging
  import os
+ import queue
  import threading
  import typing
  import uuid
- from concurrent.futures import Future, ThreadPoolExecutor
- from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Sequence, Type, Union
+ from concurrent.futures import ThreadPoolExecutor
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Dict,
+     Iterable,
+     List,
+     Mapping,
+     NewType,
+     Optional,
+     Sequence,
+     Type,
+     Union,
+     cast,
+ )
 
  import pyarrow as pa
  import pyarrow.parquet as pq
@@ -27,9 +42,10 @@ from chalk.sql._internal.sql_source import (
  from chalk.sql.finalized_query import FinalizedChalkQuery
  from chalk.utils.df_utils import is_binary_like, read_parquet
  from chalk.utils.environment_parsing import env_var_bool
- from chalk.utils.log_with_context import get_logger
+ from chalk.utils.log_with_context import LABELS_KEY, get_logger, get_logging_context
  from chalk.utils.missing_dependency import missing_dependency_exception
- from chalk.utils.threading import DEFAULT_IO_EXECUTOR
+ from chalk.utils.threading import DEFAULT_IO_EXECUTOR, MultiSemaphore
+ from chalk.utils.tracing import safe_incr, safe_set_gauge
 
  if TYPE_CHECKING:
      from mypy_boto3_s3 import S3Client
@@ -40,6 +56,22 @@ if TYPE_CHECKING:
  _logger = get_logger(__name__)
  _public_logger = chalk.logging.chalk_logger
 
+ _WorkerId = NewType("_WorkerId", int)
+
+
+ def _get_resolver_tags() -> list[str] | None:
+     """Extract resolver_fqn from log context and return as tags list."""
+     try:
+         log_ctx = get_logging_context()
+         labels = log_ctx.get(LABELS_KEY, {})
+         resolver_fqn = labels.get("resolver_fqn")
+         if resolver_fqn:
+             return [f"resolver_fqn:{resolver_fqn}"]
+     except Exception:
+         # Don't fail if we can't get the resolver_fqn
+         pass
+     return None
+
 
  def get_supported_redshift_unload_types() -> List[Type["TypeEngine"]]:
      """
@@ -287,36 +319,235 @@ class RedshiftSourceImpl(BaseSQLSource):
              unload_destination=unload_destination,
              columns_to_features=columns_to_features,
              yield_empty_batches=query_execution_parameters.yield_empty_batches,
+             max_prefetch_size_bytes=query_execution_parameters.max_prefetch_size_bytes,
+             num_client_prefetch_threads=query_execution_parameters.num_client_prefetch_threads,
          )
 
+     def _download_worker(
+         self,
+         file_handles: queue.Queue[str],
+         sem: MultiSemaphore | None,
+         pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId],
+         worker_idx: _WorkerId,
+         columns_to_features: Callable[[Sequence[str]], Mapping[str, Feature]],
+     ):
+         """Worker thread that downloads files from S3 with memory control."""
+         assert self._s3_bucket is not None
+         try:
+             while True:
+                 try:
+                     filename = file_handles.get_nowait()
+                 except queue.Empty:
+                     break
+
+                 # Estimate file size from S3 metadata if possible
+                 weight: int | None = None
+                 try:
+                     with _boto_lock_ctx():
+                         head_response = self._s3_client.head_object(Bucket=self._s3_bucket, Key=filename)
+                     # Boto3 types indicate ContentLength is always int, but cast to Optional for defensive programming
+                     content_length = cast(int | None, head_response.get("ContentLength"))
+                     if content_length is not None:
+                         # Estimate uncompressed size (parquet typically compresses 3-5x)
+                         weight = content_length * 4
+                 except Exception as e:
+                     _logger.warning(f"Failed to get file size for {filename}, will estimate after download", exc_info=e)
+
+                 # Acquire semaphore before downloading
+                 if sem and weight is not None:
+                     if weight > sem.initial_value:
+                         # If the file is larger than the maximum size, truncate to max
+                         weight = sem.initial_value
+                     if weight > 0:
+                         if not sem.acquire(weight):
+                             raise RuntimeError("Failed to acquire semaphore for redshift download")
+                         safe_set_gauge(
+                             "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                         )
+
+                 # Download and convert to table
+                 tbl = _download_file_to_table(self._s3_client, self._s3_bucket, filename, columns_to_features)
+
+                 # If we didn't have a weight estimate, use actual table size
+                 if weight is None:
+                     weight = tbl.nbytes
+                     if sem and weight is not None and weight > 0:
+                         if not sem.acquire(weight):
+                             raise RuntimeError("Failed to acquire semaphore for redshift download")
+                         safe_set_gauge(
+                             "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                         )
+
+                 # Ensure weight is always an int
+                 final_weight: int = weight if weight is not None else 0
+                 pa_table_queue.put((tbl, final_weight))
+         finally:
+             # Signal that this worker is done
+             pa_table_queue.put(worker_idx)
+
      def _download_objs_async(
          self,
          unload_destination: str,
          columns_to_features: Callable[[Sequence[str]], Mapping[str, Feature]],
          yield_empty_batches: bool,
+         max_prefetch_size_bytes: int,
+         num_client_prefetch_threads: int,
      ) -> Iterable[pa.RecordBatch]:
+         """Download objects from S3 with byte-bounded memory control."""
          assert self._s3_bucket is not None
          filenames = list(_list_files(self._s3_client, self._s3_bucket, unload_destination))
-         download_futs: list[Future[pa.Table]] = []
+         _public_logger.info(
+             f"Downloading parquet data partitioned into {len(filenames)} files "
+             + f"(max_prefetch_bytes={max_prefetch_size_bytes}, threads={num_client_prefetch_threads})..."
+         )
+
+         if len(filenames) == 0:
+             if yield_empty_batches:
+                 # Need to get schema somehow - return empty batch
+                 # This matches the original behavior
+                 schema: pa.Schema | None = None
+                 if schema is not None:
+                     yield pa.RecordBatch.from_pydict({k: [] for k in schema.names}, schema)
+             return
+
+         # Set up queues and semaphore for memory control
+         file_handles: queue.Queue[str] = queue.Queue()
          for filename in filenames:
-             _logger.debug("Scheduling download of file %s", filename)
-             fut = self._executor.submit(
-                 _download_file_to_table, self._s3_client, self._s3_bucket, filename, columns_to_features
+             file_handles.put(filename)
+
+         max_weight = max_prefetch_size_bytes if max_prefetch_size_bytes > 0 else None
+         pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId] = queue.Queue()
+         sem = None if max_weight is None else MultiSemaphore(max_weight)
+
+         # Start download workers
+         futures = {
+             _WorkerId(i): self._executor.submit(
+                 self._download_worker,
+                 file_handles,
+                 sem,
+                 pa_table_queue,
+                 _WorkerId(i),
+                 columns_to_features,
              )
-             download_futs.append(fut)
-         _public_logger.info(f"Downloading parquet data partitioned into {len(download_futs)} files...")
+             for i in range(num_client_prefetch_threads)
+         }
+
          schema: pa.Schema | None = None
          yielded = False
-         for fut in download_futs:
-             tbl = fut.result()
-             if len(tbl) > 0:
-                 yield tbl.combine_chunks().to_batches()[0]
-                 yielded = True
-             if len(tbl) == 0 and schema is None:
+
+         # Process downloaded tables as they become available
+         while len(futures) > 0:
+             x = pa_table_queue.get()
+             if isinstance(x, int):
+                 # Worker finished - remove from futures and check for errors
+                 futures.pop(x).result()
+                 continue
+
+             tbl, weight = x
+             if schema is None:
                  schema = tbl.schema
-         if not yielded and schema is not None:
+
+             try:
+                 if len(tbl) > 0:
+                     yield tbl.combine_chunks().to_batches()[0]
+                     safe_incr("chalk.redshift.downloaded_bytes", tbl.nbytes or 0, tags=_get_resolver_tags())
+                     safe_incr("chalk.redshift.downloaded_rows", tbl.num_rows or 0, tags=_get_resolver_tags())
+                     yielded = True
+             finally:
+                 # Release semaphore after yielding
+                 if sem is not None and weight > 0:
+                     sem.release(weight)
+                     safe_set_gauge(
+                         "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                     )
+
+         if not yielded and yield_empty_batches and schema is not None:
              yield pa.RecordBatch.from_pydict({k: [] for k in schema.names}, schema)
 
+     def _download_worker_raw(
+         self,
+         file_handles: queue.Queue[str],
+         sem: MultiSemaphore | None,
+         pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId],
+         worker_idx: _WorkerId,
+         expected_output_schema: pa.Schema,
+     ):
+         """Worker thread that downloads files from S3 with memory control for raw execution."""
+         import pyarrow.compute as pc
+
+         assert self._s3_bucket is not None
+         try:
+             while True:
+                 try:
+                     filename = file_handles.get_nowait()
+                 except queue.Empty:
+                     break
+
+                 # Estimate file size from S3 metadata if possible
+                 weight: int | None = None
+                 try:
+                     with _boto_lock_ctx():
+                         head_response = self._s3_client.head_object(Bucket=self._s3_bucket, Key=filename)
+                     # Boto3 types indicate ContentLength is always int, but cast to Optional for defensive programming
+                     content_length = cast(int | None, head_response.get("ContentLength"))
+                     if content_length is not None:
+                         # Estimate uncompressed size (parquet typically compresses 3-5x)
+                         weight = content_length * 4
+                 except Exception as e:
+                     _logger.warning(f"Failed to get file size for {filename}, will estimate after download", exc_info=e)
+
+                 # Acquire semaphore before downloading
+                 if sem and weight is not None:
+                     if weight > sem.initial_value:
+                         weight = sem.initial_value
+                     if weight > 0:
+                         if not sem.acquire(weight):
+                             raise RuntimeError("Failed to acquire semaphore for redshift download")
+                         safe_set_gauge(
+                             "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                         )
+
+                 # Download parquet file
+                 buffer = io.BytesIO()
+                 with _boto_lock_ctx():
+                     self._s3_client.download_fileobj(Bucket=self._s3_bucket, Key=filename, Fileobj=buffer)
+                 buffer.seek(0)
+                 if env_var_bool("CHALK_REDSHIFT_POLARS_PARQUET"):
+                     tbl = read_parquet(buffer, use_pyarrow=False).to_arrow()
+                 else:
+                     tbl = pq.read_table(buffer)
+
+                 # If we didn't have a weight estimate, use actual table size
+                 if weight is None:
+                     weight = tbl.nbytes
+                     if sem and weight is not None and weight > 0:
+                         if not sem.acquire(weight):
+                             raise RuntimeError("Failed to acquire semaphore for redshift download")
+                         safe_set_gauge(
+                             "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                         )
+
+                 # Map columns to expected schema
+                 arrays: list[pa.Array] = []
+                 for field in expected_output_schema:
+                     if field.name in tbl.column_names:
+                         col = tbl.column(field.name)
+                         # Cast to expected type if needed
+                         if col.type != field.type:
+                             col = pc.cast(col, field.type)
+                         arrays.append(col)
+                     else:
+                         # Column not found, create null array
+                         arrays.append(pa.nulls(len(tbl), field.type))
+
+                 mapped_tbl = pa.Table.from_arrays(arrays, schema=expected_output_schema)
+                 # Ensure weight is always an int
+                 final_weight: int = weight if weight is not None else 0
+                 pa_table_queue.put((mapped_tbl, final_weight))
+         finally:
+             # Signal that this worker is done
+             pa_table_queue.put(worker_idx)
+
      def execute_query_efficient_raw(
          self,
          finalized_query: FinalizedChalkQuery,
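
The _download_worker / _download_objs_async pair above bounds prefetch memory with a byte-weighted semaphore: workers pull S3 keys from a queue, acquire an estimated byte weight before downloading, push finished tables onto a results queue, and the consumer releases the weight only after the batch has been yielded; a worker's own index on the results queue acts as a completion sentinel. A self-contained sketch of that pattern, with a simple WeightedSemaphore standing in for chalk's MultiSemaphore and the download simulated:

    import queue
    import threading
    from concurrent.futures import ThreadPoolExecutor


    class WeightedSemaphore:
        """Semaphore whose acquire/release take an arbitrary weight (e.g. bytes)."""

        def __init__(self, initial_value: int) -> None:
            self.initial_value = initial_value
            self._value = initial_value
            self._cond = threading.Condition()

        def acquire(self, weight: int) -> bool:
            with self._cond:
                while self._value < weight:
                    self._cond.wait()
                self._value -= weight
                return True

        def release(self, weight: int) -> None:
            with self._cond:
                self._value += weight
                self._cond.notify_all()


    def worker(files: "queue.Queue[str]", sem: WeightedSemaphore, out: "queue.Queue[object]", worker_id: int) -> None:
        try:
            while True:
                try:
                    name = files.get_nowait()
                except queue.Empty:
                    break
                weight = min(10_000, sem.initial_value)  # stand-in for the S3 size estimate
                sem.acquire(weight)                      # block until the prefetch budget has room
                out.put((f"table-for-{name}", weight))   # stand-in for the downloaded pyarrow table
        finally:
            out.put(worker_id)                           # sentinel: this worker is done


    files: "queue.Queue[str]" = queue.Queue()
    for f in ["part-000.parquet", "part-001.parquet", "part-002.parquet"]:
        files.put(f)

    sem = WeightedSemaphore(64_000)                      # total in-flight budget, in "bytes"
    out: "queue.Queue[object]" = queue.Queue()
    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = {i: pool.submit(worker, files, sem, out, i) for i in range(2)}
        while futures:
            item = out.get()
            if isinstance(item, int):                    # a worker finished; surface any exception
                futures.pop(item).result()
                continue
            table, weight = item
            print("consumed", table)
            sem.release(weight)                          # free budget only after the batch is consumed
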
@@ -325,8 +556,6 @@
          query_execution_parameters: QueryExecutionParameters,
      ) -> Iterable[pa.RecordBatch]:
          """Execute query efficiently for Redshift and return raw PyArrow RecordBatches."""
-         import pyarrow.compute as pc
-
          temp_query_id = id(finalized_query)
          _public_logger.debug(f"Executing RedShift query [{temp_query_id}]...")
 
@@ -387,47 +616,77 @@
          except Exception:
              _logger.warning(f"Failed to drop temp table '{temp_table_name}'", exc_info=True)
 
-         # Download files and map to expected schema
+         # Download files with memory control
          assert unload_destination is not None
          assert self._s3_bucket is not None
          filenames = list(_list_files(self._s3_client, self._s3_bucket, unload_destination))
+         _public_logger.info(
+             f"Downloading {len(filenames)} parquet files from Redshift UNLOAD "
+             + f"(max_prefetch_bytes={query_execution_parameters.max_prefetch_size_bytes}, "
+             + f"threads={query_execution_parameters.num_client_prefetch_threads})..."
+         )
 
-         yielded = False
+         if len(filenames) == 0:
+             if query_execution_parameters.yield_empty_batches:
+                 arrays = [pa.nulls(0, field.type) for field in expected_output_schema]
+                 yield pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
+             return
+
+         # Set up queues and semaphore for memory control
+         file_handles: queue.Queue[str] = queue.Queue()
          for filename in filenames:
-             buffer = io.BytesIO()
-             with _boto_lock_ctx():
-                 self._s3_client.download_fileobj(Bucket=self._s3_bucket, Key=filename, Fileobj=buffer)
-             buffer.seek(0)
-             if env_var_bool("CHALK_REDSHIFT_POLARS_PARQUET"):
-                 tbl = read_parquet(buffer, use_pyarrow=False).to_arrow()
-             else:
-                 tbl = pq.read_table(buffer)
+             file_handles.put(filename)
 
-             if len(tbl) == 0:
-                 continue
+         max_weight = (
+             query_execution_parameters.max_prefetch_size_bytes
+             if query_execution_parameters.max_prefetch_size_bytes > 0
+             else None
+         )
+         pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId] = queue.Queue()
+         sem = None if max_weight is None else MultiSemaphore(max_weight)
+
+         # Start download workers
+         futures = {
+             _WorkerId(i): self._executor.submit(
+                 self._download_worker_raw,
+                 file_handles,
+                 sem,
+                 pa_table_queue,
+                 _WorkerId(i),
+                 expected_output_schema,
+             )
+             for i in range(query_execution_parameters.num_client_prefetch_threads)
+         }
 
-             # Map columns to expected schema
-             arrays: list[pa.Array] = []
-             for field in expected_output_schema:
-                 if field.name in tbl.column_names:
-                     col = tbl.column(field.name)
-                     # Cast to expected type if needed
-                     if col.type != field.type:
-                         col = pc.cast(col, field.type)
-                     arrays.append(col)
-                 else:
-                     # Column not found, create null array
-                     arrays.append(pa.nulls(len(tbl), field.type))
+         yielded = False
 
-             batch = pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
-             yield batch
-             yielded = True
+         # Process downloaded tables as they become available
+         while len(futures) > 0:
+             x = pa_table_queue.get()
+             if isinstance(x, int):
+                 # Worker finished - remove from futures and check for errors
+                 futures.pop(x).result()
+                 continue
+
+             tbl, weight = x
+
+             try:
+                 if len(tbl) > 0:
+                     yield tbl.to_batches()[0]
+                     safe_incr("chalk.redshift.downloaded_bytes", tbl.nbytes or 0, tags=_get_resolver_tags())
+                     safe_incr("chalk.redshift.downloaded_rows", tbl.num_rows or 0, tags=_get_resolver_tags())
+                     yielded = True
+             finally:
+                 # Release semaphore after yielding
+                 if sem is not None and weight > 0:
+                     sem.release(weight)
+                     safe_set_gauge(
+                         "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                     )
 
          if not yielded and query_execution_parameters.yield_empty_batches:
-             # Create empty batch with expected schema
              arrays = [pa.nulls(0, field.type) for field in expected_output_schema]
-             batch = pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
-             yield batch
+             yield pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
 
      @classmethod
      def register_sqlalchemy_compiler_overrides(cls):
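
_download_worker_raw applies the same bounded-download pattern, then coerces each downloaded table onto the expected Arrow schema before queueing it: present columns are cast to the expected type, missing columns are filled with nulls. A small standalone pyarrow example of that mapping step (the schema and data here are made up for illustration):

    import pyarrow as pa
    import pyarrow.compute as pc

    expected_schema = pa.schema([("id", pa.int64()), ("amount", pa.float64()), ("note", pa.string())])
    downloaded = pa.table({"id": pa.array([1, 2], type=pa.int32()), "amount": pa.array([1.5, 2.5])})

    arrays = []
    for field in expected_schema:
        if field.name in downloaded.column_names:
            col = downloaded.column(field.name)
            if col.type != field.type:
                col = pc.cast(col, field.type)  # e.g. int32 -> int64
            arrays.append(col)
        else:
            arrays.append(pa.nulls(len(downloaded), field.type))  # absent column -> all nulls

    mapped = pa.Table.from_arrays(arrays, schema=expected_schema)
    print(mapped.schema)  # matches expected_schema; "note" is a null column
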
chalk/utils/tracing.py CHANGED
@@ -222,8 +222,8 @@ else:
  if can_use_datadog_statsd:
      from datadog.dogstatsd.base import statsd
 
-     def safe_set_gauge(gauge: str, value: int | float):
-         statsd.gauge(gauge, value)
+     def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None):
+         statsd.gauge(gauge, value, tags=tags)
 
      def safe_incr(counter: str, value: int | float, tags: list[str] | None = None):
          statsd.increment(counter, value, tags)
@@ -233,7 +233,7 @@ if can_use_datadog_statsd:
 
  else:
 
-     def safe_set_gauge(gauge: str, value: int | float):
+     def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None):
          pass
 
      def safe_incr(counter: str, value: int | float, tags: list[str] | None = None):
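
With this change both branches of safe_set_gauge accept an optional tags keyword, so call sites can always pass resolver tags whether or not DogStatsD is installed. A hedged sketch of the same shim pattern (the import guard below is illustrative; the real module gates on can_use_datadog_statsd):

    try:
        from datadog.dogstatsd.base import statsd  # optional dependency
        _HAS_STATSD = True
    except ImportError:
        _HAS_STATSD = False

    if _HAS_STATSD:

        def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None) -> None:
            statsd.gauge(gauge, value, tags=tags)

    else:

        def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None) -> None:
            pass  # metrics are silently dropped when DogStatsD is unavailable

    safe_set_gauge("chalk.redshift.remaining_prefetch_bytes", 1024, tags=["resolver_fqn:my.resolver"])
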
chalkpy-2.95.8.dist-info/METADATA → chalkpy-2.95.9.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: chalkpy
- Version: 2.95.8
+ Version: 2.95.9
  Summary: Python SDK for Chalk
  Author: Chalk AI, Inc.
  Project-URL: Homepage, https://chalk.ai
@@ -279,6 +279,9 @@ Requires-Dist: python-json-logger<4.0.0,>=3.0.0; extra == "dynamodb"
  Requires-Dist: s3fs; extra == "dynamodb"
  Requires-Dist: sqlalchemy[asyncio]<2,>=1.4.26; extra == "dynamodb"
  Requires-Dist: sqlglot<21.2.0,>=19.0.0; extra == "dynamodb"
+ Provides-Extra: tracing
+ Requires-Dist: opentelemetry-api>=1.29.0; extra == "tracing"
+ Requires-Dist: opentelemetry-sdk>=1.29.0; extra == "tracing"
  Provides-Extra: all
  Requires-Dist: PyAthena>=3.0.0; extra == "all"
  Requires-Dist: adlfs; extra == "all"
@@ -303,6 +306,8 @@ Requires-Dist: google-cloud-bigquery<4,>=3.25.0; extra == "all"
  Requires-Dist: google-cloud-storage; extra == "all"
  Requires-Dist: httpx<0.28.0; extra == "all"
  Requires-Dist: openai<1.53,>=1.3.2; extra == "all"
+ Requires-Dist: opentelemetry-api>=1.29.0; extra == "all"
+ Requires-Dist: opentelemetry-sdk>=1.29.0; extra == "all"
  Requires-Dist: packaging; extra == "all"
  Requires-Dist: polars[timezone]!=1.0,!=1.1,!=1.10,!=1.11,!=1.12,!=1.13,!=1.14,!=1.15,!=1.16,!=1.17,!=1.18,!=1.19,!=1.2,!=1.20,!=1.21,!=1.22,!=1.23,!=1.24,!=1.25,!=1.26,!=1.27,!=1.28,!=1.29,!=1.3,!=1.30,!=1.31,!=1.32,!=1.4,!=1.5,!=1.6,!=1.7,!=1.8,!=1.9,<1.33.1,>=0.17.2; extra == "all"
  Requires-Dist: psycopg2<3,>=2.9.4; extra == "all"
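
The new tracing extra makes opentelemetry an opt-in dependency (installed via pip install "chalkpy[tracing]", or pulled in by the all extra). Code that uses it would typically guard the import so the package still works without the extra; a hedged sketch of such a guard (illustrative only, not chalk's actual implementation):

    try:
        from opentelemetry import trace
        _tracer = trace.get_tracer("chalkpy-sketch")
    except ImportError:  # "tracing" extra not installed
        _tracer = None

    def traced(name: str) -> None:
        if _tracer is None:
            return  # tracing is a no-op without opentelemetry
        with _tracer.start_as_current_span(name):
            pass  # real work would go here
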
chalkpy-2.95.8.dist-info/RECORD → chalkpy-2.95.9.dist-info/RECORD
@@ -1,5 +1,5 @@
  chalk/__init__.py,sha256=vKsx9-cl5kImlVWGHVRYO6bweBm79NAzGs3l36u71wM,2657
- chalk/_version.py,sha256=cRQs2s3pPBB8SSCGLqSSwNaGLb4lHjpqBBTPHQh_BU0,23
+ chalk/_version.py,sha256=6_obn3nZ0gJ-Hi9EsdcLyZmK96lqygLULpjsd8h1yUE,23
  chalk/cli.py,sha256=ckqqfOI-A2mT23-rnZzDMmblYj-2x1VBX8ebHlIEn9A,5873
  chalk/importer.py,sha256=m4lMn1lSYj_euDq8CS7LYTBnek9JOcjGJf9-82dJHbA,64441
  chalk/prompts.py,sha256=2H9UomLAamdfRTNUdKs9i3VTpiossuyRhntqsAXUhhg,16117
@@ -760,7 +760,7 @@ chalk/sql/_internal/integrations/dynamodb.py,sha256=MHJryj6xJ9B72spofeTpCE86pC7Z
  chalk/sql/_internal/integrations/mssql.py,sha256=gZfAb_b6eVpTUkcFMeORF9edbpsvpvvi-VW_kJXwH6I,11938
  chalk/sql/_internal/integrations/mysql.py,sha256=RjIc0TaQceZrZ-q5AIGExbH5VHirbscZqXII1Ht7M0I,8696
  chalk/sql/_internal/integrations/postgres.py,sha256=bwxwEeJYH5-A7S22YumukwX6aN6c_B_MOOnrmJuTZyI,29169
- chalk/sql/_internal/integrations/redshift.py,sha256=0f_h5Lnigth3O5BG16a967JokHCZfl4i2kWbb134-6Q,22872
+ chalk/sql/_internal/integrations/redshift.py,sha256=7HDF6FaiusiPgk00kFXttIkowGNbuSsjO0sXxPwWw68,34119
  chalk/sql/_internal/integrations/redshift_compiler_overrides.py,sha256=eKFeaCamTVfoHhdiBv1_3A6CxvFrv86Ovsa-vBBqjEo,5343
  chalk/sql/_internal/integrations/snowflake.py,sha256=Y8kKSA3W02yxi144KSOeKtlud4ArsjLKNPvTG6XkkXI,35241
  chalk/sql/_internal/integrations/snowflake_compiler_overrides.py,sha256=GbD3rdFWMpbht8dE-h9kcSsxideYHvVTGOYIfrczJJ8,6712
@@ -815,12 +815,12 @@ chalk/utils/storage_client.py,sha256=cK5KH8DVAt4Okk3X4jNMCkMiZgfUJE9Sq3zn4HkaBQo
  chalk/utils/string.py,sha256=mHciu1FR1NdXiE0GjiCOOs_Q3JBVpaNnjUQPorE5cJg,4268
  chalk/utils/stubgen.py,sha256=-mKIWFeiZojtfPwaTd9o3h4m4RvTmMTk6i-bI9JpU6c,21580
  chalk/utils/threading.py,sha256=dacvfFCpDs9GDWdRrE2mmM3Ex5DKOIaj5rCYDTqGshk,5305
- chalk/utils/tracing.py,sha256=qAyMRdFBkL4Q2_Bn-C31atDfJirUtBvcSjkYC2R51sM,13005
+ chalk/utils/tracing.py,sha256=NiiM-9dbuJhSCv6R1npR1uYNKWlkqTR6Ygm0Voi2NrY,13078
  chalk/utils/weak_set_by_identity.py,sha256=VmikA_laYwFeOphCwXJIuyOIkrdlQe0bSzaXq7onoQw,953
  chalk/utils/pydanticutil/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  chalk/utils/pydanticutil/pydantic_compat.py,sha256=O575lLYJ5GvZC4HMzR9yATxf9XwjC6NrDUXbNwZidlE,3031
- chalkpy-2.95.8.dist-info/METADATA,sha256=JdoTXDaz9gGWi2knlGCnxu-RaXfpVJz_Df0JUxFYEDQ,27494
- chalkpy-2.95.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- chalkpy-2.95.8.dist-info/entry_points.txt,sha256=Vg23sd8icwq-morJrljVFr-kQnMbm95rZfZj5wsZGis,42
- chalkpy-2.95.8.dist-info/top_level.txt,sha256=1Q6_19IGYfNxSw50W8tYKEJ2t5HKQ3W9Wiw4ia5yg2c,6
- chalkpy-2.95.8.dist-info/RECORD,,
+ chalkpy-2.95.9.dist-info/METADATA,sha256=0BpdYdoYau7n41noSYhSy4QF99-4gktS6xEhabl9Lbs,27754
+ chalkpy-2.95.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ chalkpy-2.95.9.dist-info/entry_points.txt,sha256=Vg23sd8icwq-morJrljVFr-kQnMbm95rZfZj5wsZGis,42
+ chalkpy-2.95.9.dist-info/top_level.txt,sha256=1Q6_19IGYfNxSw50W8tYKEJ2t5HKQ3W9Wiw4ia5yg2c,6
+ chalkpy-2.95.9.dist-info/RECORD,,