snowpark-checkpoints-validators 0.1.0rc3__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Files changed (22)
  1. snowflake/snowpark_checkpoints/__init__.py +34 -0
  2. snowflake/snowpark_checkpoints/checkpoint.py +482 -0
  3. snowflake/snowpark_checkpoints/errors.py +60 -0
  4. snowflake/snowpark_checkpoints/job_context.py +85 -0
  5. snowflake/snowpark_checkpoints/singleton.py +23 -0
  6. snowflake/snowpark_checkpoints/snowpark_sampler.py +99 -0
  7. snowflake/snowpark_checkpoints/spark_migration.py +222 -0
  8. snowflake/snowpark_checkpoints/utils/__init__.py +14 -0
  9. snowflake/snowpark_checkpoints/utils/checkpoint_logger.py +52 -0
  10. snowflake/snowpark_checkpoints/utils/constants.py +134 -0
  11. snowflake/snowpark_checkpoints/utils/extra_config.py +84 -0
  12. snowflake/snowpark_checkpoints/utils/pandera_check_manager.py +358 -0
  13. snowflake/snowpark_checkpoints/utils/supported_types.py +65 -0
  14. snowflake/snowpark_checkpoints/utils/telemetry.py +900 -0
  15. snowflake/snowpark_checkpoints/utils/utils_checks.py +372 -0
  16. snowflake/snowpark_checkpoints/validation_result_metadata.py +116 -0
  17. snowflake/snowpark_checkpoints/validation_results.py +49 -0
  18. {snowpark_checkpoints_validators-0.1.0rc3.dist-info → snowpark_checkpoints_validators-0.1.1.dist-info}/METADATA +4 -6
  19. snowpark_checkpoints_validators-0.1.1.dist-info/RECORD +21 -0
  20. snowpark_checkpoints_validators-0.1.0rc3.dist-info/RECORD +0 -4
  21. {snowpark_checkpoints_validators-0.1.0rc3.dist-info → snowpark_checkpoints_validators-0.1.1.dist-info}/WHEEL +0 -0
  22. {snowpark_checkpoints_validators-0.1.0rc3.dist-info → snowpark_checkpoints_validators-0.1.1.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/snowpark_sampler.py
@@ -0,0 +1,99 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Optional
+
+ import pandas
+
+ from snowflake.snowpark import DataFrame as SnowparkDataFrame
+ from snowflake.snowpark_checkpoints.job_context import SnowparkJobContext
+
+
+ class SamplingStrategy:
+     RANDOM_SAMPLE = 1
+     LIMIT = 2
+
+
+ class SamplingError(Exception):
+     pass
+
+
+ class SamplingAdapter:
+     def __init__(
+         self,
+         job_context: Optional[SnowparkJobContext],
+         sample_frac: Optional[float] = None,
+         sample_number: Optional[int] = None,
+         sampling_strategy: SamplingStrategy = SamplingStrategy.RANDOM_SAMPLE,
+     ):
+         self.pandas_sample_args = []
+         self.job_context = job_context
+         if sample_frac and not (0 <= sample_frac <= 1):
+             raise ValueError(
+                 f"'sample_frac' value {sample_frac} is out of range (0 <= sample_frac <= 1)"
+             )
+
+         self.sample_frac = sample_frac
+         self.sample_number = sample_number
+         self.sampling_strategy = sampling_strategy
+
+     def process_args(self, input_args):
+         # create the intermediate pandas
+         # data frame for the test data
+         for arg in input_args:
+             if isinstance(arg, SnowparkDataFrame):
+                 if arg.count() == 0:
+                     raise SamplingError(
+                         "Input DataFrame is empty. Cannot sample from an empty DataFrame."
+                     )
+
+                 if self.sampling_strategy == SamplingStrategy.RANDOM_SAMPLE:
+                     if self.sample_frac:
+                         df_sample = arg.sample(frac=self.sample_frac).to_pandas()
+                     else:
+                         df_sample = arg.sample(n=self.sample_number).to_pandas()
+                 else:
+                     df_sample = arg.limit(self.sample_number).to_pandas()
+
+                 self.pandas_sample_args.append(df_sample)
+             else:
+                 self.pandas_sample_args.append(arg)
+
+     def get_sampled_pandas_args(self):
+         return self.pandas_sample_args
+
+     def get_sampled_snowpark_args(self):
+         if self.job_context is None:
+             raise SamplingError("Need a job context to compare with Spark")
+         snowpark_sample_args = []
+         for arg in self.pandas_sample_args:
+             if isinstance(arg, pandas.DataFrame):
+                 snowpark_df = self.job_context.snowpark_session.create_dataframe(arg)
+                 snowpark_sample_args.append(snowpark_df)
+             else:
+                 snowpark_sample_args.append(arg)
+         return snowpark_sample_args
+
+     def get_sampled_spark_args(self):
+         if self.job_context is None:
+             raise SamplingError("Need a job context to compare with Spark")
+         pyspark_sample_args = []
+         for arg in self.pandas_sample_args:
+             if isinstance(arg, pandas.DataFrame):
+                 pyspark_df = self.job_context.spark_session.createDataFrame(arg)
+                 pyspark_sample_args.append(pyspark_df)
+             else:
+                 pyspark_sample_args.append(arg)
+         return pyspark_sample_args
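
SamplingAdapter collects a function's positional arguments, samples any Snowpark DataFrames among them down to pandas, and can then re-materialize the samples as Snowpark or PySpark DataFrames. A minimal usage sketch, where job_ctx (a SnowparkJobContext) and my_df (a Snowpark DataFrame) are hypothetical and assumed to exist:

from snowflake.snowpark_checkpoints.snowpark_sampler import (
    SamplingAdapter,
    SamplingStrategy,
)

# job_ctx and my_df are assumptions; only the adapter calls come from this module.
adapter = SamplingAdapter(
    job_ctx, sample_number=50, sampling_strategy=SamplingStrategy.LIMIT
)
adapter.process_args([my_df, "plain arg"])       # non-DataFrame args pass through untouched
pandas_args = adapter.get_sampled_pandas_args()  # [pandas.DataFrame, "plain arg"]
spark_args = adapter.get_sampled_spark_args()    # re-created via job_ctx.spark_session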
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/spark_migration.py
@@ -0,0 +1,222 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import Callable, Optional, TypeVar
+
+ import pandas as pd
+
+ from pyspark.sql import DataFrame as SparkDataFrame
+
+ from snowflake.snowpark import DataFrame as SnowparkDataFrame
+ from snowflake.snowpark.types import PandasDataFrame
+ from snowflake.snowpark_checkpoints.errors import SparkMigrationError
+ from snowflake.snowpark_checkpoints.job_context import SnowparkJobContext
+ from snowflake.snowpark_checkpoints.snowpark_sampler import (
+     SamplingAdapter,
+     SamplingStrategy,
+ )
+ from snowflake.snowpark_checkpoints.utils.constants import FAIL_STATUS, PASS_STATUS
+ from snowflake.snowpark_checkpoints.utils.telemetry import STATUS_KEY, report_telemetry
+ from snowflake.snowpark_checkpoints.utils.utils_checks import (
+     _replace_special_characters,
+     _update_validation_result,
+ )
+
+
+ fn = TypeVar("F", bound=Callable)
+
+
+ def check_with_spark(
+     job_context: Optional[SnowparkJobContext],
+     spark_function: fn,
+     checkpoint_name: str,
+     sample_number: Optional[int] = 100,
+     sampling_strategy: Optional[SamplingStrategy] = SamplingStrategy.RANDOM_SAMPLE,
+     output_path: Optional[str] = None,
+ ) -> Callable[[fn], fn]:
+     """Validate function output with Spark instance.
+
+     Will take the input snowpark dataframe of this function, sample data, convert
+     it to a Spark dataframe and then execute `spark_function`. Subsequently
+     the output of that function will be compared to the output of this function
+     for the same sample of data.
+
+     Args:
+         job_context (SnowparkJobContext): The job context containing configuration and details for the validation.
+         spark_function (fn): The equivalent PySpark function to compare against the Snowpark implementation.
+         checkpoint_name (str): A name for the checkpoint. If None, the decorated function's name is used.
+         sample_number (Optional[int], optional): The number of rows for validation. Defaults to 100.
+         sampling_strategy (Optional[SamplingStrategy], optional): The strategy used for sampling data.
+             Defaults to SamplingStrategy.RANDOM_SAMPLE.
+         output_path (Optional[str], optional): The path to store the validation results. Defaults to None.
+
+     Returns:
+         Callable[[fn], fn]: A decorator that wraps the original Snowpark function with validation logic.
+
+     """
+
+     def check_with_spark_decorator(snowpark_fn):
+         _checkpoint_name = checkpoint_name
+         if checkpoint_name is None:
+             _checkpoint_name = snowpark_fn.__name__
+         _checkpoint_name = _replace_special_characters(_checkpoint_name)
+
+         def wrapper(*args, **kwargs):
+             sampler = SamplingAdapter(
+                 job_context,
+                 sample_number=sample_number,
+                 sampling_strategy=sampling_strategy,
+             )
+             sampler.process_args(args)
+             snowpark_sample_args = sampler.get_sampled_snowpark_args()
+             pyspark_sample_args = sampler.get_sampled_spark_args()
+             # Run the sampled data in snowpark
+             snowpark_test_results = snowpark_fn(*snowpark_sample_args, **kwargs)
+             spark_test_results = spark_function(*pyspark_sample_args, **kwargs)
+             result, exception = _assert_return(
+                 snowpark_test_results,
+                 spark_test_results,
+                 job_context,
+                 _checkpoint_name,
+                 output_path,
+             )
+             if not result:
+                 raise exception from None
+             # Run the original function in snowpark
+             return snowpark_fn(*args, **kwargs)
+
+         return wrapper
+
+     return check_with_spark_decorator
+
+
+ @report_telemetry(
+     params_list=["snowpark_results", "spark_results"],
+     return_indexes=[(STATUS_KEY, 0)],
+     multiple_return=True,
+ )
+ def _assert_return(
+     snowpark_results, spark_results, job_context, checkpoint_name, output_path=None
+ ) -> tuple[bool, Optional[Exception]]:
+     """Assert and validate the results from Snowpark and Spark transformations.
+
+     Args:
+         snowpark_results (Any): Results from the Snowpark transformation.
+         spark_results (Any): Results from the Spark transformation to compare against.
+         job_context (Any): Additional context about the job. Defaults to None.
+         checkpoint_name (Any): Name of the checkpoint for logging. Defaults to None.
+         output_path (Optional[str], optional): The path to store the validation results. Defaults to None.
+
+     Returns:
+         tuple[bool, Optional[Exception]]: True and None when the results match,
+         or False and a SparkMigrationError describing the difference when they do not.
+
+     """
+     if isinstance(snowpark_results, SnowparkDataFrame) and isinstance(
+         spark_results, SparkDataFrame
+     ):
+         cmp = compare_spark_snowpark_dfs(spark_results, snowpark_results)
+
+         if not cmp.empty:
+             exception_result = SparkMigrationError(
+                 "DataFrame difference:\n", job_context, checkpoint_name, cmp
+             )
+             return False, exception_result
+         job_context._mark_pass(checkpoint_name)
+         _update_validation_result(checkpoint_name, PASS_STATUS, output_path)
+         return True, None
+     else:
+
+         if snowpark_results != spark_results:
+             exception_result = SparkMigrationError(
+                 "Return value difference:\n",
+                 job_context,
+                 checkpoint_name,
+                 f"{snowpark_results} != {spark_results}",
+             )
+             _update_validation_result(checkpoint_name, FAIL_STATUS, output_path)
+             return False, exception_result
+         job_context._mark_pass(checkpoint_name)
+         _update_validation_result(checkpoint_name, PASS_STATUS, output_path)
+         return True, None
+
+
+ def compare_spark_snowpark_dfs(
+     spark_df: SparkDataFrame, snowpark_df: SnowparkDataFrame
+ ) -> PandasDataFrame:
+     """Compare two dataframes for equality.
+
+     Args:
+         spark_df (SparkDataFrame): The Spark dataframe to compare.
+         snowpark_df (SnowparkDataFrame): The Snowpark dataframe to compare.
+
+     Returns: Pandas DataFrame containing the differences between the two dataframes.
+
+     """
+     snowpark_df = snowpark_df.to_pandas()
+     snowpark_df.columns = snowpark_df.columns.str.upper()
+     spark_df = spark_df.toPandas()
+     spark_df.columns = spark_df.columns.str.upper()
+     spark_cols = set(spark_df.columns)
+     snowpark_cols = set(snowpark_df.columns)
+     cmp = pd.DataFrame([])
+     left = spark_cols - snowpark_cols
+     right = snowpark_cols - spark_cols
+     if left != set():
+         cmp = _compare_dfs(spark_df, snowpark_df, "spark", "snowpark")
+     if right != set():
+         right_cmp = _compare_dfs(snowpark_df, spark_df, "snowpark", "spark")
+         cmp = right_cmp if cmp.empty else pd.concat([cmp, right_cmp], ignore_index=True)
+     if left == set() and right == set():
+         if spark_df.shape == snowpark_df.shape:
+             cmp = spark_df.compare(snowpark_df, result_names=("spark", "snowpark"))
+         else:
+             cmp = spark_df.merge(snowpark_df, indicator=True, how="outer").loc[
+                 lambda x: x["_merge"] != "both"
+             ]
+             cmp = cmp.replace(
+                 {"left_only": "spark_only", "right_only": "snowpark_only"}
+             )
+
+     return cmp
+
+
+ def _compare_dfs(
+     df_a: pd.DataFrame, df_b: pd.DataFrame, left_label: str, right_label: str
+ ) -> PandasDataFrame:
+     """Compare two dataframes for equality.
+
+     Args:
+         df_a (PandasDataFrame): The first dataframe to compare.
+         df_b (PandasDataFrame): The second dataframe to compare.
+         left_label (str): The label for the first dataframe.
+         right_label (str): The label for the second dataframe.
+
+     Returns: Pandas DataFrame containing the differences between the two dataframes.
+
+     """
+     df_a["side"] = "a"
+     df_b["side"] = "b"
+     a_only = [col for col in df_a.columns if col not in df_b.columns] + ["side"]
+     b_only = [col for col in df_b.columns if col not in df_a.columns] + ["side"]
+     cmp = (
+         df_a[a_only]
+         .merge(df_b[b_only], indicator=True, how="left")
+         .loc[lambda x: x["_merge"] != "both"]
+     )
+     cmp = cmp.replace(
+         {"left_only": f"{left_label}_only", "right_only": f"{right_label}_only"}
+     )
+     cmp = cmp.drop(columns="side")
+     return cmp
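
check_with_spark is the public entry point of this module: it decorates a Snowpark transformation, runs a sampled copy of its inputs through both the Snowpark and the PySpark implementation, and only executes the real workload when the sampled outputs agree. A hypothetical migration check, assuming a pre-built SnowparkJobContext named job_ctx and an input DataFrame with an AGE column (both assumptions, not code from this package):

from snowflake.snowpark import DataFrame as SnowparkDataFrame
from snowflake.snowpark_checkpoints.spark_migration import check_with_spark


def spark_filter_adults(df):
    # PySpark reference implementation being migrated away from.
    return df.filter(df.AGE > 21)


@check_with_spark(
    job_context=job_ctx,  # assumed to wrap both a Snowpark and a Spark session
    spark_function=spark_filter_adults,
    checkpoint_name="filter_adults",
)
def filter_adults(df: SnowparkDataFrame) -> SnowparkDataFrame:
    return df.filter(df["AGE"] > 21)


# Samples the input, runs both implementations on the sample, raises
# SparkMigrationError on a mismatch, then runs the Snowpark version on the full input.
result = filter_adults(my_snowpark_df)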
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/utils/__init__.py
@@ -0,0 +1,14 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/utils/checkpoint_logger.py
@@ -0,0 +1,52 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ import threading
+
+
+ class CheckpointLogger:
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls, *args, **kwargs):
+         if not cls._instance:
+             with cls._lock:
+                 if not cls._instance:
+                     cls._instance = super().__new__(cls, *args, **kwargs)
+                     cls._instance._initialize()
+         return cls._instance
+
+     def _initialize(self):
+         # Create formatter
+         formatter = logging.Formatter(
+             "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+         )
+
+         self.logger = logging.getLogger("CheckpointLogger")
+         self.logger.setLevel(logging.INFO)
+
+         # Create console handler and set level to debug
+         ch = logging.StreamHandler()
+         ch.setLevel(logging.DEBUG)
+
+         # Add formatter to ch
+         ch.setFormatter(formatter)
+
+         # Add ch to logger
+         self.logger.addHandler(ch)
+
+     def get_logger(self):
+         return self.logger
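
CheckpointLogger is a double-checked-locking singleton, so the console handler is attached exactly once per process no matter how many times the class is instantiated:

from snowflake.snowpark_checkpoints.utils.checkpoint_logger import CheckpointLogger

logger = CheckpointLogger().get_logger()
logger.info("starting checkpoint validation")    # INFO and above reach the console handler
assert CheckpointLogger() is CheckpointLogger()  # every call returns the same instance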
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/utils/constants.py
@@ -0,0 +1,134 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Skip type
+ from enum import IntEnum
+ from typing import Final
+
+
+ class CheckpointMode(IntEnum):
+
+     """Enum class representing the validation mode."""
+
+     SCHEMA = 1
+     """Validate against a schema file"""
+     DATAFRAME = 2
+     """Validate against a dataframe"""
+
+
+ # Execution mode
+ SCHEMA_EXECUTION_MODE: Final[str] = "Schema"
+ DATAFRAME_EXECUTION_MODE: Final[str] = "Dataframe"
+
+
+ # File position on stack
+ STACK_POSITION_CHECKPOINT: Final[int] = 6
+
+ # Validation status
+ PASS_STATUS: Final[str] = "PASS"
+ FAIL_STATUS: Final[str] = "FAIL"
+
+ # Validation result keys
+ DEFAULT_KEY: Final[str] = "default"
+
+
+ # Skip type
+ SKIP_ALL: Final[str] = "skip_all"
+
+ # Supported types
+ BOOLEAN_TYPE: Final[str] = "boolean"
+ BINARY_TYPE: Final[str] = "binary"
+ BYTE_TYPE: Final[str] = "byte"
+ CHAR_TYPE: Final[str] = "char"
+ DATE_TYPE: Final[str] = "date"
+ DAYTIMEINTERVAL_TYPE: Final[str] = "daytimeinterval"
+ DECIMAL_TYPE: Final[str] = "decimal"
+ DOUBLE_TYPE: Final[str] = "double"
+ FLOAT_TYPE: Final[str] = "float"
+ INTEGER_TYPE: Final[str] = "integer"
+ LONG_TYPE: Final[str] = "long"
+ SHORT_TYPE: Final[str] = "short"
+ STRING_TYPE: Final[str] = "string"
+ TIMESTAMP_TYPE: Final[str] = "timestamp"
+ TIMESTAMP_NTZ_TYPE: Final[str] = "timestamp_ntz"
+ VARCHAR_TYPE: Final[str] = "varchar"
+
+ # Pandas data types
+ PANDAS_BOOLEAN_DTYPE: Final[str] = "bool"
+ PANDAS_DATETIME_DTYPE: Final[str] = "datetime64[ns]"
+ PANDAS_FLOAT_DTYPE: Final[str] = "float64"
+ PANDAS_INTEGER_DTYPE: Final[str] = "int64"
+ PANDAS_OBJECT_DTYPE: Final[str] = "object"
+ PANDAS_TIMEDELTA_DTYPE: Final[str] = "timedelta64[ns]"
+
+ # Schemas keys
+ COLUMNS_KEY: Final[str] = "columns"
+ COUNT_KEY: Final[str] = "rows_count"
+ DECIMAL_PRECISION_KEY: Final[str] = "decimal_precision"
+ FALSE_COUNT_KEY: Final[str] = "false_count"
+ FORMAT_KEY: Final[str] = "format"
+ NAME_KEY: Final[str] = "name"
+ MARGIN_ERROR_KEY: Final[str] = "margin_error"
+ MAX_KEY: Final[str] = "max"
+ MEAN_KEY: Final[str] = "mean"
+ MIN_KEY: Final[str] = "min"
+ NULL_COUNT_KEY: Final[str] = "rows_null_count"
+ NULLABLE_KEY: Final[str] = "nullable"
+ ROWS_NOT_NULL_COUNT_KEY: Final[str] = "rows_not_null_count"
+ TRUE_COUNT_KEY: Final[str] = "true_count"
+ TYPE_KEY: Final[str] = "type"
+ ROWS_COUNT_KEY: Final[str] = "rows_count"
+ FORMAT_KEY: Final[str] = "format"
+
+ DATAFRAME_CUSTOM_DATA_KEY: Final[str] = "custom_data"
+ DATAFRAME_PANDERA_SCHEMA_KEY: Final[str] = "pandera_schema"
+
+ # Default values
+ DEFAULT_DATE_FORMAT: Final[str] = "%Y-%m-%d"
+
+ # SQL Column names
+ TABLE_NAME_COL: Final[str] = "TABLE_NAME"
+ CREATED_COL: Final[str] = "CREATED"
+
+ # SQL Table names
+ INFORMATION_SCHEMA_TABLE_NAME: Final[str] = "INFORMATION_SCHEMA"
+ TABLES_TABLE_NAME: Final[str] = "TABLES"
+
+ # SQL Query
+ EXCEPT_HASH_AGG_QUERY: Final[
+     str
+ ] = "SELECT HASH_AGG(*) FROM IDENTIFIER(:1) EXCEPT SELECT HASH_AGG(*) FROM IDENTIFIER(:2)"
+
+ # Table checkpoints name
+ CHECKPOINT_TABLE_NAME_FORMAT: Final[str] = "{}_snowpark"
+
+ # Write mode
+ OVERWRITE_MODE: Final[str] = "overwrite"
+
+ # Validation modes
+ VALIDATION_MODE_KEY: Final[str] = "validation_mode"
+ PIPELINES_KEY: Final[str] = "pipelines"
+
+ # File name
+ CHECKPOINT_JSON_OUTPUT_FILE_FORMAT_NAME: Final[str] = "{}.json"
+ CHECKPOINTS_JSON_FILE_NAME: Final[str] = "checkpoints.json"
+ SNOWPARK_CHECKPOINTS_OUTPUT_DIRECTORY_NAME: Final[str] = "snowpark-checkpoints-output"
+ CHECKPOINT_PARQUET_OUTPUT_FILE_FORMAT_NAME: Final[str] = "{}.parquet"
+ VALIDATION_RESULTS_JSON_FILE_NAME: Final[str] = "checkpoint_validation_results.json"
+
+ # Environment variables
+ SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH_ENV_VAR: Final[
+     str
+ ] = "SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH"
--- /dev/null
+++ b/snowflake/snowpark_checkpoints/utils/extra_config.py
@@ -0,0 +1,84 @@
+ # Copyright 2025 Snowflake Inc.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+
+ from typing import Optional
+
+ from snowflake.snowpark_checkpoints.utils.constants import (
+     SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH_ENV_VAR,
+ )
+
+
+ # noinspection DuplicatedCode
+ def _get_checkpoint_contract_file_path() -> str:
+     return os.environ.get(SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH_ENV_VAR, os.getcwd())
+
+
+ # noinspection DuplicatedCode
+ def _get_metadata():
+     try:
+         from snowflake.snowpark_checkpoints_configuration.checkpoint_metadata import (
+             CheckpointMetadata,
+         )
+
+         path = _get_checkpoint_contract_file_path()
+         metadata = CheckpointMetadata(path)
+         return True, metadata
+
+     except ImportError:
+         return False, None
+
+
+ def is_checkpoint_enabled(checkpoint_name: Optional[str] = None) -> bool:
+     """Check if a checkpoint is enabled.
+
+     Args:
+         checkpoint_name (Optional[str], optional): The name of the checkpoint.
+
+     Returns:
+         bool: True if the checkpoint is enabled, False otherwise.
+
+     """
+     enabled, metadata = _get_metadata()
+     if enabled and checkpoint_name is not None:
+         config = metadata.get_checkpoint(checkpoint_name)
+         return config.enabled
+     else:
+         return True
+
+
+ def get_checkpoint_file(checkpoint_name: str) -> Optional[str]:
+     """Retrieve the configuration for a specified checkpoint.
+
+     This function fetches the checkpoint configuration if metadata is enabled.
+     It extracts the file name from the checkpoint metadata or
+     from the call stack if not explicitly provided in the metadata.
+
+     Args:
+         checkpoint_name (str): The name of the checkpoint to retrieve the configuration for.
+
+     Returns:
+         Optional[str]: The file name associated with the checkpoint,
+         or None if metadata is not enabled.
+
+     """
+     enabled, metadata = _get_metadata()
+     if enabled:
+         config = metadata.get_checkpoint(checkpoint_name)
+
+         return config.file
+     else:
+         return None
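
The contract file location comes from the SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH environment variable, falling back to the current working directory, and every lookup degrades to "enabled" when the optional snowpark-checkpoints-configuration package is not installed. A sketch, with a hypothetical contract directory:

import os

from snowflake.snowpark_checkpoints.utils.constants import (
    SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH_ENV_VAR,
)
from snowflake.snowpark_checkpoints.utils.extra_config import is_checkpoint_enabled

# Point the metadata loader at a directory containing checkpoints.json (path is hypothetical).
os.environ[SNOWFLAKE_CHECKPOINT_CONTRACT_FILE_PATH_ENV_VAR] = "/path/to/contracts"

# True when the configuration package is absent, when no name is given,
# or when the named checkpoint is enabled in the loaded metadata.
print(is_checkpoint_enabled("my_checkpoint"))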