atlan-application-sdk 0.1.1rc38__py3-none-any.whl → 0.1.1rc39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- application_sdk/clients/temporal.py +1 -1
- application_sdk/outputs/parquet.py +79 -34
- application_sdk/services/objectstore.py +83 -18
- application_sdk/version.py +1 -1
- {atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/METADATA +1 -1
- {atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/RECORD +9 -9
- {atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/WHEEL +0 -0
- {atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/licenses/LICENSE +0 -0
- {atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/licenses/NOTICE +0 -0
application_sdk/clients/temporal.py
CHANGED
@@ -151,7 +151,7 @@ class TemporalWorkflowClient(WorkflowClient):
                 await asyncio.sleep(refresh_interval)

                 # Get fresh token
-                token = await self.auth_manager.get_access_token()
+                token = await self.auth_manager.get_access_token(force_refresh=True)
                 if self.client:
                     self.client.api_key = token
                     logger.info("Updated client RPC metadata with fresh token")
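
The hunk above sits in the client's background token-refresh loop: after each sleep it now calls get_access_token(force_refresh=True) so a cached token is never reused past its refresh window. A minimal standalone sketch of the same pattern, assuming an auth manager and client shaped like the ones in this diff:

import asyncio

async def keep_token_fresh(client, auth_manager, refresh_interval: float) -> None:
    """Hypothetical helper mirroring the loop above (names outside the hunk are assumptions)."""
    while True:
        await asyncio.sleep(refresh_interval)
        # force_refresh=True bypasses any cached token so the client always
        # receives a newly issued credential.
        token = await auth_manager.get_access_token(force_refresh=True)
        if client:
            client.api_key = token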
application_sdk/outputs/parquet.py
CHANGED
@@ -1,5 +1,6 @@
 import os
-from
+from enum import Enum
+from typing import TYPE_CHECKING, List, Optional, Union

 from temporalio import activity

@@ -18,6 +19,14 @@ if TYPE_CHECKING:
     import pandas as pd


+class WriteMode(Enum):
+    """Enumeration of write modes for Parquet output operations."""
+
+    APPEND = "append"
+    OVERWRITE = "overwrite"
+    OVERWRITE_PARTITIONS = "overwrite-partitions"
+
+
 class ParquetOutput(Output):
     """Output handler for writing data to Parquet files.

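
The new WriteMode enum replaces the old free-form write_mode string. A small illustrative sketch (the enum is copied from the hunk above so it runs standalone) of how string values round-trip through the enum, which is what write_daft_dataframe relies on for backward compatibility:

from enum import Enum

class WriteMode(Enum):
    """Copy of the enum added above, for a self-contained demo."""
    APPEND = "append"
    OVERWRITE = "overwrite"
    OVERWRITE_PARTITIONS = "overwrite-partitions"

# Callers may pass either the member or its string value; strings are
# normalised via the enum constructor, so typos fail fast with ValueError.
mode = WriteMode("overwrite-partitions")
assert mode is WriteMode.OVERWRITE_PARTITIONS
assert mode.value == "overwrite-partitions"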
@@ -29,7 +38,6 @@ class ParquetOutput(Output):
         output_prefix (str): Prefix for files when uploading to object store.
         output_suffix (str): Suffix for output files.
         typename (Optional[str]): Type name of the entity e.g database, schema, table.
-        mode (str): Write mode for parquet files ("append" or "overwrite").
         chunk_size (int): Maximum number of records per chunk.
         total_record_count (int): Total number of records processed.
         chunk_count (int): Number of chunks created.
@@ -45,7 +53,6 @@ class ParquetOutput(Output):
         output_suffix: str = "",
         output_prefix: str = "",
         typename: Optional[str] = None,
-        write_mode: Literal["append", "overwrite", "overwrite-partitions"] = "append",
         chunk_size: Optional[int] = 100000,
         buffer_size: Optional[int] = 100000,
         total_record_count: int = 0,
@@ -61,7 +68,6 @@ class ParquetOutput(Output):
             output_suffix (str): Suffix for output files.
             output_prefix (str): Prefix for files when uploading to object store.
             typename (Optional[str], optional): Type name of the entity e.g database, schema, table.
-            mode (str, optional): Write mode for parquet files. Defaults to "append".
             chunk_size (int, optional): Maximum records per chunk. Defaults to 100000.
             total_record_count (int, optional): Initial total record count. Defaults to 0.
             chunk_count (int, optional): Initial chunk count. Defaults to 0.
@@ -78,7 +84,6 @@ class ParquetOutput(Output):
         self.output_suffix = output_suffix
         self.output_prefix = output_prefix
         self.typename = typename
-        self.write_mode = write_mode
         self.chunk_size = chunk_size
         self.buffer_size = buffer_size
         self.buffer: List[Union["pd.DataFrame", "daft.DataFrame"]] = [] # noqa: F821
@@ -103,7 +108,7 @@ class ParquetOutput(Output):

     def path_gen(
         self,
-        chunk_start: int
+        chunk_start: Optional[int] = None,
         chunk_count: int = 0,
         start_marker: Optional[str] = None,
         end_marker: Optional[str] = None,
@@ -111,7 +116,7 @@ class ParquetOutput(Output):
         """Generate a file path for a chunk.

         Args:
-            chunk_start (int
+            chunk_start (Optional[int]): Starting index of the chunk, or None for single chunk.
             chunk_count (int): Total number of chunks.
             start_marker (Optional[str]): Start marker for query extraction.
             end_marker (Optional[str]): End marker for query extraction.
@@ -182,7 +187,7 @@ class ParquetOutput(Output):
                 name="parquet_write_records",
                 value=len(dataframe),
                 metric_type=MetricType.COUNTER,
-                labels={"type": "pandas", "mode":
+                labels={"type": "pandas", "mode": WriteMode.APPEND.value},
                 description="Number of records written to Parquet files from pandas DataFrame",
             )

@@ -191,7 +196,7 @@ class ParquetOutput(Output):
                 name="parquet_chunks_written",
                 value=1,
                 metric_type=MetricType.COUNTER,
-                labels={"type": "pandas", "mode":
+                labels={"type": "pandas", "mode": WriteMode.APPEND.value},
                 description="Number of chunks written to Parquet files",
             )

@@ -203,69 +208,109 @@ class ParquetOutput(Output):
                 name="parquet_write_errors",
                 value=1,
                 metric_type=MetricType.COUNTER,
-                labels={
+                labels={
+                    "type": "pandas",
+                    "mode": WriteMode.APPEND.value,
+                    "error": str(e),
+                },
                 description="Number of errors while writing to Parquet files",
             )
             logger.error(f"Error writing pandas dataframe to parquet: {str(e)}")
             raise

-    async def write_daft_dataframe(
+    async def write_daft_dataframe(
+        self,
+        dataframe: "daft.DataFrame", # noqa: F821
+        partition_cols: Optional[List] = None,
+        write_mode: Union[WriteMode, str] = WriteMode.APPEND,
+        morsel_size: int = 100_000,
+    ):
         """Write a daft DataFrame to Parquet files and upload to object store.

+        Uses Daft's native file size management to automatically split large DataFrames
+        into multiple parquet files based on the configured target file size. Supports
+        Hive partitioning for efficient data organization.
+
         Args:
             dataframe (daft.DataFrame): The DataFrame to write.
+            partition_cols (Optional[List]): Column names or expressions to use for Hive partitioning.
+                Can be strings (column names) or daft column expressions. If None (default), no partitioning is applied.
+            write_mode (Union[WriteMode, str]): Write mode for parquet files.
+                Use WriteMode.APPEND, WriteMode.OVERWRITE, WriteMode.OVERWRITE_PARTITIONS, or their string equivalents.
+            morsel_size (int): Default number of rows in a morsel used for the new local executor, when running locally on just a single machine,
+                Daft does not use partitions. Instead of using partitioning to control parallelism, the local execution engine performs a streaming-based
+                execution on small "morsels" of data, which provides much more stable memory utilization while improving the user experience with not having
+                to worry about partitioning.
+
+        Note:
+            - Daft automatically handles file chunking based on parquet_target_filesize
+            - Multiple files will be created if DataFrame exceeds DAPR limit
+            - If partition_cols is set, creates Hive-style directory structure
         """
         try:
+            import daft
+
+            # Convert string to enum if needed for backward compatibility
+            if isinstance(write_mode, str):
+                write_mode = WriteMode(write_mode)
+
             row_count = dataframe.count_rows()
             if row_count == 0:
                 return

+            # Use Daft's execution context for temporary configuration
+            with daft.execution_config_ctx(
+                parquet_target_filesize=self.max_file_size_bytes,
+                default_morsel_size=morsel_size,
+            ):
+                # Daft automatically handles file splitting and naming
+                dataframe.write_parquet(
+                    root_dir=self.output_path,
+                    write_mode=write_mode.value,
+                    partition_cols=partition_cols if partition_cols else [],
+                )
+
             # Update counters
             self.chunk_count += 1
             self.total_record_count += row_count

-            # Generate file path using path_gen function
-            if self.start_marker and self.end_marker:
-                file_path = self.output_path
-            else:
-                file_path = f"{self.output_path}/{self.path_gen(self.chunk_start, self.chunk_count, self.start_marker, self.end_marker)}"
-
-            # Write the dataframe to parquet using daft
-            dataframe.write_parquet(
-                file_path,
-                write_mode=self.write_mode,
-            )
-
             # Record metrics for successful write
             self.metrics.record_metric(
                 name="parquet_write_records",
                 value=row_count,
                 metric_type=MetricType.COUNTER,
-                labels={"type": "daft", "mode":
+                labels={"type": "daft", "mode": write_mode.value},
                 description="Number of records written to Parquet files from daft DataFrame",
             )

-            # Record
+            # Record operation metrics (note: actual file count may be higher due to Daft's splitting)
             self.metrics.record_metric(
-                name="
+                name="parquet_write_operations",
                 value=1,
                 metric_type=MetricType.COUNTER,
-                labels={"type": "daft", "mode":
-                description="Number of
+                labels={"type": "daft", "mode": write_mode.value},
+                description="Number of write operations to Parquet files",
             )

-            #
-
-
-
+            # Upload the entire directory (contains multiple parquet files created by Daft)
+            if write_mode == WriteMode.OVERWRITE:
+                # Delete the directory from object store
+                await ObjectStore.delete_prefix(
+                    prefix=get_object_store_prefix(self.output_path)
+                )
+
+            await ObjectStore.upload_prefix(
+                source=self.output_path,
+                destination=get_object_store_prefix(self.output_path),
             )
+
         except Exception as e:
             # Record metrics for failed write
             self.metrics.record_metric(
                 name="parquet_write_errors",
                 value=1,
                 metric_type=MetricType.COUNTER,
-                labels={"type": "daft", "mode":
+                labels={"type": "daft", "mode": write_mode, "error": str(e)},
                 description="Number of errors while writing to Parquet files",
             )
             logger.error(f"Error writing daft dataframe to parquet: {str(e)}")
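
Putting the new signature together, a hedged usage sketch: the ParquetOutput constructor arguments other than those visible in this diff (for example output_path) are assumptions, and the daft DataFrame here is a toy in-memory frame.

import asyncio
import daft

from application_sdk.outputs.parquet import ParquetOutput, WriteMode

async def main() -> None:
    df = daft.from_pydict({"id": [1, 2, 3], "region": ["us", "eu", "us"]})

    # output_path is an assumed constructor argument; adjust to the real signature.
    output = ParquetOutput(output_path="/tmp/extract", output_prefix="raw")

    # Hive-partition by region and replace whatever is already in the object
    # store under this prefix (OVERWRITE deletes the prefix before uploading).
    await output.write_daft_dataframe(
        df,
        partition_cols=["region"],
        write_mode=WriteMode.OVERWRITE,
    )

asyncio.run(main())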
@@ -279,7 +324,7 @@ class ParquetOutput(Output):
         """
         return self.output_path

-    async def _flush_buffer(self, chunk_part):
+    async def _flush_buffer(self, chunk_part: int):
         """Flush the current buffer to a Parquet file.

         This method combines all DataFrames in the buffer, writes them to a Parquet file,
application_sdk/services/objectstore.py
CHANGED
@@ -26,6 +26,31 @@ class ObjectStore:
     OBJECT_CREATE_OPERATION = "create"
     OBJECT_GET_OPERATION = "get"
     OBJECT_LIST_OPERATION = "list"
+    OBJECT_DELETE_OPERATION = "delete"
+
+    @classmethod
+    def _create_file_metadata(cls, key: str) -> dict[str, str]:
+        """Create metadata for file operations (get, delete, create).
+
+        Args:
+            key: The file key/path.
+
+        Returns:
+            Metadata dictionary with key, fileName, and blobName fields.
+        """
+        return {"key": key, "fileName": key, "blobName": key}
+
+    @classmethod
+    def _create_list_metadata(cls, prefix: str) -> dict[str, str]:
+        """Create metadata for list operations.
+
+        Args:
+            prefix: The prefix to list files under.
+
+        Returns:
+            Metadata dictionary with prefix and fileName fields, or empty dict if no prefix.
+        """
+        return {"prefix": prefix, "fileName": prefix} if prefix else {}

     @classmethod
     async def list_files(
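
The two helpers simply centralise the metadata dictionaries the Dapr bindings expect. A standalone sketch of the shapes they produce (plain functions here, mirroring the classmethods above; the key and prefix values are illustrative):

def create_file_metadata(key: str) -> dict[str, str]:
    # Same shape as ObjectStore._create_file_metadata
    return {"key": key, "fileName": key, "blobName": key}

def create_list_metadata(prefix: str) -> dict[str, str]:
    # Same shape as ObjectStore._create_list_metadata; empty dict when no prefix
    return {"prefix": prefix, "fileName": prefix} if prefix else {}

print(create_file_metadata("artifacts/raw/table.parquet"))
# -> {'key': 'artifacts/raw/table.parquet', 'fileName': 'artifacts/raw/table.parquet', 'blobName': 'artifacts/raw/table.parquet'}
print(create_list_metadata(""))
# -> {}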
@@ -44,12 +69,11 @@
             Exception: If there's an error listing files from the object store.
         """
         try:
-            metadata = {"prefix": prefix, "fileName": prefix} if prefix else {}
             data = json.dumps({"prefix": prefix}).encode("utf-8") if prefix else ""

             response_data = await cls._invoke_dapr_binding(
                 operation=cls.OBJECT_LIST_OPERATION,
-                metadata=
+                metadata=cls._create_list_metadata(prefix),
                 data=data,
                 store_name=store_name,
             )
@@ -105,12 +129,11 @@
             Exception: If there's an error getting the file from the object store.
         """
         try:
-            metadata = {"key": key, "fileName": key, "blobName": key}
             data = json.dumps({"key": key}).encode("utf-8") if key else ""

             response_data = await cls._invoke_dapr_binding(
                 operation=cls.OBJECT_GET_OPERATION,
-                metadata=
+                metadata=cls._create_file_metadata(key),
                 data=data,
                 store_name=store_name,
             )
@@ -144,20 +167,68 @@
             return False

     @classmethod
-    async def
+    async def delete_file(
         cls, key: str, store_name: str = DEPLOYMENT_OBJECT_STORE_NAME
     ) -> None:
-        """Delete a file
+        """Delete a single file from the object store.

         Args:
-            key: The file path
+            key: The file path to delete.
             store_name: Name of the Dapr object store binding to use.

-
-
-
+        Raises:
+            Exception: If there's an error deleting the file from the object store.
+        """
+        try:
+            data = json.dumps({"key": key}).encode("utf-8")
+
+            await cls._invoke_dapr_binding(
+                operation=cls.OBJECT_DELETE_OPERATION,
+                metadata=cls._create_file_metadata(key),
+                data=data,
+                store_name=store_name,
+            )
+            logger.debug(f"Successfully deleted file: {key}")
+        except Exception as e:
+            logger.error(f"Error deleting file {key}: {str(e)}")
+            raise
+
+    @classmethod
+    async def delete_prefix(
+        cls, prefix: str, store_name: str = DEPLOYMENT_OBJECT_STORE_NAME
+    ) -> None:
+        """Delete all files under a prefix from the object store.
+
+        Args:
+            prefix: The prefix path to delete all files under.
+            store_name: Name of the Dapr object store binding to use.
+
+        Raises:
+            Exception: If there's an error deleting files from the object store.
         """
-
+        try:
+            # First, list all files under the prefix
+            files_to_delete = await cls.list_files(prefix=prefix, store_name=store_name)
+
+            if not files_to_delete:
+                logger.info(f"No files found under prefix: {prefix}")
+                return
+
+            logger.info(f"Deleting {len(files_to_delete)} files under prefix: {prefix}")
+
+            # Delete each file individually
+            for file_path in files_to_delete:
+                try:
+                    await cls.delete_file(key=file_path, store_name=store_name)
+                except Exception as e:
+                    logger.warning(f"Failed to delete file {file_path}: {str(e)}")
+                    # Continue with other files even if one fails
+
+            logger.info(f"Successfully deleted all files under prefix: {prefix}")
+
+        except Exception as e:
+            logger.error(f"Error deleting files under prefix {prefix}: {str(e)}")
+            raise

     @classmethod
     async def upload_file(
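
Usage sketch for the new deletion APIs: delete_prefix lists everything under the prefix and deletes the objects one by one, logging and skipping individual failures, while delete_file targets a single key. The prefix and key values below are illustrative.

import asyncio

from application_sdk.services.objectstore import ObjectStore

async def cleanup() -> None:
    # Remove one object...
    await ObjectStore.delete_file(key="artifacts/raw/table.parquet")
    # ...or everything under a prefix (uses list_files + delete_file internally).
    await ObjectStore.delete_prefix(prefix="artifacts/raw/")

asyncio.run(cleanup())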
@@ -191,17 +262,11 @@
             logger.error(f"Error reading file {source}: {str(e)}")
             raise e

-        metadata = {
-            "key": destination,
-            "blobName": destination,
-            "fileName": destination,
-        }
-
         try:
             await cls._invoke_dapr_binding(
                 operation=cls.OBJECT_CREATE_OPERATION,
                 data=file_content,
-                metadata=
+                metadata=cls._create_file_metadata(destination),
                 store_name=store_name,
             )
             logger.debug(f"Successfully uploaded file: {destination}")
application_sdk/version.py
CHANGED
{atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: atlan-application-sdk
-Version: 0.1.1rc38
+Version: 0.1.1rc39
 Summary: Atlan Application SDK is a Python library for developing applications on the Atlan Platform
 Project-URL: Repository, https://github.com/atlanhq/application-sdk
 Project-URL: Documentation, https://github.com/atlanhq/application-sdk/README.md
{atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/RECORD
RENAMED
@@ -1,6 +1,6 @@
 application_sdk/__init__.py,sha256=2e2mvmLJ5dxmJGPELtb33xwP-j6JMdoIuqKycEn7hjg,151
 application_sdk/constants.py,sha256=GzwZO0pa9M-FgibmfIs1lh-Fwo06K9Tk6WzGqMyJgpI,10362
-application_sdk/version.py,sha256=
+application_sdk/version.py,sha256=c9l4xzOeaiyxuYpkhXgaiBr52RJe_2lv_aUnX-NAeR4,88
 application_sdk/worker.py,sha256=i5f0AeKI39IfsLO05QkwC6uMz0zDPSJqP7B2byri1VI,7489
 application_sdk/activities/__init__.py,sha256=QaXLOBYbb0zPOY5kfDQh56qbXQFaYNXOjJ5PCvatiZ4,9530
 application_sdk/activities/lock_management.py,sha256=L__GZ9BsArwU1ntYwAgCKsSjCqN6QBeOfT-OT4WyD4Y,3983
@@ -21,7 +21,7 @@ application_sdk/clients/atlan_auth.py,sha256=D7FuNqv81ohNXLJtdx1AFw_jU6a3g0Pw614
 application_sdk/clients/base.py,sha256=TIn3pG89eXUc1XSYf4jk66m1vajWp0WxcCQOOltdazA,14021
 application_sdk/clients/redis.py,sha256=IfAD32vLp88BCvsDTaQtxFHxzHlEx4V7TK7h1HwDDBg,15917
 application_sdk/clients/sql.py,sha256=tW89SHuuWdU5jv8lDUP5AUCEpR2CF_5TyUvYDCBHses,17880
-application_sdk/clients/temporal.py,sha256=
+application_sdk/clients/temporal.py,sha256=MEGNT1_crWAn-vdfcBUH0A7IKvKDDSAaiEpGCS7gas4,18235
 application_sdk/clients/utils.py,sha256=zLFOJbTr_6TOqnjfVFGY85OtIXZ4FQy_rquzjaydkbY,779
 application_sdk/clients/workflow.py,sha256=6bSqmA3sNCk9oY68dOjBUDZ9DhNKQxPD75qqE0cfldc,6104
 application_sdk/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -70,7 +70,7 @@ application_sdk/observability/decorators/observability_decorator.py,sha256=JNrWN
 application_sdk/outputs/__init__.py,sha256=HIENr2w9gu6u3sF_nvraj45yk53NDAddtaXSUHIVBjs,9469
 application_sdk/outputs/iceberg.py,sha256=IGtj5WDgqLu6vzDEvw5DLsKsjm29Krto3AHvWpemr0A,5311
 application_sdk/outputs/json.py,sha256=zyYQjGj5tb7bJhNt3ObwsuHT6Gakj8qNey-siUlWdP4,15065
-application_sdk/outputs/parquet.py,sha256=
+application_sdk/outputs/parquet.py,sha256=AHcsuIVU0C-yNmqk4oUuDwdxCBSjaAxJwe2v7EklVsg,16220
 application_sdk/server/__init__.py,sha256=KTqE1YPw_3WDVMWatJUuf9OOiobLM2K5SMaBrI62sCo,1568
 application_sdk/server/fastapi/__init__.py,sha256=YOdWNE-qqiXfo-exvxPg8T0PSuOxTdeSetUn6-BXxZg,27704
 application_sdk/server/fastapi/models.py,sha256=K6eNl3XXiTXKUvRTpq3oqdGH3jY1-ApobXma04J86fE,6665
@@ -82,7 +82,7 @@ application_sdk/server/fastapi/routers/server.py,sha256=vfHQwZCysThzfeVFNVW1IjuA
 application_sdk/services/__init__.py,sha256=H-5HZEPdr53MUfAggyHqHhRXDRLZFZsxvJgWbr257Ds,465
 application_sdk/services/atlan_storage.py,sha256=TKzXxu0yXeUcmZehwp8PcnQTC4A9w9RlZ0Fl-Xp1bLE,8509
 application_sdk/services/eventstore.py,sha256=X03JzodKByXh8w8nOl658rnnZfMFTj0IkmiLVbd6IN8,6729
-application_sdk/services/objectstore.py,sha256=
+application_sdk/services/objectstore.py,sha256=85e_68ubUsoj3riPSgPorFvJR0vnAlkSq3uglRjUtIA,16402
 application_sdk/services/secretstore.py,sha256=UpyLLcdMia1tqFCpRrn-lE9AnERAt2iGVzET6QqkmqI,13976
 application_sdk/services/statestore.py,sha256=CQuKq4FXPS0ebDH0e0cfTTAjvsIlrA1zz1MpsWCiWnM,9562
 application_sdk/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -141,8 +141,8 @@ application_sdk/workflows/metadata_extraction/__init__.py,sha256=jHUe_ZBQ66jx8bg
 application_sdk/workflows/metadata_extraction/sql.py,sha256=BhaZavEL8H3Jvf28FGcHtZwqdsUT_EHZ4VTqiaieWek,12278
 application_sdk/workflows/query_extraction/__init__.py,sha256=n066_CX5RpJz6DIxGMkKS3eGSRg03ilaCtsqfJWQb7Q,117
 application_sdk/workflows/query_extraction/sql.py,sha256=kT_JQkLCRZ44ZpaC4QvPL6DxnRIIVh8gYHLqRbMI-hA,4826
-atlan_application_sdk-0.1.
-atlan_application_sdk-0.1.
-atlan_application_sdk-0.1.
-atlan_application_sdk-0.1.
-atlan_application_sdk-0.1.
+atlan_application_sdk-0.1.1rc39.dist-info/METADATA,sha256=D1HHgG-YRM3VS43mQGsST_RJTkkDacnCSnFszqPfSL4,5567
+atlan_application_sdk-0.1.1rc39.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+atlan_application_sdk-0.1.1rc39.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+atlan_application_sdk-0.1.1rc39.dist-info/licenses/NOTICE,sha256=A-XVVGt3KOYuuMmvSMIFkg534F1vHiCggEBp4Ez3wGk,1041
+atlan_application_sdk-0.1.1rc39.dist-info/RECORD,,
{atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/WHEEL
RENAMED
File without changes
{atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/licenses/LICENSE
RENAMED
File without changes
{atlan_application_sdk-0.1.1rc38.dist-info → atlan_application_sdk-0.1.1rc39.dist-info}/licenses/NOTICE
RENAMED
File without changes