chalkpy 2.95.7__py3-none-any.whl → 2.95.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chalk/_gen/chalk/server/v1/deploy_pb2.py +34 -34
- chalk/_gen/chalk/server/v1/deploy_pb2.pyi +10 -1
- chalk/_gen/chalk/server/v1/deployment_pb2.py +10 -10
- chalk/_gen/chalk/server/v1/deployment_pb2.pyi +6 -2
- chalk/_version.py +1 -1
- chalk/sql/_internal/integrations/redshift.py +308 -49
- chalk/utils/tracing.py +3 -3
- {chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/METADATA +6 -1
- {chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/RECORD +12 -12
- {chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/WHEEL +0 -0
- {chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/entry_points.txt +0 -0
- {chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/top_level.txt +0 -0

chalk/_gen/chalk/server/v1/deploy_pb2.py CHANGED
@@ -24,7 +24,7 @@ from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mas
 
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
-    b'\n\x1c\x63halk/server/v1/deploy.proto\x12\x0f\x63halk.server.v1\x1a\x1f\x63halk/artifacts/v1/export.proto\x1a\x19\x63halk/auth/v1/audit.proto\x1a\x1f\x63halk/auth/v1/permissions.proto\x1a!chalk/common/v1/chalk_error.proto\x1a\x1a\x63halk/graph/v1/graph.proto\x1a chalk/server/v1/deployment.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x97\x01\n\x13\x44\x65ployBranchRequest\x12\x1f\n\x0b\x62ranch_name\x18\x01 \x01(\tR\nbranchName\x12!\n\x0creset_branch\x18\x02 \x01(\x08R\x0bresetBranch\x12\x18\n\x07\x61rchive\x18\x03 \x01(\x0cR\x07\x61rchive\x12"\n\ris_hot_deploy\x18\x04 \x01(\x08R\x0bisHotDeploy"\x89\x02\n\x14\x44\x65ployBranchResponse\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12\x34\n\x05graph\x18\x02 \x01(\x0b\x32\x15.chalk.graph.v1.GraphB\x02\x18\x01H\x00R\x05graph\x88\x01\x01\x12H\n\x11\x64\x65ployment_errors\x18\x03 \x03(\x0b\x32\x1b.chalk.common.v1.ChalkErrorR\x10\x64\x65ploymentErrors\x12\x37\n\x06\x65xport\x18\x04 \x01(\x0b\x32\x1a.chalk.artifacts.v1.ExportH\x01R\x06\x65xport\x88\x01\x01\x42\x08\n\x06_graphB\t\n\x07_export"\
+    b'\n\x1c\x63halk/server/v1/deploy.proto\x12\x0f\x63halk.server.v1\x1a\x1f\x63halk/artifacts/v1/export.proto\x1a\x19\x63halk/auth/v1/audit.proto\x1a\x1f\x63halk/auth/v1/permissions.proto\x1a!chalk/common/v1/chalk_error.proto\x1a\x1a\x63halk/graph/v1/graph.proto\x1a chalk/server/v1/deployment.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x97\x01\n\x13\x44\x65ployBranchRequest\x12\x1f\n\x0b\x62ranch_name\x18\x01 \x01(\tR\nbranchName\x12!\n\x0creset_branch\x18\x02 \x01(\x08R\x0bresetBranch\x12\x18\n\x07\x61rchive\x18\x03 \x01(\x0cR\x07\x61rchive\x12"\n\ris_hot_deploy\x18\x04 \x01(\x08R\x0bisHotDeploy"\x89\x02\n\x14\x44\x65ployBranchResponse\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12\x34\n\x05graph\x18\x02 \x01(\x0b\x32\x15.chalk.graph.v1.GraphB\x02\x18\x01H\x00R\x05graph\x88\x01\x01\x12H\n\x11\x64\x65ployment_errors\x18\x03 \x03(\x0b\x32\x1b.chalk.common.v1.ChalkErrorR\x10\x64\x65ploymentErrors\x12\x37\n\x06\x65xport\x18\x04 \x01(\x0b\x32\x1a.chalk.artifacts.v1.ExportH\x01R\x06\x65xport\x88\x01\x01\x42\x08\n\x06_graphB\t\n\x07_export"\xe8\x02\n\'CreateBranchFromSourceDeploymentRequest\x12\x1f\n\x0b\x62ranch_name\x18\x01 \x01(\tR\nbranchName\x12.\n\x12source_branch_name\x18\x02 \x01(\tH\x00R\x10sourceBranchName\x12\x32\n\x14source_deployment_id\x18\x03 \x01(\tH\x00R\x12sourceDeploymentId\x12X\n\x1b\x63urrent_mainline_deployment\x18\x04 \x01(\x0b\x32\x16.google.protobuf.EmptyH\x00R\x19\x63urrentMainlineDeployment\x12\x41\n\x0eoverride_graph\x18\x05 \x01(\x0b\x32\x15.chalk.graph.v1.GraphH\x01R\roverrideGraph\x88\x01\x01\x42\x08\n\x06sourceB\x11\n\x0f_override_graph"\x91\x02\n(CreateBranchFromSourceDeploymentResponse\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12H\n\x11\x64\x65ployment_errors\x18\x02 \x03(\x0b\x32\x1b.chalk.common.v1.ChalkErrorR\x10\x64\x65ploymentErrors\x12\x37\n\x06\x65xport\x18\x03 \x01(\x0b\x32\x1a.chalk.artifacts.v1.ExportH\x00R\x06\x65xport\x88\x01\x01\x12\x32\n\x15\x62ranch_already_exists\x18\x04 \x01(\x08R\x13\x62ranchAlreadyExistsB\t\n\x07_export"t\n\x14GetDeploymentRequest\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12\x37\n\tread_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskR\x08readMask"\x98\x01\n\x15GetDeploymentResponse\x12;\n\ndeployment\x18\x01 \x01(\x0b\x32\x1b.chalk.server.v1.DeploymentR\ndeployment\x12\x37\n\x06\x65xport\x18\x02 \x01(\x0b\x32\x1a.chalk.artifacts.v1.ExportH\x00R\x06\x65xport\x88\x01\x01\x42\t\n\x07_export"\xda\x01\n\x16ListDeploymentsRequest\x12\x1b\n\x06\x63ursor\x18\x01 \x01(\tH\x00R\x06\x63ursor\x88\x01\x01\x12\x19\n\x05limit\x18\x02 \x01(\x05H\x01R\x05limit\x88\x01\x01\x12*\n\x0einclude_branch\x18\x03 \x01(\x08H\x02R\rincludeBranch\x88\x01\x01\x12$\n\x0b\x62ranch_name\x18\x04 \x01(\tH\x03R\nbranchName\x88\x01\x01\x42\t\n\x07_cursorB\x08\n\x06_limitB\x11\n\x0f_include_branchB\x0e\n\x0c_branch_name"\x80\x01\n\x17ListDeploymentsResponse\x12=\n\x0b\x64\x65ployments\x18\x01 \x03(\x0b\x32\x1b.chalk.server.v1.DeploymentR\x0b\x64\x65ployments\x12\x1b\n\x06\x63ursor\x18\x02 \x01(\tH\x00R\x06\x63ursor\x88\x01\x01\x42\t\n\x07_cursor"?\n\x18SuspendDeploymentRequest\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId"X\n\x19SuspendDeploymentResponse\x12;\n\ndeployment\x18\x01 \x01(\x0b\x32\x1b.chalk.server.v1.DeploymentR\ndeployment"v\n\x16ScaleDeploymentRequest\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12\x37\n\x06sizing\x18\x02 \x01(\x0b\x32\x1f.chalk.server.v1.InstanceSizingR\x06sizing"V\n\x17ScaleDeploymentResponse\x12;\n\ndeployment\x18\x01 \x01(\x0b\x32\x1b.chalk.server.v1.DeploymentR\ndeployment"\x89\x01\n\x14TagDeploymentRequest\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId\x12\x10\n\x03tag\x18\x02 \x01(\tR\x03tag\x12(\n\rmirror_weight\x18\x03 \x01(\x05H\x00R\x0cmirrorWeight\x88\x01\x01\x42\x10\n\x0e_mirror_weight"\xaa\x01\n\x15TagDeploymentResponse\x12;\n\ndeployment\x18\x01 \x01(\x0b\x32\x1b.chalk.server.v1.DeploymentR\ndeployment\x12\x39\n\x16untagged_deployment_id\x18\x02 \x01(\tH\x00R\x14untaggedDeploymentId\x88\x01\x01\x42\x19\n\x17_untagged_deployment_id"\x1d\n\x1bGetActiveDeploymentsRequest"]\n\x1cGetActiveDeploymentsResponse\x12=\n\x0b\x64\x65ployments\x18\x01 \x03(\x0b\x32\x1b.chalk.server.v1.DeploymentR\x0b\x64\x65ployments"A\n\x1aGetDeploymentSourceRequest\x12#\n\rdeployment_id\x18\x01 \x01(\tR\x0c\x64\x65ploymentId"<\n\x1bGetDeploymentSourceResponse\x12\x1d\n\nsigned_url\x18\x01 \x01(\tR\tsignedUrl2\xa4\x08\n\rDeployService\x12`\n\x0c\x44\x65ployBranch\x12$.chalk.server.v1.DeployBranchRequest\x1a%.chalk.server.v1.DeployBranchResponse"\x03\x80}\r\x12\x9c\x01\n CreateBranchFromSourceDeployment\x12\x38.chalk.server.v1.CreateBranchFromSourceDeploymentRequest\x1a\x39.chalk.server.v1.CreateBranchFromSourceDeploymentResponse"\x03\x80}\r\x12\x63\n\rGetDeployment\x12%.chalk.server.v1.GetDeploymentRequest\x1a&.chalk.server.v1.GetDeploymentResponse"\x03\x80}\x0b\x12i\n\x0fListDeployments\x12\'.chalk.server.v1.ListDeploymentsRequest\x1a(.chalk.server.v1.ListDeploymentsResponse"\x03\x80}\x0b\x12x\n\x14GetActiveDeployments\x12,.chalk.server.v1.GetActiveDeploymentsRequest\x1a-.chalk.server.v1.GetActiveDeploymentsResponse"\x03\x80}\x02\x12u\n\x11SuspendDeployment\x12).chalk.server.v1.SuspendDeploymentRequest\x1a*.chalk.server.v1.SuspendDeploymentResponse"\t\x80}\x0c\x8a\xd3\x0e\x02\x08\x02\x12o\n\x0fScaleDeployment\x12\'.chalk.server.v1.ScaleDeploymentRequest\x1a(.chalk.server.v1.ScaleDeploymentResponse"\t\x80}\x0c\x8a\xd3\x0e\x02\x08\x02\x12i\n\rTagDeployment\x12%.chalk.server.v1.TagDeploymentRequest\x1a&.chalk.server.v1.TagDeploymentResponse"\t\x80}\x0c\x8a\xd3\x0e\x02\x08\x02\x12u\n\x13GetDeploymentSource\x12+.chalk.server.v1.GetDeploymentSourceRequest\x1a,.chalk.server.v1.GetDeploymentSourceResponse"\x03\x80}\x0b\x42\x94\x01\n\x13\x63om.chalk.server.v1B\x0b\x44\x65ployProtoP\x01Z\x12server/v1;serverv1\xa2\x02\x03\x43SX\xaa\x02\x0f\x43halk.Server.V1\xca\x02\x0f\x43halk\\Server\\V1\xe2\x02\x1b\x43halk\\Server\\V1\\GPBMetadata\xea\x02\x11\x43halk::Server::V1b\x06proto3'
 )
 
 _globals = globals()
@@ -66,37 +66,37 @@ if _descriptor._USE_C_DESCRIPTORS == False:
     _globals["_DEPLOYBRANCHRESPONSE"]._serialized_start = 457
     _globals["_DEPLOYBRANCHRESPONSE"]._serialized_end = 722
     _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTREQUEST"]._serialized_start = 725
-    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTREQUEST"]._serialized_end =
-    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTRESPONSE"]._serialized_start =
-    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTRESPONSE"]._serialized_end =
-    _globals["_GETDEPLOYMENTREQUEST"]._serialized_start =
-    _globals["_GETDEPLOYMENTREQUEST"]._serialized_end =
-    _globals["_GETDEPLOYMENTRESPONSE"]._serialized_start =
-    _globals["_GETDEPLOYMENTRESPONSE"]._serialized_end =
-    _globals["_LISTDEPLOYMENTSREQUEST"]._serialized_start =
-    _globals["_LISTDEPLOYMENTSREQUEST"]._serialized_end =
-    _globals["_LISTDEPLOYMENTSRESPONSE"]._serialized_start =
-    _globals["_LISTDEPLOYMENTSRESPONSE"]._serialized_end =
-    _globals["_SUSPENDDEPLOYMENTREQUEST"]._serialized_start =
-    _globals["_SUSPENDDEPLOYMENTREQUEST"]._serialized_end =
-    _globals["_SUSPENDDEPLOYMENTRESPONSE"]._serialized_start =
-    _globals["_SUSPENDDEPLOYMENTRESPONSE"]._serialized_end =
-    _globals["_SCALEDEPLOYMENTREQUEST"]._serialized_start =
-    _globals["_SCALEDEPLOYMENTREQUEST"]._serialized_end =
-    _globals["_SCALEDEPLOYMENTRESPONSE"]._serialized_start =
-    _globals["_SCALEDEPLOYMENTRESPONSE"]._serialized_end =
-    _globals["_TAGDEPLOYMENTREQUEST"]._serialized_start =
-    _globals["_TAGDEPLOYMENTREQUEST"]._serialized_end =
-    _globals["_TAGDEPLOYMENTRESPONSE"]._serialized_start =
-    _globals["_TAGDEPLOYMENTRESPONSE"]._serialized_end =
-    _globals["_GETACTIVEDEPLOYMENTSREQUEST"]._serialized_start =
-    _globals["_GETACTIVEDEPLOYMENTSREQUEST"]._serialized_end =
-    _globals["_GETACTIVEDEPLOYMENTSRESPONSE"]._serialized_start =
-    _globals["_GETACTIVEDEPLOYMENTSRESPONSE"]._serialized_end =
-    _globals["_GETDEPLOYMENTSOURCEREQUEST"]._serialized_start =
-    _globals["_GETDEPLOYMENTSOURCEREQUEST"]._serialized_end =
-    _globals["_GETDEPLOYMENTSOURCERESPONSE"]._serialized_start =
-    _globals["_GETDEPLOYMENTSOURCERESPONSE"]._serialized_end =
-    _globals["_DEPLOYSERVICE"]._serialized_start =
-    _globals["_DEPLOYSERVICE"]._serialized_end =
+    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTREQUEST"]._serialized_end = 1085
+    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTRESPONSE"]._serialized_start = 1088
+    _globals["_CREATEBRANCHFROMSOURCEDEPLOYMENTRESPONSE"]._serialized_end = 1361
+    _globals["_GETDEPLOYMENTREQUEST"]._serialized_start = 1363
+    _globals["_GETDEPLOYMENTREQUEST"]._serialized_end = 1479
+    _globals["_GETDEPLOYMENTRESPONSE"]._serialized_start = 1482
+    _globals["_GETDEPLOYMENTRESPONSE"]._serialized_end = 1634
+    _globals["_LISTDEPLOYMENTSREQUEST"]._serialized_start = 1637
+    _globals["_LISTDEPLOYMENTSREQUEST"]._serialized_end = 1855
+    _globals["_LISTDEPLOYMENTSRESPONSE"]._serialized_start = 1858
+    _globals["_LISTDEPLOYMENTSRESPONSE"]._serialized_end = 1986
+    _globals["_SUSPENDDEPLOYMENTREQUEST"]._serialized_start = 1988
+    _globals["_SUSPENDDEPLOYMENTREQUEST"]._serialized_end = 2051
+    _globals["_SUSPENDDEPLOYMENTRESPONSE"]._serialized_start = 2053
+    _globals["_SUSPENDDEPLOYMENTRESPONSE"]._serialized_end = 2141
+    _globals["_SCALEDEPLOYMENTREQUEST"]._serialized_start = 2143
+    _globals["_SCALEDEPLOYMENTREQUEST"]._serialized_end = 2261
+    _globals["_SCALEDEPLOYMENTRESPONSE"]._serialized_start = 2263
+    _globals["_SCALEDEPLOYMENTRESPONSE"]._serialized_end = 2349
+    _globals["_TAGDEPLOYMENTREQUEST"]._serialized_start = 2352
+    _globals["_TAGDEPLOYMENTREQUEST"]._serialized_end = 2489
+    _globals["_TAGDEPLOYMENTRESPONSE"]._serialized_start = 2492
+    _globals["_TAGDEPLOYMENTRESPONSE"]._serialized_end = 2662
+    _globals["_GETACTIVEDEPLOYMENTSREQUEST"]._serialized_start = 2664
+    _globals["_GETACTIVEDEPLOYMENTSREQUEST"]._serialized_end = 2693
+    _globals["_GETACTIVEDEPLOYMENTSRESPONSE"]._serialized_start = 2695
+    _globals["_GETACTIVEDEPLOYMENTSRESPONSE"]._serialized_end = 2788
+    _globals["_GETDEPLOYMENTSOURCEREQUEST"]._serialized_start = 2790
+    _globals["_GETDEPLOYMENTSOURCEREQUEST"]._serialized_end = 2855
+    _globals["_GETDEPLOYMENTSOURCERESPONSE"]._serialized_start = 2857
+    _globals["_GETDEPLOYMENTSOURCERESPONSE"]._serialized_end = 2917
+    _globals["_DEPLOYSERVICE"]._serialized_start = 2920
+    _globals["_DEPLOYSERVICE"]._serialized_end = 3980
     # @@protoc_insertion_point(module_scope)

chalk/_gen/chalk/server/v1/deploy_pb2.pyi CHANGED
@@ -56,21 +56,30 @@ class DeployBranchResponse(_message.Message):
     ) -> None: ...
 
 class CreateBranchFromSourceDeploymentRequest(_message.Message):
-    __slots__ = (
+    __slots__ = (
+        "branch_name",
+        "source_branch_name",
+        "source_deployment_id",
+        "current_mainline_deployment",
+        "override_graph",
+    )
     BRANCH_NAME_FIELD_NUMBER: _ClassVar[int]
     SOURCE_BRANCH_NAME_FIELD_NUMBER: _ClassVar[int]
     SOURCE_DEPLOYMENT_ID_FIELD_NUMBER: _ClassVar[int]
     CURRENT_MAINLINE_DEPLOYMENT_FIELD_NUMBER: _ClassVar[int]
+    OVERRIDE_GRAPH_FIELD_NUMBER: _ClassVar[int]
     branch_name: str
     source_branch_name: str
     source_deployment_id: str
     current_mainline_deployment: _empty_pb2.Empty
+    override_graph: _graph_pb2.Graph
     def __init__(
         self,
         branch_name: _Optional[str] = ...,
         source_branch_name: _Optional[str] = ...,
         source_deployment_id: _Optional[str] = ...,
         current_mainline_deployment: _Optional[_Union[_empty_pb2.Empty, _Mapping]] = ...,
+        override_graph: _Optional[_Union[_graph_pb2.Graph, _Mapping]] = ...,
     ) -> None: ...
 
 class CreateBranchFromSourceDeploymentResponse(_message.Message):
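
For reference, the added override_graph field is an optional message field, so it can be set like any other proto field. A minimal sketch, assuming chalkpy >= 2.95.9 is installed; the deploy_pb2 path matches the RECORD listing below, while the graph_pb2 path is an assumption based on the same generated-module layout:

    from chalk._gen.chalk.graph.v1.graph_pb2 import Graph  # assumed module path
    from chalk._gen.chalk.server.v1.deploy_pb2 import CreateBranchFromSourceDeploymentRequest

    # source_branch_name is one member of the request's "source" oneof;
    # override_graph is the optional field introduced in this release.
    req = CreateBranchFromSourceDeploymentRequest(
        branch_name="my-branch",
        source_branch_name="main",
        override_graph=Graph(),
    )
    print(req.HasField("override_graph"))  # True once the field is set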

chalk/_gen/chalk/server/v1/deployment_pb2.py CHANGED
@@ -18,7 +18,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
 
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
-    b'\n chalk/server/v1/deployment.proto\x12\x0f\x63halk.server.v1\x1a!chalk/server/v1/environment.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x88\x01\n\x0eInstanceSizing\x12(\n\rmin_instances\x18\x01 \x01(\rH\x00R\x0cminInstances\x88\x01\x01\x12(\n\rmax_instances\x18\x02 \x01(\rH\x01R\x0cmaxInstances\x88\x01\x01\x42\x10\n\x0e_min_instancesB\x10\n\x0e_max_instances"\xa3\x01\n\x0fSourceImageSpec\x12"\n\x0crequirements\x18\x01 \x01(\tR\x0crequirements\x12+\n\x11\x64\x65pendencies_hash\x18\x02 \x01(\tR\x10\x64\x65pendenciesHash\x12\x18\n\x07runtime\x18\x03 \x01(\tR\x07runtime\x12%\n\x0epython_version\x18\x04 \x01(\tR\rpythonVersion"\
+    b'\n chalk/server/v1/deployment.proto\x12\x0f\x63halk.server.v1\x1a!chalk/server/v1/environment.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x88\x01\n\x0eInstanceSizing\x12(\n\rmin_instances\x18\x01 \x01(\rH\x00R\x0cminInstances\x88\x01\x01\x12(\n\rmax_instances\x18\x02 \x01(\rH\x01R\x0cmaxInstances\x88\x01\x01\x42\x10\n\x0e_min_instancesB\x10\n\x0e_max_instances"\xa3\x01\n\x0fSourceImageSpec\x12"\n\x0crequirements\x18\x01 \x01(\tR\x0crequirements\x12+\n\x11\x64\x65pendencies_hash\x18\x02 \x01(\tR\x10\x64\x65pendenciesHash\x12\x18\n\x07runtime\x18\x03 \x01(\tR\x07runtime\x12%\n\x0epython_version\x18\x04 \x01(\tR\rpythonVersion"\xed\x01\n\x10SourceImageSpecs\x12\x42\n\x05specs\x18\x01 \x03(\x0b\x32,.chalk.server.v1.SourceImageSpecs.SpecsEntryR\x05specs\x12\x39\n\x19uses_uploaded_proto_graph\x18\x02 \x01(\x08R\x16usesUploadedProtoGraph\x1aZ\n\nSpecsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32 .chalk.server.v1.SourceImageSpecR\x05value:\x02\x38\x01"\xc2\x10\n\nDeployment\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12%\n\x0e\x65nvironment_id\x18\x02 \x01(\tR\renvironmentId\x12\x39\n\x06status\x18\x03 \x01(\x0e\x32!.chalk.server.v1.DeploymentStatusR\x06status\x12\'\n\x0f\x64\x65ployment_tags\x18\x04 \x03(\tR\x0e\x64\x65ploymentTags\x12$\n\x0e\x63loud_build_id\x18\x05 \x01(\tR\x0c\x63loudBuildId\x12!\n\x0ctriggered_by\x18\x06 \x01(\tR\x0btriggeredBy\x12\x38\n\x15requirements_filepath\x18\x07 \x01(\tH\x00R\x14requirementsFilepath\x88\x01\x01\x12\x34\n\x13\x64ockerfile_filepath\x18\x08 \x01(\tH\x01R\x12\x64ockerfileFilepath\x88\x01\x01\x12\x1d\n\x07runtime\x18\t \x01(\tH\x02R\x07runtime\x88\x01\x01\x12\'\n\x0f\x63halkpy_version\x18\n \x01(\tR\x0e\x63halkpyVersion\x12.\n\x13raw_dependency_hash\x18\x0b \x01(\tR\x11rawDependencyHash\x12\x37\n\x15\x66inal_dependency_hash\x18\x0c \x01(\tH\x03R\x13\x66inalDependencyHash\x88\x01\x01\x12\x37\n\x15is_preview_deployment\x18\r \x01(\x08H\x04R\x13isPreviewDeployment\x88\x01\x01\x12\x39\n\ncreated_at\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x39\n\nupdated_at\x18\x0f \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tupdatedAt\x12\x1d\n\ngit_commit\x18\x10 \x01(\tR\tgitCommit\x12\x15\n\x06git_pr\x18\x11 \x01(\tR\x05gitPr\x12\x1d\n\ngit_branch\x18\x12 \x01(\tR\tgitBranch\x12(\n\x10git_author_email\x18\x13 \x01(\tR\x0egitAuthorEmail\x12\x16\n\x06\x62ranch\x18\x14 \x01(\tR\x06\x62ranch\x12)\n\x10project_settings\x18\x15 \x01(\tR\x0fprojectSettings\x12\x32\n\x12requirements_files\x18\x16 \x01(\tH\x05R\x11requirementsFiles\x88\x01\x01\x12\x17\n\x07git_tag\x18\x17 \x01(\tR\x06gitTag\x12$\n\x0e\x62\x61se_image_sha\x18\x18 \x01(\tR\x0c\x62\x61seImageSha\x12\x46\n\x11status_changed_at\x18\x19 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0fstatusChangedAt\x12;\n\x17pinned_platform_version\x18\x1a \x01(\tH\x06R\x15pinnedPlatformVersion\x88\x01\x01\x12\x39\n\x16preview_deployment_tag\x18\x1b \x01(\tH\x07R\x14previewDeploymentTag\x88\x01\x01\x12X\n\x0eprofiling_mode\x18\x1c \x01(\x0e\x32(.chalk.server.v1.DeploymentProfilingModeB\x02\x18\x01H\x08R\rprofilingMode\x88\x01\x01\x12\x31\n\x12source_image_specs\x18\x1d \x01(\x0cH\tR\x10sourceImageSpecs\x88\x01\x01\x12\x39\n\x19uses_uploaded_proto_graph\x18\x1e \x01(\x08R\x16usesUploadedProtoGraph\x12Q\n\rbuild_profile\x18\x1f \x01(\x0e\x32\'.chalk.server.v1.DeploymentBuildProfileH\nR\x0c\x62uildProfile\x88\x01\x01\x12\x36\n\x15\x63ustomer_cicd_job_url\x18 \x01(\tH\x0bR\x12\x63ustomerCicdJobUrl\x88\x01\x01\x12\x30\n\x11\x63ustomer_metadata\x18! \x01(\tH\x0cR\x10\x63ustomerMetadata\x88\x01\x01\x12-\n\x10\x63ustomer_vcs_url\x18" \x01(\tH\rR\x0e\x63ustomerVcsUrl\x88\x01\x01\x12\x34\n\x13\x64isplay_description\x18# \x01(\tH\x0eR\x12\x64isplayDescription\x88\x01\x01\x12\x31\n\x12git_commit_message\x18$ \x01(\tH\x0fR\x10gitCommitMessage\x88\x01\x01\x42\x18\n\x16_requirements_filepathB\x16\n\x14_dockerfile_filepathB\n\n\x08_runtimeB\x18\n\x16_final_dependency_hashB\x18\n\x16_is_preview_deploymentB\x15\n\x13_requirements_filesB\x1a\n\x18_pinned_platform_versionB\x19\n\x17_preview_deployment_tagB\x11\n\x0f_profiling_modeB\x15\n\x13_source_image_specsB\x10\n\x0e_build_profileB\x18\n\x16_customer_cicd_job_urlB\x14\n\x12_customer_metadataB\x13\n\x11_customer_vcs_urlB\x16\n\x14_display_descriptionB\x15\n\x13_git_commit_message*\xde\x03\n\x10\x44\x65ploymentStatus\x12!\n\x1d\x44\x45PLOYMENT_STATUS_UNSPECIFIED\x10\x00\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_UNKNOWN\x10\x01\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_PENDING\x10\x02\x12\x1c\n\x18\x44\x45PLOYMENT_STATUS_QUEUED\x10\x03\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_WORKING\x10\x04\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_SUCCESS\x10\x05\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_FAILURE\x10\x06\x12$\n DEPLOYMENT_STATUS_INTERNAL_ERROR\x10\x07\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_TIMEOUT\x10\x08\x12\x1f\n\x1b\x44\x45PLOYMENT_STATUS_CANCELLED\x10\t\x12\x1d\n\x19\x44\x45PLOYMENT_STATUS_EXPIRED\x10\n\x12!\n\x1d\x44\x45PLOYMENT_STATUS_BOOT_ERRORS\x10\x0b\x12%\n!DEPLOYMENT_STATUS_AWAITING_SOURCE\x10\x0c\x12\x1f\n\x1b\x44\x45PLOYMENT_STATUS_DEPLOYING\x10\r*\x8e\x01\n\x17\x44\x65ploymentProfilingMode\x12)\n%DEPLOYMENT_PROFILING_MODE_UNSPECIFIED\x10\x00\x12"\n\x1e\x44\x45PLOYMENT_PROFILING_MODE_NONE\x10\x01\x12 \n\x1c\x44\x45PLOYMENT_PROFILING_MODE_O2\x10\x02\x1a\x02\x18\x01\x42\x98\x01\n\x13\x63om.chalk.server.v1B\x0f\x44\x65ploymentProtoP\x01Z\x12server/v1;serverv1\xa2\x02\x03\x43SX\xaa\x02\x0f\x43halk.Server.V1\xca\x02\x0f\x43halk\\Server\\V1\xe2\x02\x1b\x43halk\\Server\\V1\\GPBMetadata\xea\x02\x11\x43halk::Server::V1b\x06proto3'
 )
 
 _globals = globals()
@@ -35,18 +35,18 @@ if _descriptor._USE_C_DESCRIPTORS == False:
     _globals["_SOURCEIMAGESPECS_SPECSENTRY"]._serialized_options = b"8\001"
     _globals["_DEPLOYMENT"].fields_by_name["profiling_mode"]._options = None
     _globals["_DEPLOYMENT"].fields_by_name["profiling_mode"]._serialized_options = b"\030\001"
-    _globals["_DEPLOYMENTSTATUS"]._serialized_start =
-    _globals["_DEPLOYMENTSTATUS"]._serialized_end =
-    _globals["_DEPLOYMENTPROFILINGMODE"]._serialized_start =
-    _globals["_DEPLOYMENTPROFILINGMODE"]._serialized_end =
+    _globals["_DEPLOYMENTSTATUS"]._serialized_start = 2784
+    _globals["_DEPLOYMENTSTATUS"]._serialized_end = 3262
+    _globals["_DEPLOYMENTPROFILINGMODE"]._serialized_start = 3265
+    _globals["_DEPLOYMENTPROFILINGMODE"]._serialized_end = 3407
     _globals["_INSTANCESIZING"]._serialized_start = 122
     _globals["_INSTANCESIZING"]._serialized_end = 258
    _globals["_SOURCEIMAGESPEC"]._serialized_start = 261
     _globals["_SOURCEIMAGESPEC"]._serialized_end = 424
     _globals["_SOURCEIMAGESPECS"]._serialized_start = 427
-    _globals["_SOURCEIMAGESPECS"]._serialized_end =
-    _globals["_SOURCEIMAGESPECS_SPECSENTRY"]._serialized_start =
-    _globals["_SOURCEIMAGESPECS_SPECSENTRY"]._serialized_end =
-    _globals["_DEPLOYMENT"]._serialized_start =
-    _globals["_DEPLOYMENT"]._serialized_end =
+    _globals["_SOURCEIMAGESPECS"]._serialized_end = 664
+    _globals["_SOURCEIMAGESPECS_SPECSENTRY"]._serialized_start = 574
+    _globals["_SOURCEIMAGESPECS_SPECSENTRY"]._serialized_end = 664
+    _globals["_DEPLOYMENT"]._serialized_start = 667
+    _globals["_DEPLOYMENT"]._serialized_end = 2781
     # @@protoc_insertion_point(module_scope)

chalk/_gen/chalk/server/v1/deployment_pb2.pyi CHANGED
@@ -82,7 +82,7 @@ class SourceImageSpec(_message.Message):
     ) -> None: ...
 
 class SourceImageSpecs(_message.Message):
-    __slots__ = ("specs",)
+    __slots__ = ("specs", "uses_uploaded_proto_graph")
     class SpecsEntry(_message.Message):
         __slots__ = ("key", "value")
         KEY_FIELD_NUMBER: _ClassVar[int]
@@ -94,8 +94,12 @@ class SourceImageSpecs(_message.Message):
         ) -> None: ...
 
     SPECS_FIELD_NUMBER: _ClassVar[int]
+    USES_UPLOADED_PROTO_GRAPH_FIELD_NUMBER: _ClassVar[int]
     specs: _containers.MessageMap[str, SourceImageSpec]
-    def __init__(self, specs: _Optional[_Mapping[str, SourceImageSpec]] = ...) -> None: ...
+    uses_uploaded_proto_graph: bool
+    def __init__(
+        self, specs: _Optional[_Mapping[str, SourceImageSpec]] = ..., uses_uploaded_proto_graph: bool = ...
+    ) -> None: ...
 
 class Deployment(_message.Message):
     __slots__ = (
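
Likewise, the new uses_uploaded_proto_graph flag on SourceImageSpecs is a plain bool next to the existing specs map. A hedged construction sketch (module path taken from the RECORD listing below; the "default" key is illustrative only):

    from chalk._gen.chalk.server.v1.deployment_pb2 import SourceImageSpec, SourceImageSpecs

    specs = SourceImageSpecs(
        specs={"default": SourceImageSpec(runtime="python", python_version="3.11")},
        uses_uploaded_proto_graph=True,  # field added in 2.95.9
    )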

chalk/_version.py CHANGED
@@ -1 +1 @@
-__version__ = "2.95.7"
+__version__ = "2.95.9"

chalk/sql/_internal/integrations/redshift.py CHANGED
@@ -4,11 +4,26 @@ import contextlib
 import io
 import logging
 import os
+import queue
 import threading
 import typing
 import uuid
-from concurrent.futures import
-from typing import
+from concurrent.futures import ThreadPoolExecutor
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    NewType,
+    Optional,
+    Sequence,
+    Type,
+    Union,
+    cast,
+)
 
 import pyarrow as pa
 import pyarrow.parquet as pq
@@ -27,9 +42,10 @@ from chalk.sql._internal.sql_source import (
 from chalk.sql.finalized_query import FinalizedChalkQuery
 from chalk.utils.df_utils import is_binary_like, read_parquet
 from chalk.utils.environment_parsing import env_var_bool
-from chalk.utils.log_with_context import get_logger
+from chalk.utils.log_with_context import LABELS_KEY, get_logger, get_logging_context
 from chalk.utils.missing_dependency import missing_dependency_exception
-from chalk.utils.threading import DEFAULT_IO_EXECUTOR
+from chalk.utils.threading import DEFAULT_IO_EXECUTOR, MultiSemaphore
+from chalk.utils.tracing import safe_incr, safe_set_gauge
 
 if TYPE_CHECKING:
     from mypy_boto3_s3 import S3Client
@@ -40,6 +56,22 @@ if TYPE_CHECKING:
 _logger = get_logger(__name__)
 _public_logger = chalk.logging.chalk_logger
 
+_WorkerId = NewType("_WorkerId", int)
+
+
+def _get_resolver_tags() -> list[str] | None:
+    """Extract resolver_fqn from log context and return as tags list."""
+    try:
+        log_ctx = get_logging_context()
+        labels = log_ctx.get(LABELS_KEY, {})
+        resolver_fqn = labels.get("resolver_fqn")
+        if resolver_fqn:
+            return [f"resolver_fqn:{resolver_fqn}"]
+    except Exception:
+        # Don't fail if we can't get the resolver_fqn
+        pass
+    return None
+
 
 def get_supported_redshift_unload_types() -> List[Type["TypeEngine"]]:
     """
@@ -287,36 +319,235 @@ class RedshiftSourceImpl(BaseSQLSource):
             unload_destination=unload_destination,
             columns_to_features=columns_to_features,
             yield_empty_batches=query_execution_parameters.yield_empty_batches,
+            max_prefetch_size_bytes=query_execution_parameters.max_prefetch_size_bytes,
+            num_client_prefetch_threads=query_execution_parameters.num_client_prefetch_threads,
         )
 
+    def _download_worker(
+        self,
+        file_handles: queue.Queue[str],
+        sem: MultiSemaphore | None,
+        pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId],
+        worker_idx: _WorkerId,
+        columns_to_features: Callable[[Sequence[str]], Mapping[str, Feature]],
+    ):
+        """Worker thread that downloads files from S3 with memory control."""
+        assert self._s3_bucket is not None
+        try:
+            while True:
+                try:
+                    filename = file_handles.get_nowait()
+                except queue.Empty:
+                    break
+
+                # Estimate file size from S3 metadata if possible
+                weight: int | None = None
+                try:
+                    with _boto_lock_ctx():
+                        head_response = self._s3_client.head_object(Bucket=self._s3_bucket, Key=filename)
+                    # Boto3 types indicate ContentLength is always int, but cast to Optional for defensive programming
+                    content_length = cast(int | None, head_response.get("ContentLength"))
+                    if content_length is not None:
+                        # Estimate uncompressed size (parquet typically compresses 3-5x)
+                        weight = content_length * 4
+                except Exception as e:
+                    _logger.warning(f"Failed to get file size for {filename}, will estimate after download", exc_info=e)
+
+                # Acquire semaphore before downloading
+                if sem and weight is not None:
+                    if weight > sem.initial_value:
+                        # If the file is larger than the maximum size, truncate to max
+                        weight = sem.initial_value
+                    if weight > 0:
+                        if not sem.acquire(weight):
+                            raise RuntimeError("Failed to acquire semaphore for redshift download")
+                        safe_set_gauge(
+                            "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                        )
+
+                # Download and convert to table
+                tbl = _download_file_to_table(self._s3_client, self._s3_bucket, filename, columns_to_features)
+
+                # If we didn't have a weight estimate, use actual table size
+                if weight is None:
+                    weight = tbl.nbytes
+                    if sem and weight is not None and weight > 0:
+                        if not sem.acquire(weight):
+                            raise RuntimeError("Failed to acquire semaphore for redshift download")
+                        safe_set_gauge(
+                            "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                        )
+
+                # Ensure weight is always an int
+                final_weight: int = weight if weight is not None else 0
+                pa_table_queue.put((tbl, final_weight))
+        finally:
+            # Signal that this worker is done
+            pa_table_queue.put(worker_idx)
+
     def _download_objs_async(
         self,
         unload_destination: str,
         columns_to_features: Callable[[Sequence[str]], Mapping[str, Feature]],
         yield_empty_batches: bool,
+        max_prefetch_size_bytes: int,
+        num_client_prefetch_threads: int,
     ) -> Iterable[pa.RecordBatch]:
+        """Download objects from S3 with byte-bounded memory control."""
         assert self._s3_bucket is not None
         filenames = list(_list_files(self._s3_client, self._s3_bucket, unload_destination))
-
+        _public_logger.info(
+            f"Downloading parquet data partitioned into {len(filenames)} files "
+            + f"(max_prefetch_bytes={max_prefetch_size_bytes}, threads={num_client_prefetch_threads})..."
+        )
+
+        if len(filenames) == 0:
+            if yield_empty_batches:
+                # Need to get schema somehow - return empty batch
+                # This matches the original behavior
+                schema: pa.Schema | None = None
+                if schema is not None:
+                    yield pa.RecordBatch.from_pydict({k: [] for k in schema.names}, schema)
+            return
+
+        # Set up queues and semaphore for memory control
+        file_handles: queue.Queue[str] = queue.Queue()
         for filename in filenames:
-
-
-
+            file_handles.put(filename)
+
+        max_weight = max_prefetch_size_bytes if max_prefetch_size_bytes > 0 else None
+        pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId] = queue.Queue()
+        sem = None if max_weight is None else MultiSemaphore(max_weight)
+
+        # Start download workers
+        futures = {
+            _WorkerId(i): self._executor.submit(
+                self._download_worker,
+                file_handles,
+                sem,
+                pa_table_queue,
+                _WorkerId(i),
+                columns_to_features,
             )
-
-
+            for i in range(num_client_prefetch_threads)
+        }
+
         schema: pa.Schema | None = None
         yielded = False
-
-
-
-
-
-
+
+        # Process downloaded tables as they become available
+        while len(futures) > 0:
+            x = pa_table_queue.get()
+            if isinstance(x, int):
+                # Worker finished - remove from futures and check for errors
+                futures.pop(x).result()
+                continue
+
+            tbl, weight = x
+            if schema is None:
                 schema = tbl.schema
-
+
+            try:
+                if len(tbl) > 0:
+                    yield tbl.combine_chunks().to_batches()[0]
+                    safe_incr("chalk.redshift.downloaded_bytes", tbl.nbytes or 0, tags=_get_resolver_tags())
+                    safe_incr("chalk.redshift.downloaded_rows", tbl.num_rows or 0, tags=_get_resolver_tags())
+                    yielded = True
+            finally:
+                # Release semaphore after yielding
+                if sem is not None and weight > 0:
+                    sem.release(weight)
+                    safe_set_gauge(
+                        "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                    )
+
+        if not yielded and yield_empty_batches and schema is not None:
             yield pa.RecordBatch.from_pydict({k: [] for k in schema.names}, schema)
 
+    def _download_worker_raw(
+        self,
+        file_handles: queue.Queue[str],
+        sem: MultiSemaphore | None,
+        pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId],
+        worker_idx: _WorkerId,
+        expected_output_schema: pa.Schema,
+    ):
+        """Worker thread that downloads files from S3 with memory control for raw execution."""
+        import pyarrow.compute as pc
+
+        assert self._s3_bucket is not None
+        try:
+            while True:
+                try:
+                    filename = file_handles.get_nowait()
+                except queue.Empty:
+                    break
+
+                # Estimate file size from S3 metadata if possible
+                weight: int | None = None
+                try:
+                    with _boto_lock_ctx():
+                        head_response = self._s3_client.head_object(Bucket=self._s3_bucket, Key=filename)
+                    # Boto3 types indicate ContentLength is always int, but cast to Optional for defensive programming
+                    content_length = cast(int | None, head_response.get("ContentLength"))
+                    if content_length is not None:
+                        # Estimate uncompressed size (parquet typically compresses 3-5x)
+                        weight = content_length * 4
+                except Exception as e:
+                    _logger.warning(f"Failed to get file size for {filename}, will estimate after download", exc_info=e)
+
+                # Acquire semaphore before downloading
+                if sem and weight is not None:
+                    if weight > sem.initial_value:
+                        weight = sem.initial_value
+                    if weight > 0:
+                        if not sem.acquire(weight):
+                            raise RuntimeError("Failed to acquire semaphore for redshift download")
+                        safe_set_gauge(
+                            "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                        )
+
+                # Download parquet file
+                buffer = io.BytesIO()
+                with _boto_lock_ctx():
+                    self._s3_client.download_fileobj(Bucket=self._s3_bucket, Key=filename, Fileobj=buffer)
+                buffer.seek(0)
+                if env_var_bool("CHALK_REDSHIFT_POLARS_PARQUET"):
+                    tbl = read_parquet(buffer, use_pyarrow=False).to_arrow()
+                else:
+                    tbl = pq.read_table(buffer)
+
+                # If we didn't have a weight estimate, use actual table size
+                if weight is None:
+                    weight = tbl.nbytes
+                    if sem and weight is not None and weight > 0:
+                        if not sem.acquire(weight):
+                            raise RuntimeError("Failed to acquire semaphore for redshift download")
+                        safe_set_gauge(
+                            "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                        )
+
+                # Map columns to expected schema
+                arrays: list[pa.Array] = []
+                for field in expected_output_schema:
+                    if field.name in tbl.column_names:
+                        col = tbl.column(field.name)
+                        # Cast to expected type if needed
+                        if col.type != field.type:
+                            col = pc.cast(col, field.type)
+                        arrays.append(col)
+                    else:
+                        # Column not found, create null array
+                        arrays.append(pa.nulls(len(tbl), field.type))
+
+                mapped_tbl = pa.Table.from_arrays(arrays, schema=expected_output_schema)
+                # Ensure weight is always an int
+                final_weight: int = weight if weight is not None else 0
+                pa_table_queue.put((mapped_tbl, final_weight))
+        finally:
+            # Signal that this worker is done
+            pa_table_queue.put(worker_idx)
+
     def execute_query_efficient_raw(
         self,
         finalized_query: FinalizedChalkQuery,
@@ -325,8 +556,6 @@ class RedshiftSourceImpl(BaseSQLSource):
         query_execution_parameters: QueryExecutionParameters,
     ) -> Iterable[pa.RecordBatch]:
         """Execute query efficiently for Redshift and return raw PyArrow RecordBatches."""
-        import pyarrow.compute as pc
-
         temp_query_id = id(finalized_query)
         _public_logger.debug(f"Executing RedShift query [{temp_query_id}]...")
 
@@ -387,47 +616,77 @@ class RedshiftSourceImpl(BaseSQLSource):
         except Exception:
             _logger.warning(f"Failed to drop temp table '{temp_table_name}'", exc_info=True)
 
-        # Download files
+        # Download files with memory control
         assert unload_destination is not None
         assert self._s3_bucket is not None
         filenames = list(_list_files(self._s3_client, self._s3_bucket, unload_destination))
+        _public_logger.info(
+            f"Downloading {len(filenames)} parquet files from Redshift UNLOAD "
+            + f"(max_prefetch_bytes={query_execution_parameters.max_prefetch_size_bytes}, "
+            + f"threads={query_execution_parameters.num_client_prefetch_threads})..."
+        )
 
-
+        if len(filenames) == 0:
+            if query_execution_parameters.yield_empty_batches:
+                arrays = [pa.nulls(0, field.type) for field in expected_output_schema]
+                yield pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
+            return
+
+        # Set up queues and semaphore for memory control
+        file_handles: queue.Queue[str] = queue.Queue()
         for filename in filenames:
-
-            with _boto_lock_ctx():
-                self._s3_client.download_fileobj(Bucket=self._s3_bucket, Key=filename, Fileobj=buffer)
-            buffer.seek(0)
-            if env_var_bool("CHALK_REDSHIFT_POLARS_PARQUET"):
-                tbl = read_parquet(buffer, use_pyarrow=False).to_arrow()
-            else:
-                tbl = pq.read_table(buffer)
+            file_handles.put(filename)
 
-
-
+        max_weight = (
+            query_execution_parameters.max_prefetch_size_bytes
+            if query_execution_parameters.max_prefetch_size_bytes > 0
+            else None
+        )
+        pa_table_queue: queue.Queue[tuple[pa.Table, int] | _WorkerId] = queue.Queue()
+        sem = None if max_weight is None else MultiSemaphore(max_weight)
+
+        # Start download workers
+        futures = {
+            _WorkerId(i): self._executor.submit(
+                self._download_worker_raw,
+                file_handles,
+                sem,
+                pa_table_queue,
+                _WorkerId(i),
+                expected_output_schema,
+            )
+            for i in range(query_execution_parameters.num_client_prefetch_threads)
+        }
 
-
-        arrays: list[pa.Array] = []
-        for field in expected_output_schema:
-            if field.name in tbl.column_names:
-                col = tbl.column(field.name)
-                # Cast to expected type if needed
-                if col.type != field.type:
-                    col = pc.cast(col, field.type)
-                arrays.append(col)
-            else:
-                # Column not found, create null array
-                arrays.append(pa.nulls(len(tbl), field.type))
+        yielded = False
 
-
-
-
+        # Process downloaded tables as they become available
+        while len(futures) > 0:
+            x = pa_table_queue.get()
+            if isinstance(x, int):
+                # Worker finished - remove from futures and check for errors
+                futures.pop(x).result()
+                continue
+
+            tbl, weight = x
+
+            try:
+                if len(tbl) > 0:
+                    yield tbl.to_batches()[0]
+                    safe_incr("chalk.redshift.downloaded_bytes", tbl.nbytes or 0, tags=_get_resolver_tags())
+                    safe_incr("chalk.redshift.downloaded_rows", tbl.num_rows or 0, tags=_get_resolver_tags())
+                    yielded = True
+            finally:
+                # Release semaphore after yielding
+                if sem is not None and weight > 0:
+                    sem.release(weight)
+                    safe_set_gauge(
+                        "chalk.redshift.remaining_prefetch_bytes", sem.get_value(), tags=_get_resolver_tags()
+                    )
 
         if not yielded and query_execution_parameters.yield_empty_batches:
-            # Create empty batch with expected schema
             arrays = [pa.nulls(0, field.type) for field in expected_output_schema]
-
-            yield batch
+            yield pa.RecordBatch.from_arrays(arrays, schema=expected_output_schema)
 
     @classmethod
     def register_sqlalchemy_compiler_overrides(cls):
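
Both new download paths follow the same pattern: workers acquire an estimated byte "weight" from a shared semaphore before fetching, the consumer releases that weight only after the batch has been yielded, and each worker posts its own id as a sentinel so the consumer knows when all futures have drained. A self-contained toy version of the pattern (not the chalk implementation; MultiSemaphore is approximated with threading.Semaphore at one unit of weight per item):

    import queue
    import threading
    from typing import Iterator

    def _worker(files: "queue.Queue[str]", sem: threading.Semaphore,
                out: "queue.Queue[tuple[str, int] | int]", wid: int) -> None:
        try:
            while True:
                try:
                    name = files.get_nowait()
                except queue.Empty:
                    break
                sem.acquire()        # block until the consumer frees capacity
                out.put((name, 1))   # the "downloaded" item plus its weight
        finally:
            out.put(wid)             # sentinel: this worker is done

    def bounded_prefetch(filenames: list[str], threads: int = 4, max_inflight: int = 8) -> Iterator[str]:
        files: "queue.Queue[str]" = queue.Queue()
        for f in filenames:
            files.put(f)
        sem = threading.Semaphore(max_inflight)
        out: "queue.Queue[tuple[str, int] | int]" = queue.Queue()
        workers = {i: threading.Thread(target=_worker, args=(files, sem, out, i)) for i in range(threads)}
        for t in workers.values():
            t.start()
        while workers:
            item = out.get()
            if isinstance(item, int):      # a worker finished
                workers.pop(item).join()
                continue
            name, weight = item
            yield name                     # consume the item
            sem.release(weight)            # free capacity only after consumption

The key design point, visible in the diff above as well: capacity is released by the consumer after the yield, not by the producer after the download, so total buffered-but-unconsumed data stays bounded.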

chalk/utils/tracing.py CHANGED
@@ -222,8 +222,8 @@ else:
 if can_use_datadog_statsd:
     from datadog.dogstatsd.base import statsd
 
-    def safe_set_gauge(gauge: str, value: int | float):
-        statsd.gauge(gauge, value)
+    def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None):
+        statsd.gauge(gauge, value, tags=tags)
 
     def safe_incr(counter: str, value: int | float, tags: list[str] | None = None):
         statsd.increment(counter, value, tags)
@@ -233,7 +233,7 @@ if can_use_datadog_statsd:
 
 else:
 
-    def safe_set_gauge(gauge: str, value: int | float):
+    def safe_set_gauge(gauge: str, value: int | float, tags: list[str] | None = None):
         pass
 
     def safe_incr(counter: str, value: int | float, tags: list[str] | None = None):
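
With this change, both safe_set_gauge variants accept the same optional tags argument that safe_incr already took, so call sites can pass Datadog-style "key:value" tag strings unconditionally. A usage sketch matching the redshift call sites above (the resolver name is hypothetical):

    from chalk.utils.tracing import safe_incr, safe_set_gauge

    tags = ["resolver_fqn:my.resolvers.get_user"]
    safe_set_gauge("chalk.redshift.remaining_prefetch_bytes", 1_000_000, tags=tags)
    safe_incr("chalk.redshift.downloaded_rows", 500, tags=tags)
    # When the datadog statsd client is unavailable, both calls are no-ops.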

{chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chalkpy
-Version: 2.95.7
+Version: 2.95.9
 Summary: Python SDK for Chalk
 Author: Chalk AI, Inc.
 Project-URL: Homepage, https://chalk.ai
@@ -279,6 +279,9 @@ Requires-Dist: python-json-logger<4.0.0,>=3.0.0; extra == "dynamodb"
 Requires-Dist: s3fs; extra == "dynamodb"
 Requires-Dist: sqlalchemy[asyncio]<2,>=1.4.26; extra == "dynamodb"
 Requires-Dist: sqlglot<21.2.0,>=19.0.0; extra == "dynamodb"
+Provides-Extra: tracing
+Requires-Dist: opentelemetry-api>=1.29.0; extra == "tracing"
+Requires-Dist: opentelemetry-sdk>=1.29.0; extra == "tracing"
 Provides-Extra: all
 Requires-Dist: PyAthena>=3.0.0; extra == "all"
 Requires-Dist: adlfs; extra == "all"
@@ -303,6 +306,8 @@ Requires-Dist: google-cloud-bigquery<4,>=3.25.0; extra == "all"
 Requires-Dist: google-cloud-storage; extra == "all"
 Requires-Dist: httpx<0.28.0; extra == "all"
 Requires-Dist: openai<1.53,>=1.3.2; extra == "all"
+Requires-Dist: opentelemetry-api>=1.29.0; extra == "all"
+Requires-Dist: opentelemetry-sdk>=1.29.0; extra == "all"
 Requires-Dist: packaging; extra == "all"
 Requires-Dist: polars[timezone]!=1.0,!=1.1,!=1.10,!=1.11,!=1.12,!=1.13,!=1.14,!=1.15,!=1.16,!=1.17,!=1.18,!=1.19,!=1.2,!=1.20,!=1.21,!=1.22,!=1.23,!=1.24,!=1.25,!=1.26,!=1.27,!=1.28,!=1.29,!=1.3,!=1.30,!=1.31,!=1.32,!=1.4,!=1.5,!=1.6,!=1.7,!=1.8,!=1.9,<1.33.1,>=0.17.2; extra == "all"
 Requires-Dist: psycopg2<3,>=2.9.4; extra == "all"

{chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/RECORD CHANGED
@@ -1,5 +1,5 @@
 chalk/__init__.py,sha256=vKsx9-cl5kImlVWGHVRYO6bweBm79NAzGs3l36u71wM,2657
-chalk/_version.py,sha256=
+chalk/_version.py,sha256=6_obn3nZ0gJ-Hi9EsdcLyZmK96lqygLULpjsd8h1yUE,23
 chalk/cli.py,sha256=ckqqfOI-A2mT23-rnZzDMmblYj-2x1VBX8ebHlIEn9A,5873
 chalk/importer.py,sha256=m4lMn1lSYj_euDq8CS7LYTBnek9JOcjGJf9-82dJHbA,64441
 chalk/prompts.py,sha256=2H9UomLAamdfRTNUdKs9i3VTpiossuyRhntqsAXUhhg,16117
@@ -372,12 +372,12 @@ chalk/_gen/chalk/server/v1/datasets_pb2.py,sha256=SkpKF9ZfXqRNXmiEJEqq9wZ5WaG1bA
 chalk/_gen/chalk/server/v1/datasets_pb2.pyi,sha256=_VQ0_QRHmtGsrB7C9c-oDa1aoekuH81iUJ0gz0TyQhM,13146
 chalk/_gen/chalk/server/v1/datasets_pb2_grpc.py,sha256=zMXWrXv5E2QEe2h_0_Mxgrgdt0igqG4MYMxBczDh08Q,10471
 chalk/_gen/chalk/server/v1/datasets_pb2_grpc.pyi,sha256=PN6r_scA4nb9IQyS0_sZEY4CqU7pv8NzMhbd1rFK6Q8,2408
-chalk/_gen/chalk/server/v1/deploy_pb2.py,sha256=
-chalk/_gen/chalk/server/v1/deploy_pb2.pyi,sha256=
+chalk/_gen/chalk/server/v1/deploy_pb2.py,sha256=xbisfsg41Dy7rl7IYVu78ZNq9JuS7UfPQE8fP8VkH1E,12453
+chalk/_gen/chalk/server/v1/deploy_pb2.pyi,sha256=oZF3fAG0irgekzU8w0Y59XWi0put1nkSFK9XN7xijfI,9123
 chalk/_gen/chalk/server/v1/deploy_pb2_grpc.py,sha256=wFNzaR82eZuw7z9QoduGqFWhSzX5AGYVArV921vC12w,17571
 chalk/_gen/chalk/server/v1/deploy_pb2_grpc.pyi,sha256=PR5hxgRzgwEjfpAhaHU5Sb9Szy7frvMtWkhoQMcLC3k,3794
-chalk/_gen/chalk/server/v1/deployment_pb2.py,sha256=
-chalk/_gen/chalk/server/v1/deployment_pb2.pyi,sha256=
+chalk/_gen/chalk/server/v1/deployment_pb2.py,sha256=lFwr97levWISdF63NFnqLJlyBCG9jLyqbAHIlqKXd4U,8188
+chalk/_gen/chalk/server/v1/deployment_pb2.pyi,sha256=bhEtPaLuICJ5A5Tp4x5RsFal__9J5w7y4HB3x4Ew3e8,10217
 chalk/_gen/chalk/server/v1/deployment_pb2_grpc.py,sha256=VCyAf0skoHSgQPkD4n8rKQPYesinqHqN8TEwyu7XGUo,159
 chalk/_gen/chalk/server/v1/deployment_pb2_grpc.pyi,sha256=ff2TSiLVnG6IVQcTGzb2DIH3XRSoAvAo_RMcvbMFyc0,76
 chalk/_gen/chalk/server/v1/environment_pb2.py,sha256=lIzWEZm41ucqUAsZ0HPExzQPQIoQp21JNS0ZaEMKT8c,19746
@@ -760,7 +760,7 @@ chalk/sql/_internal/integrations/dynamodb.py,sha256=MHJryj6xJ9B72spofeTpCE86pC7Z
 chalk/sql/_internal/integrations/mssql.py,sha256=gZfAb_b6eVpTUkcFMeORF9edbpsvpvvi-VW_kJXwH6I,11938
 chalk/sql/_internal/integrations/mysql.py,sha256=RjIc0TaQceZrZ-q5AIGExbH5VHirbscZqXII1Ht7M0I,8696
 chalk/sql/_internal/integrations/postgres.py,sha256=bwxwEeJYH5-A7S22YumukwX6aN6c_B_MOOnrmJuTZyI,29169
-chalk/sql/_internal/integrations/redshift.py,sha256=
+chalk/sql/_internal/integrations/redshift.py,sha256=7HDF6FaiusiPgk00kFXttIkowGNbuSsjO0sXxPwWw68,34119
 chalk/sql/_internal/integrations/redshift_compiler_overrides.py,sha256=eKFeaCamTVfoHhdiBv1_3A6CxvFrv86Ovsa-vBBqjEo,5343
 chalk/sql/_internal/integrations/snowflake.py,sha256=Y8kKSA3W02yxi144KSOeKtlud4ArsjLKNPvTG6XkkXI,35241
 chalk/sql/_internal/integrations/snowflake_compiler_overrides.py,sha256=GbD3rdFWMpbht8dE-h9kcSsxideYHvVTGOYIfrczJJ8,6712
@@ -815,12 +815,12 @@ chalk/utils/storage_client.py,sha256=cK5KH8DVAt4Okk3X4jNMCkMiZgfUJE9Sq3zn4HkaBQo
 chalk/utils/string.py,sha256=mHciu1FR1NdXiE0GjiCOOs_Q3JBVpaNnjUQPorE5cJg,4268
 chalk/utils/stubgen.py,sha256=-mKIWFeiZojtfPwaTd9o3h4m4RvTmMTk6i-bI9JpU6c,21580
 chalk/utils/threading.py,sha256=dacvfFCpDs9GDWdRrE2mmM3Ex5DKOIaj5rCYDTqGshk,5305
-chalk/utils/tracing.py,sha256=
+chalk/utils/tracing.py,sha256=NiiM-9dbuJhSCv6R1npR1uYNKWlkqTR6Ygm0Voi2NrY,13078
 chalk/utils/weak_set_by_identity.py,sha256=VmikA_laYwFeOphCwXJIuyOIkrdlQe0bSzaXq7onoQw,953
 chalk/utils/pydanticutil/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 chalk/utils/pydanticutil/pydantic_compat.py,sha256=O575lLYJ5GvZC4HMzR9yATxf9XwjC6NrDUXbNwZidlE,3031
-chalkpy-2.95.7.dist-info/METADATA,sha256=
-chalkpy-2.95.7.dist-info/WHEEL,sha256=
-chalkpy-2.95.7.dist-info/entry_points.txt,sha256=
-chalkpy-2.95.7.dist-info/top_level.txt,sha256=
-chalkpy-2.95.7.dist-info/RECORD,,
+chalkpy-2.95.9.dist-info/METADATA,sha256=0BpdYdoYau7n41noSYhSy4QF99-4gktS6xEhabl9Lbs,27754
+chalkpy-2.95.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+chalkpy-2.95.9.dist-info/entry_points.txt,sha256=Vg23sd8icwq-morJrljVFr-kQnMbm95rZfZj5wsZGis,42
+chalkpy-2.95.9.dist-info/top_level.txt,sha256=1Q6_19IGYfNxSw50W8tYKEJ2t5HKQ3W9Wiw4ia5yg2c,6
+chalkpy-2.95.9.dist-info/RECORD,,

{chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/WHEEL
File without changes

{chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/entry_points.txt
File without changes

{chalkpy-2.95.7.dist-info → chalkpy-2.95.9.dist-info}/top_level.txt
File without changes