awslabs.s3-tables-mcp-server 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
awslabs/s3_tables_mcp_server/__init__.py

@@ -15,4 +15,4 @@
  # This file is part of the awslabs namespace.
  # It is intentionally minimal to support PEP 420 namespace packages.

- __version__ = '0.0.4'
+ __version__ = '0.0.6'
awslabs/s3_tables_mcp_server/file_processor/csv.py

@@ -18,13 +18,8 @@ This module provides functionality for processing and analyzing uploaded files,
  particularly focusing on CSV file handling and import capabilities.
  """

- import io
- import os
  import pyarrow.csv as pc
- from ..utils import get_s3_client, pyiceberg_load_catalog
- from pyiceberg.exceptions import NoSuchTableError
- from typing import Dict
- from urllib.parse import urlparse
+ from .utils import import_file_to_table


  async def import_csv_to_table(
@@ -37,87 +32,19 @@ async def import_csv_to_table(
      catalog_name: str = 's3tablescatalog',
      rest_signing_name: str = 's3tables',
      rest_sigv4_enabled: str = 'true',
- ) -> Dict:
-     """Import data from a CSV file into an S3 table.
-
-     This function reads data from a CSV file stored in S3 and imports it into an existing S3 table.
-     If the table doesn't exist, it will be created using the schema inferred from the CSV file.
-
-     Args:
-         warehouse: Warehouse string for Iceberg catalog
-         region: AWS region for S3Tables/Iceberg REST endpoint
-         namespace: The namespace containing the table
-         table_name: The name of the table to import data into
-         s3_url: The S3 URL of the CSV file (format: s3://bucket-name/key)
-         uri: REST URI for Iceberg catalog
-         catalog_name: Catalog name
-         rest_signing_name: REST signing name
-         rest_sigv4_enabled: Enable SigV4 signing
-
-     Returns:
-         A dictionary containing:
-         - status: 'success' or 'error'
-         - message: Success message or error details
-         - rows_processed: Number of rows processed (on success)
-         - file_processed: Name of the processed file
-         - table_created: Boolean indicating if a new table was created (on success)
-     """
-     # Parse S3 URL
-     parsed = urlparse(s3_url)
-     bucket = parsed.netloc
-     key = parsed.path.lstrip('/')
-
-     try:
-         # Load Iceberg catalog
-         catalog = pyiceberg_load_catalog(
-             catalog_name,
-             warehouse,
-             uri,
-             region,
-             rest_signing_name,
-             rest_sigv4_enabled,
-         )
-
-         # Get S3 client and read the CSV file to infer schema
-         s3_client = get_s3_client()
-         response = s3_client.get_object(Bucket=bucket, Key=key)
-         csv_data = response['Body'].read()
-
-         # Read CSV file into PyArrow Table to infer schema
-         # Convert bytes to file-like object for PyArrow
-         csv_buffer = io.BytesIO(csv_data)
-         csv_table = pc.read_csv(csv_buffer)
-         csv_schema = csv_table.schema
-
-         table_created = False
-         try:
-             # Try to load existing table
-             table = catalog.load_table(f'{namespace}.{table_name}')
-         except NoSuchTableError:
-             # Table doesn't exist, create it using the CSV schema
-             try:
-                 table = catalog.create_table(
-                     identifier=f'{namespace}.{table_name}',
-                     schema=csv_schema,
-                 )
-                 table_created = True
-             except Exception as create_error:
-                 return {
-                     'status': 'error',
-                     'error': f'Failed to create table: {str(create_error)}',
-                 }
-
-         # Append data to Iceberg table
-         table.append(csv_table)
-
-         return {
-             'status': 'success',
-             'message': f'Successfully imported {csv_table.num_rows} rows{" and created new table" if table_created else ""}',
-             'rows_processed': csv_table.num_rows,
-             'file_processed': os.path.basename(key),
-             'table_created': table_created,
-             'table_uuid': table.metadata.table_uuid,
-         }
-
-     except Exception as e:
-         return {'status': 'error', 'error': str(e)}
+     preserve_case: bool = False,
+ ):
+     """Import a CSV file into an S3 table using PyArrow."""
+     return await import_file_to_table(
+         warehouse=warehouse,
+         region=region,
+         namespace=namespace,
+         table_name=table_name,
+         s3_url=s3_url,
+         uri=uri,
+         create_pyarrow_table=pc.read_csv,
+         catalog_name=catalog_name,
+         rest_signing_name=rest_signing_name,
+         rest_sigv4_enabled=rest_sigv4_enabled,
+         preserve_case=preserve_case,
+     )
awslabs/s3_tables_mcp_server/file_processor/parquet.py

@@ -13,10 +13,7 @@
  # limitations under the License.

  import pyarrow.parquet as pq
- from awslabs.s3_tables_mcp_server.utils import get_s3_client, pyiceberg_load_catalog
- from io import BytesIO
- from pyiceberg.exceptions import NoSuchTableError
- from typing import Dict
+ from .utils import import_file_to_table


  async def import_parquet_to_table(
@@ -29,88 +26,19 @@ async def import_parquet_to_table(
      catalog_name: str = 's3tablescatalog',
      rest_signing_name: str = 's3tables',
      rest_sigv4_enabled: str = 'true',
- ) -> Dict:
-     """Import data from a Parquet file into an S3 table.
-
-     This function reads data from a Parquet file stored in S3 and imports it into an existing Iceberg table.
-     If the table doesn't exist, it will be created using the schema from the Parquet file.
-
-     Args:
-         warehouse: Warehouse string for Iceberg catalog
-         region: AWS region for S3Tables/Iceberg REST endpoint
-         namespace: The namespace containing the table
-         table_name: The name of the table to import data into
-         s3_url: The S3 URL of the Parquet file
-         uri: REST URI for Iceberg catalog
-         catalog_name: Catalog name
-         rest_signing_name: REST signing name
-         rest_sigv4_enabled: Enable SigV4 signing
-
-     Returns:
-         A dictionary containing:
-         - status: 'success' or 'error'
-         - message: Success message or error details
-         - rows_processed: Number of rows processed (on success)
-         - file_processed: Name of the processed file
-         - table_created: Boolean indicating if a new table was created (on success)
-     """
-     import os
-     from urllib.parse import urlparse
-
-     # Parse S3 URL
-     parsed = urlparse(s3_url)
-     bucket = parsed.netloc
-     key = parsed.path.lstrip('/')
-
-     try:
-         # Load Iceberg catalog
-         catalog = pyiceberg_load_catalog(
-             catalog_name,
-             warehouse,
-             uri,
-             region,
-             rest_signing_name,
-             rest_sigv4_enabled,
-         )
-
-         # Get S3 client and read the Parquet file first to get the schema
-         s3_client = get_s3_client()
-         response = s3_client.get_object(Bucket=bucket, Key=key)
-         parquet_data = BytesIO(response['Body'].read())
-
-         # Read Parquet file into PyArrow Table
-         parquet_table = pq.read_table(parquet_data)
-         parquet_schema = parquet_table.schema
-
-         table_created = False
-         try:
-             # Try to load existing table
-             table = catalog.load_table(f'{namespace}.{table_name}')
-         except NoSuchTableError:
-             # Table doesn't exist, create it using the Parquet schema
-             try:
-                 table = catalog.create_table(
-                     identifier=f'{namespace}.{table_name}',
-                     schema=parquet_schema,
-                 )
-                 table_created = True
-             except Exception as create_error:
-                 return {
-                     'status': 'error',
-                     'error': f'Failed to create table: {str(create_error)}',
-                 }
-
-         # Append data to Iceberg table
-         table.append(parquet_table)
-
-         return {
-             'status': 'success',
-             'message': f'Successfully imported {parquet_table.num_rows} rows{" and created new table" if table_created else ""}',
-             'rows_processed': parquet_table.num_rows,
-             'file_processed': os.path.basename(key),
-             'table_created': table_created,
-             'table_uuid': table.metadata.table_uuid,
-         }
-
-     except Exception as e:
-         return {'status': 'error', 'error': str(e)}
+     preserve_case: bool = False,
+ ):
+     """Import a Parquet file into an S3 table using PyArrow."""
+     return await import_file_to_table(
+         warehouse=warehouse,
+         region=region,
+         namespace=namespace,
+         table_name=table_name,
+         s3_url=s3_url,
+         uri=uri,
+         create_pyarrow_table=pq.read_table,
+         catalog_name=catalog_name,
+         rest_signing_name=rest_signing_name,
+         rest_sigv4_enabled=rest_sigv4_enabled,
+         preserve_case=preserve_case,
+     )
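Both wrappers above now delegate to a single import_file_to_table helper; the only difference between them is the create_pyarrow_table callable they pass in (pc.read_csv for CSV, pq.read_table for Parquet). Below is a minimal standalone sketch of that callable pattern, separate from the S3/Iceberg plumbing; the local file paths are hypothetical placeholders.

# Sketch: both pyarrow.csv.read_csv and pyarrow.parquet.read_table accept a
# file-like object, so one routine can cover both formats by taking the
# reader as a parameter. File paths below are placeholders.
from io import BytesIO
from typing import Any, Callable

import pyarrow as pa
import pyarrow.csv as pc
import pyarrow.parquet as pq


def load_table(file_bytes: bytes, create_pyarrow_table: Callable[[Any], pa.Table]) -> pa.Table:
    """Build a PyArrow table from raw bytes using the supplied reader."""
    return create_pyarrow_table(BytesIO(file_bytes))


with open('data.csv', 'rb') as f:  # hypothetical local file
    csv_table = load_table(f.read(), pc.read_csv)

with open('data.parquet', 'rb') as f:  # hypothetical local file
    parquet_table = load_table(f.read(), pq.read_table)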
awslabs/s3_tables_mcp_server/file_processor/utils.py (new file)

@@ -0,0 +1,157 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """AWS S3 Tables MCP Server file processing utilities.
+
+ This module provides utility functions for file processing operations,
+ particularly focusing on column name conversion and schema transformation.
+ """
+
+ import os
+ import pyarrow as pa
+ from ..utils import get_s3_client, pyiceberg_load_catalog
+ from io import BytesIO
+ from pydantic.alias_generators import to_snake
+ from pyiceberg.exceptions import NoSuchTableError
+ from typing import Any, Callable, Dict
+ from urllib.parse import urlparse
+
+
+ def convert_column_names_to_snake_case(schema: pa.Schema) -> pa.Schema:
+     """Convert column names in PyArrow schema to snake_case.
+
+     Args:
+         schema: PyArrow schema with original column names
+
+     Returns:
+         PyArrow schema with converted column names
+
+     Raises:
+         ValueError: If duplicate column names exist after conversion
+     """
+     # Extract original column names
+     original_names = schema.names
+
+     # Convert each column name to snake_case
+     converted_names = [to_snake(name) for name in original_names]
+
+     # Check for duplicates after conversion using set and len
+     if len(set(converted_names)) != len(converted_names):
+         raise ValueError(
+             f'Duplicate column names after case conversion. '
+             f'Original names: {original_names}. Converted names: {converted_names}'
+         )
+
+     # Create new schema with converted column names
+     new_fields = []
+     for i, field in enumerate(schema):
+         new_field = pa.field(
+             converted_names[i], field.type, nullable=field.nullable, metadata=field.metadata
+         )
+         new_fields.append(new_field)
+
+     return pa.schema(new_fields, metadata=schema.metadata)
+
+
+ async def import_file_to_table(
+     warehouse: str,
+     region: str,
+     namespace: str,
+     table_name: str,
+     s3_url: str,
+     uri: str,
+     create_pyarrow_table: Callable[[Any], pa.Table],
+     catalog_name: str = 's3tablescatalog',
+     rest_signing_name: str = 's3tables',
+     rest_sigv4_enabled: str = 'true',
+     preserve_case: bool = False,
+ ) -> Dict:
+     """Import data from a file (CSV, Parquet, etc.) into an S3 table using a provided PyArrow table creation function."""
+     # Parse S3 URL
+     parsed = urlparse(s3_url)
+     bucket = parsed.netloc
+     key = parsed.path.lstrip('/')
+
+     try:
+         # Load Iceberg catalog
+         catalog = pyiceberg_load_catalog(
+             catalog_name,
+             warehouse,
+             uri,
+             region,
+             rest_signing_name,
+             rest_sigv4_enabled,
+         )
+
+         # Get S3 client and read the file
+         s3_client = get_s3_client()
+         response = s3_client.get_object(Bucket=bucket, Key=key)
+         file_bytes = response['Body'].read()
+
+         # Create PyArrow Table and Schema (file-like interface)
+         file_like = BytesIO(file_bytes)
+         pyarrow_table = create_pyarrow_table(file_like)
+         pyarrow_schema = pyarrow_table.schema
+
+         # Convert column names to snake_case unless preserve_case is True
+         columns_converted = False
+         if not preserve_case:
+             try:
+                 pyarrow_schema = convert_column_names_to_snake_case(pyarrow_schema)
+                 pyarrow_table = pyarrow_table.rename_columns(pyarrow_schema.names)
+                 columns_converted = True
+             except Exception as conv_err:
+                 return {
+                     'status': 'error',
+                     'error': f'Column name conversion failed: {str(conv_err)}',
+                 }
+
+         table_created = False
+         try:
+             # Try to load existing table
+             table = catalog.load_table(f'{namespace}.{table_name}')
+         except NoSuchTableError:
+             # Table doesn't exist, create it using the schema
+             try:
+                 table = catalog.create_table(
+                     identifier=f'{namespace}.{table_name}',
+                     schema=pyarrow_schema,
+                 )
+                 table_created = True
+             except Exception as create_error:
+                 return {
+                     'status': 'error',
+                     'error': f'Failed to create table: {str(create_error)}',
+                 }
+
+         # Append data to Iceberg table
+         table.append(pyarrow_table)
+
+         # Build message with warnings if applicable
+         message = f'Successfully imported {pyarrow_table.num_rows} rows{" and created new table" if table_created else ""}'
+         if columns_converted:
+             message += '. WARNING: Column names were converted to snake_case format. To preserve the original case, set preserve_case to True.'
+
+         return {
+             'status': 'success',
+             'message': message,
+             'rows_processed': pyarrow_table.num_rows,
+             'file_processed': os.path.basename(key),
+             'table_created': table_created,
+             'table_uuid': table.metadata.table_uuid,
+             'columns': pyarrow_schema.names,
+         }
+
+     except Exception as e:
+         return {'status': 'error', 'error': str(e)}
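The new convert_column_names_to_snake_case helper relies on pydantic v2's to_snake alias generator and rejects names that would collide after conversion. A small sketch exercising the helper, with made-up column names:

# Sketch of the snake_case conversion added in file_processor/utils.py.
# Column names are hypothetical examples.
import pyarrow as pa
from awslabs.s3_tables_mcp_server.file_processor.utils import convert_column_names_to_snake_case

schema = pa.schema([
    pa.field('CustomerId', pa.int64()),
    pa.field('OrderTotal', pa.float64()),
])
print(convert_column_names_to_snake_case(schema).names)
# Expected: ['customer_id', 'order_total']

# Names that collide after conversion raise ValueError instead of silently
# overwriting a column.
clashing = pa.schema([pa.field('UserId', pa.int64()), pa.field('user_id', pa.int64())])
try:
    convert_column_names_to_snake_case(clashing)
except ValueError as err:
    print(err)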
awslabs/s3_tables_mcp_server/server.py

@@ -85,7 +85,6 @@ class S3TablesMCPServer(FastMCP):
  app = S3TablesMCPServer(
      name='s3-tables-server',
      instructions='A Model Context Protocol (MCP) server that enables programmatic access to AWS S3 Tables. This server provides a comprehensive interface for creating, managing, and interacting with S3-based table storage, supporting operations for table buckets, namespaces, and individual S3 tables. It integrates with Amazon Athena for SQL query execution, allowing both read and write operations on your S3 Tables data.',
-     version=__version__,
  )


@@ -589,6 +588,9 @@ async def import_csv_to_table(
          str, Field('s3tables', description='REST signing name')
      ] = 's3tables',
      rest_sigv4_enabled: Annotated[str, Field('true', description='Enable SigV4 signing')] = 'true',
+     preserve_case: Annotated[
+         bool, Field(..., description='Preserve case of column names')
+     ] = False,
  ) -> dict:
      """Import data from a CSV file into an S3 table.

@@ -596,6 +598,7 @@ async def import_csv_to_table(
      If the table doesn't exist, it will be created with a schema inferred from the CSV file.
      If the table exists, the CSV file schema must be compatible with the table's schema.
      The tool will validate the schema before attempting to import the data.
+     If preserve_case is True, the column names will not be converted to snake_case. Otherwise, the column names will be converted to snake_case.

      Returns error dictionary with status and error message if:
      - URL is not a valid S3 URL
@@ -615,6 +618,7 @@ async def import_csv_to_table(
          catalog_name: 's3tablescatalog'
          rest_signing_name: 's3tables'
          rest_sigv4_enabled: 'true'
+         preserve_case: False

      Permissions:
      You must have:
@@ -634,6 +638,7 @@ async def import_csv_to_table(
          catalog_name=catalog_name,
          rest_signing_name=rest_signing_name,
          rest_sigv4_enabled=rest_sigv4_enabled,
+         preserve_case=preserve_case,
      )


@@ -656,6 +661,9 @@ async def import_parquet_to_table(
          str, Field('s3tables', description='REST signing name')
      ] = 's3tables',
      rest_sigv4_enabled: Annotated[str, Field('true', description='Enable SigV4 signing')] = 'true',
+     preserve_case: Annotated[
+         bool, Field(..., description='Preserve case of column names')
+     ] = False,
  ) -> dict:
      """Import data from a Parquet file into an S3 table.

@@ -663,6 +671,7 @@ async def import_parquet_to_table(
      If the table doesn't exist, it will be created with a schema inferred from the Parquet file.
      If the table exists, the Parquet file schema must be compatible with the table's schema.
      The tool will validate the schema before attempting to import the data.
+     If preserve_case is True, the column names will not be converted to snake_case. Otherwise, the column names will be converted to snake_case.

      Returns error dictionary with status and error message if:
      - URL is not a valid S3 URL
@@ -688,6 +697,7 @@ async def import_parquet_to_table(
          catalog_name: 's3tablescatalog'
          rest_signing_name: 's3tables'
          rest_sigv4_enabled: 'true'
+         preserve_case: False

      Permissions:
      You must have:
@@ -708,6 +718,7 @@ async def import_parquet_to_table(
          catalog_name=catalog_name,
          rest_signing_name=rest_signing_name,
          rest_sigv4_enabled=rest_sigv4_enabled,
+         preserve_case=preserve_case,
      )

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: awslabs.s3-tables-mcp-server
- Version: 0.0.4
+ Version: 0.0.6
  Summary: An AWS Labs Model Context Protocol (MCP) server for awslabs.s3-tables-mcp-server
  Project-URL: homepage, https://awslabs.github.io/mcp/
  Project-URL: docs, https://awslabs.github.io/mcp/servers/s3-tables-mcp-server/
@@ -1,23 +1,24 @@
  awslabs/__init__.py,sha256=BHp8_uaBohApyLlmVWvYVe5bSrH59FvLJ5cNTigMV_8,644
- awslabs/s3_tables_mcp_server/__init__.py,sha256=xRlx7sZhHIEshTK2wTt_3PvKa2tjm26ZydI5L2wYK0c,754
+ awslabs/s3_tables_mcp_server/__init__.py,sha256=HVzYSyvrA8Lk4lTr3Q6Jp71bD9RIj6uMoV_IWpbDRis,754
  awslabs/s3_tables_mcp_server/constants.py,sha256=qCWY9A9PAQXdVz-anO26zbQ72Dp79nGM7xeLR062a_o,4971
  awslabs/s3_tables_mcp_server/database.py,sha256=YorxcSx-9typfQ5W_LzwNPZkP47u__QSLJlp0fBsZLg,3851
  awslabs/s3_tables_mcp_server/models.py,sha256=zWTFJLBhIZRLEgOCTyNcGvbItxqYbFJKH6se1EzXDjY,8097
  awslabs/s3_tables_mcp_server/namespaces.py,sha256=KZqxJiEnlpxkqvbfygezbr0szwyDP2O0J6osyiPUzwg,2071
  awslabs/s3_tables_mcp_server/resources.py,sha256=PXZo0sTVn34tXJ4mlw_OS90p12SNoLZs4Re0gV815wk,8281
  awslabs/s3_tables_mcp_server/s3_operations.py,sha256=Zq3oe-uHuKbW87b_WQyM-6HZ0_ikbgiagb2SVesltdg,1656
- awslabs/s3_tables_mcp_server/server.py,sha256=cvXDTZuK1sGpYfjLbF6iLGe49BSA0yx4rSp73UEBcvE,32008
+ awslabs/s3_tables_mcp_server/server.py,sha256=0UQ1pDFGx43-7ck0CphrK6P5XBFhKhFcn_JrCWdTLrI,32635
  awslabs/s3_tables_mcp_server/table_buckets.py,sha256=JHmpB_P9h0Hz5Uis25_GPTD1G-mIODVwjaswwIGyCS4,4471
  awslabs/s3_tables_mcp_server/tables.py,sha256=ITnRDHHrtRWLsRhff4TP4B7gGT_jRXy994oxK3x10a4,10143
  awslabs/s3_tables_mcp_server/utils.py,sha256=SReyS3KsdikI9ycL5RsvtVI7MiRnA1W9bTiXGKf1lHc,4517
  awslabs/s3_tables_mcp_server/engines/__init__.py,sha256=O4wlFva3THWmjfaXfJAwi29mxJSKIhM0jcebVfd3S5U,615
  awslabs/s3_tables_mcp_server/engines/pyiceberg.py,sha256=wzkySQZgx7L8Fn2Oqk8Yz4V-hQZDUempd8q0IwvCY_4,5784
  awslabs/s3_tables_mcp_server/file_processor/__init__.py,sha256=8PeggFRY3ZKBdxcFPEqSSHkSJBZ57eOs-z0fqkMHn9E,978
- awslabs/s3_tables_mcp_server/file_processor/csv.py,sha256=Sngc5mfJDLxQaINBUJLBn5OLc842rv9FqqcJ1upK6iw,4406
- awslabs/s3_tables_mcp_server/file_processor/parquet.py,sha256=Lr7mtqsK9jqlWokQv74dgdEgYmNKlCJ869yNNMrm69o,4189
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/METADATA,sha256=KrjJ9PUydW8GhMHSlFuIH_HOH-fJBhiMDa0qK_smSdo,11511
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/entry_points.txt,sha256=WRA45Bi2dVY5hskxkka_e7BAGRqG1KiW3ImTBnHSyLs,90
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/licenses/NOTICE,sha256=jm-1A_8i-wl7KYs2Ynj2A29vXWJCMKLHmGfy4P_B51Y,96
- awslabs_s3_tables_mcp_server-0.0.4.dist-info/RECORD,,
+ awslabs/s3_tables_mcp_server/file_processor/csv.py,sha256=1JfFFKbnK1qvDvpOKWoXIv9zca8MRlPXk2Phd7-GgIU,1650
+ awslabs/s3_tables_mcp_server/file_processor/parquet.py,sha256=TA9HmVPAnwOc3hv3zsL7J7sRd8gMINGFp5_r1eX7uUE,1458
+ awslabs/s3_tables_mcp_server/file_processor/utils.py,sha256=MkNE0c1tlR_bs5ZeCTqAuRCLQWOc3mJqnJxATbGwoEc,5613
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/METADATA,sha256=hSSFHo6RJRQi8Ho8mFoT78fmtCRBTXfpP2OJrKBK72w,11511
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/entry_points.txt,sha256=WRA45Bi2dVY5hskxkka_e7BAGRqG1KiW3ImTBnHSyLs,90
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/licenses/NOTICE,sha256=jm-1A_8i-wl7KYs2Ynj2A29vXWJCMKLHmGfy4P_B51Y,96
+ awslabs_s3_tables_mcp_server-0.0.6.dist-info/RECORD,,