datablade 0.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datablade-0.0.0/LICENSE +21 -0
- datablade-0.0.0/PKG-INFO +13 -0
- datablade-0.0.0/pyproject.toml +3 -0
- datablade-0.0.0/setup.cfg +4 -0
- datablade-0.0.0/setup.py +12 -0
- datablade-0.0.0/src/datablade/__init__.py +1 -0
- datablade-0.0.0/src/datablade/core/__init__.py +7 -0
- datablade-0.0.0/src/datablade/core/frames.py +236 -0
- datablade-0.0.0/src/datablade/core/json.py +10 -0
- datablade-0.0.0/src/datablade/core/lists.py +10 -0
- datablade-0.0.0/src/datablade/core/messages.py +11 -0
- datablade-0.0.0/src/datablade/core/strings.py +43 -0
- datablade-0.0.0/src/datablade/core/zip.py +24 -0
- datablade-0.0.0/src/datablade.egg-info/PKG-INFO +13 -0
- datablade-0.0.0/src/datablade.egg-info/SOURCES.txt +16 -0
- datablade-0.0.0/src/datablade.egg-info/dependency_links.txt +1 -0
- datablade-0.0.0/src/datablade.egg-info/requires.txt +5 -0
- datablade-0.0.0/src/datablade.egg-info/top_level.txt +1 -0
datablade-0.0.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 Brent Carpenetti
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
datablade-0.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: datablade
|
|
3
|
+
Version: 0.0.0
|
|
4
|
+
Summary: datablade is a suite of functions to provide standard syntax across projects.
|
|
5
|
+
Author: Brent Carpenetti
|
|
6
|
+
Author-email: brentcarpenetti@gmail.com
|
|
7
|
+
License: MIT
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Requires-Dist: pandas
|
|
10
|
+
Requires-Dist: pyarrow
|
|
11
|
+
Requires-Dist: numpy
|
|
12
|
+
Requires-Dist: openpyxl
|
|
13
|
+
Requires-Dist: requests
|
datablade-0.0.0/setup.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
from setuptools import setup, find_packages

# One-line package summary shared with the package metadata.
DESCRIPTION = (
    "datablade is a suite of functions to provide standard syntax "
    "across projects."
)

setup(
    name="datablade",
    version="0.0.0",
    description=DESCRIPTION,
    author="Brent Carpenetti",
    author_email="brentcarpenetti@gmail.com",
    license="MIT",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    install_requires=["pandas", "pyarrow", "numpy", "openpyxl", "requests"],
    include_package_data=True,
)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
#empty
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import os, re
|
|
2
|
+
|
|
3
|
+
def find_python_files(path):
|
|
4
|
+
return [one_file_name.replace('.py','') for one_file_name in os.listdir(os.path.abspath(path)) if one_file_name != '__init__.py' and re.match(r'.*\.py$',one_file_name) is not None]
|
|
5
|
+
|
|
6
|
+
# Eagerly re-export every public name from each sibling module so callers can
# write `from datablade.core import X` without knowing which module defines X.
# NOTE(review): exec-based star imports defeat static analysis and linters;
# importlib.import_module would achieve the same effect more safely — confirm
# before changing, since import order follows os.listdir and is unspecified.
for each_file in find_python_files(path=os.path.dirname(__file__)):
    exec('from .'+each_file+' import *')
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import pyarrow as pa
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from .messages import print_verbose
|
|
6
|
+
from .strings import sql_quotename
|
|
7
|
+
|
|
8
|
+
def try_cast_string_columns_to_numeric(df: pd.DataFrame=None, convert_partial: bool=False, verbose: bool=False) -> pd.DataFrame|None:
    """
    Attempt to cast DataFrame string columns to numeric values where possible.

    Parameters:
        df (pd.DataFrame): The DataFrame to process. Converted columns are
            assigned in place on this object.
        convert_partial (bool): If True, columns with some values convertible to
            numeric types will be converted to numeric types with NaNs where
            conversion failed. If False, only columns where all values can be
            converted will be converted.
        verbose (bool): If True, progress messages are printed.

    Returns:
        pd.DataFrame | None: DataFrame with string columns converted to numeric
        types where possible, or None when no DataFrame is provided.
    """
    if df is None:
        print_verbose("No DataFrame provided; exiting try_cast_string_columns_to_numeric.", verbose)
        # The original used a bare `exit`, which is a no-op expression (it only
        # looks up the builtin); return explicitly instead.
        return None

    for col in df.columns:
        if df[col].dtype == 'object':
            converted = pd.to_numeric(df[col], errors='coerce')
            # NOTE(review): a column whose original values include NaN is
            # treated as a partial conversion even if every non-null value is
            # numeric — preserved from the original behavior.
            if not converted.isnull().any():
                df[col] = converted
                print_verbose(f"Column '{col}' successfully converted to numeric.", verbose)
            elif convert_partial:
                df[col] = converted
                print_verbose(f"Column '{col}' partially converted to numeric with NaNs where conversion failed.", verbose)
            else:
                print_verbose(f"Column '{col}' could not be fully converted to numeric; leaving as is.", verbose)
    return df
|
|
39
|
+
|
|
40
|
+
def clean_dataframe_columns(df: pd.DataFrame=None, verbose: bool=False) -> pd.DataFrame|None:
    """
    Clean the DataFrame columns by:
    - Flattening MultiIndex columns
    - Converting non-string column names to strings
    - Removing duplicate columns, keeping the first occurrence

    Parameters:
        df (pd.DataFrame): The DataFrame to clean.
        verbose (bool): If True, progress messages are printed.

    Returns:
        pd.DataFrame | None: The cleaned DataFrame, or None when no DataFrame
        is provided.
    """
    if df is None:
        print_verbose("No DataFrame provided; exiting clean_dataframe_columns.", verbose)
        # The original bare `exit` was a no-op, so execution fell through and
        # crashed on `df.columns` below; return explicitly instead.
        return None

    # Step 1: Flatten MultiIndex columns into single underscore-joined names.
    if isinstance(df.columns, pd.MultiIndex):
        df.columns = ['_'.join(map(str, col)).strip() for col in df.columns.values]
        print_verbose("Flattened MultiIndex columns.", verbose)

    # Step 2: Convert non-string column names to strings.
    df.columns = df.columns.map(str)
    print_verbose("Converted column names to strings.", verbose)

    # Step 3: Remove duplicate columns, keeping the first occurrence.
    duplicates = df.columns.duplicated()
    if duplicates.any():
        print_verbose(f"Duplicate columns found: {list(df.columns[duplicates])}", verbose)
        df = df.loc[:, ~duplicates]
        print_verbose("Removed duplicate columns, keeping the first occurrence.", verbose)

    return df
|
|
74
|
+
|
|
75
|
+
def generate_parquet_schema(df: pd.DataFrame=None, verbose: bool=False) -> pa.Schema|None:
    """
    Generate a PyArrow Schema from a pandas DataFrame.

    Integer columns are narrowed to the smallest PyArrow integer type that can
    hold their observed min/max; unrecognized dtypes fall back to string.

    Parameters:
        df (pandas.DataFrame): The DataFrame to generate the schema from.
        verbose (bool): If True, progress messages are printed.

    Returns:
        pyarrow.Schema | None: The PyArrow Schema object, or None when no
        DataFrame is provided.
    """
    if df is None:
        print_verbose("No DataFrame provided; exiting generate_parquet_schema.", verbose)
        # The original bare `exit` was a no-op; return explicitly instead.
        return None

    fields = []
    for column in df.columns:
        col_data = df[column]
        dtype = col_data.dtype

        # Columns with any nulls must be nullable in the schema.
        nullable = col_data.isnull().any()

        # Map the pandas dtype to a PyArrow type.
        if pd.api.types.is_integer_dtype(dtype):
            # Narrow to the smallest integer type covering the observed range.
            min_value = col_data.min()
            max_value = col_data.max()
            if min_value >= np.iinfo(np.int8).min and max_value <= np.iinfo(np.int8).max:
                pa_type = pa.int8()
            elif min_value >= np.iinfo(np.int16).min and max_value <= np.iinfo(np.int16).max:
                pa_type = pa.int16()
            elif min_value >= np.iinfo(np.int32).min and max_value <= np.iinfo(np.int32).max:
                pa_type = pa.int32()
            else:
                pa_type = pa.int64()
        elif pd.api.types.is_float_dtype(dtype):
            pa_type = pa.float64()
        elif pd.api.types.is_bool_dtype(dtype):
            pa_type = pa.bool_()
        elif pd.api.types.is_datetime64_any_dtype(dtype):
            # NOTE(review): millisecond precision discards sub-ms components of
            # pandas datetime64[ns] values — confirm this is intended.
            pa_type = pa.timestamp('ms')
        elif isinstance(dtype, pd.CategoricalDtype) or pd.api.types.is_object_dtype(dtype):
            pa_type = pa.string()
        else:
            # Fallback for anything unrecognized.
            pa_type = pa.string()

        fields.append(pa.field(column, pa_type, nullable=nullable))

    return pa.schema(fields)
|
|
133
|
+
|
|
134
|
+
def pandas_to_parquet_table(df: pd.DataFrame=None, convert: bool=True, partial: bool=False, preserve_index: bool=False, verbose: bool=False) -> pa.Table|None:
    """
    Generate a PyArrow Table from a pandas DataFrame.

    The DataFrame's columns are cleaned (flattened, stringified, de-duplicated),
    optionally cast to numeric, and a schema is derived before conversion.

    Parameters:
        df (pandas.DataFrame): The DataFrame to generate the table from.
        convert (bool): If True, attempt to cast string columns to numeric.
        partial (bool): Passed to try_cast_string_columns_to_numeric as
            convert_partial.
        preserve_index (bool): If True, keep the DataFrame index in the table.
        verbose (bool): If True, progress messages are printed.

    Returns:
        pyarrow.Table | None: The PyArrow Table object, or None on failure.
    """
    if df is None:
        # Message fixed to name this function (was "generate_parquet_table").
        print_verbose("No DataFrame provided; exiting pandas_to_parquet_table.", verbose)
        # The original bare `exit` was a no-op; return explicitly instead.
        return None

    df = clean_dataframe_columns(df=df, verbose=verbose)

    if convert:
        df = try_cast_string_columns_to_numeric(df=df, convert_partial=partial, verbose=verbose)

    schema = generate_parquet_schema(df=df, verbose=verbose)
    try:
        return pa.Table.from_pandas(df, schema=schema, preserve_index=preserve_index)
    except Exception as e:
        # Best-effort: report the failure and signal it with None, matching the
        # original's swallow-and-continue design.
        print_verbose(f"Error generating PyArrow Table: {e}", verbose)
        return None
|
|
161
|
+
|
|
162
|
+
def generate_sql_server_create_table_string(df: pd.DataFrame=None, catalog: str='database', schema: str='dbo', table: str='table', dropexisting: bool=True, verbose: bool=False) -> str|None:
    """
    Generate a SQL Server CREATE TABLE string from a pandas DataFrame.

    Integer columns are narrowed to the smallest SQL Server integer type that
    covers their observed range; object/categorical columns become nvarchar
    sized to the longest value (or nvarchar(max) beyond 4000 characters).

    Parameters:
        df (pandas.DataFrame): The DataFrame to generate the schema from.
        catalog (str): The database name.
        schema (str): The schema name.
        table (str): The table name.
        dropexisting (bool): If True, prefix a conditional DROP TABLE.
        verbose (bool): If True, progress messages are printed.

    Returns:
        str | None: The SQL Server CREATE TABLE statement, or None when no
        DataFrame is provided.
    """
    if df is None:
        # Message fixed to name this function (it previously referenced
        # try_cast_string_columns_to_numeric).
        print_verbose("No DataFrame provided; exiting generate_sql_server_create_table_string.", verbose)
        # The original bare `exit` was a no-op; return explicitly instead.
        return None

    table_name = f"{sql_quotename(catalog)}.{sql_quotename(schema)}.{sql_quotename(table)}"
    drop_statement = f"use {sql_quotename(catalog)}\rgo\rif object_id('{table_name}') is not null drop table {table_name};\r" if dropexisting else ""

    # The original emitted a stray ';' before CREATE TABLE (and a bare leading
    # ';' when dropexisting was False); the drop statement already terminates
    # itself.
    create_statement = [f"{drop_statement}create table {table_name} ("]
    indent = " "
    column_lines = []

    for column in df.columns:
        col_data = df[column]
        dtype = col_data.dtype

        # Columns with any nulls are declared nullable.
        nullable = col_data.isnull().any()
        # Fixed the original f-string that produced a double space ("  null").
        null_str = "null" if nullable else "not null"

        # Map the pandas dtype to a SQL Server type.
        if pd.api.types.is_integer_dtype(dtype):
            min_value = col_data.min()
            max_value = col_data.max()
            if min_value >= 0 and max_value <= 255:
                sql_type = "tinyint"
            elif min_value >= -32768 and max_value <= 32767:
                sql_type = "smallint"
            elif min_value >= -2147483648 and max_value <= 2147483647:
                sql_type = "int"
            else:
                sql_type = "bigint"
        elif pd.api.types.is_float_dtype(dtype):
            sql_type = "float"
        elif pd.api.types.is_bool_dtype(dtype):
            sql_type = "bit"
        elif pd.api.types.is_datetime64_any_dtype(dtype):
            sql_type = "datetime2"
        elif isinstance(dtype, pd.CategoricalDtype) or pd.api.types.is_object_dtype(dtype):
            # Size nvarchar to the longest string value. Guard the all-null
            # case (the original compared NaN) and 0-length values, since
            # nvarchar(0) is invalid SQL.
            lengths = col_data.dropna().astype(str).map(len)
            max_length = int(lengths.max()) if len(lengths) else 0
            sql_type = f"nvarchar({max_length if 0 < max_length <= 4000 else 'max'})"
        else:
            sql_type = "nvarchar(max)"

        column_lines.append(f"{indent}{sql_quotename(column)} {sql_type} {null_str},")

    # Remove the trailing comma from the last column definition.
    if column_lines:
        column_lines[-1] = column_lines[-1].rstrip(',')

    create_statement.extend(column_lines)
    create_statement.append(");")
    return "\r".join(create_statement)
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
from .messages import print_verbose
|
|
3
|
+
|
|
4
|
+
def get(url: str, verbose: bool = False, **kwargs) -> dict | None:
    """
    Get JSON data from a URL.

    Parameters:
        url (str): The URL to request.
        verbose (bool): If True, errors are printed.
        **kwargs: Passed through to requests.get (e.g. headers, timeout).

    Returns:
        dict | None: The decoded JSON payload, or None when the request fails.
    """
    try:
        response = requests.get(url, **kwargs)
        # Fail on 4xx/5xx instead of silently parsing an error page as JSON;
        # HTTPError subclasses RequestException, so it is handled below.
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        # Best-effort design preserved: report and return None.
        print_verbose(f"Error: {e}", verbose=verbose)
        return None
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
|
|
2
|
+
def print_verbose(message: str, verbose: bool=True) -> None:
    """
    Emit *message* to stdout when verbose output is enabled.

    Parameters:
        message (str): The message to print (coerced to str).
        verbose (bool): When True the message is printed; otherwise nothing
            happens.
    """
    if not verbose:
        return
    print(str(message))
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from .messages import print_verbose
|
|
2
|
+
import pathlib
|
|
3
|
+
|
|
4
|
+
def sql_quotename(name: str=None, brackets: bool=True, ticks: bool=False, verbose: bool=False) -> str|None:
|
|
5
|
+
"""
|
|
6
|
+
Quote a SQL Server name string.
|
|
7
|
+
Parameters:
|
|
8
|
+
name (str): The name to quote.
|
|
9
|
+
brackets (bool): Whether to use brackets.
|
|
10
|
+
Returns:
|
|
11
|
+
str: The quoted name.
|
|
12
|
+
"""
|
|
13
|
+
if name is None:
|
|
14
|
+
print_verbose("No name provided; exiting sql_quotename.", verbose)
|
|
15
|
+
exit
|
|
16
|
+
return_value = f"{name.replace('[','').replace(']','')}"
|
|
17
|
+
if brackets:
|
|
18
|
+
return_value = f"[{return_value}]"
|
|
19
|
+
if ticks or not brackets:
|
|
20
|
+
return_value = f"'{return_value}'"
|
|
21
|
+
return return_value
|
|
22
|
+
|
|
23
|
+
def pathing(input: str | pathlib.Path, verbose: bool=False) -> pathlib.Path|None:
|
|
24
|
+
"""
|
|
25
|
+
Standardize a path string.
|
|
26
|
+
Parameters:
|
|
27
|
+
path (str): The path to standardize.
|
|
28
|
+
Returns:
|
|
29
|
+
str: The standardized path.
|
|
30
|
+
"""
|
|
31
|
+
if input is None:
|
|
32
|
+
print_verbose("No path provided; exiting pathing.", verbose)
|
|
33
|
+
exit
|
|
34
|
+
if isinstance(input, str):
|
|
35
|
+
input.replace('\\','/')
|
|
36
|
+
input = pathlib.Path(input)
|
|
37
|
+
else:
|
|
38
|
+
input = input
|
|
39
|
+
if input.exists():
|
|
40
|
+
return input
|
|
41
|
+
else:
|
|
42
|
+
print_verbose(f"Path {input} does not exist; exiting pathing.", verbose)
|
|
43
|
+
exit
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import requests, zipfile, io, pathlib
|
|
2
|
+
from .messages import print_verbose
|
|
3
|
+
from .strings import pathing
|
|
4
|
+
|
|
5
|
+
def get(url:str, path:str|pathlib.Path=None, verbose:bool=False, **kwargs) -> None|io.BytesIO:
    """
    Download a zip file from a URL and optionally extract it to a path.

    Parameters:
        url (str): The URL to download.
        path (str | pathlib.Path): If given, the archive is extracted under
            this directory (created if missing); if None, the raw downloaded
            bytes are returned instead.
        verbose (bool): If True, progress messages are printed.
        **kwargs: Passed through to requests.get.

    Returns:
        io.BytesIO | None: The downloaded bytes when no path is given,
        otherwise None.
    """
    try:
        print_verbose(f"Downloading {url}", verbose=verbose)
        response = requests.get(url, **kwargs)
        # Fail on 4xx/5xx instead of trying to unzip an error page;
        # HTTPError subclasses RequestException and is handled below.
        response.raise_for_status()
        zip_buffer = io.BytesIO(response.content)
        if path is None:
            return zip_buffer
        print_verbose(f"Saving data to {path}", verbose=verbose)
        zip_buffer.seek(0)
        # Resolve the target once (the original called pathing per entry);
        # fall back to creating the directory if it does not exist yet.
        base = pathing(path, verbose=verbose)
        if base is None:
            base = pathlib.Path(path)
        base.mkdir(parents=True, exist_ok=True)
        base_resolved = base.resolve()
        with zipfile.ZipFile(zip_buffer, 'r') as zip_ref:
            for zip_info in zip_ref.infolist():
                extract_path = base / zip_info.filename
                resolved = extract_path.resolve()
                # Guard against zip-slip: refuse members whose names escape
                # the target directory (e.g. '../../etc/passwd').
                if resolved != base_resolved and base_resolved not in resolved.parents:
                    print_verbose(f"Skipping unsafe archive member {zip_info.filename}", verbose=verbose)
                    continue
                if zip_info.is_dir():
                    # The original tried to open directory entries for writing.
                    extract_path.mkdir(parents=True, exist_ok=True)
                    continue
                extract_path.parent.mkdir(parents=True, exist_ok=True)
                # `with` closes the file; the original's explicit close was redundant.
                with open(extract_path, 'wb') as f:
                    f.write(zip_ref.read(zip_info.filename))
    except requests.exceptions.RequestException as e:
        print_verbose(f"Error: {e}", verbose=verbose)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: datablade
|
|
3
|
+
Version: 0.0.0
|
|
4
|
+
Summary: datablade is a suite of functions to provide standard syntax across projects.
|
|
5
|
+
Author: Brent Carpenetti
|
|
6
|
+
Author-email: brentcarpenetti@gmail.com
|
|
7
|
+
License: MIT
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Requires-Dist: pandas
|
|
10
|
+
Requires-Dist: pyarrow
|
|
11
|
+
Requires-Dist: numpy
|
|
12
|
+
Requires-Dist: openpyxl
|
|
13
|
+
Requires-Dist: requests
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
pyproject.toml
|
|
3
|
+
setup.py
|
|
4
|
+
src/datablade/__init__.py
|
|
5
|
+
src/datablade.egg-info/PKG-INFO
|
|
6
|
+
src/datablade.egg-info/SOURCES.txt
|
|
7
|
+
src/datablade.egg-info/dependency_links.txt
|
|
8
|
+
src/datablade.egg-info/requires.txt
|
|
9
|
+
src/datablade.egg-info/top_level.txt
|
|
10
|
+
src/datablade/core/__init__.py
|
|
11
|
+
src/datablade/core/frames.py
|
|
12
|
+
src/datablade/core/json.py
|
|
13
|
+
src/datablade/core/lists.py
|
|
14
|
+
src/datablade/core/messages.py
|
|
15
|
+
src/datablade/core/strings.py
|
|
16
|
+
src/datablade/core/zip.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
datablade
|