luxorasap 0.0.2__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luxorasap/__init__.py +1 -1
- luxorasap/datareader/core.py +24 -120
- luxorasap/ingest/__init__.py +23 -0
- luxorasap/ingest/cloud/__init__.py +54 -0
- luxorasap/ingest/legacy_local/dataloader.py +280 -0
- luxorasap/utils/dataframe/__init__.py +2 -0
- luxorasap/utils/dataframe/transforms.py +54 -0
- luxorasap/utils/storage/__init__.py +2 -0
- luxorasap/utils/storage/blob.py +91 -0
- {luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/METADATA +11 -3
- luxorasap-0.1.0.dist-info/RECORD +16 -0
- luxorasap-0.0.2.dist-info/RECORD +0 -10
- {luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/WHEEL +0 -0
- {luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/entry_points.txt +0 -0
- {luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/top_level.txt +0 -0
luxorasap/__init__.py
CHANGED
@@ -13,7 +13,7 @@ from types import ModuleType
 try:
     __version__: str = metadata.version(__name__)
 except metadata.PackageNotFoundError: # editable install
-    __version__ = "0.0.2"
+    __version__ = "0.1.0"

 # ─── Lazy loader ─────────────────────────────────────────────────
 def __getattr__(name: str) -> ModuleType:
luxorasap/datareader/core.py
CHANGED
@@ -1,6 +1,4 @@
 # Imports
-__version__ = "0.0.1"
-
 import pandas as pd
 import datetime as dt
 from datetime import timezone
@@ -9,20 +7,16 @@ import os, sys
 import time
 import numpy as np
 from scipy.optimize import newton
-from pathlib import Path
 import io
-
-
-
-
-
-
-
-
-    print('pip install pyarrow')
-    print('pip install python-dotenv')
-    sys.exit()
+from dotenv import load_dotenv
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+from luxorasap.utils.storage import BlobParquetClient
+load_dotenv()
+
+# Do not import ingest here, risk of a circular import.


 #ADLS_CONNECTION_STRING = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
@@ -32,22 +26,16 @@ class LuxorQuery:

     def __init__(self, update_mode="optimized", is_develop_mode=False, tables_path=None,
                     blob_directory='enriched/parquet', adls_connection_string:str=None, container_name="luxorasap"):
-
         """
         update_mode:
            'standard' - Loads every available table
            'optimized' - Loads only the tables that are used, on demand
         """
-
-        if adls_connection_string is None:
-            adls_connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')

+        self.blob_client = BlobParquetClient(adls_connection_string=adls_connection_string,
+                                             container=container_name)
         self.blob_directory = blob_directory
-        self.container_name = container_name

-        self.CONNECTION_STRING = adls_connection_string
-        self.blob_service_client = BlobServiceClient.from_connection_string(self.CONNECTION_STRING)
-        self.container_client = self.blob_service_client.get_container_client(self.container_name)

         self.modified_tables = []
         self.is_develop_mode = is_develop_mode
@@ -71,40 +59,12 @@ class LuxorQuery:
         self.update() # This 1st run initializes the dicts above


-
-    #def __set_tables_path(self):
-    #
-    #    cur_dir = Path().absolute()
-    #    cur_dir_items = cur_dir.parts
-    #    home = cur_dir.home()
-    #    onedrive_name = cur_dir_items[3]
-    #    if "OneDrive".lower() not in onedrive_name.lower():
-    #        logger.critical("In its current form, this module must be run from inside the OneDrive directory.")
-    #        #expected format is: "C:/Users/{user}/{onedrive_name}"
-    #
-    #    tables_path = Path(home)/onedrive_name/"projetos"/"LuxorASAP"/"luxorDB"/"tables"
-    #    if self.is_develop_mode:
-    #        tables_path = Path(home)/onedrive_name/"projetos"/"LuxorASAP_Develop"/"luxorDB"/"tables"
-    #
-    #    return tables_path
-
-
-    def __get_blob_update_time(self, table_name):
-
-        blob_name = f"{self.blob_directory}/{table_name}.parquet"
-        blob_client = self.blob_service_client.get_blob_client(container=self.container_name, blob=blob_name)
-
-        # Get the blob properties
-        properties = blob_client.get_blob_properties()
-        return properties['last_modified'].replace(tzinfo=timezone.utc).timestamp()
-
-
     def __is_table_modified(self, table_name):
         """ Returns True or False indicating whether the table given in 'table_name' was created or modified.

         Args:
             table_name (str): table name
-
+            table_path (str): path to the table in the blob
         Returns:
             bool: True if it was created or modified
         """
@@ -114,32 +74,13 @@ class LuxorQuery:

         try:
             file_path = self.tables_in_use[table_name]["table_path"]
-            file_last_update = self.__get_blob_update_time(table_name)
+            file_last_update = self.blob_client.get_df_update_time(file_path) #self.__get_blob_update_time(table_name)
             return file_last_update > self.tables_in_use[table_name]["update_time"]

         except:
             logger.critical(f"File <{file_path}> not found.")

             return False
-
-
-    def __persist_column_formatting(self, t):
-
-        columns_to_persist = {"Name", "Class", "Vehicles", "Segment"}
-
-        if len(set(t.columns).intersection(columns_to_persist)) > 0:
-            # Persist the formatting of a few columns
-            columns_order = list(t.columns)
-            columns_to_persist = list(set(t.columns).intersection(columns_to_persist))
-            persistent_data = t[columns_to_persist].copy()
-
-            columns_to_normalize = list(set(columns_order) - set(columns_to_persist))
-            t = self.text_to_lowercase(t[columns_to_normalize])
-            t.loc[:,columns_to_persist] = persistent_data
-            return t[columns_order]
-
-        # In every other case, lowercase everything
-        return self.text_to_lowercase(t)


     def __get_tickers_bbg(self):
@@ -152,15 +93,13 @@ class LuxorQuery:
     def table_exists(self, table_name, blob_directory=None):
         # Checks ADLS for a table with the given name

-        table_path = f"{blob_directory}/{table_name}.parquet"
         if blob_directory is None:
             blob_directory = self.blob_directory
-
+
+        table_path = f"{blob_directory}/{table_name}.parquet"

-
+        return self.blob_client.table_exists(table_path)

-        return blob_client.exists()
-

     def get_table(self, table_name, index=False, index_name="index", dtypes_override={}, force_reload=False):
         """
@@ -189,6 +128,7 @@ class LuxorQuery:
         dtypes_override: dict : set - Dictionary with the column dtypes that should be overridden.
                 Must contain the keys 'float', 'date', 'bool' and 'str_nan_format' (replaces 'nan' with pd.NA)
                 For each key, provide a set with the names of the columns that will receive the cast.
+
         """
         table_name = table_name.lower().replace(" ", "_")
         if table_name == 'bbg_tickers': return self.__get_tickers_bbg() # DEPRECATED TODO: remove after testing
@@ -201,47 +141,24 @@ class LuxorQuery:


     def __load_table(self, table_name, index=False, index_name="index", dtypes_override={}):
-
-
-        def __read_blob_parquet(table_name):
-
-            container_name = "luxorasap"
-            blob_name = f"{self.blob_directory}/{table_name}.parquet"
-
-            blob_client = self.blob_service_client.get_blob_client(container=container_name, blob=blob_name)
-
-            # Download the blob into memory
-            download_stream = None
-            try:
-                download_stream = blob_client.download_blob()
-            except Exception:
-                print(f"Table '{table_name}' not found in the blob.")
-                return None, False
-            parquet_data = download_stream.readall()
-
-            # Read the parquet from the in-memory stream
-            parquet_buffer = io.BytesIO(parquet_data)
-            table = pq.read_table(parquet_buffer)
-            df = table.to_pandas()
-
-            return df, True

-
         def __load_parquet(table_name):
             table_path = f"{self.blob_directory}/{table_name}.parquet" #self.tables_path/"parquet"/f"{table_name}.parquet"

             try:
                 #update_time = os.path.getmtime(table_path)
-                update_time = self.__get_blob_update_time(table_name)
                 table_data = None
                 # First, try to read from the blob
-                table_data, blob_read_success = __read_blob_parquet(table_name)
-
+                table_data, blob_read_success = self.blob_client.read_df(table_path) #__read_blob_parquet(table_name)
+
                 if not blob_read_success:
                     logger.critical(f"Could not load table '{table_name}' from the blob.")
                     #print("--> Onedrive fallback.")
                     #table_data = pd.read_parquet(table_path,engine="fastparquet")
-
+                update_time = self.blob_client.get_df_update_time(table_path)
+
+                assert(table_data is not None)
+
                 table_columns = set(table_data.columns)

                 float_dtypes = {"Last_Price", "Price", "px_last", "Quota", "#", "Avg_price", "Variation", "Variation_tot",
@@ -339,7 +256,7 @@ class LuxorQuery:
         except Exception:
             logger.error(f"Could not set column {index_name} as the index for table {table_name}.")

-        table_data = self.__persist_column_formatting(table_data)
+        #table_data = self.__persist_column_formatting(table_data)

         self.tables_in_use[table_name] = {"table_data" : table_data,
                                           "table_path" : table_path,
@@ -2909,19 +2826,6 @@ class LuxorQuery:
         assets = self.get_table("assets")
         name = assets.query("Ticker == @ticker")["Name"].squeeze()
         return name
-
-
-    def list_blob_files(self, sub_dir, ends_with=None):
-        """
-        Lists every file inside a directory in blob storage.
-        """
-
-        # List the files in the directory
-        blob_files = self.container_client.list_blobs(name_starts_with=sub_dir)
-
-        if ends_with is not None:
-            return [blob_file.name for blob_file in blob_files if blob_file.name.endswith(ends_with)]
-        return [blob_file.name for blob_file in blob_files]


     def simulate_portfolio_performance(self, portfolio: dict, portfolio_date: dt.date, adm_fee: float, performance_fee: float = 0):
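The net effect of this refactor is that LuxorQuery no longer talks to azure.storage.blob directly; every read, existence check, and timestamp lookup is delegated to the shared BlobParquetClient. A minimal consumer-side sketch, using the "assets" table that already appears in this file (the connection string is assumed to be supplied via AZURE_STORAGE_CONNECTION_STRING, which the new load_dotenv() call picks up):

from luxorasap.datareader import LuxorQuery

lq = LuxorQuery(update_mode="optimized")  # BlobParquetClient is built inside __init__
if lq.table_exists("assets"):             # checks enriched/parquet/assets.parquet in ADLS
    assets = lq.get_table("assets")       # read_df() + get_df_update_time() under the hood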
luxorasap/ingest/__init__.py
CHANGED
@@ -0,0 +1,23 @@
+"""Exports the "cloud" API by default and keeps the legacy DataLoader available."""
+
+from importlib import import_module
+from warnings import warn
+
+# Modern API (recommended)
+from .cloud import save_table, incremental_load # noqa: F401
+
+__all__ = ["save_table", "incremental_load"]
+
+# Bridge to the legacy loader -------------------------------------------------
+try:
+    legacy_mod = import_module(".legacy_local.dataloader", __name__)
+    DataLoader = legacy_mod.DataLoader # noqa: F401
+    warn(
+        "luxorasap.ingest.DataLoader is legacy and will be discontinued; "
+        "migrate to luxorasap.ingest.save_table / incremental_load.",
+        DeprecationWarning,
+        stacklevel=1,
+    )
+except Exception:
+    # if the legacy file does not exist, simply don't export it
+    pass
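The try/except bridge above keeps both import styles working during the migration. A short sketch of what callers see; note the warning fires once, when the package itself is imported, not when DataLoader is first used:

from luxorasap.ingest import save_table, incremental_load  # modern API, re-exported from .cloud
from luxorasap.ingest import DataLoader  # legacy symbol; the package import already emitted a DeprecationWarning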
luxorasap/ingest/cloud/__init__.py
ADDED
@@ -0,0 +1,54 @@
+"""Modern ingestion layer: writes / increments tables in ADLS (Parquet)."""
+
+import pandas as pd
+
+from luxorasap.utils.storage import BlobParquetClient
+from luxorasap.utils.dataframe import prep_for_save
+from luxorasap.datareader import LuxorQuery
+
+__all__ = ["save_table", "incremental_load"]
+
+_client = BlobParquetClient() # single instance for the module
+
+
+# ────────────────────────────────────────────────────────────────
+def save_table(
+    table_name: str,
+    df,
+    *,
+    index: bool = False,
+    index_name: str = "index",
+    normalize_columns: bool = False,
+    directory: str = "enriched/parquet",
+):
+    """Saves a DataFrame as Parquet in ADLS (overwriting)."""
+    df = prep_for_save(df, index=index, index_name=index_name, normalize=normalize_columns)
+    _client.write_df(df.astype(str), f"{directory}/{table_name}.parquet")
+
+
+def incremental_load(
+    lq: LuxorQuery,
+    table_name: str,
+    df,
+    *,
+    increment_column: str = "Date",
+    index: bool = False,
+    index_name: str = "index",
+    normalize_columns: bool = False,
+    directory: str = "enriched/parquet",
+):
+    """Concatenates the new data onto the existing table, trimming duplicates by date."""
+    if lq.table_exists(table_name):
+        prev = lq.get_table(table_name)
+        cutoff = df[increment_column].max()
+        prev = prev.query(f"{increment_column} < @cutoff")
+        df = pd.concat([prev, df], ignore_index=True)
+
+    save_table(
+        table_name,
+        df,
+        index=index,
+        index_name=index_name,
+        normalize_columns=normalize_columns,
+        directory=directory,
+    )
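A minimal usage sketch for the two entry points above; the table name "nav_history" and the sample values are hypothetical, and credentials are assumed to come from AZURE_STORAGE_CONNECTION_STRING:

import pandas as pd

from luxorasap.datareader import LuxorQuery
from luxorasap.ingest import save_table, incremental_load

df = pd.DataFrame({"Date": pd.to_datetime(["2024-01-02"]), "Quota": [1.37]})

save_table("nav_history", df)                      # overwrites enriched/parquet/nav_history.parquet
incremental_load(LuxorQuery(), "nav_history", df)  # trims stored rows with Date >= df["Date"].max(), then appends df

Note the cutoff semantics: stored rows at or after the incoming batch's max Date are dropped and rewritten from the new DataFrame, which makes re-running the same load idempotent for the most recent date.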
luxorasap/ingest/legacy_local/dataloader.py
ADDED
@@ -0,0 +1,280 @@
+import pandas as pd
+import datetime as dt
+import time
+import io
+import sys, os
+
+from loguru import logger
+from pathlib import Path
+
+from azure.storage.blob import BlobServiceClient
+import pyarrow as pa
+import pyarrow.parquet as pq
+from dotenv import load_dotenv
+load_dotenv()
+
+from luxorasap.datareader import LuxorQuery
+from luxorasap.utils.dataframe import transforms
+
+import warnings
+warnings.warn(
+    "luxorasap.ingest.legacy_local.dataloader is legacy; "
+    "use luxorasap.ingest.cloud in new routines.",
+    DeprecationWarning, stacklevel=2
+)
+
+
+
+class DataLoader:
+
+    def __init__(self, luxorDB_directory = None):
+        """Provides a standardized way of loading tables into luxorDB.
+        1. Has methods to load tables that are already in memory
+            - These are the methods with 'table' in the name
+        2. Has methods to load Excel files, with all of their sheets
+            Includes a method to check whether the file version changed
+            - These are the methods with 'file' in the name
+        Args:
+            luxorDB_directory (pathlib.Path, optional): Full path to the data's destination directory.
+        """
+        self.luxorDB_directory = luxorDB_directory
+
+        if self.luxorDB_directory is None:
+            self.luxorDB_directory = Path(__file__).absolute().parent/"LuxorDB"/"tables"
+
+        self.tracked_files = {}
+        self.tracked_tables = {}
+
+
+    def add_file_tracker(self, tracked_file_path, filetype="excel", sheet_names={},
+                            excel_size_limit = None,index=False, index_name="index",normalize_columns=False):
+        """ Adds a file to the list that is checked for changes
+        Args:
+            tracked_file_path (pathlib.Path): full path to the file,
+                        including file name and extension.
+            sheet_names (dict, optional): If it is a spreadsheet with several sheets, map
+                        the sheet name to the desired output file name here.
+        """
+        if tracked_file_path not in self.tracked_files:
+            self.tracked_files[tracked_file_path] = {
+                "last_mtime" : dt.datetime.timestamp(dt.datetime(2000,1,1)),
+                "filetype" : filetype, "sheet_names": sheet_names,
+                "excel_size_limit" : excel_size_limit,
+                "index" : index,
+                "index_name" : index_name,
+                "normalize_columns" : normalize_columns,
+            }
+
+
+    def add_table_tracker(self, table_name:str):
+        """ Adds a table to the change-tracking list."""
+
+        if table_name not in self.tracked_tables:
+            self.tracked_tables[table_name] = dt.datetime.timestamp(dt.datetime(2000,1,1))
+
+
+    def remove_file_tracker(self, tracked_file_path):
+
+        if tracked_file_path in self.tracked_files:
+            del self.tracked_files[tracked_file_path]
+
+
+    def remove_table_tracker(self, table_name:str):
+
+        if table_name in self.tracked_tables:
+            del self.tracked_tables[table_name]
+
+
+    def is_file_modified(self, tracked_file_path: Path) -> {bool, float}:
+        """ Checks whether the file was modified since the last read.
+        Returns:
+            tuple(bool, float): (was it modified?, timestamp of the last modification)
+        """
+
+        file_data = self.tracked_files[tracked_file_path]
+
+        last_saved_time = file_data["last_mtime"]
+        file_last_update = tracked_file_path.stat().st_mtime
+        return file_last_update > last_saved_time, file_last_update
+
+
+    def set_file_modified_time(self, tracked_file_path, file_mtime):
+
+        self.tracked_files[tracked_file_path]["last_mtime"] = file_mtime
+
+
+    def load_file_if_modified(self, tracked_file_path, export_to_blob=False, blob_directory='enriched/parquet'):
+        """Loads the file at the given path into the database if it was modified.
+        Args:
+            tracked_file_path (pathlib.Path): path to the file (previously registered via add_file_tracker)
+            type_map (_type_, optional): _description_. Defaults to None.
+            filetype (str, optional): _description_. Defaults to "excel".
+        """
+        file_data = self.tracked_files[tracked_file_path]
+
+        last_saved_time = file_data["last_mtime"]
+        filetype = file_data["filetype"]
+        file_sheets = file_data["sheet_names"]
+
+        file_last_update = tracked_file_path.stat().st_mtime
+
+        if file_last_update > last_saved_time: # The file changed
+            if filetype == "excel":
+                file_sheets = None if len(file_sheets) == 0 else list(file_sheets.keys())
+
+                # tables will always be a dict of tables
+                tables = None
+                trials = 25
+                t_counter = 1
+                while trials - t_counter > 0:
+                    try:
+                        tables = pd.read_excel(tracked_file_path, sheet_name=file_sheets)
+                        t_counter = trials # read finished
+                    except PermissionError:
+
+                        logger.error(f"Error trying to read file '{tracked_file_path}.\nAttempt {t_counter} of {trials};'.\nIf it is open, close it.")
+                        time.sleep(10)
+                        t_counter += 1
+
+                for sheet_name, table_data in tables.items():
+
+                    table_name = sheet_name if file_sheets is None else file_data["sheet_names"][sheet_name]
+
+                    if table_name == "trades":
+                        table_data["ID"] = table_data.index
+
+                    self.__export_table(table_name, table_data, index=file_data["index"], index_name=file_data["index_name"],
+                                        normalize_columns=file_data["normalize_columns"], export_to_blob=export_to_blob,
+                                        blob_directory=blob_directory)
+                self.tracked_files[tracked_file_path]["last_mtime"] = file_last_update
+
+
+    def load_table_if_modified(self, table_name, table_data, last_update, index=False, index_name="index", normalize_columns=False,
+                                do_not_load_excel=False, export_to_blob=False, blob_directory='enriched/parquet',
+                                is_data_in_bytes=False, bytes_extension=".xlsx"):
+        """
+        Args:
+            table_name (str): table name (the saved file will use the same name)
+            table_data (pd.DataFrame): data table
+            last_update (timestamp): timestamp of the last edit made to the table
+        """
+
+        if table_name not in self.tracked_tables:
+            self.add_table_tracker(table_name)
+
+
+        last_update_time = self.tracked_tables[table_name]
+        if last_update > last_update_time:
+
+            self.tracked_tables[table_name] = last_update
+            self.__export_table(table_name, table_data, index=index, index_name=index_name, normalize_columns=normalize_columns,
+                                do_not_load_excel=do_not_load_excel, export_to_blob=export_to_blob, blob_directory=blob_directory,
+                                is_data_in_bytes=is_data_in_bytes, bytes_extension=bytes_extension)
+
+
+    def scan_files(self, export_to_blob=False, blob_directory='enriched/parquet'):
+        """
+        For every registered file, checks for and loads the newer
+        version when there is one.
+        """
+
+        for file in self.tracked_files:
+
+            self.load_file_if_modified(file, export_to_blob=export_to_blob, blob_directory=blob_directory)
+
+
+    #def __load_bytes(self, content: bytes, extension=".xlsx") -> pd.DataFrame:
+    #    if extension == ".xlsx" or extension == "xlsx" or extension == "xls":
+    #        df = pd.read_excel(io.BytesIO(content), engine="openpyxl")
+    #
+    #        return df
+    #
+    #    raise ValueError(f'Extension {extension} not supported')
+
+    def __load_bytes(self, content: bytes, extension: str) -> pd.DataFrame:
+        extension = extension.lower()
+
+        if extension in [".xlsx", ".xls", "xlsx", "xls"]:
+            df = pd.read_excel(io.BytesIO(content), engine="openpyxl")
+            return df
+
+        if extension == ".csv":
+            try:
+                return pd.read_csv(io.BytesIO(content), encoding="utf-8")
+            except UnicodeDecodeError:
+                return pd.read_csv(io.BytesIO(content), encoding="latin1")
+
+        if extension == ".parquet":
+            df = pd.read_parquet(io.BytesIO(content))
+            return df
+
+        raise ValueError(f'Extension {extension} not supported')
+
+
+    def __export_table(self, table_name, table_data, index=False, index_name="index", normalize_columns=False,
+                        do_not_load_excel=False, export_to_blob=False, blob_directory='enriched/parquet',
+                        is_data_in_bytes=False, bytes_extension=".xlsx"):
+
+        dest_directory = self.luxorDB_directory
+        #TODO -> format for index=False
+        # Saving in Excel format
+        attempts = 10
+        count_attempt = 0
+
+        if is_data_in_bytes:
+            table_data = self.__load_bytes(table_data, extension=bytes_extension)
+
+        # If the index holds data, turn it into a column
+        if index:
+            # Handle the index name, in case it has to become a column
+            prev_index = table_data.index.name
+            if prev_index is not None and index_name == "index":
+                index_name = prev_index
+            table_data.index.name = index_name
+
+            table_data = table_data.reset_index()
+
+
+        if normalize_columns:
+            table_data = transforms.persist_column_formatting(table_data)
+
+        if not do_not_load_excel:
+            while count_attempt < attempts:
+                count_attempt += 1
+                try:
+                    if len(table_data) > 1_000_000:
+                        table_data = table_data.tail(1_000_000)
+                    table_data.to_excel(dest_directory/f"{table_name}.xlsx", index=False)
+                    count_attempt = attempts # leave the loop
+
+                except PermissionError:
+                    logger.error(f"Error trying to save file {table_name}. Close the file. Attempt {count_attempt} of {attempts}")
+                    time.sleep(10 + count_attempt * 5)
+
+        # Saving as csv
+        # -> Saving as csv was discontinued for lack of use.
+        #table_data.to_csv(dest_directory/"csv"/f"{table_name}.csv", sep=";", index=False)
+
+        # Saving as parquet (everything as string... dtypes must be assigned on read)
+        table_data = table_data.astype(str)
+        table_data.to_parquet(dest_directory/"parquet"/f"{table_name}.parquet", engine="pyarrow", index=False)
+
+        if export_to_blob:
+            # Define the container and the blob name
+            container_name = "luxorasap"
+            blob_name = f"{blob_directory}/{table_name}.parquet" #
+
+            # Convert to parquet in memory (no need to save locally)
+            table = pa.Table.from_pandas(table_data)
+            parquet_buffer = io.BytesIO()
+            pq.write_table(table, parquet_buffer)
+            parquet_buffer.seek(0) # Reset the pointer to the start of the buffer
+
+            # Connect to Blob Storage
+            connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
+            blob_service_client = BlobServiceClient.from_connection_string(conn_str=connection_string)
+
+            # Create a Blob Client
+            blob_client = blob_service_client.get_blob_client(container=container_name, blob=blob_name)
+            blob_client.upload_blob(parquet_buffer, overwrite=True)
+
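For reference while this module still ships, a sketch of the legacy flow it supports; the Excel path and sheet mapping are hypothetical:

from pathlib import Path
from luxorasap.ingest import DataLoader

dl = DataLoader()  # destination defaults to <module dir>/LuxorDB/tables
dl.add_file_tracker(Path("C:/data/trades.xlsx"), sheet_names={"Sheet1": "trades"})
dl.scan_files(export_to_blob=True)  # re-exports only files whose mtime moved forward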
luxorasap/utils/dataframe/transforms.py
ADDED
@@ -0,0 +1,54 @@
+import pandas as pd
+from ..storage.blob import BlobParquetClient # ok – utils → utils
+
+
+def text_to_lowercase(t: pd.DataFrame) -> pd.DataFrame:
+    """
+    Converts every text column to lowercase
+    Args:
+        t (pd.DataFrame): pandas DataFrame
+    Returns:
+        pd.DataFrame
+    """
+
+    return t.map(lambda x: x.lower().strip() if isinstance(x, str) else x)
+
+
+def persist_column_formatting(t: pd.DataFrame, columns_to_persist_override : set = {}) -> pd.DataFrame:
+    """
+    Persists the formatting of a few columns and lowercases the rest
+    Args:
+        t (pd.DataFrame): pandas DataFrame
+    Returns:
+        pd.DataFrame
+    """
+
+    columns_to_persist = {"Name", "Class", "Vehicles", "Segment"}
+    columns_to_persist = columns_to_persist.union(columns_to_persist_override)
+
+    if len(set(t.columns).intersection(columns_to_persist)) > 0:
+        # Persist the formatting of a few columns
+        columns_order = list(t.columns)
+        columns_to_persist = list(set(t.columns).intersection(columns_to_persist))
+        persistent_data = t[columns_to_persist].copy()
+
+        columns_to_normalize = list(set(columns_order) - set(columns_to_persist))
+        t = text_to_lowercase(t[columns_to_normalize])
+        t.loc[:,columns_to_persist] = persistent_data
+        return t[columns_order]
+
+    # In every other case, lowercase everything
+    return text_to_lowercase(t)
+
+
+def prep_for_save(
+    df: pd.DataFrame,
+    *,
+    index: bool = False,
+    index_name: str = "index",
+    normalize: bool = False,
+):
+    if index:
+        name = df.index.name or index_name
+        df = df.reset_index().rename(columns={"index": name})
+    return persist_column_formatting(df) if normalize else df
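A quick illustration of the helpers above; the sample frame is made up. prep_for_save is re-exported by luxorasap.utils.dataframe, which is how the cloud layer imports it:

import pandas as pd
from luxorasap.utils.dataframe import prep_for_save

df = pd.DataFrame(
    {"Name": ["Luxor FIM"], "Ticker": ["ABC123 Equity"]},
    index=pd.Index([7], name="ID"),
)
out = prep_for_save(df, index=True, normalize=True)
# The named index becomes an "ID" column; "Ticker" is lowercased and stripped,
# while "Name" keeps its original casing because it is in columns_to_persist.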
luxorasap/utils/storage/blob.py
ADDED
@@ -0,0 +1,91 @@
+import io, os
+from pathlib import PurePosixPath
+from datetime import timezone
+import pandas as pd
+import pyarrow as pa, pyarrow.parquet as pq
+from azure.storage.blob import BlobServiceClient
+
+class BlobParquetClient:
+    """Reads/writes Parquet in Azure Blob – stateless & reusable."""
+
+    def __init__(self, container: str = "luxorasap", adls_connection_string: str = None):
+        if adls_connection_string is None:
+            adls_connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
+
+        if adls_connection_string is None:
+            raise RuntimeError("AZURE_STORAGE_CONNECTION_STRING not set")
+        self._svc = BlobServiceClient.from_connection_string(adls_connection_string)
+        self._container = container
+
+    # ---------- public API ----------
+    def read_df(self, blob_path: str) -> (pd.DataFrame, bool):
+        buf = io.BytesIO()
+        try:
+            self._blob(blob_path).download_blob().readinto(buf)
+
+            buf.seek(0)
+            return pq.read_table(buf).to_pandas(), True
+        except Exception:
+            return None, False
+
+
+    def write_df(self, df, blob_path: str):
+        table = pa.Table.from_pandas(df)
+        buf = io.BytesIO()
+        pq.write_table(table, buf)
+        buf.seek(0)
+        self._blob(blob_path).upload_blob(buf, overwrite=True)
+
+
+    def get_df_update_time(self, blob_path: str) -> float:
+        try:
+            properties = self._blob(blob_path).get_blob_properties()
+            return properties['last_modified'].replace(tzinfo=timezone.utc).timestamp()
+        except Exception:
+            return 0.0
+
+
+    def exists_df(self, blob_path: str) -> bool:
+        try:
+            self._blob(blob_path).get_blob_properties()
+            return True
+        except Exception:
+            return False
+
+
+    def list_blob_files(self, blob_path: str, ends_with: str = None) -> list:
+        """
+        Lists the files in a blob storage directory.
+
+        Args:
+            blob_path (str): The directory path in blob storage.
+            ends_with (str, optional): Filters the files that end with this string. (E.g.: '.parquet')
+
+        Returns:
+            list: A list of blob names.
+
+        """
+        try:
+            container_client = self._svc.get_container_client(self._container)
+            blob_list = container_client.list_blobs(name_starts_with=blob_path)
+            if ends_with:
+                return [blob.name for blob in blob_list if blob.name.endswith(ends_with)]
+            return [blob.name for blob in blob_list]
+        except Exception:
+            return []
+
+
+    def table_exists(self, table_path: str) -> bool:
+        """
+        Checks whether a table exists in blob storage.
+        """
+        return self.exists_df(table_path)
+
+
+
+
+
+    # ---------- internal --------------
+    def _blob(self, path: str):
+        path = str(PurePosixPath(path))
+        return self._svc.get_blob_client(self._container, path)
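A minimal round-trip with the client above; the blob path is illustrative, and the connection string must be in the environment or passed explicitly:

import pandas as pd
from luxorasap.utils.storage import BlobParquetClient

client = BlobParquetClient()  # container defaults to "luxorasap"
client.write_df(pd.DataFrame({"a": ["1"]}), "enriched/parquet/demo.parquet")

df, ok = client.read_df("enriched/parquet/demo.parquet")            # returns (None, False) instead of raising
mtime = client.get_df_update_time("enriched/parquet/demo.parquet")  # POSIX timestamp; 0.0 when missing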
{luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: luxorasap
-Version: 0.0.2
+Version: 0.1.0
 Summary: Luxor’s unified toolbox for data ingestion, querying and analytics.
 Author-email: Luxor Group <backoffice@luxor.com.br>
 License: Proprietary – All rights reserved
@@ -18,10 +18,18 @@ Requires-Dist: pyarrow>=15.0
 Requires-Dist: requests>=2.32
 Requires-Dist: pydantic>=2.7
 Requires-Dist: scipy>=1.13
+Provides-Extra: storage
+Requires-Dist: azure-storage-blob>=12.19; extra == "storage"
+Requires-Dist: pyarrow>=15.0; extra == "storage"
+Provides-Extra: dataframe
+Requires-Dist: pandas>=2.2; extra == "dataframe"
 Provides-Extra: datareader
-Requires-Dist:
+Requires-Dist: luxorasap[dataframe,storage]; extra == "datareader"
+Requires-Dist: numpy>=1.25; extra == "datareader"
+Requires-Dist: scipy>=1.13; extra == "datareader"
 Provides-Extra: ingest
-Requires-Dist:
+Requires-Dist: luxorasap[dataframe,storage]; extra == "ingest"
+Requires-Dist: pandas>=2.2; extra == "ingest"
 Provides-Extra: btgapi
 Requires-Dist: requests>=2.32; extra == "btgapi"
 Requires-Dist: pydantic>=2.7; extra == "btgapi"
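With these extras in place the optional dependencies compose: for example, pip install "luxorasap[ingest]" now pulls in pandas plus the storage and dataframe extras (and azure-storage-blob and pyarrow through them), where the 0.0.2 metadata shipped empty Requires-Dist stubs.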
luxorasap-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+luxorasap/__init__.py,sha256=XLioUOz-0RizsKh06R0U3uYMG7EwTQ08p_GLVSQBXNY,1355
+luxorasap/datareader/__init__.py,sha256=41RAvbrQ4R6oj67S32CrKqolx0CJ2W8cbOF6g5Cqm2g,120
+luxorasap/datareader/core.py,sha256=LpXe5g4lZpfEqaz_gjjHizVA-vPEjBi5yJKg_7K0Nkw,153205
+luxorasap/ingest/__init__.py,sha256=XhxDTN2ar-u6UCPhnxNU_to-nWiit-SpQ6cA_N9eMSs,795
+luxorasap/ingest/cloud/__init__.py,sha256=V8cCNloP1RgPTEPsepHvWVL4m_t5geQuBORLm7x-OKQ,1729
+luxorasap/ingest/legacy_local/dataloader.py,sha256=zKPhuiBSFwkuWN6d8g2s60KkbVk1R_1cGMCtQM9j-0c,11908
+luxorasap/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+luxorasap/utils/dataframe/__init__.py,sha256=LXcMsv8kPFfIVzcwfA9lI-TExH6ty6a3NHBVBWyxfTA,161
+luxorasap/utils/dataframe/transforms.py,sha256=O5VxJHMV2g6zKLJc2O7F84wyF9c_hqo6kJOVjixLeI4,1757
+luxorasap/utils/storage/__init__.py,sha256=U3XRq94yzRp3kgBSUcRzs2tQgJ4o8h8a1ZzwiscA5XM,67
+luxorasap/utils/storage/blob.py,sha256=lJPN5VoTVheijUVp1zhSY51GnqapiYYDkypFTVtAb10,3086
+luxorasap-0.1.0.dist-info/METADATA,sha256=UD40ZCS8E2-jhopS3gTyy6jOaEOykQ71PnncCrVCjew,3018
+luxorasap-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+luxorasap-0.1.0.dist-info/entry_points.txt,sha256=XFh-dOwUhlya9DmGvgookMI0ezyUJjcOvTIHDEYS44g,52
+luxorasap-0.1.0.dist-info/top_level.txt,sha256=9YOL6bUIpzY06XFBRkUW1e4rgB32Ds91fQPGwUEjxzU,10
+luxorasap-0.1.0.dist-info/RECORD,,
luxorasap-0.0.2.dist-info/RECORD
DELETED
@@ -1,10 +0,0 @@
-luxorasap/__init__.py,sha256=UTn522ZJQ17wVxA8touIplzT0y21tzJL3XmsqAbq7XY,1355
-luxorasap/datareader/__init__.py,sha256=41RAvbrQ4R6oj67S32CrKqolx0CJ2W8cbOF6g5Cqm2g,120
-luxorasap/datareader/core.py,sha256=VURibG9qly5Q1Fu6rrgvXypERb39S535wKd-xc8g0uU,156887
-luxorasap/ingest/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-luxorasap/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-luxorasap-0.0.2.dist-info/METADATA,sha256=SrdjdL1Ce62fym3jfpsTYxI66NCuaRx_21GdwJwnQb0,2633
-luxorasap-0.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-luxorasap-0.0.2.dist-info/entry_points.txt,sha256=XFh-dOwUhlya9DmGvgookMI0ezyUJjcOvTIHDEYS44g,52
-luxorasap-0.0.2.dist-info/top_level.txt,sha256=9YOL6bUIpzY06XFBRkUW1e4rgB32Ds91fQPGwUEjxzU,10
-luxorasap-0.0.2.dist-info/RECORD,,
{luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/WHEEL
File without changes
{luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/entry_points.txt
File without changes
{luxorasap-0.0.2.dist-info → luxorasap-0.1.0.dist-info}/top_level.txt
File without changes