pyEasyMatrixDb 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyeasymatrixdb/DbDriver.py +35 -0
- pyeasymatrixdb/__init__.py +3 -0
- pyeasymatrixdb/subclasses/DbDriverCore.py +53 -0
- pyeasymatrixdb/subclasses/DbDriverSearch.py +61 -0
- pyeasymatrixdb/subclasses/DbDriverUpdate.py +130 -0
- pyeasymatrixdb/subclasses/DbDriverUtils.py +417 -0
- pyeasymatrixdb/subclasses/__init__.py +11 -0
- pyeasymatrixdb-0.1.0.dist-info/METADATA +248 -0
- pyeasymatrixdb-0.1.0.dist-info/RECORD +11 -0
- pyeasymatrixdb-0.1.0.dist-info/WHEEL +5 -0
- pyeasymatrixdb-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from sqlalchemy import text, Engine, MetaData
|
|
4
|
+
|
|
5
|
+
from .subclasses.DbDriverSearch import DbDriverSearch
|
|
6
|
+
from .subclasses.DbDriverUpdate import DbDriverUpdate
|
|
7
|
+
from .subclasses.DbDriverUtils import DbDriverUtils
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DbDriver:
    """Facade for matrix-based database access.

    Exposes:
      - ``Pesquisar``: fluent SELECT interface (DbDriverSearch).
      - ``Atualizar``: fluent INSERT/UPDATE/DELETE interface (DbDriverUpdate).
      - ``execute`` / ``execute_stmt``: raw SQL escape hatches that still
        return results in the library's matrix format.
    """

    def __init__(self, metadata: MetaData, engine: Engine):
        self._metadata = metadata
        self._engine = engine
        self.Pesquisar = DbDriverSearch(metadata, engine)
        self.Atualizar = DbDriverUpdate(metadata, engine)

    @staticmethod
    def _result_to_matrix(result):
        """Convert a SQLAlchemy result into the matrix format.

        Row-returning statements become
        ``[["__result__", ...], [col, ...], [value, ...], ...]``; statements
        without rows become ``[["__meta__"], ["rowcount"], [n]]``.
        """
        if result.returns_rows:
            columns = list(result.keys())
            records = [list(row) for row in result]
            return DbDriverUtils.to_matrix_from_records(columns, records)
        return DbDriverUtils.to_meta_matrix(result.rowcount)

    def execute(self, query: str, dialect: str = ""):
        """Execute raw SQL text inside a transaction and return a matrix.

        ``dialect`` is unused; it is kept only for interface compatibility
        with the original specification.
        """
        del dialect
        with self._engine.begin() as conn:
            # Result must be consumed inside the transaction context.
            return self._result_to_matrix(conn.execute(text(query)))

    def execute_stmt(self, stmt):
        """Execute a SQLAlchemy statement inside a transaction and return a matrix."""
        with self._engine.begin() as conn:
            return self._result_to_matrix(conn.execute(stmt))
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, List
|
|
4
|
+
|
|
5
|
+
from .DbDriverUtils import DbDriverUtils
|
|
6
|
+
|
|
7
|
+
from sqlalchemy import Engine, MetaData
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DbDriverCore:
    """Shared base for the search and update drivers.

    Caches column and primary-key definitions extracted from the SQLAlchemy
    metadata. Fluent state (filters, relationships, ...) lives in public
    attributes that ``reset()`` removes.
    """

    def __init__(self, metadata: MetaData, engine: Engine):
        self._metadata = metadata
        self._engine = engine
        self._columns_definitions = DbDriverUtils.get_columns_definitions(metadata)
        self._primary_keys = DbDriverUtils.get_primary_keys(metadata)

    def reset(self):
        """Remove every public (non-underscore) attribute set by define_* calls."""
        public_names = [name for name in vars(self) if not name.startswith("_")]
        for name in public_names:
            delattr(self, name)

    def define_filter(self, filter: List[List[Any]]):
        """Validate a filter matrix against the schema and store it.

        Returns ``self`` so calls can be chained fluently.
        """
        positions, cleaned = DbDriverUtils.get_valid_columns(
            self._columns_definitions, filter
        )
        self.filter_positions = positions
        self.filter = cleaned
        return self

    def define_relationships(self, relationships: List[List[Any]]):
        """Validate join relationships (``[table_a, table_b, col_a, col_b, ...]``).

        ``None`` clears any stored relationships. Raises ValueError when a
        relationship is too short or references an unknown table/column.
        Returns ``self`` for chaining.
        """
        if relationships is None:
            self.relationships = []
            return self

        checked: List[List[Any]] = []
        for rel in relationships:
            if len(rel) < 4:
                raise ValueError("Relacionamento inválido. Esperado ao menos 4 colunas.")

            left_table, right_table, left_col, right_col = rel[0], rel[1], rel[2], rel[3]

            if not DbDriverUtils.is_valid_table(self._columns_definitions, left_table):
                raise ValueError(f"Tabela inválida em relacionamento: {left_table}")
            if not DbDriverUtils.is_valid_table(self._columns_definitions, right_table):
                raise ValueError(f"Tabela inválida em relacionamento: {right_table}")
            if left_col not in self._columns_definitions[left_table]:
                raise ValueError(f"Coluna inválida em relacionamento: {left_table}.{left_col}")
            if right_col not in self._columns_definitions[right_table]:
                raise ValueError(f"Coluna inválida em relacionamento: {right_table}.{right_col}")

            checked.append(rel)

        self.relationships = checked
        return self
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, List
|
|
4
|
+
|
|
5
|
+
from .DbDriverCore import DbDriverCore
|
|
6
|
+
from .DbDriverUtils import DbDriverUtils
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class DbDriverSearch(DbDriverCore):
    """Fluent SELECT driver working on matrix-shaped headers, filters and results."""

    def reset(self):
        """Clear all fluent state left by define_* calls."""
        super().reset()
        # Defensive: make sure the well-known attributes are gone even if the
        # base reset missed them.
        for stale in ("table", "header", "filter"):
            if hasattr(self, stale):
                delattr(self, stale)

    def define_header(self, header: List[List[Any]]):
        """Validate and store the output header matrix; returns self for chaining."""
        positions, cleaned = DbDriverUtils.get_valid_columns(
            self._columns_definitions, header
        )
        self.header_positions = positions
        self.header = cleaned
        # Base table is taken from the first valid (table, column) pair.
        self.table = cleaned[0][0] if cleaned and cleaned[0] else None
        return self

    def search(self, reset: bool = True, complete: bool = False, default: Any = None):
        """Run the configured SELECT and return a matrix.

        ``complete`` expands the result to every column of the tables involved
        (missing cells filled with ``default``); ``reset`` clears the fluent
        state afterwards.
        """
        if not hasattr(self, "header"):
            raise ValueError("Header não definido. Use define_header antes de pesquisar.")

        stmt = DbDriverUtils.buid_select(
            self._columns_definitions,
            self.header,
            relationships=getattr(self, "relationships", []),
            filters=getattr(self, "filter", []),
        )

        with self._engine.connect() as conn:
            rows = [list(record) for record in conn.execute(stmt)]

        matrix = [self.header[0], self.header[1], *rows]
        if complete:
            tables, headers, index_map = DbDriverUtils.expand_structure(
                self._columns_definitions,
                self.header,
                include_md=False,
            )
            matrix = DbDriverUtils.project_matrix(
                matrix,
                tables,
                headers,
                index_map,
                default=default,
            )

        if reset:
            self.reset()

        return matrix
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, List
|
|
4
|
+
|
|
5
|
+
from sqlalchemy import delete, insert, select, update
|
|
6
|
+
|
|
7
|
+
from .DbDriverCore import DbDriverCore
|
|
8
|
+
from .DbDriverUtils import DbDriverUtils
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class DbDriverUpdate(DbDriverCore):
    """Fluent write driver.

    Applies a data matrix whose rows carry an "MD" marker column:
    "U" = update, "A" = add (insert), "D" = delete. Row 0 of the matrix holds
    table names, row 1 column names, rows 2+ the values.
    """

    def define_data(self, data: List[List[Any]]):
        """Validate and store the data matrix to be applied by update().

        Returns self for chaining.
        """
        positions, valid_data = DbDriverUtils.get_valid_columns(self._columns_definitions, data)

        # The MD column may not be present in columns_definitions, so it is handled manually.
        if "MD" in data[1]:
            md_idx_original = data[1].index("MD")
            if md_idx_original not in positions:
                positions.append(md_idx_original)
                positions = sorted(positions)
                # Rebuild including the MD column; short rows are padded with None.
                valid_data = [[row[i] if i < len(row) else None for i in positions] for row in data]

        self.data_positions = positions
        self.data = valid_data
        return self

    def update(self, reset: bool = True, complete: bool = False, default: Any = None):
        """Apply the stored data matrix inside a single transaction.

        For each data row: "D" deletes by primary key; "U"/"A" first attempt an
        UPDATE by primary key and fall back to INSERT only when the row does
        not already exist. ``complete`` expands the returned matrix to all
        columns of the target table; ``reset`` clears fluent state afterwards.

        Raises ValueError on malformed data, invalid MD markers, deletes
        without a primary key, or inserts missing required columns.
        """
        if not hasattr(self, "data"):
            raise ValueError("Data não definida. Use define_data antes de atualizar.")

        data = self.data
        if len(data) < 3:
            raise ValueError("Data inválida. É necessário ao menos uma linha de dados.")
        if "MD" not in data[1]:
            raise ValueError('A última coluna deve se chamar "MD".')

        md_idx = data[1].index("MD")
        table_name = data[0][0]
        table_obj = self._columns_definitions[table_name][data[1][0]]["table_obj"]
        # NOTE(review): only the first primary key column is considered — see
        # DbDriverUtils.get_primary_keys; composite keys are not supported here.
        pk_col = self._primary_keys.get(table_name)
        pk_idx = data[1].index(pk_col) if pk_col in data[1] else None
        extra_filter = DbDriverUtils._build_filters(self._columns_definitions, getattr(self, "filter", []))

        with self._engine.begin() as conn:
            for row in data[2:]:
                # Rows too short to carry an MD marker are skipped silently.
                if md_idx >= len(row):
                    continue

                marker = row[md_idx]
                if marker not in ("U", "A", "D"):
                    raise ValueError(f"Valor inválido na coluna MD: {marker}")

                # Collect column values for this row, skipping the marker column.
                values = {}
                for idx, col_name in enumerate(data[1]):
                    if col_name == "MD":
                        continue
                    if idx < len(row):
                        values[col_name] = row[idx]

                pk_value = None
                if pk_idx is not None and pk_idx < len(row):
                    pk_value = row[pk_idx]

                if marker == "D":
                    if not pk_col or pk_idx is None or pk_value is None:
                        raise ValueError("Remoção exige chave primária presente no data.")

                    stmt = delete(table_obj).where(table_obj.c[pk_col] == pk_value)
                    if extra_filter is not None:
                        stmt = stmt.where(extra_filter)
                    conn.execute(stmt)
                    continue

                # U/A: try an update when a PK is available, otherwise insert.
                if pk_col and pk_idx is not None and pk_value is not None:
                    update_values = {k: v for k, v in values.items() if k != pk_col}
                    if update_values:
                        stmt = update(table_obj).where(table_obj.c[pk_col] == pk_value).values(**update_values)
                        if extra_filter is not None:
                            stmt = stmt.where(extra_filter)
                        updated = conn.execute(stmt).rowcount or 0
                        if updated > 0:
                            continue

                    # If the record already exists by PK, do not fall through to an
                    # insert just because the extra filter excluded it.
                    exists_stmt = select(table_obj.c[pk_col]).where(table_obj.c[pk_col] == pk_value).limit(1)
                    exists = conn.execute(exists_stmt).first() is not None
                    if exists:
                        continue

                # "MD" was already excluded above; the filter is kept defensively.
                insert_values = {k: v for k, v in values.items() if k != "MD"}

                # Reject inserts missing NOT NULL columns that have no default.
                missing_required = []
                for col_name, col_info in self._columns_definitions[table_name].items():
                    if col_name in insert_values:
                        continue
                    if col_info["primary"]:
                        continue
                    has_default = col_info["default"] is not None
                    if not col_info["nullable"] and not has_default:
                        missing_required.append(col_name)

                if missing_required:
                    raise ValueError(
                        "Insert inválido: faltam colunas obrigatórias sem default: "
                        + ", ".join(missing_required)
                    )

                conn.execute(insert(table_obj).values(**insert_values))

        if complete:
            target_tables, target_headers, source_index_map = DbDriverUtils.expand_structure(
                self._columns_definitions,
                data,
                include_md=True,
            )
            output = DbDriverUtils.project_matrix(
                data,
                target_tables,
                target_headers,
                source_index_map,
                default=default,
            )
        else:
            output = data

        if reset:
            self.reset()

        return output
|
|
@@ -0,0 +1,417 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List, Tuple
|
|
4
|
+
|
|
5
|
+
from sqlalchemy import MetaData, and_, delete, insert, or_, select, update
|
|
6
|
+
|
|
7
|
+
class DbDriverUtils:
    """Stateless helper namespace for the matrix-based DB drivers.

    Every method is static; the class only groups them and cannot be
    instantiated. The misspelled public names (``buid_select`` etc.) are kept
    as-is for backward compatibility with existing callers.
    """

    def __new__(cls, *args, **kwargs):
        # Guard against accidental instantiation: everything here is static.
        raise TypeError("This class cannot be instantiated")

    @staticmethod
    def get_columns_definitions(metadata: MetaData) -> Dict[str, Dict[str, Dict[str, Any]]]:
        """Return ``{table_name: {column_name: info}}`` extracted from *metadata*.

        Each info dict carries the column type, primary/unique/nullable flags,
        the scalar default (unwrapped from ColumnDefault via ``.arg`` when
        present) and references to the SQLAlchemy Table and Column objects.
        """
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]] = {}

        for table_name, table_obj in metadata.tables.items():
            columns_definitions[table_name] = {}
            for col in table_obj.columns:
                default_value = None
                if col.default is not None:
                    # ColumnDefault wraps the raw value in `.arg`; fall back to
                    # the default object itself otherwise.
                    default_value = getattr(col.default, "arg", col.default)

                columns_definitions[table_name][col.name] = {
                    "type": col.type,
                    "primary": bool(col.primary_key),
                    "unique": bool(col.unique),
                    "default": default_value,
                    "nullable": bool(col.nullable),
                    "table_obj": table_obj,
                    "column_obj": col,
                }

        return columns_definitions

    @staticmethod
    def get_primary_keys(metadata: MetaData) -> Dict[str, Any]:
        """Return ``{table_name: first_pk_column_name_or_None}``.

        NOTE(review): composite primary keys are truncated to their first
        column — confirm this is acceptable for all callers.
        """
        primary_keys: Dict[str, Any] = {}
        for table_name, table_obj in metadata.tables.items():
            pks = [col.name for col in table_obj.primary_key.columns]
            primary_keys[table_name] = pks[0] if pks else None
        return primary_keys

    @staticmethod
    def get_valid_columns(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        matrix: List[List[Any]],
    ) -> Tuple[List[int], List[List[Any]]]:
        """Keep only the matrix columns whose (table, column) pair exists.

        Returns the kept column indexes plus the filtered matrix. Data rows
        shorter than the header are padded with ``None`` rather than raising.
        Raises ValueError for malformed matrices or when nothing matches.
        """
        # The matrix needs at least the table row and the column row.
        if not matrix or len(matrix) < 2:
            raise ValueError("Matriz inválida. É necessário ter ao menos 2 linhas.")

        table_row = matrix[0]
        header_row = matrix[1]

        if len(table_row) != len(header_row):
            raise ValueError("Matriz inválida. O número de colunas na linha de tabelas e na linha de colunas deve ser o mesmo.")

        valid_positions: List[int] = [
            idx
            for idx in range(len(header_row))
            if table_row[idx] in columns_definitions
            and header_row[idx] in columns_definitions[table_row[idx]]
        ]

        if not valid_positions:
            raise ValueError("Nenhuma coluna válida foi encontrada na matriz.")

        # Rebuild keeping only the valid columns; pad ragged rows with None so
        # short data rows cannot raise IndexError here.
        valid_matrix = [
            [row[i] if i < len(row) else None for i in valid_positions]
            for row in matrix
        ]
        return valid_positions, valid_matrix

    @staticmethod
    def is_valid_table(columns_definitions: Dict[str, Dict[str, Any]], table: str) -> bool:
        """Return True when *table* is known to the schema."""
        return table in columns_definitions

    @staticmethod
    def buid_select(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        headers: List[List[Any]],
        relationships: List[List[Any]] | None = None,
        filters: List[List[Any]] | None = None,
    ):
        """Build a SELECT statement from a header matrix.

        ``relationships`` entries are ``[table_a, table_b, col_a, col_b,
        inner?]``; a falsy fifth element means LEFT OUTER JOIN. ``filters``
        follow the matrix filter format understood by ``_build_filters``.
        """
        if not headers or len(headers) < 2:
            raise ValueError("Header inválido para select.")

        columns = []
        for idx, column_name in enumerate(headers[1]):
            table_name = headers[0][idx]
            table_obj = columns_definitions[table_name][column_name]["table_obj"]
            columns.append(table_obj.c[column_name])

        base_table = columns_definitions[headers[0][0]][headers[1][0]]["table_obj"]
        from_clause = base_table

        for rel in reversed(relationships or []):
            if len(rel) < 4:
                continue

            table_a, table_b, col_a, col_b = rel[0], rel[1], rel[2], rel[3]
            # Fifth element (optional) selects inner vs outer join; default inner.
            inner = True if len(rel) < 5 else bool(rel[4])

            table_a_obj = columns_definitions[table_a][col_a]["table_obj"]
            table_b_obj = columns_definitions[table_b][col_b]["table_obj"]
            condition = table_a_obj.c[col_a] == table_b_obj.c[col_b]

            if inner:
                from_clause = from_clause.join(table_a_obj, condition)
            else:
                from_clause = from_clause.outerjoin(table_a_obj, condition)

        stmt = select(*columns).select_from(from_clause)
        filter_condition = DbDriverUtils._build_filters(columns_definitions, filters or [])
        if filter_condition is not None:
            stmt = stmt.where(filter_condition)

        return stmt

    @staticmethod
    def buid_update(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        data: List[List[Any]],
        relationships: List[List[Any]] | None = None,
        filters: List[List[Any]] | None = None,
    ):
        """Build a list of UPDATE statements from a data matrix.

        Rows must carry an "MD" marker as the last column; only "U"/"A" rows
        produce statements. Identification is either by primary key (then no
        filters/relationships allowed) or by filters (mandatory when no PK).
        """
        relationships = relationships or []
        filters = filters or []

        # The matrix needs the table row, the header row and at least one data row.
        if not data or len(data) < 3:
            raise ValueError("Dados inválidos para update.")

        table_name = data[0][0]
        # Every cell of the first row must name the same table.
        if any(t != table_name for t in data[0]):
            raise ValueError("Dados inválidos para update. A primeira linha deve conter o mesmo nome de tabela.")

        table_obj = columns_definitions[table_name][data[1][0]]["table_obj"]
        md_idx = len(data[1]) - 1
        if data[1][md_idx] != "MD":
            raise ValueError("A coluna MD é obrigatória.")

        pk_col = None
        for col_name, info in columns_definitions[table_name].items():
            if info["primary"]:
                pk_col = col_name
                break

        # Locate the PK in the header, if present.
        pk_idx = None
        if pk_col:
            if pk_col in data[1]:
                pk_idx = data[1].index(pk_col)

        # Valid combinations: filter without PK (with or without relationships),
        # or PK alone without filter/relationships.
        if pk_idx is None and not filters and not relationships:
            raise ValueError("Dados inválidos para update. É necessário ter uma chave primária ou um filtro/relacionamento para identificar as linhas a serem atualizadas.")
        if pk_idx is None:
            # BUG FIX: the original tested the *builtin* `filter` (always
            # truthy), which made this mandatory-filter check dead code.
            if not filters:
                raise ValueError("Dados inválidos para update. Filtro obrigatório quando não há chave primária.")
        else:
            if filters or relationships:
                raise ValueError("Dados inválidos para update. Não é permitido usar filtros ou relacionamentos quando a chave primária está presente.")

        stmts = []
        for row in data[2:]:
            # Only "U"/"A" markers produce an update statement.
            marker = row[md_idx]
            if marker not in ("U", "A"):
                continue

            # Collect the new values, skipping the MD and PK columns.
            values = {}
            for idx, col_name in enumerate(data[1]):
                if col_name in ("MD", pk_col):
                    continue
                if idx < len(row) and row[idx] is not None:
                    values[col_name] = row[idx]

            if values:
                stmt = update(table_obj)
                if pk_idx is not None:
                    stmt = stmt.where(table_obj.c[pk_col] == row[pk_idx]).values(**values)
                else:
                    # NOTE(review): this enforces exactly one updated *column*,
                    # but the message speaks of one *row* — confirm intent.
                    if len(values) != 1:
                        raise ValueError("Dados inválidos para update. Quando não há chave primária, deve haver exatamente uma linha com atualizações.")
                    extra_filter = DbDriverUtils._build_filters(columns_definitions, filters)
                    if not extra_filter:
                        raise ValueError("Dados inválidos para update. Filtro inválido ou sem correspondência.")
                    relationships_filter = None
                    if relationships:
                        for rel in reversed(relationships):
                            if len(rel) < 4:
                                continue

                            table_a, table_b, col_a, col_b = rel[0], rel[1], rel[2], rel[3]
                            table_a_obj = columns_definitions[table_a][col_a]["table_obj"]
                            table_b_obj = columns_definitions[table_b][col_b]["table_obj"]
                            condition = table_a_obj.c[col_a] == table_b_obj.c[col_b]

                            if relationships_filter is None:
                                relationships_filter = condition
                            else:
                                relationships_filter = and_(relationships_filter, condition)
                    if relationships_filter is not None:
                        stmt = stmt.where(relationships_filter)
                    stmt = stmt.where(extra_filter).values(**values)
                stmts.append(stmt)

        return stmts

    @staticmethod
    def buid_insert(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        data: List[List[Any]],
    ):
        """Build a single multi-row INSERT from a data matrix.

        Rows marked "D" in the MD column are skipped. Returns ``None`` when
        no insertable rows remain.
        """
        if not data or len(data) < 3:
            raise ValueError("Dados inválidos para insert.")

        table_name = data[0][0]
        table_obj = columns_definitions[table_name][data[1][0]]["table_obj"]
        md_idx = len(data[1]) - 1

        rows = []
        for row in data[2:]:
            # Skip rows explicitly marked for deletion.
            if md_idx != -1 and md_idx < len(row) and row[md_idx] in ("D",):
                continue

            values = {}
            for idx, col_name in enumerate(data[1]):
                if col_name == "MD":
                    continue
                if idx < len(row):
                    values[col_name] = row[idx]
            rows.append(values)

        if not rows:
            return None

        return insert(table_obj).values(rows)

    @staticmethod
    def buid_delete(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        keys: List[List[Any]] | None = None,
        relationships: List[List[Any]] | None = None,
        filters: List[List[Any]] | None = None,
    ):
        """Build one DELETE statement per key row in the *keys* matrix.

        NOTE(review): ``relationships`` is accepted but currently unused —
        confirm whether join-based deletes were intended.
        """
        del relationships  # accepted for interface symmetry; not used
        filters = filters or []

        if not keys or len(keys) < 3:
            raise ValueError("Dados inválidos para delete.")

        table_name = keys[0][0]
        table_obj = columns_definitions[table_name][keys[1][0]]["table_obj"]

        # The extra filter is invariant across rows: compute it once.
        extra_filter = DbDriverUtils._build_filters(columns_definitions, filters)

        stmts = []
        for row in keys[2:]:
            row_filters = [keys[0], keys[1], row]
            cond = DbDriverUtils._build_filters(columns_definitions, row_filters)
            if cond is None:
                continue

            stmt = delete(table_obj).where(cond)
            if extra_filter is not None:
                stmt = stmt.where(extra_filter)
            stmts.append(stmt)

        return stmts

    @staticmethod
    def _build_filters(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        filters: List[List[Any]],
    ):
        """Turn a filter matrix into a SQLAlchemy boolean expression.

        Cells within a row are AND-ed; rows are OR-ed. A cell may be a raw
        value (equality) or an ``(op, value)`` tuple with op in
        ``!=, >, >=, <, <=, like``. ``None`` cells are ignored. Returns
        ``None`` when no usable condition exists.
        """
        if not filters or len(filters) < 3:
            return None

        row_conditions = []
        for row in filters[2:]:
            col_conditions = []
            for idx, raw_value in enumerate(row):
                # Ignore cells without a matching (table, column) header.
                if idx >= len(filters[0]) or idx >= len(filters[1]):
                    continue
                if raw_value is None:
                    continue

                table_name = filters[0][idx]
                col_name = filters[1][idx]
                column = columns_definitions[table_name][col_name]["column_obj"]

                if isinstance(raw_value, tuple) and len(raw_value) == 2:
                    op, val = raw_value
                    val = DbDriverUtils._valid_info(str(column.type), val)
                    if op == "!=":
                        col_conditions.append(column != val)
                    elif op == ">":
                        col_conditions.append(column > val)
                    elif op == ">=":
                        col_conditions.append(column >= val)
                    elif op == "<":
                        col_conditions.append(column < val)
                    elif op == "<=":
                        col_conditions.append(column <= val)
                    elif op == "like":
                        col_conditions.append(column.like(val))
                    else:
                        # Unknown operators fall back to equality.
                        col_conditions.append(column == val)
                else:
                    # Plain values mean equality; coerce to the column's type.
                    value = DbDriverUtils._valid_info(str(column.type), raw_value)
                    col_conditions.append(column == value)

            if col_conditions:
                row_conditions.append(and_(*col_conditions))

        if not row_conditions:
            return None
        return or_(*row_conditions)

    @staticmethod
    def _valid_info(type_name: str, info: Any) -> Any:
        """Best-effort coercion of *info* to the SQL type named *type_name*.

        Falls back to the original value when conversion fails; ``None``
        passes through unchanged.
        """
        if info is None:
            return None

        type_upper = type_name.upper()
        try:
            if "INT" in type_upper:
                return int(info)
            if "FLOAT" in type_upper or "NUMERIC" in type_upper or "DECIMAL" in type_upper:
                return float(info)
            if "BOOL" in type_upper:
                if isinstance(info, bool):
                    return info
                return str(info).strip().lower() in ("1", "true", "t", "y", "yes")
            return info
        except (TypeError, ValueError):
            # Coercion is best-effort: let the database reject truly bad values.
            return info

    @staticmethod
    def to_matrix_from_records(column_names: List[str], records: List[List[Any]]) -> List[List[Any]]:
        """Wrap raw records in the matrix format with a "__result__" table row."""
        if not column_names:
            return []
        table_row = ["__result__" for _ in column_names]
        return [table_row, list(column_names), *records]

    @staticmethod
    def to_meta_matrix(rowcount: int) -> List[List[Any]]:
        """Matrix representation for statements without a result set."""
        return [["__meta__"], ["rowcount"], [rowcount]]

    @staticmethod
    def expand_structure(
        columns_definitions: Dict[str, Dict[str, Dict[str, Any]]],
        matrix: List[List[Any]],
        include_md: bool = False,
    ) -> Tuple[List[Any], List[Any], List[int]]:
        """Compute the full-column target header for every table in *matrix*.

        Returns ``(target_tables, target_headers, source_index_map)`` where
        the index map gives, for each target column, its position in the
        source matrix (-1 when absent). ``include_md`` keeps the "MD" marker
        column at the end.
        """
        if not matrix or len(matrix) < 2:
            raise ValueError("Matriz inválida. É necessário ter ao menos 2 linhas.")

        src_tables = list(matrix[0])
        src_headers = list(matrix[1])
        src_pairs = [(src_tables[i], src_headers[i]) for i in range(min(len(src_tables), len(src_headers)))]

        has_md = "MD" in src_headers
        md_idx = src_headers.index("MD") if has_md else -1

        # Preserve first-seen order of the known tables.
        tables_in_order: List[str] = []
        for t in src_tables:
            if t not in tables_in_order and t in columns_definitions:
                tables_in_order.append(t)

        target_tables: List[Any] = []
        target_headers: List[Any] = []
        for t in tables_in_order:
            for col_name in columns_definitions[t].keys():
                target_tables.append(t)
                target_headers.append(col_name)

        if include_md and has_md:
            target_tables.append(src_tables[md_idx])
            target_headers.append("MD")

        source_index_map: List[int] = []
        for i in range(len(target_headers)):
            target_pair = (target_tables[i], target_headers[i])
            idx = next((j for j, pair in enumerate(src_pairs) if pair == target_pair), -1)
            source_index_map.append(idx)

        return target_tables, target_headers, source_index_map

    @staticmethod
    def project_matrix(
        matrix: List[List[Any]],
        target_tables: List[Any],
        target_headers: List[Any],
        source_index_map: List[int],
        default: Any = None,
    ) -> List[List[Any]]:
        """Re-project *matrix* data rows onto the target header layout.

        Cells with no source column (index -1) or beyond a short row are
        filled with ``default``.
        """
        if not matrix or len(matrix) < 2:
            raise ValueError("Matriz inválida. É necessário ter ao menos 2 linhas.")

        out: List[List[Any]] = [target_tables, target_headers]
        for src_row in matrix[2:]:
            dst_row: List[Any] = []
            for src_idx in source_index_map:
                if src_idx == -1:
                    dst_row.append(default)
                elif src_idx < len(src_row):
                    dst_row.append(src_row[src_idx])
                else:
                    dst_row.append(default)
            out.append(dst_row)

        return out
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from .DbDriverCore import DbDriverCore
|
|
2
|
+
from .DbDriverSearch import DbDriverSearch
|
|
3
|
+
from .DbDriverUpdate import DbDriverUpdate
|
|
4
|
+
from .DbDriverUtils import DbDriverUtils
|
|
5
|
+
|
|
6
|
+
__all__ = [
|
|
7
|
+
"DbDriverCore",
|
|
8
|
+
"DbDriverSearch",
|
|
9
|
+
"DbDriverUpdate",
|
|
10
|
+
"DbDriverUtils",
|
|
11
|
+
]
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pyEasyMatrixDb
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Easiest way to search and update a database using matrix like queries
|
|
5
|
+
Author-email: Ricardo Nazar Rodrigues <ricardo@toxt.com.br>
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
Requires-Dist: sqlalchemy>=2.0
|
|
9
|
+
|
|
10
|
+
# pyEasyMatrixDb
|
|
11
|
+
|
|
12
|
+
Biblioteca Python para consultar e modificar bancos de dados usando **matrizes bidimensionais** como interface.
|
|
13
|
+
Abstrai SELECT, INSERT, UPDATE e DELETE via SQLAlchemy Core — funciona com qualquer banco suportado (SQLite, PostgreSQL, MySQL, etc.).
|
|
14
|
+
|
|
15
|
+
## Instalação
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install pyeasymatrixdb
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## GitHub
|
|
22
|
+
|
|
23
|
+
https://github.com/RicNazar/toxt-p10_dbdriver
|
|
24
|
+
|
|
25
|
+
## Início Rápido
|
|
26
|
+
|
|
27
|
+
```python
|
|
28
|
+
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, ForeignKey
|
|
29
|
+
|
|
30
|
+
# Cria uma engine em memória
|
|
31
|
+
engine = create_engine("sqlite+pysqlite:///:memory:")
|
|
32
|
+
metadata = MetaData()
|
|
33
|
+
|
|
34
|
+
users = Table("users", metadata,
|
|
35
|
+
Column("id", Integer, primary_key=True),
|
|
36
|
+
Column("name", String(100), nullable=False),
|
|
37
|
+
Column("email", String(150), nullable=False),
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
orders = Table("orders", metadata,
|
|
41
|
+
Column("id", Integer, primary_key=True),
|
|
42
|
+
Column("user_id", ForeignKey("users.id"), nullable=False),
|
|
43
|
+
Column("product", String(100), nullable=False),
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
# Cria as tabelas caso não existam
|
|
47
|
+
metadata.create_all(engine)
|
|
48
|
+
|
|
49
|
+
from pyeasymatrixdb import DbDriver
|
|
50
|
+
db = DbDriver(metadata, engine)
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## Funcionalidades
|
|
54
|
+
|
|
55
|
+
### 1. SQL puro — `execute()` / `execute_stmt()`
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
# SQL como texto
|
|
59
|
+
resultado = db.execute("SELECT id, name FROM users ORDER BY id")
|
|
60
|
+
# → [["__result__", "__result__"], ["id", "name"], [1, "Ana"], [2, "Bruno"]]
|
|
61
|
+
|
|
62
|
+
# Statement SQLAlchemy
|
|
63
|
+
from sqlalchemy import select
|
|
64
|
+
stmt = select(users.c.id, users.c.name).where(users.c.id >= 2)
|
|
65
|
+
resultado = db.execute_stmt(stmt)
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
Comandos sem retorno de linhas devolvem `[["__meta__"], ["rowcount"], [n]]`.
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
### 2. Pesquisa — `db.Pesquisar`
|
|
73
|
+
|
|
74
|
+
#### Pesquisa simples
|
|
75
|
+
|
|
76
|
+
```python
|
|
77
|
+
resultado = (
|
|
78
|
+
db.Pesquisar
|
|
79
|
+
.define_header([
|
|
80
|
+
["users", "users"],
|
|
81
|
+
["id", "name"],
|
|
82
|
+
])
|
|
83
|
+
.search()
|
|
84
|
+
)
|
|
85
|
+
# → [["users", "users"], ["id", "name"], [1, "Ana"], [2, "Bruno"], ...]
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
#### Pesquisa com JOIN + filtro
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
resultado = (
|
|
92
|
+
db.Pesquisar
|
|
93
|
+
.define_header([
|
|
94
|
+
["users", "orders", "orders"],
|
|
95
|
+
["name", "product", "status"],
|
|
96
|
+
])
|
|
97
|
+
.define_relationships([
|
|
98
|
+
["orders", "users", "user_id", "id", 1], # INNER JOIN
|
|
99
|
+
])
|
|
100
|
+
.define_filter([
|
|
101
|
+
["orders"],
|
|
102
|
+
["status"],
|
|
103
|
+
["OPEN"],
|
|
104
|
+
])
|
|
105
|
+
.search()
|
|
106
|
+
)
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
#### Filtro com OR (múltiplas linhas)
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
resultado = (
|
|
113
|
+
db.Pesquisar
|
|
114
|
+
.define_header([["users", "users"], ["id", "name"]])
|
|
115
|
+
.define_filter([
|
|
116
|
+
["users"],
|
|
117
|
+
["name"],
|
|
118
|
+
["Ana"], # OR
|
|
119
|
+
["Carla"],
|
|
120
|
+
])
|
|
121
|
+
.search()
|
|
122
|
+
)
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
#### Filtro com operadores
|
|
126
|
+
|
|
127
|
+
```python
|
|
128
|
+
.define_filter([
|
|
129
|
+
["users"],
|
|
130
|
+
["id"],
|
|
131
|
+
[(">=", 2)], # operadores: !=, >, >=, <, <=, like
|
|
132
|
+
])
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
#### Pesquisa completa (todas as colunas)
|
|
136
|
+
|
|
137
|
+
```python
|
|
138
|
+
resultado = db.Pesquisar.define_header(header).search(complete=True, default=None)
|
|
139
|
+
# Expande a saída para todas as colunas das tabelas envolvidas.
|
|
140
|
+
# Colunas ausentes são preenchidas com o valor de `default`.
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
---
|
|
144
|
+
|
|
145
|
+
### 3. Atualização — `db.Atualizar`
|
|
146
|
+
|
|
147
|
+
A coluna `MD` (última) controla a operação por linha:
|
|
148
|
+
|
|
149
|
+
- `"U"` / `"A"` → Upsert (UPDATE se PK existe ou exclusivo Filtro, INSERT caso contrário)
|
|
150
|
+
- `"D"` → DELETE (exige PK ou exclusivo Filtro)
|
|
151
|
+
|
|
152
|
+
#### Upsert (update + insert)
|
|
153
|
+
|
|
154
|
+
```python
|
|
155
|
+
resultado = (
|
|
156
|
+
db.Atualizar
|
|
157
|
+
.define_data([
|
|
158
|
+
["users", "users", "users", "users"],
|
|
159
|
+
["id", "name", "email", "MD" ],
|
|
160
|
+
[1, "Ana R.","ana@x.com", "U" ], # atualiza id=1
|
|
161
|
+
[5, "Novo", "novo@x.com","U" ], # insere id=5
|
|
162
|
+
])
|
|
163
|
+
.update()
|
|
164
|
+
)
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
#### Delete
|
|
168
|
+
|
|
169
|
+
```python
|
|
170
|
+
resultado = (
|
|
171
|
+
db.Atualizar
|
|
172
|
+
.define_data([
|
|
173
|
+
["orders", "orders", "orders"],
|
|
174
|
+
["id", "product", "MD" ],
|
|
175
|
+
[101, "Mouse", "D" ],
|
|
176
|
+
])
|
|
177
|
+
.update()
|
|
178
|
+
)
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
#### Atualização com filtro extra
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
resultado = (
|
|
185
|
+
db.Atualizar
|
|
186
|
+
.define_data([
|
|
187
|
+
["orders", "orders", "orders", "orders"],
|
|
188
|
+
["id", "user_id","status", "MD" ],
|
|
189
|
+
[100, 1, "CLOSED", "U" ],
|
|
190
|
+
])
|
|
191
|
+
.define_filter([
|
|
192
|
+
["orders"],
|
|
193
|
+
["status"],
|
|
194
|
+
["OPEN"], # só atualiza se status = "OPEN"
|
|
195
|
+
])
|
|
196
|
+
.update()
|
|
197
|
+
)
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
#### Retorno completo
|
|
201
|
+
|
|
202
|
+
```python
|
|
203
|
+
resultado = db.Atualizar.define_data(data).update(complete=True, default="<vazio>")
|
|
204
|
+
# Expande a saída para todas as colunas das tabelas, preenchendo ausentes com o default.
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
---
|
|
208
|
+
|
|
209
|
+
### 4. Encadeamento (fluent API)
|
|
210
|
+
|
|
211
|
+
Todos os métodos `define_*` retornam `self`, permitindo encadeamento:
|
|
212
|
+
|
|
213
|
+
```python
|
|
214
|
+
db.Pesquisar.define_header(h).define_relationships(r).define_filter(f).search()
|
|
215
|
+
db.Atualizar.define_data(d).define_filter(f).update()
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
### 5. Reset automático
|
|
219
|
+
|
|
220
|
+
Por padrão, `search()` e `update()` executam `reset()` após a operação, limpando header/filter/data para a próxima chamada. Passe `reset=False` para manter o estado.
|
|
221
|
+
|
|
222
|
+
---
|
|
223
|
+
|
|
224
|
+
## Formato das Matrizes
|
|
225
|
+
|
|
226
|
+
| Modelo | Linhas | Descrição |
|
|
227
|
+
| ----------------- | ------ | --------------------------------------------------------------------- |
|
|
228
|
+
| **header** | 2 | `[tabelas, colunas]` — define colunas do SELECT |
|
|
229
|
+
| **filter** | ≥ 3 | `[tabelas, colunas, valores...]` — AND entre colunas, OR entre linhas |
|
|
230
|
+
| **data** | ≥ 3 | `[tabelas, colunas, valores...]` — última coluna = `"MD"` |
|
|
231
|
+
| **relationships** | N | `[[tabelaA, tabelaB, colA, colB, inner?], ...]` |
|
|
232
|
+
|
|
233
|
+
## Estrutura do Projeto
|
|
234
|
+
|
|
235
|
+
```
|
|
236
|
+
app/
|
|
237
|
+
DbDriver/
|
|
238
|
+
DbDriver.py — Classe principal (execute, execute_stmt)
|
|
239
|
+
subclasses/
|
|
240
|
+
DbDriverCore.py — Classe base (reset, define_filter, define_relationships)
|
|
241
|
+
DbDriverSearch.py — Pesquisa (define_header, search)
|
|
242
|
+
DbDriverUpdate.py — Escrita (define_data, update)
|
|
243
|
+
DbDriverUtils.py — Utilitários estáticos (builders, validações)
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
## Licença
|
|
247
|
+
|
|
248
|
+
Consulte o arquivo de licença do repositório.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
pyeasymatrixdb/DbDriver.py,sha256=IY473MddJQAlb2yLCHWB5TuwIr5DKzILDCot2TZCOgI,1276
|
|
2
|
+
pyeasymatrixdb/__init__.py,sha256=8H2t3RhTOf1UmRcZ83CAnfY9MefOjHsrMgmwCa7FgTI,58
|
|
3
|
+
pyeasymatrixdb/subclasses/DbDriverCore.py,sha256=9_s4iYUR61KzqUiaErdAmo0PR59ryj9wXDMnSpfNtCI,2103
|
|
4
|
+
pyeasymatrixdb/subclasses/DbDriverSearch.py,sha256=K6ULdFWSctGCH6OGOKUHAoXPbR0bdtxxkmiYdNO3mmE,2077
|
|
5
|
+
pyeasymatrixdb/subclasses/DbDriverUpdate.py,sha256=D1066nxVzk58F4fH4aZGDcoVR7wVFVZwztRToCypgeM,5457
|
|
6
|
+
pyeasymatrixdb/subclasses/DbDriverUtils.py,sha256=djlE-5_n0NJUUOWYeaX_idEw7S08QMUwQct9ADRDMkQ,16739
|
|
7
|
+
pyeasymatrixdb/subclasses/__init__.py,sha256=DjnMY74SJjl9rm6rSOZgaL9UTETF5nlQzDd1cVGNfbA,277
|
|
8
|
+
pyeasymatrixdb-0.1.0.dist-info/METADATA,sha256=Z8DAldHE4ZnlAHULbiuLzc0qpyVi-genJuW_YEsf7_I,6529
|
|
9
|
+
pyeasymatrixdb-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
10
|
+
pyeasymatrixdb-0.1.0.dist-info/top_level.txt,sha256=VsJhZBUUDHyDZvxDXnoN5dg6f5bLNSDVBZaqAxbgirg,15
|
|
11
|
+
pyeasymatrixdb-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pyeasymatrixdb
|