MindsDB 25.5.4.2__py3-none-any.whl → 25.6.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of MindsDB might be problematic.
- mindsdb/__about__.py +1 -1
- mindsdb/api/a2a/agent.py +50 -26
- mindsdb/api/a2a/common/server/server.py +32 -26
- mindsdb/api/a2a/task_manager.py +68 -6
- mindsdb/api/executor/command_executor.py +69 -14
- mindsdb/api/executor/datahub/datanodes/integration_datanode.py +49 -65
- mindsdb/api/executor/datahub/datanodes/mindsdb_tables.py +91 -84
- mindsdb/api/executor/datahub/datanodes/project_datanode.py +29 -48
- mindsdb/api/executor/datahub/datanodes/system_tables.py +35 -61
- mindsdb/api/executor/planner/plan_join.py +67 -77
- mindsdb/api/executor/planner/query_planner.py +176 -155
- mindsdb/api/executor/planner/steps.py +37 -12
- mindsdb/api/executor/sql_query/result_set.py +45 -64
- mindsdb/api/executor/sql_query/steps/fetch_dataframe.py +14 -18
- mindsdb/api/executor/sql_query/steps/fetch_dataframe_partition.py +17 -18
- mindsdb/api/executor/sql_query/steps/insert_step.py +13 -33
- mindsdb/api/executor/sql_query/steps/subselect_step.py +43 -35
- mindsdb/api/executor/utilities/sql.py +42 -48
- mindsdb/api/http/namespaces/config.py +1 -1
- mindsdb/api/http/namespaces/file.py +14 -23
- mindsdb/api/http/namespaces/knowledge_bases.py +132 -154
- mindsdb/api/mysql/mysql_proxy/data_types/mysql_datum.py +12 -28
- mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/binary_resultset_row_package.py +59 -50
- mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/resultset_row_package.py +9 -8
- mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py +449 -461
- mindsdb/api/mysql/mysql_proxy/utilities/dump.py +87 -36
- mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py +219 -28
- mindsdb/integrations/handlers/file_handler/file_handler.py +15 -9
- mindsdb/integrations/handlers/file_handler/tests/test_file_handler.py +43 -24
- mindsdb/integrations/handlers/litellm_handler/litellm_handler.py +10 -3
- mindsdb/integrations/handlers/llama_index_handler/requirements.txt +1 -1
- mindsdb/integrations/handlers/mysql_handler/mysql_handler.py +29 -33
- mindsdb/integrations/handlers/openai_handler/openai_handler.py +277 -356
- mindsdb/integrations/handlers/oracle_handler/oracle_handler.py +74 -51
- mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +305 -98
- mindsdb/integrations/handlers/salesforce_handler/salesforce_handler.py +145 -40
- mindsdb/integrations/handlers/salesforce_handler/salesforce_tables.py +136 -6
- mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py +352 -83
- mindsdb/integrations/libs/api_handler.py +279 -57
- mindsdb/integrations/libs/base.py +185 -30
- mindsdb/integrations/utilities/files/file_reader.py +99 -73
- mindsdb/integrations/utilities/handler_utils.py +23 -8
- mindsdb/integrations/utilities/sql_utils.py +35 -40
- mindsdb/interfaces/agents/agents_controller.py +226 -196
- mindsdb/interfaces/agents/constants.py +8 -1
- mindsdb/interfaces/agents/langchain_agent.py +42 -11
- mindsdb/interfaces/agents/mcp_client_agent.py +29 -21
- mindsdb/interfaces/agents/mindsdb_database_agent.py +23 -18
- mindsdb/interfaces/data_catalog/__init__.py +0 -0
- mindsdb/interfaces/data_catalog/base_data_catalog.py +54 -0
- mindsdb/interfaces/data_catalog/data_catalog_loader.py +375 -0
- mindsdb/interfaces/data_catalog/data_catalog_reader.py +38 -0
- mindsdb/interfaces/database/database.py +81 -57
- mindsdb/interfaces/database/integrations.py +222 -234
- mindsdb/interfaces/database/log.py +72 -104
- mindsdb/interfaces/database/projects.py +156 -193
- mindsdb/interfaces/file/file_controller.py +21 -65
- mindsdb/interfaces/knowledge_base/controller.py +66 -25
- mindsdb/interfaces/knowledge_base/evaluate.py +516 -0
- mindsdb/interfaces/knowledge_base/llm_client.py +75 -0
- mindsdb/interfaces/skills/custom/text2sql/mindsdb_kb_tools.py +83 -43
- mindsdb/interfaces/skills/skills_controller.py +31 -36
- mindsdb/interfaces/skills/sql_agent.py +113 -86
- mindsdb/interfaces/storage/db.py +242 -82
- mindsdb/migrations/versions/2025-05-28_a44643042fe8_added_data_catalog_tables.py +118 -0
- mindsdb/migrations/versions/2025-06-09_608e376c19a7_updated_data_catalog_data_types.py +58 -0
- mindsdb/utilities/config.py +13 -2
- mindsdb/utilities/log.py +35 -26
- mindsdb/utilities/ml_task_queue/task.py +19 -22
- mindsdb/utilities/render/sqlalchemy_render.py +129 -181
- mindsdb/utilities/starters.py +40 -0
- {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/METADATA +257 -257
- {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/RECORD +76 -68
- {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/WHEEL +0 -0
- {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/licenses/LICENSE +0 -0
- {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/top_level.txt +0 -0
mindsdb/api/executor/datahub/datanodes/integration_datanode.py

@@ -1,13 +1,11 @@
 import time
 import inspect
 from dataclasses import astuple
-from typing import Iterable
+from typing import Iterable, List
 
 import numpy as np
 import pandas as pd
-from sqlalchemy.types import (
-    Integer, Float
-)
+from sqlalchemy.types import Integer, Float
 
 from mindsdb_sql_parser.ast.base import ASTNode
 from mindsdb_sql_parser.ast import Insert, Identifier, CreateTable, TableColumn, DropTables
@@ -32,7 +30,7 @@ class DBHandlerException(Exception):
 
 
 class IntegrationDataNode(DataNode):
-    type = 'integration'
+    type = "integration"
 
     def __init__(self, integration_name, ds_type, integration_controller):
         self.integration_name = integration_name
@@ -46,15 +44,17 @@ class IntegrationDataNode(DataNode):
     def get_tables(self):
         response = self.integration_handler.get_tables()
         if response.type == RESPONSE_TYPE.TABLE:
-            result_dict = response.data_frame.to_dict(orient='records')
+            result_dict = response.data_frame.to_dict(orient="records")
             result = []
             for row in result_dict:
-
                 result.append(TablesRow.from_dict(row))
             return result
         else:
             raise Exception(f"Can't get tables: {response.error_message}")
 
+        result_dict = response.data_frame.to_dict(orient="records")
+        return [TablesRow.from_dict(row) for row in result_dict]
+
     def get_table_columns_df(self, table_name: str, schema_name: str | None = None) -> pd.DataFrame:
         """Get a DataFrame containing representation of information_schema.columns for the specified table.
 
@@ -66,7 +66,7 @@ class IntegrationDataNode(DataNode):
             pd.DataFrame: A DataFrame containing representation of information_schema.columns for the specified table.
             The DataFrame has list of columns as in the integrations.libs.response.INF_SCHEMA_COLUMNS_NAMES.
         """
-        if 'schema_name' in inspect.signature(self.integration_handler.get_columns).parameters:
+        if "schema_name" in inspect.signature(self.integration_handler.get_columns).parameters:
            response = self.integration_handler.get_columns(table_name, schema_name)
         else:
            response = self.integration_handler.get_columns(table_name)
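
Note: the hunk above keeps backward compatibility by inspecting the handler's signature at call time instead of versioning the API. A minimal standalone sketch of the pattern (the handler classes here are illustrative, not MindsDB's):

import inspect

class OldHandler:
    def get_columns(self, table_name):
        return [table_name]

class NewHandler:
    def get_columns(self, table_name, schema_name=None):
        return [schema_name, table_name]

def call_get_columns(handler, table_name, schema_name=None):
    # Forward schema_name only when the handler's get_columns accepts it;
    # older handlers expose get_columns(table_name) only.
    if "schema_name" in inspect.signature(handler.get_columns).parameters:
        return handler.get_columns(table_name, schema_name)
    return handler.get_columns(table_name)
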
@@ -81,18 +81,18 @@
         # region fallback for old handlers
         df = response.data_frame
         df.columns = [name.upper() for name in df.columns]
-        if 'FIELD' not in df.columns or 'TYPE' not in df.columns:
+        if "FIELD" not in df.columns or "TYPE" not in df.columns:
             logger.warning(
                 f"Response from the handler's `get_columns` call does not contain required columns: f{df.columns}"
             )
             return pd.DataFrame([], columns=astuple(INF_SCHEMA_COLUMNS_NAMES))
 
-        new_df = df[['FIELD', 'TYPE']]
-        new_df.columns = ['COLUMN_NAME', 'DATA_TYPE']
+        new_df = df[["FIELD", "TYPE"]]
+        new_df.columns = ["COLUMN_NAME", "DATA_TYPE"]
 
-        new_df[INF_SCHEMA_COLUMNS_NAMES.MYSQL_DATA_TYPE] = new_df[
-            INF_SCHEMA_COLUMNS_NAMES.DATA_TYPE
-        ].apply(lambda x: infer_mysql_type(x).value)
+        new_df[INF_SCHEMA_COLUMNS_NAMES.MYSQL_DATA_TYPE] = new_df[INF_SCHEMA_COLUMNS_NAMES.DATA_TYPE].apply(
+            lambda x: infer_mysql_type(x).value
+        )
 
         for column_name in astuple(INF_SCHEMA_COLUMNS_NAMES):
             if column_name in new_df.columns:
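
Note: the fallback above adapts legacy handlers whose get_columns() answers in MySQL DESCRIBE style (Field/Type) to the information_schema.columns shape the new code expects. The renaming step in isolation (the sample frame is illustrative):

import pandas as pd

# A legacy handler's DESCRIBE-style answer:
legacy = pd.DataFrame({"Field": ["id", "name"], "Type": ["int", "varchar(32)"]})

df = legacy.copy()
df.columns = [name.upper() for name in df.columns]  # -> FIELD, TYPE
new_df = df[["FIELD", "TYPE"]]
new_df.columns = ["COLUMN_NAME", "DATA_TYPE"]  # information_schema.columns shape
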
@@ -116,54 +116,50 @@ class IntegrationDataNode(DataNode):
         return df[INF_SCHEMA_COLUMNS_NAMES.COLUMN_NAME].to_list()
 
     def drop_table(self, name: Identifier, if_exists=False):
-        drop_ast = DropTables(
-            tables=[name],
-            if_exists=if_exists
-        )
+        drop_ast = DropTables(tables=[name], if_exists=if_exists)
         self.query(drop_ast)
 
-    def create_table(self, table_name: Identifier, result_set: ResultSet = None, columns: List[TableColumn] = None,
-                     is_replace: bool = False, is_create: bool = False, **kwargs) -> DataHubResponse:
+    def create_table(
+        self,
+        table_name: Identifier,
+        result_set: ResultSet = None,
+        columns: List[TableColumn] = None,
+        is_replace: bool = False,
+        is_create: bool = False,
+        raise_if_exists: bool = True,
+        **kwargs,
+    ) -> DataHubResponse:
         # is_create - create table
+        # if !raise_if_exists: error will be skipped
         # is_replace - drop table if exists
         # is_create==False and is_replace==False: just insert
 
         table_columns_meta = {}
 
         if columns is None:
-            columns = []
-
-            df = result_set.get_raw_df()
-
             columns: list[TableColumn] = result_set.get_ast_columns()
-            table_columns_meta = {
-                column.name: column.type
-                for column in columns
-            }
+            table_columns_meta = {column.name: column.type for column in columns}
 
         if is_replace:
             # drop
-            drop_ast = DropTables(
-                tables=[table_name],
-                if_exists=True
-            )
+            drop_ast = DropTables(tables=[table_name], if_exists=True)
             self.query(drop_ast)
             is_create = True
 
         if is_create:
-            create_table_ast = CreateTable(
-                name=table_name,
-                columns=columns,
-                is_replace=is_replace
-            )
-            self.query(create_table_ast)
+            create_table_ast = CreateTable(name=table_name, columns=columns, is_replace=is_replace)
+            try:
+                self.query(create_table_ast)
+            except Exception as e:
+                if raise_if_exists:
+                    raise e
 
         if result_set is None:
             # it is just a 'create table'
             return DataHubResponse()
 
         # native insert
-        if hasattr(self.integration_handler, 'insert'):
+        if hasattr(self.integration_handler, "insert"):
             df = result_set.to_df()
 
             result: HandlerResponse = self.integration_handler.insert(table_name.parts[-1], df)
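
Note: create_table now takes a raise_if_exists flag (default True): the CREATE TABLE is wrapped in try/except, so callers can request create-if-missing semantics and fall through to the insert when the table already exists. A simplified sketch of the new control flow, using the same AST nodes the module imports:

from mindsdb_sql_parser.ast import CreateTable, DropTables

def ensure_table(node, table_name, columns, is_replace=False, is_create=False, raise_if_exists=True):
    if is_replace:
        node.query(DropTables(tables=[table_name], if_exists=True))  # drop, then recreate
        is_create = True
    if is_create:
        create_table_ast = CreateTable(name=table_name, columns=columns, is_replace=is_replace)
        try:
            node.query(create_table_ast)
        except Exception as e:
            if raise_if_exists:
                raise e
            # if !raise_if_exists: the "table already exists" error is skipped
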
@@ -176,9 +172,9 @@
             column_type = table_columns_meta[col.alias]
 
             if column_type == Integer:
-                type_name = 'int'
+                type_name = "int"
             elif column_type == Float:
-                type_name = 'float'
+                type_name = "float"
             else:
                 continue
 
@@ -193,24 +189,19 @@
             # not need to insert
             return DataHubResponse()
 
-        insert_ast = Insert(
-            table=table_name,
-            columns=insert_columns,
-            values=values,
-            is_plain=True
-        )
+        insert_ast = Insert(table=table_name, columns=insert_columns, values=values, is_plain=True)
 
         try:
             result: DataHubResponse = self.query(insert_ast)
         except Exception as e:
-            msg = f'[{self.ds_type}/{self.integration_name}]: {str(e)}'
+            msg = f"[{self.ds_type}/{self.integration_name}]: {str(e)}"
             raise DBHandlerException(msg) from e
 
         return DataHubResponse(affected_rows=result.affected_rows)
 
     def has_support_stream(self) -> bool:
         # checks if data handler has query_stream method
-        return hasattr(self.integration_handler, 'query_stream') and callable(self.integration_handler.query_stream)
+        return hasattr(self.integration_handler, "query_stream") and callable(self.integration_handler.query_stream)
 
     @profiler.profile()
     def query_stream(self, query: ASTNode, fetch_size: int = None) -> Iterable:
@@ -230,24 +221,26 @@
             # metrics
             elapsed_seconds = time.perf_counter() - time_before_query
             query_time_with_labels = metrics.INTEGRATION_HANDLER_QUERY_TIME.labels(
-                get_class_name(self.integration_handler), result.type)
+                get_class_name(self.integration_handler), result.type
+            )
             query_time_with_labels.observe(elapsed_seconds)
 
             num_rows = 0
             if result.data_frame is not None:
                 num_rows = len(result.data_frame.index)
             response_size_with_labels = metrics.INTEGRATION_HANDLER_RESPONSE_SIZE.labels(
-                get_class_name(self.integration_handler), result.type)
+                get_class_name(self.integration_handler), result.type
+            )
             response_size_with_labels.observe(num_rows)
         except Exception as e:
             msg = str(e).strip()
-            if msg == '':
+            if msg == "":
                 msg = e.__class__.__name__
-            msg = f'[{self.ds_type}/{self.integration_name}]: {msg}'
+            msg = f"[{self.ds_type}/{self.integration_name}]: {msg}"
             raise DBHandlerException(msg) from e
 
         if result.type == RESPONSE_TYPE.ERROR:
-            raise Exception(f'Error in {self.integration_name}: {result.error_message}')
+            raise Exception(f"Error in {self.integration_name}: {result.error_message}")
         if result.type == RESPONSE_TYPE.OK:
             return DataHubResponse(affected_rows=result.affected_rows)
 
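
Note: the metrics block above follows the usual labeled-histogram pattern: .labels(...) resolves the series for this handler class and response type, then .observe(...) records one sample. A sketch assuming prometheus_client-style metric objects (the metric name and label values are illustrative):

import time
from prometheus_client import Histogram

QUERY_TIME = Histogram(
    "integration_handler_query_seconds", "Handler query time", ["handler", "response_type"]
)

time_before_query = time.perf_counter()
# ... run the handler query here ...
elapsed_seconds = time.perf_counter() - time_before_query
QUERY_TIME.labels("PostgresHandler", "table").observe(elapsed_seconds)
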
@@ -265,17 +258,8 @@
             logger.error(f"Issue with clearing DF from NaN values: {e}")
         # endregion
 
-        columns_info = [
-            {
-                'name': k,
-                'type': v
-            }
-            for k, v in df.dtypes.items()
-        ]
+        columns_info = [{"name": k, "type": v} for k, v in df.dtypes.items()]
 
         return DataHubResponse(
-            data_frame=df,
-            columns=columns_info,
-            affected_rows=result.affected_rows,
-            mysql_types=result.mysql_types
+            data_frame=df, columns=columns_info, affected_rows=result.affected_rows, mysql_types=result.mysql_types
         )
mindsdb/api/executor/datahub/datanodes/mindsdb_tables.py

@@ -26,18 +26,17 @@ def to_json(obj):
 def get_project_name(query: ASTNode = None):
     project_name = None
     if (
-            isinstance(query, Select)
-            and type(query.where) is BinaryOperation
-            and query.where.op == '='
-            and query.where.args[0].parts == ['project']
-            and isinstance(query.where.args[1], Constant)
+        isinstance(query, Select)
+        and type(query.where) is BinaryOperation
+        and query.where.op == "="
+        and query.where.args[0].parts == ["project"]
+        and isinstance(query.where.args[1], Constant)
     ):
         project_name = query.where.args[1].value
     return project_name
 
 
 class MdbTable(Table):
-
     visible: bool = True
 
 
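
Note: get_project_name only matches a query whose entire WHERE clause is the single equality project = '<name>'; any other shape yields None. An illustrative check against the guarded attributes (the parse_sql import path is assumed):

from mindsdb_sql_parser import parse_sql  # assumed top-level entry point

query = parse_sql("SELECT * FROM models WHERE project = 'my_project'")
assert query.where.op == "="
assert query.where.args[0].parts == ["project"]
assert query.where.args[1].value == "my_project"
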
@@ -75,27 +74,29 @@ class ModelsTable(MdbTable):
             table_name = row["name"]
             table_meta = row["metadata"]
 
-            data.append([
-                table_name,
-                table_meta['engine'],
-                project_name,
-                table_meta['active'],
-                table_meta['version'],
-                table_meta['status'],
-                table_meta['accuracy'],
-                table_meta['predict'],
-                table_meta['update_status'],
-                table_meta['mindsdb_version'],
-                table_meta['error'],
-                table_meta['select_data_query'],
-                to_json(table_meta['training_options']),
-                table_meta['current_training_phase'],
-                table_meta['total_training_phases'],
-                table_meta['training_phase_name'],
-                table_meta['label'],
-                row['created_at'],
-                table_meta['training_time']
-            ])
+            data.append(
+                [
+                    table_name,
+                    table_meta["engine"],
+                    project_name,
+                    table_meta["active"],
+                    table_meta["version"],
+                    table_meta["status"],
+                    table_meta["accuracy"],
+                    table_meta["predict"],
+                    table_meta["update_status"],
+                    table_meta["mindsdb_version"],
+                    table_meta["error"],
+                    table_meta["select_data_query"],
+                    to_json(table_meta["training_options"]),
+                    table_meta["current_training_phase"],
+                    table_meta["total_training_phases"],
+                    table_meta["training_phase_name"],
+                    table_meta["label"],
+                    row["created_at"],
+                    table_meta["training_time"],
+                ]
+            )
             # TODO optimise here
             # if target_table is not None and target_table != project_name:
             #     continue
@@ -110,12 +111,8 @@ class DatabasesTable(MdbTable):
 
     @classmethod
     def get_data(cls, session, inf_schema, **kwargs):
-
         project = inf_schema.database_controller.get_list(with_secrets=session.show_secrets)
-        data = [
-            [x["name"], x["type"], x["engine"], to_json(x.get("connection_data"))]
-            for x in project
-        ]
+        data = [[x["name"], x["type"], x["engine"], to_json(x.get("connection_data"))] for x in project]
 
         df = pd.DataFrame(data, columns=cls.columns)
         return df
@@ -123,17 +120,12 @@
 
 class MLEnginesTable(MdbTable):
     name = "ML_ENGINES"
-    columns = [
-        "NAME", "HANDLER", "CONNECTION_DATA"
-    ]
+    columns = ["NAME", "HANDLER", "CONNECTION_DATA"]
 
     @classmethod
     def get_data(cls, session, inf_schema, **kwargs):
-
         integrations = inf_schema.integration_controller.get_all(show_secrets=session.show_secrets)
-        ml_integrations = {
-            key: val for key, val in integrations.items() if val["type"] == "ml"
-        }
+        ml_integrations = {key: val for key, val in integrations.items() if val["type"] == "ml"}
 
         data = []
         for _key, val in ml_integrations.items():
@@ -158,7 +150,6 @@ class HandlersTable(MdbTable):
 
     @classmethod
     def get_data(cls, inf_schema, **kwargs):
-
         handlers = inf_schema.integration_controller.get_handlers_import_status()
 
         data = []
@@ -272,7 +263,7 @@ class TriggersTable(MdbTable):
         data = triggers_controller.get_list(project_name)
 
         columns = cls.mindsdb_columns
-        if inf_schema.session.api_type == 'sql':
+        if inf_schema.session.api_type == "sql":
             columns = columns + cls.columns
         columns_lower = [col.lower() for col in columns]
 
@@ -283,7 +274,7 @@
 
 
 class ChatbotsTable(MdbTable):
-    name = 'CHATBOTS'
+    name = "CHATBOTS"
     columns = [
         "NAME",
         "PROJECT",
@@ -319,62 +310,78 @@ class ChatbotsTable(MdbTable):
         # to list of lists
         data = []
         for row in chatbot_data:
-            row['params'] = to_json(row['params'])
+            row["params"] = to_json(row["params"])
             data.append([row[k] for k in columns_lower])
 
         return pd.DataFrame(data, columns=columns)
 
 
 class KBTable(MdbTable):
-    name = 'KNOWLEDGE_BASES'
-    columns = ['NAME', 'PROJECT', 'EMBEDDING_MODEL', 'RERANKING_MODEL', 'STORAGE', 'METADATA_COLUMNS', 'CONTENT_COLUMNS',
-               'ID_COLUMN', 'PARAMS', 'INSERT_STARTED_AT', 'INSERT_FINISHED_AT', 'PROCESSED_ROWS', 'ERROR', 'QUERY_ID']
+    name = "KNOWLEDGE_BASES"
+    columns = [
+        "NAME",
+        "PROJECT",
+        "EMBEDDING_MODEL",
+        "RERANKING_MODEL",
+        "STORAGE",
+        "METADATA_COLUMNS",
+        "CONTENT_COLUMNS",
+        "ID_COLUMN",
+        "PARAMS",
+        "INSERT_STARTED_AT",
+        "INSERT_FINISHED_AT",
+        "PROCESSED_ROWS",
+        "ERROR",
+        "QUERY_ID",
+    ]
 
     @classmethod
     def get_data(cls, query: ASTNode = None, inf_schema=None, **kwargs):
         project_name = get_project_name(query)
 
         from mindsdb.interfaces.knowledge_base.controller import KnowledgeBaseController
+
         controller = KnowledgeBaseController(inf_schema.session)
         kb_list = controller.list(project_name)
 
         # shouldn't be a lot of queries, we can fetch them all
-        queries_data = {
-            item['id']: item
-            for item in query_context_controller.list_queries()
-        }
+        queries_data = {item["id"]: item for item in query_context_controller.list_queries()}
 
         data = []
 
         for kb in kb_list:
-            vector_database_name = kb['vector_database'] or ''
-
             query_item = {}
-            query_id = kb['query_id']
+            query_id = kb["query_id"]
             if query_id is not None:
                 if query_id in queries_data:
                     query_item = queries_data.get(query_id)
                 else:
                     query_id = None
 
-            data.append((
-                kb['name'], kb['project_name'],
-                to_json(kb['embedding_model']),
-                to_json(kb['reranking_model']),
-                vector_database_name + '.' + kb['vector_database_table'],
-                to_json(kb['metadata_columns']),
-                to_json(kb['content_columns']),
-                kb['id_column'], to_json(kb['params']),
-                query_item.get('started_at'), query_item.get('finished_at'),
-                query_item.get('processed_rows'), query_item.get('error'),
-                query_id
-            ))
+            data.append(
+                (
+                    kb["name"],
+                    kb["project_name"],
+                    to_json(kb["embedding_model"]),
+                    to_json(kb["reranking_model"]),
+                    kb["vector_database"] + "." + kb["vector_database_table"],
+                    to_json(kb["metadata_columns"]),
+                    to_json(kb["content_columns"]),
+                    kb["id_column"],
+                    to_json(kb["params"]),
+                    query_item.get("started_at"),
+                    query_item.get("finished_at"),
+                    query_item.get("processed_rows"),
+                    query_item.get("error"),
+                    query_id,
+                )
+            )
 
         return pd.DataFrame(data, columns=cls.columns)
 
 
 class SkillsTable(MdbTable):
-    name = 'SKILLS'
+    name = "SKILLS"
     columns = ["NAME", "PROJECT", "TYPE", "PARAMS"]
 
     @classmethod
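
Note: KBTable.get_data joins knowledge bases to insert-status rows by first indexing the list_queries() output by id, then doing O(1) lookups per KB; a stale query_id (no longer in the index) is reported as None with empty status fields. The lookup pattern in isolation (the sample rows are illustrative):

queries = [{"id": 7, "started_at": "2025-06-10 12:00:00", "error": None}]
queries_data = {item["id"]: item for item in queries}

for kb in ({"name": "kb1", "query_id": 7}, {"name": "kb2", "query_id": 99}):
    query_item = {}
    query_id = kb["query_id"]
    if query_id is not None:
        if query_id in queries_data:
            query_item = queries_data.get(query_id)
        else:
            query_id = None  # stale reference: drop the id, keep empty status
    print(kb["name"], query_id, query_item.get("started_at"))
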
@@ -394,14 +401,8 @@ class SkillsTable(MdbTable):
 
 
 class AgentsTable(MdbTable):
-    name = 'AGENTS'
-    columns = [
-        "NAME",
-        "PROJECT",
-        "MODEL_NAME",
-        "SKILLS",
-        "PARAMS"
-    ]
+    name = "AGENTS"
+    columns = ["NAME", "PROJECT", "MODEL_NAME", "SKILLS", "PARAMS"]
 
     @classmethod
     def get_data(cls, query: ASTNode = None, inf_schema=None, **kwargs):
@@ -411,10 +412,7 @@ class AgentsTable(MdbTable):
         all_agents = agents_controller.get_agents(project_name)
 
         project_controller = ProjectController()
-        project_names = {
-            i.id: i.name
-            for i in project_controller.get_list()
-        }
+        project_names = {i.id: i.name for i in project_controller.get_list()}
 
         # NAME, PROJECT, MODEL, SKILLS, PARAMS
         data = [
@@ -423,7 +421,7 @@
                 project_names[a.project_id],
                 a.model_name,
                 [rel.skill.name for rel in a.skills_relationships],
-                to_json(a.params)
+                to_json(a.params),
             )
             for a in all_agents
         ]
@@ -431,12 +429,11 @@
 
 
 class ViewsTable(MdbTable):
-    name = 'VIEWS'
+    name = "VIEWS"
     columns = ["NAME", "PROJECT", "QUERY"]
 
     @classmethod
     def get_data(cls, query: ASTNode = None, **kwargs):
-
         project_name = get_project_name(query)
 
         data = ViewController().list(project_name)
@@ -450,9 +447,19 @@
 
 
 class QueriesTable(MdbTable):
-    name = 'QUERIES'
-    columns = ['ID', 'STARTED_AT', 'FINISHED_AT', 'PROCESSED_ROWS', 'ERROR', 'SQL', 'DATABASE',
-               'PARAMETERS', 'CONTEXT', 'UPDATED_AT']
+    name = "QUERIES"
+    columns = [
+        "ID",
+        "STARTED_AT",
+        "FINISHED_AT",
+        "PROCESSED_ROWS",
+        "ERROR",
+        "SQL",
+        "DATABASE",
+        "PARAMETERS",
+        "CONTEXT",
+        "UPDATED_AT",
+    ]
 
     @classmethod
     def get_data(cls, **kwargs):
|