MindsDB 25.4.3.2__py3-none-any.whl → 25.4.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of MindsDB might be problematic; see the release details page for more information.

Files changed (43)
  1. mindsdb/__about__.py +1 -1
  2. mindsdb/__main__.py +18 -4
  3. mindsdb/api/executor/data_types/response_type.py +1 -0
  4. mindsdb/api/executor/datahub/classes/tables_row.py +3 -10
  5. mindsdb/api/executor/datahub/datanodes/datanode.py +7 -2
  6. mindsdb/api/executor/datahub/datanodes/information_schema_datanode.py +44 -10
  7. mindsdb/api/executor/datahub/datanodes/integration_datanode.py +57 -38
  8. mindsdb/api/executor/datahub/datanodes/project_datanode.py +39 -7
  9. mindsdb/api/executor/datahub/datanodes/system_tables.py +116 -109
  10. mindsdb/api/executor/planner/query_planner.py +10 -1
  11. mindsdb/api/executor/planner/steps.py +8 -2
  12. mindsdb/api/executor/sql_query/steps/apply_predictor_step.py +5 -5
  13. mindsdb/api/executor/sql_query/steps/fetch_dataframe_partition.py +1 -1
  14. mindsdb/api/executor/sql_query/steps/insert_step.py +2 -1
  15. mindsdb/api/executor/sql_query/steps/prepare_steps.py +2 -3
  16. mindsdb/api/litellm/start.py +82 -0
  17. mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py +133 -0
  18. mindsdb/integrations/handlers/chromadb_handler/chromadb_handler.py +7 -2
  19. mindsdb/integrations/handlers/chromadb_handler/settings.py +1 -0
  20. mindsdb/integrations/handlers/mssql_handler/mssql_handler.py +13 -4
  21. mindsdb/integrations/handlers/mysql_handler/mysql_handler.py +14 -5
  22. mindsdb/integrations/handlers/oracle_handler/oracle_handler.py +14 -4
  23. mindsdb/integrations/handlers/pgvector_handler/pgvector_handler.py +34 -19
  24. mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +21 -18
  25. mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py +14 -4
  26. mindsdb/integrations/handlers/web_handler/urlcrawl_helpers.py +1 -1
  27. mindsdb/integrations/libs/response.py +80 -32
  28. mindsdb/integrations/utilities/rag/rerankers/reranker_compressor.py +208 -13
  29. mindsdb/interfaces/agents/litellm_server.py +345 -0
  30. mindsdb/interfaces/agents/mcp_client_agent.py +252 -0
  31. mindsdb/interfaces/agents/run_mcp_agent.py +205 -0
  32. mindsdb/interfaces/knowledge_base/controller.py +17 -7
  33. mindsdb/interfaces/skills/skill_tool.py +7 -1
  34. mindsdb/interfaces/skills/sql_agent.py +8 -3
  35. mindsdb/utilities/config.py +8 -1
  36. mindsdb/utilities/starters.py +7 -0
  37. {mindsdb-25.4.3.2.dist-info → mindsdb-25.4.4.0.dist-info}/METADATA +225 -223
  38. {mindsdb-25.4.3.2.dist-info → mindsdb-25.4.4.0.dist-info}/RECORD +42 -39
  39. {mindsdb-25.4.3.2.dist-info → mindsdb-25.4.4.0.dist-info}/WHEEL +1 -1
  40. mindsdb/integrations/handlers/snowflake_handler/tests/test_snowflake_handler.py +0 -230
  41. /mindsdb/{integrations/handlers/snowflake_handler/tests → api/litellm}/__init__.py +0 -0
  42. {mindsdb-25.4.3.2.dist-info → mindsdb-25.4.4.0.dist-info}/licenses/LICENSE +0 -0
  43. {mindsdb-25.4.3.2.dist-info → mindsdb-25.4.4.0.dist-info}/top_level.txt +0 -0
@@ -241,14 +241,23 @@ class SqlServerHandler(DatabaseHandler):
241
241
 
242
242
  query = f"""
243
243
  SELECT
244
- column_name as "Field",
245
- data_type as "Type"
244
+ COLUMN_NAME,
245
+ DATA_TYPE,
246
+ ORDINAL_POSITION,
247
+ COLUMN_DEFAULT,
248
+ IS_NULLABLE,
249
+ CHARACTER_MAXIMUM_LENGTH,
250
+ CHARACTER_OCTET_LENGTH,
251
+ NUMERIC_PRECISION,
252
+ NUMERIC_SCALE,
253
+ DATETIME_PRECISION,
254
+ CHARACTER_SET_NAME,
255
+ COLLATION_NAME
246
256
  FROM
247
257
  information_schema.columns
248
258
  WHERE
249
259
  table_name = '{table_name}'
250
260
  """
251
261
  result = self.native_query(query)
252
- if result.resp_type is RESPONSE_TYPE.TABLE:
253
- result.data_frame['mysql_data_type'] = result.data_frame['Type'].apply(_map_type)
262
+ result.to_columns_table_response(map_type_fn=_map_type)
254
263
  return result
@@ -231,14 +231,23 @@ class MySQLHandler(DatabaseHandler):
231
231
  """
232
232
  q = f"""
233
233
  select
234
- COLUMN_NAME AS FIELD, DATA_TYPE AS TYPE
234
+ COLUMN_NAME,
235
+ DATA_TYPE,
236
+ ORDINAL_POSITION,
237
+ COLUMN_DEFAULT,
238
+ IS_NULLABLE,
239
+ CHARACTER_MAXIMUM_LENGTH,
240
+ CHARACTER_OCTET_LENGTH,
241
+ NUMERIC_PRECISION,
242
+ NUMERIC_SCALE,
243
+ DATETIME_PRECISION,
244
+ CHARACTER_SET_NAME,
245
+ COLLATION_NAME
235
246
  from
236
247
  information_schema.columns
237
248
  where
238
- table_name = '{table_name}'
249
+ table_name = '{table_name}';
239
250
  """
240
251
  result = self.native_query(q)
241
- if result.resp_type is RESPONSE_TYPE.TABLE:
242
- result.data_frame = result.data_frame.rename(columns={'FIELD': 'Field', 'TYPE': 'Type'})
243
- result.data_frame['mysql_data_type'] = result.data_frame['Type'].apply(_map_type)
252
+ result.to_columns_table_response(map_type_fn=_map_type)
244
253
  return result
@@ -282,13 +282,23 @@ class OracleHandler(DatabaseHandler):
282
282
  """
283
283
  query = f"""
284
284
  SELECT
285
- column_name AS field,
286
- data_type AS type
285
+ COLUMN_NAME,
286
+ DATA_TYPE,
287
+ COLUMN_ID AS ORDINAL_POSITION,
288
+ DATA_DEFAULT AS COLUMN_DEFAULT,
289
+ CASE NULLABLE WHEN 'Y' THEN 'YES' ELSE 'NO' END AS IS_NULLABLE,
290
+ CHAR_LENGTH AS CHARACTER_MAXIMUM_LENGTH,
291
+ NULL AS CHARACTER_OCTET_LENGTH,
292
+ DATA_PRECISION AS NUMERIC_PRECISION,
293
+ DATA_SCALE AS NUMERIC_SCALE,
294
+ NULL AS DATETIME_PRECISION,
295
+ CHARACTER_SET_NAME,
296
+ NULL AS COLLATION_NAME
287
297
  FROM USER_TAB_COLUMNS
288
298
  WHERE table_name = '{table_name}'
299
+ ORDER BY TABLE_NAME, COLUMN_ID;
289
300
  """
290
301
  result = self.native_query(query)
291
302
  if result.resp_type is RESPONSE_TYPE.TABLE:
292
- result.data_frame.columns = [name.lower() for name in result.data_frame.columns]
293
- result.data_frame['mysql_data_type'] = result.data_frame['type'].apply(_map_type)
303
+ result.to_columns_table_response(map_type_fn=_map_type)
294
304
  return result
@@ -40,8 +40,31 @@ class PgVectorHandler(PostgresHandler, VectorStoreHandler):
40
40
  # we get these from the connection args on PostgresHandler parent
41
41
  self._is_sparse = self.connection_args.get('is_sparse', False)
42
42
  self._vector_size = self.connection_args.get('vector_size', None)
43
- if self._is_sparse and not self._vector_size:
44
- raise ValueError("vector_size is required when is_sparse=True")
43
+
44
+ if self._is_sparse:
45
+ if not self._vector_size:
46
+ raise ValueError("vector_size is required when is_sparse=True")
47
+
48
+ # Use inner product for sparse vectors
49
+ distance_op = "<#>"
50
+
51
+ else:
52
+ distance_op = '<=>'
53
+ if 'distance' in self.connection_args:
54
+ distance_ops = {
55
+ 'l1': '<+>',
56
+ 'l2': '<->',
57
+ 'ip': '<#>', # inner product
58
+ 'cosine': '<=>',
59
+ 'hamming': '<~>',
60
+ 'jaccard': '<%>'
61
+ }
62
+
63
+ distance_op = distance_ops.get(self.connection_args['distance'])
64
+ if distance_op is None:
65
+ raise ValueError(f'Wrong distance type. Allowed options are {list(distance_ops.keys())}')
66
+
67
+ self.distance_op = distance_op
45
68
  self.connect()
46
69
 
47
70
  def _make_connection_args(self):
@@ -224,20 +247,16 @@ class PgVectorHandler(PostgresHandler, VectorStoreHandler):
224
247
  from pgvector.utils import SparseVector
225
248
  embedding = SparseVector(search_vector, self._vector_size)
226
249
  search_vector = embedding.to_text()
227
- # Use inner product for sparse vectors
228
- distance_op = "<#>"
229
250
  else:
230
251
  # Convert list to vector string if needed
231
252
  if isinstance(search_vector, list):
232
253
  search_vector = f"[{','.join(str(x) for x in search_vector)}]"
233
- # Use cosine similarity for dense vectors
234
- distance_op = "<=>"
235
254
 
236
255
  # Calculate distance as part of the query if needed
237
256
  if has_distance:
238
- targets = f"{targets}, (embeddings {distance_op} '{search_vector}') as distance"
257
+ targets = f"{targets}, (embeddings {self.distance_op} '{search_vector}') as distance"
239
258
 
240
- return f"SELECT {targets} FROM {table_name} {where_clause} ORDER BY embeddings {distance_op} '{search_vector}' ASC {limit_clause} {offset_clause} "
259
+ return f"SELECT {targets} FROM {table_name} {where_clause} ORDER BY embeddings {self.distance_op} '{search_vector}' ASC {limit_clause} {offset_clause} "
241
260
 
242
261
  else:
243
262
  # if filter conditions, return rows that satisfy the conditions
@@ -418,18 +437,14 @@ class PgVectorHandler(PostgresHandler, VectorStoreHandler):
418
437
  """
419
438
  table_name = self._check_table(table_name)
420
439
 
421
- data_dict = data.to_dict(orient="list")
422
-
423
- if 'metadata' in data_dict:
424
- data_dict['metadata'] = [json.dumps(i) for i in data_dict['metadata']]
425
- transposed_data = list(zip(*data_dict.values()))
426
-
427
- columns = ", ".join(data.keys())
428
- values = ", ".join(["%s"] * len(data.keys()))
440
+ if 'metadata' in data.columns:
441
+ data['metadata'] = data['metadata'].apply(json.dumps)
429
442
 
430
- insert_statement = f"INSERT INTO {table_name} ({columns}) VALUES ({values})"
431
-
432
- self.raw_query(insert_statement, params=transposed_data)
443
+ resp = super().insert(table_name, data)
444
+ if resp.resp_type == RESPONSE_TYPE.ERROR:
445
+ raise RuntimeError(resp.error_message)
446
+ if resp.resp_type == RESPONSE_TYPE.TABLE:
447
+ return resp.data_frame
433
448
 
434
449
  def update(
435
450
  self, table_name: str, data: pd.DataFrame, key_columns: List[str] = None
@@ -1,7 +1,6 @@
1
1
  import time
2
2
  import json
3
3
  from typing import Optional
4
- import threading
5
4
 
6
5
  import pandas as pd
7
6
  import psycopg
@@ -43,7 +42,8 @@ def _map_type(internal_type_name: str) -> MYSQL_DATA_TYPE:
43
42
  ('real', 'money', 'float'): MYSQL_DATA_TYPE.FLOAT,
44
43
  ('numeric', 'decimal'): MYSQL_DATA_TYPE.DECIMAL,
45
44
  ('double precision',): MYSQL_DATA_TYPE.DOUBLE,
46
- ('character varying', 'varchar', 'character', 'char', 'bpchar', 'bpchar', 'text'): MYSQL_DATA_TYPE.TEXT,
45
+ ('character varying', 'varchar'): MYSQL_DATA_TYPE.VARCHAR,
46
+ ('character', 'char', 'bpchar', 'bpchar', 'text'): MYSQL_DATA_TYPE.TEXT,
47
47
  ('timestamp', 'timestamp without time zone', 'timestamp with time zone'): MYSQL_DATA_TYPE.DATETIME,
48
48
  ('date', ): MYSQL_DATA_TYPE.DATE,
49
49
  ('time', 'time without time zone', 'time with time zone'): MYSQL_DATA_TYPE.TIME,
@@ -76,9 +76,7 @@ class PostgresHandler(DatabaseHandler):
76
76
 
77
77
  self.connection = None
78
78
  self.is_connected = False
79
- self.thread_safe = True
80
-
81
- self._insert_lock = threading.Lock()
79
+ self.thread_safe = False
82
80
 
83
81
  def __del__(self):
84
82
  if self.is_connected:
@@ -266,15 +264,13 @@ class PostgresHandler(DatabaseHandler):
266
264
 
267
265
  columns = df.columns
268
266
 
269
- # postgres 'copy' is not thread safe. use lock to prevent concurrent execution
270
- with self._insert_lock:
271
- resp = self.get_columns(table_name)
267
+ resp = self.get_columns(table_name)
272
268
 
273
269
  # copy requires precise cases of names: get current column names from table and adapt input dataframe columns
274
270
  if resp.data_frame is not None and not resp.data_frame.empty:
275
271
  db_columns = {
276
272
  c.lower(): c
277
- for c in resp.data_frame['field']
273
+ for c in resp.data_frame['COLUMN_NAME']
278
274
  }
279
275
 
280
276
  # try to get case of existing column
@@ -288,11 +284,10 @@ class PostgresHandler(DatabaseHandler):
288
284
 
289
285
  with connection.cursor() as cur:
290
286
  try:
291
- with self._insert_lock:
292
- with cur.copy(f'copy "{table_name}" ({",".join(columns)}) from STDIN WITH CSV') as copy:
293
- df.to_csv(copy, index=False, header=False)
287
+ with cur.copy(f'copy "{table_name}" ({",".join(columns)}) from STDIN WITH CSV') as copy:
288
+ df.to_csv(copy, index=False, header=False)
294
289
 
295
- connection.commit()
290
+ connection.commit()
296
291
  except Exception as e:
297
292
  logger.error(f'Error running insert to {table_name} on {self.database}, {e}!')
298
293
  connection.rollback()
@@ -366,8 +361,18 @@ class PostgresHandler(DatabaseHandler):
366
361
  schema_name = 'current_schema()'
367
362
  query = f"""
368
363
  SELECT
369
- column_name as "Field",
370
- data_type as "Type"
364
+ COLUMN_NAME,
365
+ DATA_TYPE,
366
+ ORDINAL_POSITION,
367
+ COLUMN_DEFAULT,
368
+ IS_NULLABLE,
369
+ CHARACTER_MAXIMUM_LENGTH,
370
+ CHARACTER_OCTET_LENGTH,
371
+ NUMERIC_PRECISION,
372
+ NUMERIC_SCALE,
373
+ DATETIME_PRECISION,
374
+ CHARACTER_SET_NAME,
375
+ COLLATION_NAME
371
376
  FROM
372
377
  information_schema.columns
373
378
  WHERE
@@ -376,9 +381,7 @@ class PostgresHandler(DatabaseHandler):
376
381
  table_schema = {schema_name}
377
382
  """
378
383
  result = self.native_query(query)
379
- if result.resp_type is RESPONSE_TYPE.TABLE:
380
- result.data_frame.columns = [name.lower() for name in result.data_frame.columns]
381
- result.data_frame['mysql_data_type'] = result.data_frame['type'].apply(_map_type)
384
+ result.to_columns_table_response(map_type_fn=_map_type)
382
385
  return result
383
386
 
384
387
  def subscribe(self, stop_event, callback, table_name, columns=None, **kwargs):
@@ -340,14 +340,24 @@ class SnowflakeHandler(DatabaseHandler):
340
340
  raise ValueError("Invalid table name provided.")
341
341
 
342
342
  query = f"""
343
- SELECT COLUMN_NAME AS FIELD, DATA_TYPE AS TYPE
343
+ SELECT
344
+ COLUMN_NAME,
345
+ DATA_TYPE,
346
+ ORDINAL_POSITION,
347
+ COLUMN_DEFAULT,
348
+ IS_NULLABLE,
349
+ CHARACTER_MAXIMUM_LENGTH,
350
+ CHARACTER_OCTET_LENGTH,
351
+ NUMERIC_PRECISION,
352
+ NUMERIC_SCALE,
353
+ DATETIME_PRECISION,
354
+ CHARACTER_SET_NAME,
355
+ COLLATION_NAME
344
356
  FROM INFORMATION_SCHEMA.COLUMNS
345
357
  WHERE TABLE_NAME = '{table_name}'
346
358
  AND TABLE_SCHEMA = current_schema()
347
359
  """
348
360
  result = self.native_query(query)
349
- if result.resp_type is RESPONSE_TYPE.TABLE:
350
- result.data_frame = result.data_frame.rename(columns={'FIELD': 'Field', 'TYPE': 'Type'})
351
- result.data_frame['mysql_data_type'] = result.data_frame['Type'].apply(_map_type)
361
+ result.to_columns_table_response(map_type_fn=_map_type)
352
362
 
353
363
  return result
@@ -151,7 +151,7 @@ def get_all_website_links(url) -> dict:
151
151
  # Parse HTML content with BeautifulSoup
152
152
  soup = BeautifulSoup(content_html, "html.parser")
153
153
  content_text = get_readable_text_from_soup(soup)
154
- for a_tag in soup.findAll("a"):
154
+ for a_tag in soup.find_all("a"):
155
155
  href = a_tag.attrs.get("href")
156
156
  if href == "" or href is None:
157
157
  continue
@@ -1,5 +1,8 @@
1
- from typing import Optional
2
- from pandas import DataFrame
1
+ from typing import Callable
2
+ from dataclasses import dataclass, fields
3
+
4
+ import numpy
5
+ import pandas
3
6
 
4
7
  from mindsdb.utilities import log
5
8
  from mindsdb.api.executor.data_types.response_type import RESPONSE_TYPE
@@ -8,9 +11,36 @@ from mindsdb_sql_parser.ast import ASTNode
8
11
 
9
12
  logger = log.getLogger(__name__)
10
13
 
14
+
15
+ @dataclass(frozen=True)
16
+ class _INFORMATION_SCHEMA_COLUMNS_NAMES:
17
+ """Set of DataFrame columns that must be returned when calling `handler.get_columns(...)`.
18
+ These column names match the standard INFORMATION_SCHEMA.COLUMNS structure
19
+ used in SQL databases to describe table metadata.
20
+ """
21
+ COLUMN_NAME: str = 'COLUMN_NAME'
22
+ DATA_TYPE: str = 'DATA_TYPE'
23
+ ORDINAL_POSITION: str = 'ORDINAL_POSITION'
24
+ COLUMN_DEFAULT: str = 'COLUMN_DEFAULT'
25
+ IS_NULLABLE: str = 'IS_NULLABLE'
26
+ CHARACTER_MAXIMUM_LENGTH: str = 'CHARACTER_MAXIMUM_LENGTH'
27
+ CHARACTER_OCTET_LENGTH: str = 'CHARACTER_OCTET_LENGTH'
28
+ NUMERIC_PRECISION: str = 'NUMERIC_PRECISION'
29
+ NUMERIC_SCALE: str = 'NUMERIC_SCALE'
30
+ DATETIME_PRECISION: str = 'DATETIME_PRECISION'
31
+ CHARACTER_SET_NAME: str = 'CHARACTER_SET_NAME'
32
+ COLLATION_NAME: str = 'COLLATION_NAME'
33
+ MYSQL_DATA_TYPE: str = 'MYSQL_DATA_TYPE'
34
+
35
+
36
+ INF_SCHEMA_COLUMNS_NAMES = _INFORMATION_SCHEMA_COLUMNS_NAMES()
37
+ INF_SCHEMA_COLUMNS_NAMES_SET = set(f.name for f in fields(INF_SCHEMA_COLUMNS_NAMES))
38
+
39
+
40
+
11
41
  class HandlerResponse:
12
- def __init__(self, resp_type: RESPONSE_TYPE, data_frame: DataFrame = None, query: ASTNode = 0, error_code: int = 0,
13
- error_message: Optional[str] = None, affected_rows: Optional[int] = None) -> None:
42
+ def __init__(self, resp_type: RESPONSE_TYPE, data_frame: pandas.DataFrame = None, query: ASTNode = 0, error_code: int = 0,
43
+ error_message: str | None = None, affected_rows: int | None = None) -> None:
14
44
  self.resp_type = resp_type
15
45
  self.query = query
16
46
  self.data_frame = data_frame
@@ -24,6 +54,51 @@ class HandlerResponse:
24
54
  def type(self):
25
55
  return self.resp_type
26
56
 
57
+ def to_columns_table_response(self, map_type_fn: Callable) -> None:
58
+ """Transform the response to a `columns table` response.
59
+ NOTE: original dataframe will be mutated
60
+ """
61
+ if self.resp_type == RESPONSE_TYPE.COLUMNS_TABLE:
62
+ return
63
+ if self.resp_type != RESPONSE_TYPE.TABLE:
64
+ if self.resp_type == RESPONSE_TYPE.ERROR:
65
+ raise ValueError(
66
+ f"Cannot convert {self.resp_type} to {RESPONSE_TYPE.COLUMNS_TABLE}, "
67
+ f"the error is: {self.error_message}"
68
+ )
69
+ raise ValueError(f"Cannot convert {self.resp_type} to {RESPONSE_TYPE.COLUMNS_TABLE}")
70
+
71
+ self.data_frame.columns = [name.upper() for name in self.data_frame.columns]
72
+ self.data_frame[INF_SCHEMA_COLUMNS_NAMES.MYSQL_DATA_TYPE] = self.data_frame[
73
+ INF_SCHEMA_COLUMNS_NAMES.DATA_TYPE
74
+ ].apply(map_type_fn)
75
+
76
+ # region validate df
77
+ current_columns_set = set(self.data_frame.columns)
78
+ if INF_SCHEMA_COLUMNS_NAMES_SET != current_columns_set:
79
+ raise ValueError(
80
+ f'Columns set for INFORMATION_SCHEMA.COLUMNS is wrong: {list(current_columns_set)}'
81
+ )
82
+ # endregion
83
+
84
+ self.data_frame = self.data_frame.astype({
85
+ INF_SCHEMA_COLUMNS_NAMES.COLUMN_NAME: 'string',
86
+ INF_SCHEMA_COLUMNS_NAMES.DATA_TYPE: 'string',
87
+ INF_SCHEMA_COLUMNS_NAMES.ORDINAL_POSITION: 'Int32',
88
+ INF_SCHEMA_COLUMNS_NAMES.COLUMN_DEFAULT: 'string',
89
+ INF_SCHEMA_COLUMNS_NAMES.IS_NULLABLE: 'string',
90
+ INF_SCHEMA_COLUMNS_NAMES.CHARACTER_MAXIMUM_LENGTH: 'Int32',
91
+ INF_SCHEMA_COLUMNS_NAMES.CHARACTER_OCTET_LENGTH: 'Int32',
92
+ INF_SCHEMA_COLUMNS_NAMES.NUMERIC_PRECISION: 'Int32',
93
+ INF_SCHEMA_COLUMNS_NAMES.NUMERIC_SCALE: 'Int32',
94
+ INF_SCHEMA_COLUMNS_NAMES.DATETIME_PRECISION: 'Int32',
95
+ INF_SCHEMA_COLUMNS_NAMES.CHARACTER_SET_NAME: 'string',
96
+ INF_SCHEMA_COLUMNS_NAMES.COLLATION_NAME: 'string',
97
+ })
98
+ self.data_frame.replace([numpy.NaN, pandas.NA], None, inplace=True)
99
+
100
+ self.resp_type = RESPONSE_TYPE.COLUMNS_TABLE
101
+
27
102
  def to_json(self):
28
103
  try:
29
104
  data = None
@@ -49,6 +124,7 @@ class HandlerResponse:
49
124
  self.affected_rows
50
125
  )
51
126
 
127
+
52
128
  class HandlerStatusResponse:
53
129
  def __init__(self, success: bool = True,
54
130
  error_message: str = None,
@@ -70,31 +146,3 @@ class HandlerStatusResponse:
70
146
  return f"{self.__class__.__name__}: success={self.success},\
71
147
  error={self.error_message},\
72
148
  redirect_url={self.redirect_url}"
73
-
74
-
75
- class ExecutorResponse:
76
- def __init__(self, resp_type: RESPONSE_TYPE, query: object, error_code: int = 0, error_message: str = None):
77
- self.resp_type = resp_type
78
- self.query = query
79
-
80
- self.error_code = error_code
81
- self.error_message = error_message
82
-
83
- @property
84
- def type(self):
85
- return self.resp_type
86
-
87
- def to_json(self):
88
- return {"type": self.resp_type,
89
- "query": self.query,
90
- "error_code": self.error_code,
91
- "error": self.error_message}
92
-
93
- def __repr__(self):
94
- return "%s: resp_type=%s, query=%s, err_code=%s, error=%s" % (
95
- self.__class__.__name__,
96
- self.resp_type,
97
- self.query,
98
- self.error_code,
99
- self.error_message,
100
- )