MindsDB 25.5.4.2__py3-none-any.whl → 25.6.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MindsDB might be problematic.

Files changed (76)
  1. mindsdb/__about__.py +1 -1
  2. mindsdb/api/a2a/agent.py +50 -26
  3. mindsdb/api/a2a/common/server/server.py +32 -26
  4. mindsdb/api/a2a/task_manager.py +68 -6
  5. mindsdb/api/executor/command_executor.py +69 -14
  6. mindsdb/api/executor/datahub/datanodes/integration_datanode.py +49 -65
  7. mindsdb/api/executor/datahub/datanodes/mindsdb_tables.py +91 -84
  8. mindsdb/api/executor/datahub/datanodes/project_datanode.py +29 -48
  9. mindsdb/api/executor/datahub/datanodes/system_tables.py +35 -61
  10. mindsdb/api/executor/planner/plan_join.py +67 -77
  11. mindsdb/api/executor/planner/query_planner.py +176 -155
  12. mindsdb/api/executor/planner/steps.py +37 -12
  13. mindsdb/api/executor/sql_query/result_set.py +45 -64
  14. mindsdb/api/executor/sql_query/steps/fetch_dataframe.py +14 -18
  15. mindsdb/api/executor/sql_query/steps/fetch_dataframe_partition.py +17 -18
  16. mindsdb/api/executor/sql_query/steps/insert_step.py +13 -33
  17. mindsdb/api/executor/sql_query/steps/subselect_step.py +43 -35
  18. mindsdb/api/executor/utilities/sql.py +42 -48
  19. mindsdb/api/http/namespaces/config.py +1 -1
  20. mindsdb/api/http/namespaces/file.py +14 -23
  21. mindsdb/api/http/namespaces/knowledge_bases.py +132 -154
  22. mindsdb/api/mysql/mysql_proxy/data_types/mysql_datum.py +12 -28
  23. mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/binary_resultset_row_package.py +59 -50
  24. mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/resultset_row_package.py +9 -8
  25. mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py +449 -461
  26. mindsdb/api/mysql/mysql_proxy/utilities/dump.py +87 -36
  27. mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py +219 -28
  28. mindsdb/integrations/handlers/file_handler/file_handler.py +15 -9
  29. mindsdb/integrations/handlers/file_handler/tests/test_file_handler.py +43 -24
  30. mindsdb/integrations/handlers/litellm_handler/litellm_handler.py +10 -3
  31. mindsdb/integrations/handlers/llama_index_handler/requirements.txt +1 -1
  32. mindsdb/integrations/handlers/mysql_handler/mysql_handler.py +29 -33
  33. mindsdb/integrations/handlers/openai_handler/openai_handler.py +277 -356
  34. mindsdb/integrations/handlers/oracle_handler/oracle_handler.py +74 -51
  35. mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +305 -98
  36. mindsdb/integrations/handlers/salesforce_handler/salesforce_handler.py +145 -40
  37. mindsdb/integrations/handlers/salesforce_handler/salesforce_tables.py +136 -6
  38. mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py +352 -83
  39. mindsdb/integrations/libs/api_handler.py +279 -57
  40. mindsdb/integrations/libs/base.py +185 -30
  41. mindsdb/integrations/utilities/files/file_reader.py +99 -73
  42. mindsdb/integrations/utilities/handler_utils.py +23 -8
  43. mindsdb/integrations/utilities/sql_utils.py +35 -40
  44. mindsdb/interfaces/agents/agents_controller.py +226 -196
  45. mindsdb/interfaces/agents/constants.py +8 -1
  46. mindsdb/interfaces/agents/langchain_agent.py +42 -11
  47. mindsdb/interfaces/agents/mcp_client_agent.py +29 -21
  48. mindsdb/interfaces/agents/mindsdb_database_agent.py +23 -18
  49. mindsdb/interfaces/data_catalog/__init__.py +0 -0
  50. mindsdb/interfaces/data_catalog/base_data_catalog.py +54 -0
  51. mindsdb/interfaces/data_catalog/data_catalog_loader.py +375 -0
  52. mindsdb/interfaces/data_catalog/data_catalog_reader.py +38 -0
  53. mindsdb/interfaces/database/database.py +81 -57
  54. mindsdb/interfaces/database/integrations.py +222 -234
  55. mindsdb/interfaces/database/log.py +72 -104
  56. mindsdb/interfaces/database/projects.py +156 -193
  57. mindsdb/interfaces/file/file_controller.py +21 -65
  58. mindsdb/interfaces/knowledge_base/controller.py +66 -25
  59. mindsdb/interfaces/knowledge_base/evaluate.py +516 -0
  60. mindsdb/interfaces/knowledge_base/llm_client.py +75 -0
  61. mindsdb/interfaces/skills/custom/text2sql/mindsdb_kb_tools.py +83 -43
  62. mindsdb/interfaces/skills/skills_controller.py +31 -36
  63. mindsdb/interfaces/skills/sql_agent.py +113 -86
  64. mindsdb/interfaces/storage/db.py +242 -82
  65. mindsdb/migrations/versions/2025-05-28_a44643042fe8_added_data_catalog_tables.py +118 -0
  66. mindsdb/migrations/versions/2025-06-09_608e376c19a7_updated_data_catalog_data_types.py +58 -0
  67. mindsdb/utilities/config.py +13 -2
  68. mindsdb/utilities/log.py +35 -26
  69. mindsdb/utilities/ml_task_queue/task.py +19 -22
  70. mindsdb/utilities/render/sqlalchemy_render.py +129 -181
  71. mindsdb/utilities/starters.py +40 -0
  72. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/METADATA +257 -257
  73. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/RECORD +76 -68
  74. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/WHEEL +0 -0
  75. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/top_level.txt +0 -0
@@ -15,7 +15,10 @@ OPEN_AI_CHAT_MODELS = (
     "gpt-4-32k",
     "gpt-4-1106-preview",
     "gpt-4-0125-preview",
+    "gpt-4.1",
+    "gpt-4.1-mini",
     "gpt-4o",
+    "o4-mini",
     "o3-mini",
     "o1-mini",
 )
@@ -216,8 +219,12 @@ You are an AI assistant powered by MindsDB. When answering questions, follow the
    - Finally use kb_query_tool to query the knowledge base for specific information
 
 2. For questions about database tables and their contents:
-   - Use the sql_tool to query the tables directly
+   - Use the sql_db_query to query the tables directly
    - You can join tables if needed to get comprehensive information
+   - You are running on a federated query engine, so joins across multiple databases are allowed and supported
+   - **Important Rule for SQL Queries:** If you formulate an SQL query as part of answering a user's question, you *must* then use the `sql_db_query` tool to execute that query and get its results. The SQL query string itself is NOT the final answer to the user unless the user has specifically asked for the query. Your final AI response should be based on the *results* obtained from executing the query.
+
 
 For factual questions, ALWAYS use the available tools to look up information rather than relying on your internal knowledge.
+
 """
@@ -226,7 +226,7 @@ def process_chunk(chunk):
 
 
 class LangchainAgent:
-    def __init__(self, agent: db.Agents, model: dict = None):
+    def __init__(self, agent: db.Agents, model: dict = None, params: dict = None):
         self.agent = agent
         self.model = model
 
@@ -239,16 +239,35 @@ class LangchainAgent:
         self.mdb_langfuse_callback_handler: Optional[object] = None  # custom (see langfuse_callback_handler.py)
 
         self.langfuse_client_wrapper = LangfuseClientWrapper()
-        self.args = self._initialize_args()
+        self.args = self._initialize_args(params)
 
         # Back compatibility for old models
         self.provider = self.args.get("provider", get_llm_provider(self.args))
 
-    def _initialize_args(self) -> dict:
-        """Initialize the arguments based on the agent's parameters."""
-        args = self.agent.params.copy()
-        args["model_name"] = self.agent.model_name
-        args["provider"] = self.agent.provider
+    def _initialize_args(self, params: dict = None) -> dict:
+        """
+        Initialize the arguments for agent execution.
+
+        Takes the parameters passed during execution and sets necessary defaults.
+        The params are already merged with defaults by AgentsController.get_agent_llm_params.
+
+        Args:
+            params: Parameters for agent execution (already merged with defaults)
+
+        Returns:
+            dict: Final parameters for agent execution
+        """
+        # Use the parameters passed to the method (already merged with defaults by AgentsController)
+        # No fallback needed as AgentsController.get_agent_llm_params already handles this
+        args = params.copy() if params else {}
+
+        # Set model name and provider if given in create agent otherwise use global llm defaults
+        # AgentsController.get_agent_llm_params
+        if self.agent.model_name is not None:
+            args["model_name"] = self.agent.model_name
+        if self.agent.provider is not None:
+            args["provider"] = self.agent.provider
+
         args["embedding_model_provider"] = args.get("embedding_model", get_embedding_model_provider(args))
 
         # agent is using current langchain model
@@ -261,11 +280,20 @@ class LangchainAgent:
             # only update prompt_template if it is set on the model
             args["prompt_template"] = prompt_template
 
+        # Set default prompt template if not provided
         if args.get("prompt_template") is None:
+            # Default prompt template depends on agent mode
            if args.get("mode") == "retrieval":
                 args["prompt_template"] = DEFAULT_RAG_PROMPT_TEMPLATE
+                logger.info(f"Using default retrieval prompt template: {DEFAULT_RAG_PROMPT_TEMPLATE[:50]}...")
             else:
-                raise ValueError("Please provide a `prompt_template` or set `mode=retrieval`")
+                # Set a default prompt template for non-retrieval mode
+                default_prompt = "you are an assistant, answer using the tables connected"
+                args["prompt_template"] = default_prompt
+                logger.info(f"Using default prompt template: {default_prompt}")
+
+        if "prompt_template" in args:
+            logger.info(f"Using prompt template: {args['prompt_template'][:50]}...")
 
         return args
 
@@ -318,7 +346,7 @@ class LangchainAgent:
         self.provider = args.get("provider", get_llm_provider(args))
 
         df = df.reset_index(drop=True)
-        agent = self.create_agent(df, args)
+        agent = self.create_agent(df)
         # Use last message as prompt, remove other questions.
         user_column = args.get("user_column", USER_COLUMN)
         df.iloc[:-1, df.columns.get_loc(user_column)] = None
@@ -348,14 +376,17 @@ class LangchainAgent:
         self.provider = args.get("provider", get_llm_provider(args))
 
         df = df.reset_index(drop=True)
-        agent = self.create_agent(df, args)
+        agent = self.create_agent(df)
         # Use last message as prompt, remove other questions.
         user_column = args.get("user_column", USER_COLUMN)
         df.iloc[:-1, df.columns.get_loc(user_column)] = None
         return self.stream_agent(df, agent, args)
 
-    def create_agent(self, df: pd.DataFrame, args: Dict = None) -> AgentExecutor:
+    def create_agent(self, df: pd.DataFrame) -> AgentExecutor:
         # Set up tools.
+
+        args = self.args
+
         llm = create_chat_model(args)
         self.llm = llm
 
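Taken together, the LangchainAgent hunks above move parameter resolution out of the agent itself: __init__ now accepts a params dict that callers are expected to pre-merge with global LLM defaults via AgentsController.get_agent_llm_params, and create_agent reads self.args instead of taking an args argument. A minimal sketch of the new calling convention; the lookup call and the agent/project names are illustrative assumptions, not taken from this diff:

    from mindsdb.interfaces.agents.agents_controller import AgentsController
    from mindsdb.interfaces.agents.langchain_agent import LangchainAgent

    agent_controller = AgentsController()
    # Hypothetical lookup call; the exact retrieval API is not shown in this diff.
    agent_db = agent_controller.get_agent("my_agent", project_name="mindsdb")

    # Merge global LLM defaults with the agent's own params, as create_mcp_agent does further down.
    merged_params = agent_controller.get_agent_llm_params(agent_db.params)

    # params is now passed in explicitly instead of being read from agent_db.params inside the agent.
    agent = LangchainAgent(agent_db, model=None, params=merged_params)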
@@ -63,11 +63,19 @@ class MCPQueryTool(BaseTool):
         return loop.run_until_complete(self._arun(query))
 
 
+# todo move instantiation to agent controller
 class MCPLangchainAgent(LangchainAgent):
     """Extension of LangchainAgent that delegates to MCP server"""
 
-    def __init__(self, agent: db.Agents, model: dict = None, mcp_host: str = "127.0.0.1", mcp_port: int = 47337):
-        super().__init__(agent, model)
+    def __init__(
+        self,
+        agent: db.Agents,
+        model: dict = None,
+        params: dict = None,
+        mcp_host: str = "127.0.0.1",
+        mcp_port: int = 47337,
+    ):
+        super().__init__(agent, model, params)
         self.mcp_host = mcp_host
         self.mcp_port = mcp_port
         self.exit_stack = AsyncExitStack()
@@ -85,7 +93,7 @@ class MCPLangchainAgent(LangchainAgent):
         server_params = StdioServerParameters(
             command="python",
             args=["-m", "mindsdb", "--api=mcp"],
-            env={"MCP_HOST": self.mcp_host, "MCP_PORT": str(self.mcp_port)}
+            env={"MCP_HOST": self.mcp_host, "MCP_PORT": str(self.mcp_port)},
         )
 
         logger.info(f"Connecting to MCP server at {self.mcp_host}:{self.mcp_port}")
@@ -99,7 +107,9 @@
 
             # Test the connection by listing tools
             tools_response = await self.session.list_tools()
-            logger.info(f"Successfully connected to MCP server. Available tools: {[tool.name for tool in tools_response.tools]}")
+            logger.info(
+                f"Successfully connected to MCP server. Available tools: {[tool.name for tool in tools_response.tools]}"
+            )
 
         except Exception as e:
             logger.error(f"Failed to connect to MCP server: {str(e)}")
@@ -141,7 +151,7 @@ class MCPLangchainAgent(LangchainAgent):
         response = super().get_completion(messages, stream)
 
         # Ensure response is a string (not a DataFrame)
-        if hasattr(response, 'to_string'):  # It's a DataFrame
+        if hasattr(response, "to_string"):  # It's a DataFrame
             return response.to_string()
 
         return response
@@ -167,7 +177,7 @@ class LiteLLMAgentWrapper:
         formatted_messages = [
             {
                 "question": msg["content"] if msg["role"] == "user" else "",
-                "answer": msg["content"] if msg["role"] == "assistant" else ""
+                "answer": msg["content"] if msg["role"] == "assistant" else "",
             }
             for msg in messages
         ]
@@ -177,23 +187,16 @@
 
         # Ensure response is a string
         if not isinstance(response, str):
-            if hasattr(response, 'to_string'):  # It's a DataFrame
+            if hasattr(response, "to_string"):  # It's a DataFrame
                 response = response.to_string()
             else:
                 response = str(response)
 
         # Format response in LiteLLM expected format
         return {
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": response
-                    }
-                }
-            ],
+            "choices": [{"message": {"role": "assistant", "content": response}}],
             "model": self.agent.args["model_name"],
-            "object": "chat.completion"
+            "object": "chat.completion",
         }
 
     async def acompletion_stream(self, messages: List[Dict[str, str]], **kwargs) -> Iterator[Dict[str, Any]]:
@@ -202,7 +205,7 @@ class LiteLLMAgentWrapper:
         formatted_messages = [
             {
                 "question": msg["content"] if msg["role"] == "user" else "",
-                "answer": msg["content"] if msg["role"] == "assistant" else ""
+                "answer": msg["content"] if msg["role"] == "assistant" else "",
             }
             for msg in messages
         ]
@@ -217,7 +220,7 @@ class LiteLLMAgentWrapper:
                 yield {
                     "choices": [{"delta": {"role": "assistant", "content": content}}],
                     "model": model_name,
-                    "object": "chat.completion.chunk"
+                    "object": "chat.completion.chunk",
                 }
                 # Allow async context switch
                 await asyncio.sleep(0)
@@ -230,7 +233,9 @@
         await self.agent.cleanup()
 
 
-def create_mcp_agent(agent_name: str, project_name: str, mcp_host: str = "127.0.0.1", mcp_port: int = 47337) -> LiteLLMAgentWrapper:
+def create_mcp_agent(
+    agent_name: str, project_name: str, mcp_host: str = "127.0.0.1", mcp_port: int = 47337
+) -> LiteLLMAgentWrapper:
     """Create an MCP agent and wrap it for LiteLLM compatibility"""
     from mindsdb.interfaces.agents.agents_controller import AgentsController
     from mindsdb.interfaces.storage import db
@@ -245,8 +250,11 @@ def create_mcp_agent(agent_name: str, project_name: str, mcp_host: str = "127.0.
     if agent_db is None:
         raise ValueError(f"Agent {agent_name} not found in project {project_name}")
 
-    # Create MCP agent
-    mcp_agent = MCPLangchainAgent(agent_db, mcp_host=mcp_host, mcp_port=mcp_port)
+    # Get merged parameters (defaults + agent params)
+    merged_params = agent_controller.get_agent_llm_params(agent_db.params)
+
+    # Create MCP agent with merged parameters
+    mcp_agent = MCPLangchainAgent(agent_db, params=merged_params, mcp_host=mcp_host, mcp_port=mcp_port)
 
     # Wrap for LiteLLM compatibility
     return LiteLLMAgentWrapper(mcp_agent)
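A hedged usage sketch of the refactored factory above: create_mcp_agent now resolves merged LLM parameters before building MCPLangchainAgent, and the returned LiteLLMAgentWrapper exposes acompletion_stream as an async generator of chat.completion.chunk-shaped dicts. The agent and project names below are placeholders:

    import asyncio

    from mindsdb.interfaces.agents.mcp_client_agent import create_mcp_agent


    async def main() -> None:
        # Placeholder agent/project names; host and port match the defaults in the signature above.
        wrapper = create_mcp_agent("my_agent", "mindsdb", mcp_host="127.0.0.1", mcp_port=47337)

        messages = [{"role": "user", "content": "How many rows are in the sales table?"}]
        # Each chunk follows the LiteLLM streaming shape built in acompletion_stream.
        async for chunk in wrapper.acompletion_stream(messages):
            print(chunk["choices"][0]["delta"].get("content", ""), end="")


    asyncio.run(main())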
@@ -1,7 +1,8 @@
 """
-Wrapper around MindsDB's executor and integration controller following the implementation of the original
-langchain.sql_database.SQLDatabase class to partly replicate its behavior.
+Wrapper around MindsDB's executor and integration controller following the implementation of the original
+langchain.sql_database.SQLDatabase class to partly replicate its behavior.
 """
+
 import traceback
 from typing import Any, Iterable, List, Optional
 
@@ -13,26 +14,25 @@ logger = log.getLogger(__name__)
 
 
 def extract_essential(input: str) -> str:
-    """ Sometimes LLM include to input unnecessary data. We can't control stochastic nature of LLM, so we need to
-    'clean' input somehow. LLM prompt contains instruction to enclose input between '$START$' and '$STOP$'.
+    """Sometimes LLM include to input unnecessary data. We can't control stochastic nature of LLM, so we need to
+    'clean' input somehow. LLM prompt contains instruction to enclose input between '$START$' and '$STOP$'.
     """
-    if '$START$' in input:
-        input = input.partition('$START$')[-1]
-    if '$STOP$' in input:
-        input = input.partition('$STOP$')[0]
-    return input.strip(' ')
+    if "$START$" in input:
+        input = input.partition("$START$")[-1]
+    if "$STOP$" in input:
+        input = input.partition("$STOP$")[0]
+    return input.strip(" ")
 
 
 class MindsDBSQL(SQLDatabase):
     @staticmethod
-    def custom_init(
-        sql_agent: 'SQLAgent'
-    ) -> 'MindsDBSQL':
+    def custom_init(sql_agent: "SQLAgent") -> "MindsDBSQL":
         instance = MindsDBSQL()
         instance._sql_agent = sql_agent
         return instance
 
     """ Can't modify signature, as LangChain does a Pydantic check."""
+
     def __init__(
         self,
         engine: Optional[Any] = None,
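For reference, extract_essential (only reformatted above, behavior unchanged) strips everything outside the $START$/$STOP$ markers that the prompt asks the LLM to emit around its tool input; an illustrative call:

    from mindsdb.interfaces.agents.mindsdb_database_agent import extract_essential

    raw = "Here is the query: $START$ SELECT * FROM my_db.orders LIMIT 5 $STOP$ Hope that helps!"
    print(extract_essential(raw))  # -> "SELECT * FROM my_db.orders LIMIT 5"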
@@ -51,7 +51,7 @@ class MindsDBSQL(SQLDatabase):
 
     @property
     def dialect(self) -> str:
-        return 'mindsdb'
+        return "mindsdb"
 
     @property
     def table_info(self) -> str:
@@ -93,23 +93,26 @@ class MindsDBSQL(SQLDatabase):
         command = extract_essential(command)
 
         try:
-
             # Log the query for debugging
             logger.info(f"Executing SQL query: {command}")
 
+            # Removing backticks causes in query execution.
             # remove backticks
-            command = command.replace('`', '')
+            # command = command.replace('`', '')
 
             # Parse the SQL string to an AST object first
             from mindsdb_sql_parser import parse_sql
+
             ast_query = parse_sql(command)
 
             # Now execute the parsed query
-            result = self._sql_agent.skill_tool.get_command_executor().execute_command(ast_query, database_name="mindsdb")
+            result = self._sql_agent.skill_tool.get_command_executor().execute_command(
+                ast_query, database_name="mindsdb"
+            )
 
             # Convert ExecuteAnswer to a DataFrame for easier manipulation
             df = None
-            if hasattr(result, 'data') and hasattr(result.data, 'data_frame'):
+            if hasattr(result, "data") and hasattr(result.data, "data_frame"):
                 df = result.data.data_frame
             else:
                 # Fallback to to_df when data_frame attr not available
@@ -130,7 +133,9 @@
         except Exception as e:
             logger.error(f"Error executing SQL command: {str(e)}\n{traceback.format_exc()}")
             # If this is a knowledge base query, provide a more helpful error message
-            if "knowledge_base" in command.lower() or any(kb in command for kb in self._sql_agent.get_usable_knowledge_base_names()):
+            if "knowledge_base" in command.lower() or any(
+                kb in command for kb in self._sql_agent.get_usable_knowledge_base_names()
+            ):
                 return f"Error executing knowledge base query: {str(e)}. Please check that the knowledge base exists and your query syntax is correct."
             return f"Error: {str(e)}"
 
File without changes
@@ -0,0 +1,54 @@
+from typing import List, Optional, Union
+
+from mindsdb.integrations.libs.api_handler import MetaAPIHandler
+from mindsdb.integrations.libs.base import MetaDatabaseHandler
+from mindsdb.utilities import log
+
+
+logger = log.getLogger("mindsdb")
+
+
+class BaseDataCatalog:
+    """
+    This is the base class for the Data Catalog interface.
+    """
+
+    def __init__(self, database_name: str, table_names: Optional[List[str]] = None) -> None:
+        """
+        Initialize the DataCatalogReader.
+
+        Args:
+            database_name (str): The data source to read/write metadata from.
+            table_names (Optional[List[str]]): The list of table names to read or write metadata for. If None, all tables will be read or written.
+        """
+        from mindsdb.api.executor.controllers.session_controller import (
+            SessionController,
+        )
+
+        session = SessionController()
+
+        self.database_name = database_name
+        self.data_handler: Union[MetaDatabaseHandler, MetaAPIHandler] = session.integration_controller.get_data_handler(
+            database_name
+        )
+        integration = session.integration_controller.get(database_name)
+        self.integration_id = integration["id"]
+        self.integration_engine = integration["engine"]
+        # TODO: Handle situations where a schema is provided along with the database name, e.g., 'schema.table'.
+        # TODO: Handle situations where a file path is provided with integrations like S3, e.g., 'dir/file.csv'.
+        self.table_names = table_names
+
+        self.logger = logger
+
+    def is_data_catalog_supported(self) -> bool:
+        """
+        Check if the data catalog is supported for the given database.
+
+        Returns:
+            bool: True if the data catalog is supported, False otherwise.
+        """
+        if not isinstance(self.data_handler, (MetaDatabaseHandler, MetaAPIHandler)):
+            self.logger.warning(f"Data catalog is not supported for the '{self.integration_engine}' integration'. ")
+            return False
+
+        return True
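A minimal sketch of how this new base class might be exercised; "my_postgres" and the table names are hypothetical, and the concrete metadata read/write logic lives in the data_catalog_loader.py and data_catalog_reader.py files added in this release:

    from mindsdb.interfaces.data_catalog.base_data_catalog import BaseDataCatalog

    catalog = BaseDataCatalog("my_postgres", table_names=["orders", "customers"])

    # Only handlers that subclass MetaDatabaseHandler or MetaAPIHandler expose catalog metadata.
    if catalog.is_data_catalog_supported():
        print(f"Data catalog available for integration id {catalog.integration_id}")
    else:
        print(f"'{catalog.integration_engine}' does not support the data catalog")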