MindsDB 25.5.4.2__py3-none-any.whl → 25.6.3.0__py3-none-any.whl

This diff reflects the contents of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.

Potentially problematic release.



Files changed (76)
  1. mindsdb/__about__.py +1 -1
  2. mindsdb/api/a2a/agent.py +50 -26
  3. mindsdb/api/a2a/common/server/server.py +32 -26
  4. mindsdb/api/a2a/task_manager.py +68 -6
  5. mindsdb/api/executor/command_executor.py +69 -14
  6. mindsdb/api/executor/datahub/datanodes/integration_datanode.py +49 -65
  7. mindsdb/api/executor/datahub/datanodes/mindsdb_tables.py +91 -84
  8. mindsdb/api/executor/datahub/datanodes/project_datanode.py +29 -48
  9. mindsdb/api/executor/datahub/datanodes/system_tables.py +35 -61
  10. mindsdb/api/executor/planner/plan_join.py +67 -77
  11. mindsdb/api/executor/planner/query_planner.py +176 -155
  12. mindsdb/api/executor/planner/steps.py +37 -12
  13. mindsdb/api/executor/sql_query/result_set.py +45 -64
  14. mindsdb/api/executor/sql_query/steps/fetch_dataframe.py +14 -18
  15. mindsdb/api/executor/sql_query/steps/fetch_dataframe_partition.py +17 -18
  16. mindsdb/api/executor/sql_query/steps/insert_step.py +13 -33
  17. mindsdb/api/executor/sql_query/steps/subselect_step.py +43 -35
  18. mindsdb/api/executor/utilities/sql.py +42 -48
  19. mindsdb/api/http/namespaces/config.py +1 -1
  20. mindsdb/api/http/namespaces/file.py +14 -23
  21. mindsdb/api/http/namespaces/knowledge_bases.py +132 -154
  22. mindsdb/api/mysql/mysql_proxy/data_types/mysql_datum.py +12 -28
  23. mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/binary_resultset_row_package.py +59 -50
  24. mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/resultset_row_package.py +9 -8
  25. mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py +449 -461
  26. mindsdb/api/mysql/mysql_proxy/utilities/dump.py +87 -36
  27. mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py +219 -28
  28. mindsdb/integrations/handlers/file_handler/file_handler.py +15 -9
  29. mindsdb/integrations/handlers/file_handler/tests/test_file_handler.py +43 -24
  30. mindsdb/integrations/handlers/litellm_handler/litellm_handler.py +10 -3
  31. mindsdb/integrations/handlers/llama_index_handler/requirements.txt +1 -1
  32. mindsdb/integrations/handlers/mysql_handler/mysql_handler.py +29 -33
  33. mindsdb/integrations/handlers/openai_handler/openai_handler.py +277 -356
  34. mindsdb/integrations/handlers/oracle_handler/oracle_handler.py +74 -51
  35. mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +305 -98
  36. mindsdb/integrations/handlers/salesforce_handler/salesforce_handler.py +145 -40
  37. mindsdb/integrations/handlers/salesforce_handler/salesforce_tables.py +136 -6
  38. mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py +352 -83
  39. mindsdb/integrations/libs/api_handler.py +279 -57
  40. mindsdb/integrations/libs/base.py +185 -30
  41. mindsdb/integrations/utilities/files/file_reader.py +99 -73
  42. mindsdb/integrations/utilities/handler_utils.py +23 -8
  43. mindsdb/integrations/utilities/sql_utils.py +35 -40
  44. mindsdb/interfaces/agents/agents_controller.py +226 -196
  45. mindsdb/interfaces/agents/constants.py +8 -1
  46. mindsdb/interfaces/agents/langchain_agent.py +42 -11
  47. mindsdb/interfaces/agents/mcp_client_agent.py +29 -21
  48. mindsdb/interfaces/agents/mindsdb_database_agent.py +23 -18
  49. mindsdb/interfaces/data_catalog/__init__.py +0 -0
  50. mindsdb/interfaces/data_catalog/base_data_catalog.py +54 -0
  51. mindsdb/interfaces/data_catalog/data_catalog_loader.py +375 -0
  52. mindsdb/interfaces/data_catalog/data_catalog_reader.py +38 -0
  53. mindsdb/interfaces/database/database.py +81 -57
  54. mindsdb/interfaces/database/integrations.py +222 -234
  55. mindsdb/interfaces/database/log.py +72 -104
  56. mindsdb/interfaces/database/projects.py +156 -193
  57. mindsdb/interfaces/file/file_controller.py +21 -65
  58. mindsdb/interfaces/knowledge_base/controller.py +66 -25
  59. mindsdb/interfaces/knowledge_base/evaluate.py +516 -0
  60. mindsdb/interfaces/knowledge_base/llm_client.py +75 -0
  61. mindsdb/interfaces/skills/custom/text2sql/mindsdb_kb_tools.py +83 -43
  62. mindsdb/interfaces/skills/skills_controller.py +31 -36
  63. mindsdb/interfaces/skills/sql_agent.py +113 -86
  64. mindsdb/interfaces/storage/db.py +242 -82
  65. mindsdb/migrations/versions/2025-05-28_a44643042fe8_added_data_catalog_tables.py +118 -0
  66. mindsdb/migrations/versions/2025-06-09_608e376c19a7_updated_data_catalog_data_types.py +58 -0
  67. mindsdb/utilities/config.py +13 -2
  68. mindsdb/utilities/log.py +35 -26
  69. mindsdb/utilities/ml_task_queue/task.py +19 -22
  70. mindsdb/utilities/render/sqlalchemy_render.py +129 -181
  71. mindsdb/utilities/starters.py +40 -0
  72. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/METADATA +257 -257
  73. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/RECORD +76 -68
  74. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/WHEEL +0 -0
  75. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/licenses/LICENSE +0 -0
  76. {mindsdb-25.5.4.2.dist-info → mindsdb-25.6.3.0.dist-info}/top_level.txt +0 -0
mindsdb/__about__.py CHANGED
@@ -1,6 +1,6 @@
  __title__ = "MindsDB"
  __package_name__ = "mindsdb"
- __version__ = "25.5.4.2"
+ __version__ = "25.6.3.0"
  __description__ = "MindsDB's AI SQL Server enables developers to build AI tools that need access to real-time data to perform their tasks"
  __email__ = "jorge@mindsdb.com"
  __author__ = "MindsDB Inc"
mindsdb/api/a2a/agent.py CHANGED
@@ -28,9 +28,7 @@ class MindsDBAgent:
  self.host = host
  self.port = port
  self.base_url = f"http://{host}:{port}"
- self.agent_url = (
- f"{self.base_url}/api/projects/{project_name}/agents/{agent_name}"
- )
+ self.agent_url = f"{self.base_url}/api/projects/{project_name}/agents/{agent_name}"
  self.sql_url = f"{self.base_url}/api/sql/query"
  logger.info(f"Initialized MindsDB agent connector to {self.base_url}")

@@ -65,9 +63,7 @@ class MindsDBAgent:
  for column in ["response", "result", "answer", "completion", "output"]:
  if column in result_row:
  content = result_row[column]
- logger.info(
- f"Found result in column '{column}': {content[:100]}..."
- )
+ logger.info(f"Found result in column '{column}': {content[:100]}...")
  return {
  "content": content,
  "parts": [{"type": "text", "text": content}],
@@ -122,9 +118,7 @@ class MindsDBAgent:
  "parts": [{"type": "text", "text": error_msg}],
  }

- def streaming_invoke(
- self, messages: List[dict], timeout: int = DEFAULT_STREAM_TIMEOUT
- ) -> Iterator[Dict[str, Any]]:
+ def streaming_invoke(self, messages: List[dict], timeout: int = DEFAULT_STREAM_TIMEOUT) -> Iterator[Dict[str, Any]]:
  """Stream responses from the MindsDB agent using the direct API endpoint.

  Args:
@@ -140,15 +134,11 @@ class MindsDBAgent:
  url = f"{self.base_url}/api/projects/{self.project_name}/agents/{self.agent_name}/completions/stream"

  # Log request for debugging
- logger.info(
- f"Sending streaming request to MindsDB agent: {self.agent_name}"
- )
+ logger.info(f"Sending streaming request to MindsDB agent: {self.agent_name}")
  logger.debug(f"Request messages: {json.dumps(messages)[:200]}...")

  # Send the request to MindsDB streaming API with timeout
- stream = requests.post(
- url, json={"messages": messages}, stream=True, timeout=timeout
- )
+ stream = requests.post(url, json={"messages": messages}, stream=True, timeout=timeout)
  stream.raise_for_status()

  # Process the streaming response directly
@@ -165,9 +155,7 @@ class MindsDBAgent:
  # Pass through the chunk with minimal modifications
  yield chunk
  except json.JSONDecodeError as e:
- logger.warning(
- f"Failed to parse JSON from line: {data}. Error: {str(e)}"
- )
+ logger.warning(f"Failed to parse JSON from line: {data}. Error: {str(e)}")
  # Yield error information but continue processing
  yield {
  "error": f"JSON parse error: {str(e)}",
@@ -186,9 +174,7 @@ class MindsDBAgent:
  logger.debug(f"Received non-data line: {line}")

  # If it looks like a raw text response (not SSE format), wrap it
- if not line.startswith("event:") and not line.startswith(
- ":"
- ):
+ if not line.startswith("event:") and not line.startswith(":"):
  yield {"content": line, "is_task_complete": False}
  except UnicodeDecodeError as e:
  logger.warning(f"Failed to decode line: {str(e)}")
@@ -252,16 +238,54 @@ class MindsDBAgent:
  # Send a final completion message
  yield {"is_task_complete": True, "metadata": {"complete": True}}

- async def stream(self, query, session_id) -> AsyncIterable[Dict[str, Any]]:
- """Stream responses from the MindsDB agent (uses streaming API endpoint)."""
+ async def stream(
+ self,
+ query: str,
+ session_id: str,
+ history: List[dict] | None = None,
+ ) -> AsyncIterable[Dict[str, Any]]:
+ """Stream responses from the MindsDB agent (uses streaming API endpoint).
+
+ Args:
+ query: The current query to send to the agent.
+ session_id: Unique identifier for the conversation session.
+ history: Optional list of previous messages in the conversation.
+
+ Returns:
+ AsyncIterable yielding chunks of the streaming response.
+ """
  try:
  logger.info(f"Using streaming API for query: {query[:100]}...")

- # Format the query into the message structure expected by streaming_invoke
- messages = [{"question": query, "answer": None}]
+ # Format history into the expected format
+ formatted_messages = []
+ if history:
+ for msg in history:
+ # Convert Message object to dict if needed
+ msg_dict = msg.dict() if hasattr(msg, "dict") else msg
+ role = msg_dict.get("role", "user")
+
+ # Extract text from parts
+ text = ""
+ for part in msg_dict.get("parts", []):
+ if part.get("type") == "text":
+ text = part.get("text", "")
+ break
+
+ if text:
+ if role == "user":
+ formatted_messages.append({"question": text, "answer": None})
+ elif role == "assistant" and formatted_messages:
+ # Add the answer to the last question
+ formatted_messages[-1]["answer"] = text
+
+ # Add the current query to the messages
+ formatted_messages.append({"question": query, "answer": None})
+
+ logger.debug(f"Formatted messages for agent: {formatted_messages}")

  # Use the streaming_invoke method to get real streaming responses
- streaming_response = self.streaming_invoke(messages)
+ streaming_response = self.streaming_invoke(formatted_messages)

  # Yield all chunks directly from the streaming response
  for chunk in streaming_response:
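
Note: the reworked stream() above threads conversation history into streaming_invoke() by pairing each user text with the assistant text that follows it as {"question", "answer"} dicts. A minimal standalone sketch of that pairing logic, using a hypothetical helper name and sample messages that are not part of the package:

```python
from typing import Any, Dict, List


def format_history(history: List[Dict[str, Any]], query: str) -> List[Dict[str, Any]]:
    # Mirrors the pairing in MindsDBAgent.stream(): user text opens a
    # {"question": ..., "answer": None} entry, assistant text fills the last answer.
    formatted: List[Dict[str, Any]] = []
    for msg in history:
        text = next((p.get("text", "") for p in msg.get("parts", []) if p.get("type") == "text"), "")
        if not text:
            continue
        if msg.get("role", "user") == "user":
            formatted.append({"question": text, "answer": None})
        elif msg.get("role") == "assistant" and formatted:
            formatted[-1]["answer"] = text
    # The current query is always appended last, with no answer yet.
    formatted.append({"question": query, "answer": None})
    return formatted


# Hypothetical two-turn conversation followed by a new question.
history = [
    {"role": "user", "parts": [{"type": "text", "text": "How many rows are in sales?"}]},
    {"role": "assistant", "parts": [{"type": "text", "text": "There are 1,204 rows."}]},
]
print(format_history(history, "And how many in 2024?"))
# [{'question': 'How many rows are in sales?', 'answer': 'There are 1,204 rows.'},
#  {'question': 'And how many in 2024?', 'answer': None}]
```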
mindsdb/api/a2a/common/server/server.py CHANGED
@@ -20,7 +20,8 @@ from ...common.types (
  )
  from pydantic import ValidationError
  import json
- from typing import AsyncIterable, Any
+ import time
+ from typing import AsyncIterable, Any, Dict
  from ...common.server.task_manager import TaskManager

  import logging
@@ -44,9 +45,9 @@ class A2AServer:
  self.agent_card = agent_card
  self.app = Starlette()
  self.app.add_route(self.endpoint, self._process_request, methods=["POST"])
- self.app.add_route(
- "/.well-known/agent.json", self._get_agent_card, methods=["GET"]
- )
+ self.app.add_route("/.well-known/agent.json", self._get_agent_card, methods=["GET"])
+ # Add status endpoint
+ self.app.add_route("/status", self._get_status, methods=["GET"])
  # TODO: Remove this when we have a proper CORS policy
  self.app.add_middleware(
  CORSMiddleware,
@@ -55,6 +56,7 @@ class A2AServer:
  allow_methods=["*"],
  allow_headers=["*"],
  )
+ self.start_time = time.time()

  def start(self):
  if self.agent_card is None:
@@ -66,18 +68,30 @@ class A2AServer:
  import uvicorn

  # Configure uvicorn with optimized settings for streaming
- uvicorn.run(
- self.app,
- host=self.host,
- port=self.port,
- http="h11",
- timeout_keep_alive=65,
- log_level="info"
- )
+ uvicorn.run(self.app, host=self.host, port=self.port, http="h11", timeout_keep_alive=65, log_level="info")

  def _get_agent_card(self, request: Request) -> JSONResponse:
  return JSONResponse(self.agent_card.model_dump(exclude_none=True))

+ def _get_status(self, request: Request) -> JSONResponse:
+ """
+ Status endpoint that returns basic server information.
+ This endpoint can be used by the frontend to check if the A2A server is running.
+ """
+ uptime_seconds = time.time() - self.start_time
+
+ status_info: Dict[str, Any] = {
+ "status": "ok",
+ "service": "mindsdb-a2a",
+ "uptime_seconds": round(uptime_seconds, 2),
+ "host": self.host,
+ "port": self.port,
+ "agent_name": self.agent_card.name if self.agent_card else None,
+ "version": self.agent_card.version if self.agent_card else "unknown",
+ }
+
+ return JSONResponse(status_info)
+
  async def _process_request(self, request: Request):
  try:
  body = await request.json()
@@ -89,23 +103,15 @@ class A2AServer:
  result = await self.task_manager.on_send_task(json_rpc_request)
  elif isinstance(json_rpc_request, SendTaskStreamingRequest):
  # Don't await the async generator, just pass it to _create_response
- result = self.task_manager.on_send_task_subscribe(
- json_rpc_request
- )
+ result = self.task_manager.on_send_task_subscribe(json_rpc_request)
  elif isinstance(json_rpc_request, CancelTaskRequest):
  result = await self.task_manager.on_cancel_task(json_rpc_request)
  elif isinstance(json_rpc_request, SetTaskPushNotificationRequest):
- result = await self.task_manager.on_set_task_push_notification(
- json_rpc_request
- )
+ result = await self.task_manager.on_set_task_push_notification(json_rpc_request)
  elif isinstance(json_rpc_request, GetTaskPushNotificationRequest):
- result = await self.task_manager.on_get_task_push_notification(
- json_rpc_request
- )
+ result = await self.task_manager.on_get_task_push_notification(json_rpc_request)
  elif isinstance(json_rpc_request, TaskResubscriptionRequest):
- result = await self.task_manager.on_resubscribe_to_task(
- json_rpc_request
- )
+ result = await self.task_manager.on_resubscribe_to_task(json_rpc_request)
  else:
  logger.warning(f"Unexpected request type: {type(json_rpc_request)}")
  raise ValueError(f"Unexpected request type: {type(request)}")
@@ -152,10 +158,10 @@ class A2AServer:
  "X-Accel-Buffering": "no",
  "Connection": "keep-alive",
  "Content-Type": "text/event-stream",
- "Transfer-Encoding": "chunked"
+ "Transfer-Encoding": "chunked",
  },
  # Explicitly set media_type
- media_type="text/event-stream"
+ media_type="text/event-stream",
  )
  elif isinstance(result, JSONRPCResponse):
  return JSONResponse(result.model_dump(exclude_none=True))
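
Note: the /status route added above exposes a lightweight health check for the A2A server. A hedged sketch of polling it with requests; the host and port below are placeholders for wherever the A2A server is actually bound:

```python
import requests

# Placeholder address: substitute the host/port the A2A server was started with.
A2A_BASE_URL = "http://localhost:10002"

resp = requests.get(f"{A2A_BASE_URL}/status", timeout=5)
resp.raise_for_status()
info = resp.json()

# Keys returned by A2AServer._get_status() in the diff above.
print(info["status"], info["service"], info["uptime_seconds"])
print(info.get("agent_name"), info.get("version"))
```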
mindsdb/api/a2a/task_manager.py CHANGED
@@ -63,7 +63,8 @@ class AgentTaskManager(InMemoryTaskManager):

  # Create and store the task first to ensure it exists
  try:
- await self.upsert_task(task_send_params)
+ task = await self.upsert_task(task_send_params)
+ logger.info(f"Task created/updated with history length: {len(task.history) if task.history else 0}")
  except Exception as e:
  logger.error(f"Error creating task: {str(e)}")
  yield SendTaskStreamingResponse(
@@ -74,10 +75,27 @@ class AgentTaskManager(InMemoryTaskManager):

  agent = self._create_agent(agent_name)

+ # Get the history from the task
+ history = task.history if task and task.history else []
+ logger.info(f"Using history with length {len(history)} for request")
+
+ # Log the history for debugging
+ logger.info(f"Conversation history for task {task_send_params.id}:")
+ for idx, msg in enumerate(history):
+ # Convert Message object to dict if needed
+ msg_dict = msg.dict() if hasattr(msg, "dict") else msg
+ role = msg_dict.get("role", "unknown")
+ text = ""
+ for part in msg_dict.get("parts", []):
+ if part.get("type") == "text":
+ text = part.get("text", "")
+ break
+ logger.info(f"Message {idx + 1} ({role}): {text[:100]}...")
+
  if not streaming:
  # If streaming is disabled, use invoke and return a single response
  try:
- result = agent.invoke(query, task_send_params.sessionId)
+ result = agent.invoke(query, task_send_params.sessionId, history=history)

  # Use the parts from the agent response if available, or create them
  if "parts" in result:
@@ -134,7 +152,7 @@ class AgentTaskManager(InMemoryTaskManager):
  # Track the chunks we've seen to avoid duplicates
  seen_chunks = set()

- async for item in agent.stream(query, task_send_params.sessionId):
+ async for item in agent.stream(query, task_send_params.sessionId, history=history):
  # Ensure item has the required fields or provide defaults
  is_task_complete = item.get("is_task_complete", False)

@@ -356,13 +374,26 @@ class AgentTaskManager(InMemoryTaskManager):
  message = task_send_params.message
  message_dict = message.dict() if hasattr(message, "dict") else message

+ # Get history from request if available
+ history = []
+ if hasattr(task_send_params, "history") and task_send_params.history:
+ # Convert each history item to dict if needed and ensure proper role
+ for item in task_send_params.history:
+ item_dict = item.dict() if hasattr(item, "dict") else item
+ # Ensure the role is properly set
+ if "role" not in item_dict:
+ item_dict["role"] = "assistant" if "answer" in item_dict else "user"
+ history.append(item_dict)
+
+ # Add current message to history
+ history.append(message_dict)
+
  # Create a new task
  task = Task(
  id=task_send_params.id,
  sessionId=task_send_params.sessionId,
- messages=[message_dict],
  status=TaskStatus(state=TaskState.SUBMITTED),
- history=[message_dict],
+ history=history,
  artifacts=[],
  )
  self.tasks[task_send_params.id] = task
@@ -372,6 +403,22 @@ class AgentTaskManager(InMemoryTaskManager):
  message_dict = message.dict() if hasattr(message, "dict") else message

  # Update the existing task
+ if task.history is None:
+ task.history = []
+
+ # If we have new history from the request, use it
+ if hasattr(task_send_params, "history") and task_send_params.history:
+ # Convert each history item to dict if needed and ensure proper role
+ history = []
+ for item in task_send_params.history:
+ item_dict = item.dict() if hasattr(item, "dict") else item
+ # Ensure the role is properly set
+ if "role" not in item_dict:
+ item_dict["role"] = "assistant" if "answer" in item_dict else "user"
+ history.append(item_dict)
+ task.history = history
+
+ # Add current message to history
  task.history.append(message_dict)
  return task

@@ -459,6 +506,17 @@ class AgentTaskManager(InMemoryTaskManager):
  self.tasks[task_id] = task

  task.status = status
+
+ # Store assistant's response in history if we have a message
+ if status.message and status.message.role == "agent":
+ if task.history is None:
+ task.history = []
+ # Convert message to dict if needed
+ message_dict = status.message.dict() if hasattr(status.message, "dict") else status.message
+ # Ensure role is set to assistant
+ message_dict["role"] = "assistant"
+ task.history.append(message_dict)
+
  if artifacts is not None:
  for artifact in artifacts:
  if artifact.append and len(task.artifacts) > 0:
@@ -505,12 +563,16 @@ class AgentTaskManager(InMemoryTaskManager):
  agent = self._create_agent(agent_name)

  try:
+ # Get the history from the task
+ task = self.tasks.get(task_send_params.id)
+ history = task.history if task and task.history else []
+
  # Always use streaming internally, but handle the response differently based on the streaming parameter
  all_parts = []
  final_metadata = {}

  # Create a streaming generator
- stream_gen = agent.stream(query, task_send_params.sessionId)
+ stream_gen = agent.stream(query, task_send_params.sessionId, history=history)

  if streaming:
  # For streaming mode, we'll use the streaming endpoint instead
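
Note: upsert_task() above back-fills a role for history items that arrive without one (entries carrying an "answer" are treated as assistant turns, everything else as user turns) before appending the current message. A small sketch of that normalization rule on hypothetical input; the helper name and sample data are illustrative, not MindsDB API:

```python
from typing import Any, Dict, List


def normalize_history(items: List[Dict[str, Any]], current: Dict[str, Any]) -> List[Dict[str, Any]]:
    # Same defaulting rule as AgentTaskManager.upsert_task(): a missing role becomes
    # "assistant" when the item already holds an "answer", otherwise "user".
    history: List[Dict[str, Any]] = []
    for item in items:
        item = dict(item)  # avoid mutating the caller's objects
        if "role" not in item:
            item["role"] = "assistant" if "answer" in item else "user"
        history.append(item)
    history.append(current)  # the incoming message always goes last
    return history


# Hypothetical request history: one untagged answer entry and one untagged question entry.
print(normalize_history(
    [{"answer": "There are 1,204 rows."}, {"parts": [{"type": "text", "text": "And in 2024?"}]}],
    {"role": "user", "parts": [{"type": "text", "text": "Break it down by month."}]},
))
```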
mindsdb/api/executor/command_executor.py CHANGED
@@ -6,6 +6,7 @@ from functools import reduce

  import pandas as pd
  from mindsdb_sql_parser import parse_sql
+ from mindsdb_sql_parser.ast.mindsdb import AlterDatabase
  from mindsdb_sql_parser.ast import (
  Alter,
  ASTNode,
@@ -39,6 +40,7 @@ from mindsdb_sql_parser.ast import (

  # typed models
  from mindsdb_sql_parser.ast.mindsdb import (
+ AlterView,
  CreateAgent,
  CreateAnomalyDetectionModel,
  CreateChatBot,
@@ -51,6 +53,7 @@ from mindsdb_sql_parser.ast.mindsdb import (
  CreateTrigger,
  CreateView,
  CreateKnowledgeBaseIndex,
+ EvaluateKnowledgeBase,
  DropAgent,
  DropChatBot,
  DropDatasource,
@@ -189,6 +192,8 @@ class ExecuteCommands:
  return self.answer_drop_tables(statement, database_name)
  elif statement_type is DropDatasource or statement_type is DropDatabase:
  return self.answer_drop_database(statement)
+ elif statement_type is AlterDatabase:
+ return self.answer_alter_database(statement)
  elif statement_type is Describe:
  # NOTE in sql 'describe table' is same as 'show columns'
  obj_type = statement.type
@@ -551,7 +556,9 @@ class ExecuteCommands:
  ):
  return self.answer_create_predictor(statement, database_name)
  elif statement_type is CreateView:
- return self.answer_create_view(statement, database_name)
+ return self.answer_create_or_alter_view(statement, database_name)
+ elif statement_type is AlterView:
+ return self.answer_create_or_alter_view(statement, database_name)
  elif statement_type is DropView:
  return self.answer_drop_view(statement, database_name)
  elif statement_type is Delete:
@@ -618,6 +625,8 @@ class ExecuteCommands:
  return self.answer_evaluate_metric(statement, database_name)
  elif statement_type is CreateKnowledgeBaseIndex:
  return self.answer_create_kb_index(statement, database_name)
+ elif statement_type is EvaluateKnowledgeBase:
+ return self.answer_evaluate_kb(statement, database_name)
  else:
  logger.warning(f"Unknown SQL statement: {sql}")
  raise NotSupportedYet(f"Unknown SQL statement: {sql}")
@@ -906,6 +915,14 @@ class ExecuteCommands:
  self.session.kb_controller.create_index(table_name=table_name, project_name=project_name)
  return ExecuteAnswer()

+ def answer_evaluate_kb(self, statement: EvaluateKnowledgeBase, database_name):
+ table_name = statement.name.parts[-1]
+ project_name = statement.name.parts[0] if len(statement.name.parts) > 1 else database_name
+ scores = self.session.kb_controller.evaluate(
+ table_name=table_name, project_name=project_name, params=statement.params
+ )
+ return ExecuteAnswer(data=ResultSet.from_df(scores))
+
  def _get_model_info(self, identifier, except_absent=True, database_name=None):
  if len(identifier.parts) == 1:
  identifier.parts = [database_name, identifier.parts[0]]
@@ -1181,6 +1198,13 @@ class ExecuteCommands:
  raise
  return ExecuteAnswer()

+ def answer_alter_database(self, statement):
+ if len(statement.name.parts) != 1:
+ raise Exception("Database name should contain only 1 part.")
+ db_name = statement.name.parts[0]
+ self.session.database_controller.update(db_name, data=statement.params)
+ return ExecuteAnswer()
+
  def answer_drop_tables(self, statement, database_name):
  """answer on 'drop table [if exists] {name}'
  Args:
@@ -1214,17 +1238,35 @@ class ExecuteCommands:

  return ExecuteAnswer()

- def answer_create_view(self, statement, database_name):
+ def answer_create_or_alter_view(self, statement: ASTNode, database_name: str) -> ExecuteAnswer:
+ """Process CREATE and ALTER VIEW commands
+
+ Args:
+ statement (ASTNode): data for creating or altering view
+ database_name (str): name of the current database
+
+ Returns:
+ ExecuteAnswer: answer for the command
+ """
  project_name = database_name
- # TEMP
- if isinstance(statement.name, Identifier):
+
+ if isinstance(statement.name, str):
+ parts = statement.name.split(".")
+ elif isinstance(statement.name, Identifier):
  parts = statement.name.parts
  else:
- parts = statement.name.split(".")
+ raise ValueError(f"Unknown type of view name: {statement.name}")

- view_name = parts[-1]
- if len(parts) == 2:
- project_name = parts[0]
+ match parts:
+ case [project_name, view_name]:
+ pass
+ case [view_name]:
+ pass
+ case _:
+ raise ValueError(
+ 'View name should be in the form "project_name.view_name" '
+ f'or "view_name", got {statement.name.parts}'
+ )

  query_str = statement.query_str

@@ -1233,7 +1275,7 @@ class ExecuteCommands:
  targets=[Star()],
  from_table=NativeQuery(integration=statement.from_table, query=statement.query_str),
  )
- query_str = str(query)
+ query_str = query.to_string()
  else:
  query = parse_sql(query_str)

@@ -1248,11 +1290,21 @@ class ExecuteCommands:
  query_context_controller.release_context(query_context_controller.IGNORE_CONTEXT)

  project = self.session.database_controller.get_project(project_name)
- try:
- project.create_view(view_name, query=query_str)
- except EntityExistsError:
- if getattr(statement, "if_not_exists", False) is False:
- raise
+
+ if isinstance(statement, CreateView):
+ try:
+ project.create_view(view_name, query=query_str)
+ except EntityExistsError:
+ if getattr(statement, "if_not_exists", False) is False:
+ raise
+ elif isinstance(statement, AlterView):
+ try:
+ project.update_view(view_name, query=query_str)
+ except EntityNotExistsError:
+ raise ExecutorException(f"View {view_name} does not exist in {project_name}")
+ else:
+ raise ValueError(f"Unknown view DDL statement: {statement}")
+
  return ExecuteAnswer()

  def answer_drop_view(self, statement, database_name):
@@ -1467,6 +1519,9 @@ class ExecuteCommands:
  is_full=False,
  database_name=None,
  ):
+ if isinstance(target, Identifier) is False:
+ raise TableNotExistError("The table name is required for the query.")
+
  if len(target.parts) > 1:
  db = target.parts[0]
  elif isinstance(database_name, str) and len(database_name) > 0:
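
Note: the dispatcher changes above wire three new statement types into ExecuteCommands: AlterDatabase, AlterView, and EvaluateKnowledgeBase (the latter returning its scores as a result set). A hedged sketch of exercising the new EVALUATE KNOWLEDGE_BASE path over MindsDB's HTTP SQL endpoint (/api/sql/query, the same URL the A2A agent uses above); the knowledge-base name, the test_table parameter, and the exact statement syntax accepted by mindsdb_sql_parser are assumptions here:

```python
import requests

# Default MindsDB HTTP API address; adjust to your deployment.
MINDSDB_SQL_URL = "http://127.0.0.1:47334/api/sql/query"

# Assumed syntax: the statement is parsed into an EvaluateKnowledgeBase node and
# routed to ExecuteCommands.answer_evaluate_kb(), which returns evaluation scores.
query = "EVALUATE KNOWLEDGE_BASE my_project.my_kb USING test_table = files.kb_test_data;"

resp = requests.post(MINDSDB_SQL_URL, json={"query": query}, timeout=300)
resp.raise_for_status()
print(resp.json())  # column names and rows of the returned score table
```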