ai-data-science-team 0.0.0.9012__tar.gz → 0.0.0.9013__tar.gz

Files changed (51)
  1. {ai_data_science_team-0.0.0.9012/ai_data_science_team.egg-info → ai_data_science_team-0.0.0.9013}/PKG-INFO +6 -2
  2. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/README.md +5 -1
  3. ai_data_science_team-0.0.0.9013/ai_data_science_team/_version.py +1 -0
  4. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/data_loader_tools_agent.py +11 -0
  5. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ds_agents/eda_tools_agent.py +13 -0
  6. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ml_agents/mlflow_tools_agent.py +10 -0
  7. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/dataframe.py +6 -1
  8. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/eda.py +75 -16
  9. ai_data_science_team-0.0.0.9013/ai_data_science_team/utils/messages.py +27 -0
  10. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013/ai_data_science_team.egg-info}/PKG-INFO +6 -2
  11. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team.egg-info/SOURCES.txt +1 -0
  12. ai_data_science_team-0.0.0.9012/ai_data_science_team/_version.py +0 -1
  13. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/LICENSE +0 -0
  14. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/MANIFEST.in +0 -0
  15. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/__init__.py +0 -0
  16. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/__init__.py +0 -0
  17. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/data_cleaning_agent.py +0 -0
  18. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/data_visualization_agent.py +0 -0
  19. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/data_wrangling_agent.py +0 -0
  20. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/feature_engineering_agent.py +0 -0
  21. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/agents/sql_database_agent.py +0 -0
  22. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ds_agents/__init__.py +0 -0
  23. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ds_agents/modeling_tools_agent.py +0 -0
  24. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ml_agents/__init__.py +0 -0
  25. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ml_agents/h2o_ml_agent.py +0 -0
  26. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/ml_agents/h2o_ml_tools_agent.py +0 -0
  27. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/multiagents/__init__.py +0 -0
  28. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/multiagents/sql_data_analyst.py +0 -0
  29. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/multiagents/supervised_data_analyst.py +0 -0
  30. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/orchestration.py +0 -0
  31. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/parsers/__init__.py +0 -0
  32. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/parsers/parsers.py +0 -0
  33. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/templates/__init__.py +0 -0
  34. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/templates/agent_templates.py +0 -0
  35. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/__init__.py +0 -0
  36. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/data_loader.py +0 -0
  37. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/h2o.py +0 -0
  38. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/mlflow.py +0 -0
  39. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/tools/sql.py +0 -0
  40. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/__init__.py +0 -0
  41. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/html.py +0 -0
  42. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/logging.py +0 -0
  43. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/matplotlib.py +0 -0
  44. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/plotly.py +0 -0
  45. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team/utils/regex.py +0 -0
  46. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team.egg-info/dependency_links.txt +0 -0
  47. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team.egg-info/requires.txt +0 -0
  48. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/ai_data_science_team.egg-info/top_level.txt +0 -0
  49. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/requirements.txt +0 -0
  50. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/setup.cfg +0 -0
  51. {ai_data_science_team-0.0.0.9012 → ai_data_science_team-0.0.0.9013}/setup.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ai-data-science-team
- Version: 0.0.0.9012
+ Version: 0.0.0.9013
  Summary: Build and run an AI-powered data science team.
  Home-page: https://github.com/business-science/ai-data-science-team
  Author: Matt Dancho
@@ -152,7 +152,11 @@ This is a top secret project I'm working on. It's a multi-agent data science app
 
  #### 🔥 Agentic Applications
 
- 1. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
+ 1. **NEW Exploratory Data Copilot**: An AI-powered data science app that performs automated exploratory data analysis (EDA) with EDA Reporting, Missing Data Analysis, Correlation Analysis, and more. [See Application](/apps/exploratory-copilot-app/)
+ 
+ ![Exploratory Data Copilot](/img/apps/ai_exploratory_copilot.jpg)
+ 
+ 2. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
 
  ### Agents Available Now
 
README.md
@@ -98,7 +98,11 @@ This is a top secret project I'm working on. It's a multi-agent data science app
 
  #### 🔥 Agentic Applications
 
- 1. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
+ 1. **NEW Exploratory Data Copilot**: An AI-powered data science app that performs automated exploratory data analysis (EDA) with EDA Reporting, Missing Data Analysis, Correlation Analysis, and more. [See Application](/apps/exploratory-copilot-app/)
+ 
+ ![Exploratory Data Copilot](/img/apps/ai_exploratory_copilot.jpg)
+ 
+ 2. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
 
  ### Agents Available Now
 
ai_data_science_team-0.0.0.9013/ai_data_science_team/_version.py (new)
@@ -0,0 +1 @@
+ __version__ = "0.0.0.9013"
ai_data_science_team/agents/data_loader_tools_agent.py
@@ -25,6 +25,7 @@ from ai_data_science_team.tools.data_loader import (
      get_file_info,
      search_files_by_pattern,
  )
+ from ai_data_science_team.utils.messages import get_tool_call_names
 
  AGENT_NAME = "data_loader_tools_agent"
 
@@ -174,6 +175,12 @@ class DataLoaderToolsAgent(BaseAgent):
              return Markdown(self.response["messages"][0].content)
          else:
              return self.response["messages"][0].content
+ 
+     def get_tool_calls(self):
+         """
+         Returns the tool calls made by the agent.
+         """
+         return self.response["tool_calls"]
 
 
 
@@ -204,6 +211,7 @@ def make_data_loader_tools_agent(
          internal_messages: Annotated[Sequence[BaseMessage], operator.add]
          user_instructions: str
          data_loader_artifacts: dict
+         tool_calls: List[str]
 
      def data_loader_agent(state):
 
@@ -253,10 +261,13 @@
          elif isinstance(last_message, dict) and "artifact" in last_message:
              last_tool_artifact = last_message["artifact"]
 
+         tool_calls = get_tool_call_names(internal_messages)
+ 
          return {
              "messages": [last_ai_message],
              "internal_messages": internal_messages,
              "data_loader_artifacts": last_tool_artifact,
+             "tool_calls": tool_calls,
          }
 
      workflow = StateGraph(GraphState)
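
The diff above threads a new `tool_calls` key through the graph state and exposes it on the agent class via `get_tool_calls()`. Below is a minimal usage sketch, not taken from the package docs: the model choice, the instruction string, and the `invoke_agent` call pattern are assumptions for illustration.

```python
# Hypothetical usage sketch of the new tool-call tracking on DataLoaderToolsAgent.
# Model, instruction text, and the invoke_agent call pattern are assumed here
# and may differ from the package's own examples.
from langchain_openai import ChatOpenAI

from ai_data_science_team.agents.data_loader_tools_agent import DataLoaderToolsAgent

llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model choice
agent = DataLoaderToolsAgent(model=llm)

# Ask the agent to inspect some files, then check which tools it called
# along the way via the new accessor.
agent.invoke_agent("What data files are available in ./data?")
print(agent.get_tool_calls())  # e.g. ['search_files_by_pattern', 'get_file_info']
```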
ai_data_science_team/ds_agents/eda_tools_agent.py
@@ -19,17 +19,20 @@ from ai_data_science_team.templates import BaseAgent
  from ai_data_science_team.utils.regex import format_agent_name
 
  from ai_data_science_team.tools.eda import (
+     explain_data,
      describe_dataset,
      visualize_missing,
      correlation_funnel,
      generate_sweetviz_report,
  )
+ from ai_data_science_team.utils.messages import get_tool_call_names
 
 
  AGENT_NAME = "exploratory_data_analyst_agent"
 
  # Updated tool list for EDA
  EDA_TOOLS = [
+     explain_data,
      describe_dataset,
      visualize_missing,
      correlation_funnel,
@@ -162,6 +165,12 @@ class EDAToolsAgent(BaseAgent):
              return Markdown(self.response["messages"][0].content)
          else:
              return self.response["messages"][0].content
+ 
+     def get_tool_calls(self):
+         """
+         Returns the tool calls made by the agent.
+         """
+         return self.response["tool_calls"]
 
  def make_eda_tools_agent(
      model: Any,
@@ -191,6 +200,7 @@ def make_eda_tools_agent(
          user_instructions: str
          data_raw: dict
          eda_artifacts: dict
+         tool_calls: list
 
      def exploratory_agent(state):
          print(format_agent_name(AGENT_NAME))
@@ -229,11 +239,14 @@
              last_tool_artifact = last_message.artifact
          elif isinstance(last_message, dict) and "artifact" in last_message:
              last_tool_artifact = last_message["artifact"]
+ 
+         tool_calls = get_tool_call_names(internal_messages)
 
          return {
              "messages": [last_ai_message],
              "internal_messages": internal_messages,
              "eda_artifacts": last_tool_artifact,
+             "tool_calls": tool_calls,
          }
 
      workflow = StateGraph(GraphState)
ai_data_science_team/ml_agents/mlflow_tools_agent.py
@@ -27,6 +27,7 @@ from ai_data_science_team.tools.mlflow import (
      mlflow_search_registered_models,
      mlflow_get_model_version_details,
  )
+ from ai_data_science_team.utils.messages import get_tool_call_names
 
  AGENT_NAME = "mlflow_tools_agent"
 
@@ -228,6 +229,12 @@ class MLflowToolsAgent(BaseAgent):
              return Markdown(self.response["messages"][0].content)
          else:
              return self.response["messages"][0].content
+ 
+     def get_tool_calls(self):
+         """
+         Returns the tool calls made by the agent.
+         """
+         return self.response["tool_calls"]
 
 
 
@@ -330,10 +337,13 @@ def make_mlflow_tools_agent(
          elif isinstance(last_message, dict) and "artifact" in last_message:
              last_tool_artifact = last_message["artifact"]
 
+         tool_calls = get_tool_call_names(internal_messages)
+ 
          return {
              "messages": [last_ai_message],
              "internal_messages": internal_messages,
              "mlflow_artifacts": last_tool_artifact,
+             "tool_calls": tool_calls,
          }
 
 
ai_data_science_team/tools/dataframe.py
@@ -74,7 +74,12 @@ def get_dataframe_summary(
      return summaries
 
 
- def _summarize_dataframe(df: pd.DataFrame, dataset_name: str, n_sample=30, skip_stats=False) -> str:
+ def _summarize_dataframe(
+     df: pd.DataFrame,
+     dataset_name: str,
+     n_sample=30,
+     skip_stats=False
+ ) -> str:
      """Generate a summary string for a single DataFrame."""
      # 1. Convert dictionary-type cells to strings
      # This prevents unhashable dict errors during df.nunique().
ai_data_science_team/tools/eda.py
@@ -2,11 +2,44 @@
  from typing import Annotated, Dict, Tuple, Union
 
  import os
+ import tempfile
 
  from langchain.tools import tool
 
  from langgraph.prebuilt import InjectedState
 
+ from ai_data_science_team.tools.dataframe import get_dataframe_summary
+ 
+ 
+ @tool(response_format='content')
+ def explain_data(
+     data_raw: Annotated[dict, InjectedState("data_raw")],
+     n_sample: int = 30,
+     skip_stats: bool = False,
+ ):
+     """
+     Tool: explain_data
+     Description:
+         Provides an extensive, narrative summary of a DataFrame including its shape, column types,
+         missing value percentages, unique counts, sample rows, and (if not skipped) descriptive stats/info.
+ 
+     Parameters:
+         data_raw (dict): Raw data.
+         n_sample (int, default=30): Number of rows to display.
+         skip_stats (bool, default=False): If True, omit descriptive stats/info.
+ 
+     LLM Guidance:
+         Use when a detailed, human-readable explanation is needed—i.e., a full overview is preferred over a concise numerical summary.
+ 
+     Returns:
+         str: Detailed DataFrame summary.
+     """
+     print(" * Tool: explain_data")
+     import pandas as pd
+ 
+     result = get_dataframe_summary(pd.DataFrame(data_raw), n_sample=n_sample, skip_stats=skip_stats)
+ 
+     return result
 
  @tool(response_format='content_and_artifact')
  def describe_dataset(
@@ -15,21 +48,33 @@ def describe_dataset(
      """
      Tool: describe_dataset
      Description:
-         Describe the dataset by computing summary
-         statistics using the DataFrame's describe() method.
- 
+         Compute and return summary statistics for the dataset using pandas' describe() method.
+         The tool provides both a textual summary and a structured artifact (a dictionary) for further processing.
+ 
+     Parameters:
+     -----------
+     data_raw : dict
+         The raw data in dictionary format.
+ 
+     LLM Selection Guidance:
+     ------------------------
+     Use this tool when:
+     - The request emphasizes numerical descriptive statistics (e.g., count, mean, std, min, quartiles, max).
+     - The user needs a concise statistical snapshot rather than a detailed narrative.
+     - Both a brief text explanation and a structured data artifact (for downstream tasks) are required.
+ 
      Returns:
      -------
      Tuple[str, Dict]:
-         content: A textual summary of the DataFrame's descriptive statistics.
-         artifact: A dictionary (from DataFrame.describe()) for further inspection.
+         - content: A textual summary indicating that summary statistics have been computed.
+         - artifact: A dictionary (derived from DataFrame.describe()) containing detailed statistical measures.
      """
      print(" * Tool: describe_dataset")
      import pandas as pd
      df = pd.DataFrame(data_raw)
      description_df = df.describe(include='all')
      content = "Summary statistics computed using pandas describe()."
-     artifact = description_df.to_dict()
+     artifact = {'describe_df': description_df.to_dict()}
      return content, artifact
 
 
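
The describe_dataset change above only nests the describe() output under a named key. A standalone sketch of the resulting artifact shape, using made-up sample data (not from the package):

```python
# Standalone sketch of the new describe_dataset artifact shape: the
# describe() dictionary is now nested under a 'describe_df' key rather than
# returned bare. The sample data here is made up for illustration.
import pandas as pd

data_raw = {"age": [25, 32, 47], "income": [40_000, 52_000, 61_000]}

description_df = pd.DataFrame(data_raw).describe(include="all")

content = "Summary statistics computed using pandas describe()."
artifact = {"describe_df": description_df.to_dict()}  # new nested structure

print(artifact["describe_df"]["age"]["mean"])  # 34.666...
```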
@@ -226,8 +271,8 @@
      data_raw: Annotated[dict, InjectedState("data_raw")],
      target: str = None,
      report_name: str = "sweetviz_report.html",
-     report_directory: str = os.path.join(os.getcwd(), "reports"),
-     open_browser: bool = True,
+     report_directory: str = None,  # <-- Default to None
+     open_browser: bool = False,
  ) -> Tuple[str, Dict]:
      """
      Tool: generate_sweetviz_report
@@ -243,9 +288,10 @@
      report_name : str, optional
          The file name to save the Sweetviz HTML report. Default is "sweetviz_report.html".
      report_directory : str, optional
-         The directory where the report should be saved. Defaults to a 'reports' directory in the current working directory.
+         The directory where the report should be saved.
+         If None, a temporary directory is created and used.
      open_browser : bool, optional
-         Whether to open the report in a web browser. Default is True.
+         Whether to open the report in a web browser. Default is False.
 
      Returns:
      --------
@@ -254,28 +300,37 @@
          artifact: A dictionary with the report file path and optionally the report's HTML content.
      """
      print(" * Tool: generate_sweetviz_report")
+ 
+     # Import sweetviz
      try:
          import sweetviz as sv
      except ImportError:
          raise ImportError("Please install the 'sweetviz' package to use this tool. Run: pip install sweetviz")
+ 
      import pandas as pd
+ 
      # Convert injected raw data to a DataFrame.
      df = pd.DataFrame(data_raw)
 
+     # If no directory is specified, use a temporary directory.
+     if not report_directory:
+         report_directory = tempfile.mkdtemp()
+         print(f" * Using temporary directory: {report_directory}")
+     else:
+         # Ensure user-specified directory exists.
+         if not os.path.exists(report_directory):
+             os.makedirs(report_directory)
+ 
      # Create the Sweetviz report.
      report = sv.analyze(df, target_feat=target)
 
-     # Ensure the directory exists; default is os.getcwd()/reports
-     if not os.path.exists(report_directory):
-         os.makedirs(report_directory)
- 
      # Determine the full path for the report.
      full_report_path = os.path.join(report_directory, report_name)
 
      # Save the report to the specified HTML file.
      report.show_html(
          filepath=full_report_path,
-         open_browser=True,
+         open_browser=open_browser,
      )
 
      # Optionally, read the HTML content (if desired to pass along in the artifact).
@@ -285,9 +340,13 @@
      except Exception:
          html_content = None
 
-     content = f"Sweetviz EDA report generated and saved as '{os.path.abspath(full_report_path)}'."
+     content = (
+         f"Sweetviz EDA report generated and saved as '{os.path.abspath(full_report_path)}'. "
+         f"{'This was saved in a temporary directory.' if 'tmp' in report_directory else ''}"
+     )
      artifact = {
          "report_file": os.path.abspath(full_report_path),
          "report_html": html_content,
      }
      return content, artifact
+ 
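
The net effect of the sweetviz changes is that the report no longer lands in ./reports or opens a browser by default. A small isolated sketch of the directory-resolution logic; the helper function name is invented here for illustration:

```python
# Isolated sketch of the new default-directory behavior used by
# generate_sweetviz_report: fall back to a temporary directory when the caller
# supplies no report_directory. The helper name is invented for this sketch.
import os
import tempfile

def resolve_report_directory(report_directory: str = None) -> str:
    if not report_directory:
        # No directory supplied: write somewhere disposable instead of ./reports.
        report_directory = tempfile.mkdtemp()
    elif not os.path.exists(report_directory):
        # User-specified directory: create it if it does not exist yet.
        os.makedirs(report_directory)
    return report_directory

print(resolve_report_directory())               # e.g. /tmp/tmpab12cd34
print(resolve_report_directory("my_reports"))   # ./my_reports (created if missing)
```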
ai_data_science_team/utils/messages.py (new)
@@ -0,0 +1,27 @@
+ 
+ 
+ 
+ def get_tool_call_names(messages):
+     """
+     Method to extract the tool call names from a list of LangChain messages.
+ 
+     Parameters:
+     ----------
+     messages : list
+         A list of LangChain messages.
+ 
+     Returns:
+     -------
+     tool_calls : list
+         A list of tool call names.
+ 
+     """
+     tool_calls = []
+     for message in messages:
+         try:
+             if "tool_call_id" in list(dict(message).keys()):
+                 tool_calls.append(message.name)
+         except:
+             pass
+     return tool_calls
+ 
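
get_tool_call_names() keys off the tool_call_id field that ToolMessage objects carry. A hedged sketch of its behavior on a hand-built message list; the ids and content below are made up:

```python
# Hedged sketch of get_tool_call_names() on a hand-built LangChain message list.
# Only ToolMessage carries 'tool_call_id', so only its 'name' values are
# collected; the ids and content below are made up for illustration.
from langchain_core.messages import AIMessage, ToolMessage

from ai_data_science_team.utils.messages import get_tool_call_names

messages = [
    AIMessage(content="Running EDA tools..."),
    ToolMessage(content="Summary statistics computed.", name="describe_dataset", tool_call_id="call_1"),
    ToolMessage(content="Missing-data plots generated.", name="visualize_missing", tool_call_id="call_2"),
]

print(get_tool_call_names(messages))  # ['describe_dataset', 'visualize_missing']
```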
ai_data_science_team.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ai-data-science-team
- Version: 0.0.0.9012
+ Version: 0.0.0.9013
  Summary: Build and run an AI-powered data science team.
  Home-page: https://github.com/business-science/ai-data-science-team
  Author: Matt Dancho
@@ -152,7 +152,11 @@ This is a top secret project I'm working on. It's a multi-agent data science app
 
  #### 🔥 Agentic Applications
 
- 1. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
+ 1. **NEW Exploratory Data Copilot**: An AI-powered data science app that performs automated exploratory data analysis (EDA) with EDA Reporting, Missing Data Analysis, Correlation Analysis, and more. [See Application](/apps/exploratory-copilot-app/)
+ 
+ ![Exploratory Data Copilot](/img/apps/ai_exploratory_copilot.jpg)
+ 
+ 2. **SQL Database Agent App:** Connects any SQL Database, generates SQL queries from natural language, and returns data as a downloadable table. [See Application](/apps/sql-database-agent-app/)
 
  ### Agents Available Now
 
ai_data_science_team.egg-info/SOURCES.txt
@@ -43,5 +43,6 @@ ai_data_science_team/utils/__init__.py
  ai_data_science_team/utils/html.py
  ai_data_science_team/utils/logging.py
  ai_data_science_team/utils/matplotlib.py
+ ai_data_science_team/utils/messages.py
  ai_data_science_team/utils/plotly.py
  ai_data_science_team/utils/regex.py
ai_data_science_team-0.0.0.9012/ai_data_science_team/_version.py (removed)
@@ -1 +0,0 @@
- __version__ = "0.0.0.9012"