flowcept 0.9.17__py3-none-any.whl → 0.9.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. flowcept/agents/agent_client.py +10 -4
  2. flowcept/agents/agents_utils.py +54 -19
  3. flowcept/agents/flowcept_agent.py +116 -12
  4. flowcept/agents/flowcept_ctx_manager.py +116 -46
  5. flowcept/agents/gui/gui_utils.py +21 -3
  6. flowcept/agents/prompts/general_prompts.py +1 -1
  7. flowcept/agents/prompts/in_memory_query_prompts.py +158 -45
  8. flowcept/agents/tools/general_tools.py +20 -3
  9. flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +14 -31
  10. flowcept/commons/daos/docdb_dao/lmdb_dao.py +48 -0
  11. flowcept/commons/daos/keyvalue_dao.py +12 -3
  12. flowcept/commons/daos/mq_dao/mq_dao_base.py +37 -20
  13. flowcept/commons/daos/mq_dao/mq_dao_kafka.py +2 -2
  14. flowcept/commons/daos/mq_dao/mq_dao_redis.py +33 -2
  15. flowcept/commons/flowcept_dataclasses/task_object.py +4 -1
  16. flowcept/configs.py +17 -3
  17. flowcept/flowcept_api/flowcept_controller.py +5 -1
  18. flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +33 -2
  19. flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +18 -4
  20. flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +1 -0
  21. flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +9 -10
  22. flowcept/flowceptor/consumers/base_consumer.py +22 -4
  23. flowcept/flowceptor/consumers/document_inserter.py +22 -1
  24. flowcept/instrumentation/flowcept_task.py +147 -51
  25. flowcept/instrumentation/task_capture.py +10 -1
  26. flowcept/version.py +1 -1
  27. {flowcept-0.9.17.dist-info → flowcept-0.9.19.dist-info}/METADATA +8 -1
  28. {flowcept-0.9.17.dist-info → flowcept-0.9.19.dist-info}/RECORD +32 -32
  29. {flowcept-0.9.17.dist-info → flowcept-0.9.19.dist-info}/WHEEL +1 -1
  30. resources/sample_settings.yaml +2 -1
  31. {flowcept-0.9.17.dist-info → flowcept-0.9.19.dist-info}/entry_points.txt +0 -0
  32. {flowcept-0.9.17.dist-info → flowcept-0.9.19.dist-info}/licenses/LICENSE +0 -0
@@ -1,4 +1,6 @@
1
1
  import asyncio
2
+ import json
3
+ import re
2
4
  from typing import Dict, List, Callable
3
5
 
4
6
  from flowcept.configs import AGENT_HOST, AGENT_PORT
@@ -48,10 +50,14 @@ def run_tool(
48
50
  result: List[TextContent] = await session.call_tool(tool_name, arguments=kwargs)
49
51
  actual_result = []
50
52
  for r in result.content:
51
- if isinstance(r, str):
52
- actual_result.append(r)
53
- else:
54
- actual_result.append(r.text)
53
+ text = r if isinstance(r, str) else r.text
54
+ try:
55
+ json.loads(text)
56
+ actual_result.append(text)
57
+ except Exception:
58
+ match = re.search(r"Error code:\\s*(\\d+)", text)
59
+ code = int(match.group(1)) if match else 400
60
+ actual_result.append(json.dumps({"code": code, "result": text, "tool_name": tool_name}))
55
61
 
56
62
  return actual_result
57
63
 
@@ -1,4 +1,6 @@
1
1
  import os
2
+ import re
3
+ import unicodedata
2
4
  from typing import Union, Dict
3
5
 
4
6
  from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
@@ -137,8 +139,8 @@ def build_llm_model(
137
139
  if _service_provider == "sambanova":
138
140
  from langchain_community.llms.sambanova import SambaStudio
139
141
 
140
- os.environ["SAMBASTUDIO_URL"] = AGENT.get("llm_server_url")
141
- os.environ["SAMBASTUDIO_API_KEY"] = AGENT.get("api_key")
142
+ os.environ["SAMBASTUDIO_URL"] = os.environ.get("SAMBASTUDIO_URL", AGENT.get("llm_server_url"))
143
+ os.environ["SAMBASTUDIO_API_KEY"] = os.environ.get("SAMBASTUDIO_API_KEY", AGENT.get("api_key"))
142
144
 
143
145
  llm = SambaStudio(model_kwargs=_model_kwargs)
144
146
  elif _service_provider == "azure":
@@ -153,7 +155,16 @@ def build_llm_model(
153
155
  from langchain_openai import ChatOpenAI
154
156
 
155
157
  api_key = os.environ.get("OPENAI_API_KEY", AGENT.get("api_key", None))
156
- llm = ChatOpenAI(openai_api_key=api_key, **model_kwargs)
158
+ base_url = os.environ.get("OPENAI_BASE_URL", AGENT.get("llm_server_url") or None)
159
+ org = os.environ.get("OPENAI_ORG_ID", AGENT.get("organization", None))
160
+
161
+ init_kwargs = {"api_key": api_key}
162
+ if base_url:
163
+ init_kwargs["base_url"] = base_url
164
+ if org:
165
+ init_kwargs["organization"] = org
166
+
167
+ llm = ChatOpenAI(**init_kwargs, **_model_kwargs)
157
168
  elif _service_provider == "google":
158
169
  if "claude" in _model_kwargs["model"]:
159
170
  api_key = os.environ.get("GOOGLE_API_KEY", AGENT.get("api_key", None))
@@ -166,22 +177,6 @@ def build_llm_model(
166
177
  from flowcept.agents.llms.gemini25 import Gemini25LLM
167
178
 
168
179
  llm = Gemini25LLM(**_model_kwargs)
169
- elif _service_provider == "openai":
170
- from langchain_openai import ChatOpenAI
171
-
172
- api_key = os.environ.get("OPENAI_API_KEY", AGENT.get("api_key"))
173
- base_url = os.environ.get("OPENAI_BASE_URL", AGENT.get("llm_server_url") or None) # optional
174
- org = os.environ.get("OPENAI_ORG_ID", AGENT.get("organization", None)) # optional
175
-
176
- init_kwargs = {"api_key": api_key}
177
- if base_url:
178
- init_kwargs["base_url"] = base_url
179
- if org:
180
- init_kwargs["organization"] = org
181
-
182
- # IMPORTANT: use the merged kwargs so `model` and temps flow through
183
- llm = ChatOpenAI(**init_kwargs, **_model_kwargs)
184
-
185
180
  else:
186
181
  raise Exception("Currently supported providers are sambanova, openai, azure, and google.")
187
182
  if track_tools:
@@ -194,3 +189,43 @@ def build_llm_model(
194
189
  if tool_task:
195
190
  llm.parent_task_id = tool_task.task_id
196
191
  return llm
192
+
193
+
194
def normalize_message(user_msg: str) -> str:
    """
    Return *user_msg* in a canonical, comparison-friendly form.

    The message is trimmed, Unicode-normalized (NFKC), dash-normalized,
    whitespace-collapsed, stripped of semantically irrelevant trailing
    punctuation, and lowercased — suitable for matching or hashing.

    Parameters
    ----------
    user_msg : str
        Raw user input message.

    Returns
    -------
    str
        Normalized message.
    """
    # NFKC folds fancy quotes/compatibility characters after trimming the edges.
    text = unicodedata.normalize("NFKC", user_msg.strip())
    # En/em dashes (common in chemistry notation such as C–H) become ASCII hyphens.
    text = text.translate(str.maketrans({"–": "-", "—": "-"}))
    # Any run of whitespace, including newlines, collapses to a single space.
    text = re.sub(r"\s+", " ", text)
    # Trailing "?", "!", "." (and stray spaces) do not change the meaning.
    text = re.sub(r"[?!.\s]+$", "", text)
    return text.lower()
@@ -1,29 +1,133 @@
1
+ import json
2
+ import os
1
3
  from threading import Thread
2
- from time import sleep
3
4
 
4
5
  from flowcept.agents import check_liveness
6
+ from flowcept.agents.agents_utils import ToolResult
7
+ from flowcept.agents.tools.general_tools import prompt_handler
5
8
  from flowcept.agents.agent_client import run_tool
6
- from flowcept.agents.flowcept_ctx_manager import mcp_flowcept
7
- from flowcept.configs import AGENT_HOST, AGENT_PORT
8
- from flowcept.flowcept_api.flowcept_controller import Flowcept
9
+ from flowcept.agents.flowcept_ctx_manager import mcp_flowcept, ctx_manager
10
+ from flowcept.commons.flowcept_logger import FlowceptLogger
11
+ from flowcept.configs import AGENT_HOST, AGENT_PORT, DUMP_BUFFER_PATH, MQ_ENABLED
12
+ from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
13
+ from uuid import uuid4
9
14
 
10
15
  import uvicorn
11
16
 
12
17
 
13
- def main():
18
class FlowceptAgent:
    """
    Flowcept agent server wrapper with optional offline buffer loading.
    """

    def __init__(self, buffer_path: str | None = None):
        """
        Initialize a FlowceptAgent.

        Parameters
        ----------
        buffer_path : str or None
            Optional path to a JSONL buffer file. When MQ is disabled, the agent
            loads this file once at startup.
        """
        self.buffer_path = buffer_path
        self.logger = FlowceptLogger()
        self._server_thread: Thread | None = None
        self._server = None

    def _load_buffer_once(self) -> int:
        """
        Load messages from a JSONL buffer file into the agent context.

        Returns
        -------
        int
            Number of messages loaded.

        Raises
        ------
        FileNotFoundError
            If neither ``buffer_path`` nor ``DUMP_BUFFER_PATH`` exists.
        """
        path = self.buffer_path or DUMP_BUFFER_PATH
        if not os.path.exists(path):
            raise FileNotFoundError(f"Buffer file not found: {path}")

        self.logger.info(f"Loading agent buffer from {path}")
        # Give the shared context manager an agent id before replaying messages.
        if ctx_manager.agent_id is None:
            new_agent_id = str(uuid4())
            BaseAgentContextManager.agent_id = new_agent_id
            ctx_manager.agent_id = new_agent_id
        loaded = 0
        with open(path, "r") as buffer_file:
            for raw_line in buffer_file:
                raw_line = raw_line.strip()
                if not raw_line:
                    continue
                ctx_manager.message_handler(json.loads(raw_line))
                loaded += 1
        self.logger.info(f"Loaded {loaded} messages from buffer.")
        return loaded

    def _run_server(self):
        """Run the MCP server (blocking call)."""
        server_config = uvicorn.Config(
            mcp_flowcept.streamable_http_app, host=AGENT_HOST, port=AGENT_PORT, lifespan="on"
        )
        self._server = uvicorn.Server(server_config)
        self._server.run()

    def start(self):
        """
        Start the agent server in a background thread.

        Returns
        -------
        FlowceptAgent
            The current instance.
        """
        if not MQ_ENABLED:
            # Offline mode: replay the dump buffer once instead of consuming from MQ.
            self._load_buffer_once()

        self._server_thread = Thread(target=self._run_server, daemon=False)
        self._server_thread.start()
        self.logger.info(f"Flowcept agent server started on {AGENT_HOST}:{AGENT_PORT}")
        return self

    def stop(self):
        """Stop the agent server and wait briefly for shutdown."""
        if self._server is not None:
            # uvicorn polls this flag and performs a graceful shutdown.
            self._server.should_exit = True
        if self._server_thread is not None:
            self._server_thread.join(timeout=5)

    def wait(self):
        """Block until the server thread exits."""
        if self._server_thread is not None:
            self._server_thread.join()

    def query(self, message: str) -> ToolResult:
        """
        Send a prompt to the agent's main router tool and return the response.
        """
        try:
            resp = run_tool(tool_name=prompt_handler, kwargs={"message": message})[0]
        except Exception as e:
            return ToolResult(code=400, result=f"Error executing tool prompt_handler: {e}", tool_name="prompt_handler")

        try:
            return ToolResult(**json.loads(resp))
        except Exception as e:
            return ToolResult(
                code=499,
                result=f"Could not parse tool response as JSON: {resp}",
                extra=str(e),
                tool_name="prompt_handler",
            )
121
+
122
+
123
def main():
    """
    Start the MCP server.
    """
    running_agent = FlowceptAgent().start()
    # Wake up tool call: confirms the freshly started server answers requests.
    liveness_reply = run_tool(check_liveness, host=AGENT_HOST, port=AGENT_PORT)[0]
    print(liveness_reply)
    running_agent.wait()
27
131
 
28
132
 
29
133
  if __name__ == "__main__":
@@ -1,6 +1,9 @@
1
1
  from flowcept.agents.dynamic_schema_tracker import DynamicSchemaTracker
2
2
  from flowcept.agents.tools.in_memory_queries.pandas_agent_utils import load_saved_df
3
3
  from flowcept.commons.flowcept_dataclasses.task_object import TaskObject
4
+ from flowcept.commons.flowcept_logger import FlowceptLogger
5
+ from flowcept.commons.vocabulary import Status
6
+ from flowcept.configs import AGENT
4
7
  from mcp.server.fastmcp import FastMCP
5
8
 
6
9
  import json
@@ -12,11 +15,12 @@ import pandas as pd
12
15
 
13
16
  from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager, BaseAppContext
14
17
 
15
-
16
- from flowcept.agents import agent_client
17
18
  from flowcept.commons.task_data_preprocess import summarize_task
18
19
 
19
20
 
21
+ AGENT_DEBUG = AGENT.get("debug", False)
22
+
23
+
20
24
  @dataclass
21
25
  class FlowceptAppContext(BaseAppContext):
22
26
  """
@@ -39,6 +43,39 @@ class FlowceptAppContext(BaseAppContext):
39
43
  tracker_config: Dict | None
40
44
  custom_guidance: List[str] | None
41
45
 
46
+ def __init__(self):
47
+ self.logger = FlowceptLogger()
48
+ self.reset_context()
49
+
50
+ def reset_context(self):
51
+ """
52
+ Reset the agent's context to a clean state, initializing a new QA setup.
53
+ """
54
+ self.tasks = []
55
+ self.task_summaries = []
56
+ self.critical_tasks = []
57
+ self.df = pd.DataFrame()
58
+ self.tasks_schema = {}
59
+ self.value_examples = {}
60
+ self.custom_guidance = []
61
+ self.tracker_config = {}
62
+
63
+ if AGENT_DEBUG:
64
+ from flowcept.commons.flowcept_logger import FlowceptLogger
65
+
66
+ FlowceptLogger().warning("Running agent in DEBUG mode!")
67
+ df_path = "/tmp/current_agent_df.csv"
68
+ if os.path.exists(df_path):
69
+ self.logger.warning("Going to load df into context")
70
+ df = load_saved_df(df_path)
71
+ self.df = df
72
+ if os.path.exists("/tmp/current_tasks_schema.json"):
73
+ with open("/tmp/current_tasks_schema.json") as f:
74
+ self.tasks_schema = json.load(f)
75
+ if os.path.exists("/tmp/value_examples.json"):
76
+ with open("/tmp/value_examples.json") as f:
77
+ self.value_examples = json.load(f)
78
+
42
79
 
43
80
  class FlowceptAgentContextManager(BaseAgentContextManager):
44
81
  """
@@ -61,12 +98,12 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
61
98
  """
62
99
 
63
100
  def __init__(self):
64
- self.context: FlowceptAppContext = None
101
+ self.context = FlowceptAppContext()
65
102
  self.tracker_config = dict(max_examples=3, max_str_len=50)
66
103
  self.schema_tracker = DynamicSchemaTracker(**self.tracker_config)
67
104
  self.msgs_counter = 0
68
105
  self.context_chunk_size = 1 # Should be in the settings
69
- super().__init__()
106
+ super().__init__(allow_mq_disabled=True)
70
107
 
71
108
  def message_handler(self, msg_obj: Dict):
72
109
  """
@@ -82,7 +119,6 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
82
119
  bool
83
120
  True if the message was handled successfully.
84
121
  """
85
- print("Received:", msg_obj)
86
122
  msg_type = msg_obj.get("type", None)
87
123
  if msg_type == "task":
88
124
  task_msg = TaskObject.from_dict(msg_obj)
@@ -90,8 +126,68 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
90
126
  self.logger.info(f"Going to ignore our own LLM messages: {task_msg}")
91
127
  return True
92
128
 
93
- self.msgs_counter += 1
94
129
  self.logger.debug("Received task msg!")
130
+ if task_msg.subtype == "call_agent_task":
131
+ from flowcept.instrumentation.task_capture import FlowceptTask
132
+
133
+ if task_msg.activity_id == "reset_user_context":
134
+ self.context.reset_context()
135
+ self.msgs_counter = 0
136
+ if self._mq_dao is None:
137
+ self.logger.warning("MQ is disabled; skipping reset_user_context response message.")
138
+ else:
139
+ FlowceptTask(
140
+ agent_id=self.agent_id,
141
+ generated={"msg": "Provenance Agent reset context."},
142
+ subtype="agent_task",
143
+ activity_id="reset_user_context",
144
+ ).send()
145
+ return True
146
+ elif task_msg.activity_id == "provenance_query":
147
+ self.logger.info("Received a prov query message!")
148
+ query_text = task_msg.used.get("query")
149
+ from flowcept.agents import ToolResult
150
+ from flowcept.agents.tools.general_tools import prompt_handler
151
+ from flowcept.agents.agent_client import run_tool
152
+
153
+ resp = run_tool(tool_name=prompt_handler, kwargs={"message": query_text})[0]
154
+
155
+ try:
156
+ error = None
157
+ status = Status.FINISHED
158
+ tool_result = ToolResult(**json.loads(resp))
159
+ if tool_result.result_is_str():
160
+ generated = {"text": tool_result.result}
161
+ else:
162
+ generated = tool_result.result
163
+ except Exception as e:
164
+ status = Status.ERROR
165
+ error = f"Could not convert the following into a ToolResult:\n{resp}\nException: {e}"
166
+ generated = {"text": str(resp)}
167
+ if self._mq_dao is None:
168
+ self.logger.warning("MQ is disabled; skipping provenance_query response message.")
169
+ else:
170
+ FlowceptTask(
171
+ agent_id=self.agent_id,
172
+ generated=generated,
173
+ stderr=error,
174
+ status=status,
175
+ subtype="agent_task",
176
+ activity_id="provenance_query_response",
177
+ ).send()
178
+
179
+ return True
180
+
181
+ elif (
182
+ task_msg.subtype == "agent_task"
183
+ and task_msg.agent_id is not None
184
+ and task_msg.agent_id == self.agent_id
185
+ ):
186
+ self.logger.info(f"Ignoring agent tasks from myself: {task_msg}")
187
+ return True
188
+
189
+ self.msgs_counter += 1
190
+
95
191
  self.context.tasks.append(msg_obj)
96
192
 
97
193
  task_summary = summarize_task(msg_obj, logger=self.logger)
@@ -110,12 +206,10 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
110
206
  ]
111
207
  )
112
208
  except Exception as e:
113
- self.logger.error(
114
- f"Could not add these tasks to buffer!\n"
115
- f"{
116
- self.context.task_summaries[self.msgs_counter - self.context_chunk_size : self.msgs_counter]
117
- }"
118
- )
209
+ task_slice = self.context.task_summaries[
210
+ self.msgs_counter - self.context_chunk_size : self.msgs_counter
211
+ ]
212
+ self.logger.error(f"Could not add these tasks to buffer!\n{task_slice}")
119
213
  self.logger.exception(e)
120
214
 
121
215
  # self.monitor_chunk()
@@ -136,46 +230,22 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
136
230
  Perform LLM-based analysis on the current chunk of task messages and send the results.
137
231
  """
138
232
  self.logger.debug(f"Going to begin LLM job! {self.msgs_counter}")
139
- result = agent_client.run_tool("analyze_task_chunk")
233
+ from flowcept.agents.agent_client import run_tool
234
+
235
+ result = run_tool("analyze_task_chunk")
140
236
  if len(result):
141
237
  content = result[0].text
142
238
  if content != "Error executing tool":
143
- msg = {"type": "flowcept_agent", "info": "monitor", "content": content}
144
- self._mq_dao.send_message(msg)
145
- self.logger.debug(str(content))
239
+ if self._mq_dao is None:
240
+ self.logger.warning("MQ is disabled; skipping monitor message.")
241
+ else:
242
+ msg = {"type": "flowcept_agent", "info": "monitor", "content": content}
243
+ self._mq_dao.send_message(msg)
244
+ self.logger.debug(str(content))
146
245
  else:
147
246
  self.logger.error(content)
148
247
 
149
- def reset_context(self):
150
- """
151
- Reset the agent's context to a clean state, initializing a new QA setup.
152
- """
153
- self.context = FlowceptAppContext(
154
- tasks=[],
155
- task_summaries=[],
156
- critical_tasks=[],
157
- df=pd.DataFrame(),
158
- tasks_schema={},
159
- value_examples={},
160
- custom_guidance=[],
161
- tracker_config=self.tracker_config,
162
- )
163
- DEBUG = True # TODO debugging!
164
- if DEBUG:
165
- self.logger.warning("Running agent in DEBUG mode!")
166
- df_path = "/tmp/current_agent_df.csv"
167
- if os.path.exists(df_path):
168
- self.logger.warning("Going to load df into context")
169
- df = load_saved_df(df_path)
170
- self.context.df = df
171
- if os.path.exists("/tmp/current_tasks_schema.json"):
172
- with open("/tmp/current_tasks_schema.json") as f:
173
- self.context.tasks_schema = json.load(f)
174
- if os.path.exists("/tmp/value_examples.json"):
175
- with open("/tmp/value_examples.json") as f:
176
- self.context.value_examples = json.load(f)
177
-
178
248
 
179
249
  # Exporting the ctx_manager and the mcp_flowcept
180
250
  ctx_manager = FlowceptAgentContextManager()
181
- mcp_flowcept = FastMCP("FlowceptAgent", require_session=False, lifespan=ctx_manager.lifespan, stateless_http=True)
251
+ mcp_flowcept = FastMCP("FlowceptAgent", lifespan=ctx_manager.lifespan, stateless_http=True)
@@ -351,10 +351,28 @@ def exec_st_plot_code(code, result_df, st_module):
351
351
  >>> code = "st.line_chart(result)"
352
352
  >>> exec_st_plot_code(code, df, st)
353
353
  """
354
- print("Plot code \n", code)
354
+ # 1) Make a copy of result_df and rename columns with dots
355
+ plot_df = result_df.copy()
356
+ col_map = {}
357
+
358
+ for col in plot_df.columns:
359
+ if "." in col:
360
+ new_col = col.replace(".", "_")
361
+ col_map[col] = new_col
362
+ plot_df.rename(columns={col: new_col}, inplace=True)
363
+
364
+ # 2) Rewrite the code so column names match the renamed columns
365
+ sanitized_code = code
366
+ for old, new in col_map.items():
367
+ # replace only inside quotes: 'generated.bd_enthalpy' → 'generated_bd_enthalpy'
368
+ sanitized_code = sanitized_code.replace(f"'{old}'", f"'{new}'")
369
+ sanitized_code = sanitized_code.replace(f'"{old}"', f'"{new}"')
370
+
371
+ print("SANITIZED CODE:\n", sanitized_code)
372
+ print(f"Renamed DF columms: {plot_df}")
355
373
  exec(
356
- code,
357
- {"result": result_df, "st": st_module, "plt": __import__("matplotlib.pyplot"), "alt": __import__("altair")},
374
+ sanitized_code,
375
+ {"result": plot_df, "st": st_module, "plt": __import__("matplotlib.pyplot"), "alt": __import__("altair")},
358
376
  )
359
377
 
360
378
 
@@ -28,7 +28,7 @@ ROUTING_PROMPT = (
28
28
  # "- in_context_query: if the user asks questions about tasks or data in running workflow (or a workflow that ran recently) or if the user mentions the in-memory 'df' or a dataframe.\n"
29
29
  # "- historical_prov_query: if the user wants to query historical provenance data\n"
30
30
  "- in_chat_query: if the user appears to be asking about something that has said recently in this chat.\n"
31
- "- unknown: if you don't know.\n"
31
+ "- in_context_query: if you don't know.\n"
32
32
  "Respond with only the route label."
33
33
  "User message is below:\n "
34
34
  )