flowcept 0.8.11__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (56)
  1. flowcept/__init__.py +7 -4
  2. flowcept/agents/__init__.py +5 -0
  3. flowcept/{flowceptor/consumers/agent/client_agent.py → agents/agent_client.py} +22 -12
  4. flowcept/agents/agents_utils.py +181 -0
  5. flowcept/agents/dynamic_schema_tracker.py +191 -0
  6. flowcept/agents/flowcept_agent.py +30 -0
  7. flowcept/agents/flowcept_ctx_manager.py +175 -0
  8. flowcept/agents/gui/__init__.py +5 -0
  9. flowcept/agents/gui/agent_gui.py +76 -0
  10. flowcept/agents/gui/gui_utils.py +239 -0
  11. flowcept/agents/llms/__init__.py +1 -0
  12. flowcept/agents/llms/claude_gcp.py +139 -0
  13. flowcept/agents/llms/gemini25.py +119 -0
  14. flowcept/agents/prompts/__init__.py +1 -0
  15. flowcept/{flowceptor/adapters/agents/prompts.py → agents/prompts/general_prompts.py} +18 -0
  16. flowcept/agents/prompts/in_memory_query_prompts.py +297 -0
  17. flowcept/agents/tools/__init__.py +1 -0
  18. flowcept/agents/tools/general_tools.py +102 -0
  19. flowcept/agents/tools/in_memory_queries/__init__.py +1 -0
  20. flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +704 -0
  21. flowcept/agents/tools/in_memory_queries/pandas_agent_utils.py +309 -0
  22. flowcept/cli.py +286 -44
  23. flowcept/commons/daos/docdb_dao/mongodb_dao.py +47 -0
  24. flowcept/commons/daos/mq_dao/mq_dao_base.py +24 -13
  25. flowcept/commons/daos/mq_dao/mq_dao_kafka.py +18 -2
  26. flowcept/commons/flowcept_dataclasses/task_object.py +16 -21
  27. flowcept/commons/flowcept_dataclasses/workflow_object.py +9 -1
  28. flowcept/commons/task_data_preprocess.py +260 -60
  29. flowcept/commons/utils.py +25 -6
  30. flowcept/configs.py +41 -26
  31. flowcept/flowcept_api/flowcept_controller.py +73 -6
  32. flowcept/flowceptor/adapters/base_interceptor.py +11 -5
  33. flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +25 -1
  34. flowcept/flowceptor/consumers/base_consumer.py +4 -0
  35. flowcept/flowceptor/consumers/consumer_utils.py +5 -4
  36. flowcept/flowceptor/consumers/document_inserter.py +2 -2
  37. flowcept/flowceptor/telemetry_capture.py +5 -2
  38. flowcept/instrumentation/flowcept_agent_task.py +294 -0
  39. flowcept/instrumentation/flowcept_decorator.py +43 -0
  40. flowcept/instrumentation/flowcept_loop.py +3 -3
  41. flowcept/instrumentation/flowcept_task.py +64 -24
  42. flowcept/instrumentation/flowcept_torch.py +5 -5
  43. flowcept/instrumentation/task_capture.py +83 -6
  44. flowcept/version.py +1 -1
  45. {flowcept-0.8.11.dist-info → flowcept-0.9.1.dist-info}/METADATA +42 -14
  46. {flowcept-0.8.11.dist-info → flowcept-0.9.1.dist-info}/RECORD +50 -36
  47. resources/sample_settings.yaml +12 -4
  48. flowcept/flowceptor/adapters/agents/__init__.py +0 -1
  49. flowcept/flowceptor/adapters/agents/agents_utils.py +0 -89
  50. flowcept/flowceptor/adapters/agents/flowcept_agent.py +0 -292
  51. flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py +0 -186
  52. flowcept/flowceptor/consumers/agent/flowcept_agent_context_manager.py +0 -145
  53. flowcept/flowceptor/consumers/agent/flowcept_qa_manager.py +0 -112
  54. {flowcept-0.8.11.dist-info → flowcept-0.9.1.dist-info}/WHEEL +0 -0
  55. {flowcept-0.8.11.dist-info → flowcept-0.9.1.dist-info}/entry_points.txt +0 -0
  56. {flowcept-0.8.11.dist-info → flowcept-0.9.1.dist-info}/licenses/LICENSE +0 -0
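
The restructure moves the agent stack into a new top-level flowcept.agents package: items 3 and 15 above are renames, and items 48-53 are deletions (their hunks appear below). As a hedged sketch of what the renames imply for downstream imports — a path-level diff does not show the public names inside each module, so the new paths below are inferred, not confirmed:

    # Hypothetical migration sketch, inferred only from the renamed paths above.
    # flowcept <= 0.8.11 (old locations):
    #   from flowcept.flowceptor.consumers.agent import client_agent
    #   from flowcept.flowceptor.adapters.agents import prompts
    # flowcept >= 0.9.1 (assumed new locations, per items 3 and 15):
    from flowcept.agents import agent_client
    from flowcept.agents.prompts import general_prompts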
flowcept/flowceptor/adapters/agents/agents_utils.py (deleted; item 49 above)
@@ -1,89 +0,0 @@
- from typing import List, Union
-
- from langchain_community.llms.sambanova import SambaStudio
- from mcp.server.fastmcp.prompts import base
- from langchain_core.language_models import LLM
- from langchain_core.messages import HumanMessage, AIMessage
-
- from flowcept.configs import AGENT
-
-
- def build_llm_model() -> LLM:
-     """
-     Build and return an LLM instance using agent configuration.
-
-     This function retrieves the model name and keyword arguments from the AGENT configuration,
-     constructs a SambaStudio LLM instance, and returns it.
-
-     Returns
-     -------
-     LLM
-         An initialized LLM object configured using the `AGENT` settings.
-     """
-     model_kwargs = AGENT.get("model_kwargs").copy()
-     model_kwargs["model"] = AGENT.get("model")
-     llm = SambaStudio(model_kwargs=model_kwargs)
-
-     return llm
-
-
- def convert_mcp_messages_to_plain_text(messages: list[base.Message]) -> str:
-     """
-     Convert a list of MCP base.Message objects into a plain text dialogue.
-
-     Parameters
-     ----------
-     messages : list of BaseMessage
-         The list of messages, typically from HumanMessage, AIMessage, SystemMessage, etc.
-
-     Returns
-     -------
-     str
-         A plain text version of the conversation, with roles labeled.
-     """
-     lines = []
-     for message in messages:
-         role = message.role.capitalize() # e.g., "human" → "Human"
-         line = f"{role}: {message.content.text}"
-         lines.append(line)
-     return "\n".join(lines)
-
-
- def convert_mcp_to_langchain(messages: list[base.Message]) -> List[Union[HumanMessage, AIMessage]]:
-     """
-     Convert a list of MCP-style messages to LangChain-compatible message objects.
-
-     Parameters
-     ----------
-     messages : list of base.Message
-         A list of messages in the MCP message format, each with a `role` and `content`.
-
-     Returns
-     -------
-     list of Union[HumanMessage, AIMessage]
-         A list of LangChain message objects, converted from the original MCP format.
-
-     Raises
-     ------
-     ValueError
-         If a message has a role that is not 'user' or 'assistant'.
-
-     Notes
-     -----
-     This function extracts the `text` attribute from message content if present, falling back to `str(content)`
-     otherwise. It maps MCP 'user' roles to LangChain `HumanMessage` and 'assistant' roles to `AIMessage`.
-     """
-     converted = []
-     for m in messages:
-         if hasattr(m.content, "text"):
-             content = m.content.text
-         else:
-             content = str(m.content) # fallback if it's already a string
-
-         if m.role == "user":
-             converted.append(HumanMessage(content=content))
-         elif m.role == "assistant":
-             converted.append(AIMessage(content=content))
-         else:
-             raise ValueError(f"Unsupported role: {m.role}")
-     return converted
flowcept/flowceptor/adapters/agents/flowcept_agent.py (deleted; item 50 above)
@@ -1,292 +0,0 @@
- import json
- import os
- from typing import Dict, List
-
- import uvicorn
- from langchain.chains.retrieval_qa.base import RetrievalQA
- from mcp.server.fastmcp import FastMCP
- from mcp.server.fastmcp.prompts import base
-
- from flowcept.configs import AGENT
- from flowcept.flowcept_api.flowcept_controller import Flowcept
- from flowcept.flowceptor.adapters.agents.agents_utils import (
-     convert_mcp_to_langchain,
-     convert_mcp_messages_to_plain_text,
- )
- from flowcept.flowceptor.adapters.agents.flowcept_llm_prov_capture import (
-     invoke_llm,
-     invoke_qa_question,
-     add_preamble_to_response,
- )
- from flowcept.flowceptor.adapters.agents.prompts import (
-     get_question_prompt,
-     BASE_MULTITASK_PROMPT,
-     BASE_SINGLETASK_PROMPT,
- )
- from flowcept.flowceptor.consumers.agent.flowcept_agent_context_manager import FlowceptAgentContextManager
- from flowcept.flowceptor.consumers.agent.flowcept_qa_manager import FlowceptQAManager
-
- os.environ["SAMBASTUDIO_URL"] = AGENT.get("llm_server_url")
- os.environ["SAMBASTUDIO_API_KEY"] = AGENT.get("api_key")
-
- agent_controller = FlowceptAgentContextManager()
- mcp = FastMCP("FlowceptAgent", require_session=True, lifespan=agent_controller.lifespan)
-
- #################################################
- # PROMPTS
- #################################################
-
-
- @mcp.prompt()
- def single_task_used_generated_prompt(task_data: Dict, question: str) -> list[base.Message]:
-     """
-     Generate a prompt for analyzing a single task's provenance and resource usage.
-
-     Parameters
-     ----------
-     task_data : dict
-         The task object containing provenance and telemetry fields.
-     question : str
-         A specific question to ask about the task.
-
-     Returns
-     -------
-     list of base.Message
-         The structured prompt messages for LLM analysis.
-     """
-     msgs = BASE_SINGLETASK_PROMPT.copy()
-     msgs.append(get_question_prompt(question))
-     msgs.append(base.UserMessage(f"This is the task object I need you to focus on: \n {task_data}\n"))
-     return msgs
-
-
- @mcp.prompt()
- def multi_task_summary_prompt(task_list: List[Dict]) -> List[base.Message]:
-     """
-     Generate a prompt for analyzing multiple task objects in a workflow.
-
-     Parameters
-     ----------
-     task_list : list of dict
-         A list of task objects with provenance and telemetry data.
-
-     Returns
-     -------
-     list of base.Message
-         The structured prompt messages for the LLM.
-     """
-     messages = BASE_MULTITASK_PROMPT.copy()
-     pretty_tasks = json.dumps(task_list, indent=2, default=str)
-     messages.append(base.UserMessage(f"These are the tasks I need you to reason about:\n\n{pretty_tasks}\n\n"))
-     return messages
-
-
- @mcp.prompt()
- def multi_task_qa_prompt(question: str) -> List[base.Message]:
-     """
-     Generate a prompt for asking a specific question about multiple tasks.
-
-     Parameters
-     ----------
-     question : str
-         The user's query about task data.
-
-     Returns
-     -------
-     list of base.Message
-         Prompt messages structured for the LLM.
-     """
-     messages = BASE_MULTITASK_PROMPT.copy()
-     messages.append(get_question_prompt(question))
-     return messages
-
-
- #################################################
- # TOOLS
- #################################################
-
-
- @mcp.tool()
- def analyze_task_chunk() -> str:
-     """
-     Analyze a recent chunk of tasks using an LLM to detect patterns or anomalies.
-
-     Returns
-     -------
-     str
-         LLM-generated analysis of the selected task chunk.
-     """
-     LAST_K = 5 # TODO make this dynamic from config
-     ctx = mcp.get_context()
-     task_list = ctx.request_context.lifespan_context.task_summaries[:-LAST_K]
-     agent_controller.logger.debug(f"N Tasks = {len(task_list)}")
-     if not task_list:
-         return "No tasks available."
-
-     messages = multi_task_summary_prompt(task_list)
-     langchain_messages = convert_mcp_to_langchain(messages)
-     response = invoke_llm(langchain_messages)
-     result = add_preamble_to_response(response, mcp, task_data=None)
-     agent_controller.logger.debug(f"Result={result}")
-     return result
-
-
- @mcp.tool()
- def ask_about_tasks_buffer(question: str) -> str:
-     """
-     Use a QA chain to answer a question about the current task buffer.
-
-     Parameters
-     ----------
-     question : str
-         The question to ask about the buffered tasks.
-
-     Returns
-     -------
-     str
-         Answer from the QA chain or an error message.
-     """
-     ctx = mcp.get_context()
-     qa_chain = build_qa_chain_from_ctx(ctx)
-     if not qa_chain:
-         return "No tasks available."
-
-     messages = multi_task_qa_prompt(question)
-
-     try:
-         query_str = convert_mcp_messages_to_plain_text(messages)
-     except Exception as e:
-         agent_controller.logger.exception(e)
-         return f"An internal error happened: {e}"
-
-     response = invoke_qa_question(qa_chain, query_str=query_str)
-     agent_controller.logger.debug(f"Response={response}")
-     return response
-
-
- def build_qa_chain_from_ctx(ctx) -> RetrievalQA:
-     """
-     Build or retrieve a QA chain from the current request context.
-
-     Parameters
-     ----------
-     ctx : RequestContext
-         The current MCP request context.
-
-     Returns
-     -------
-     RetrievalQA or None
-         A QA chain built from vectorstore metadata, or None if unavailable.
-     """
-     qa_chain = ctx.request_context.lifespan_context.qa_chain
-     if not qa_chain:
-         vectorstore_path = ctx.request_context.lifespan_context.vectorstore_path
-         if not vectorstore_path:
-             return None
-         agent_controller.logger.debug(f"Path: {vectorstore_path}")
-         qa_chain = FlowceptQAManager.build_qa_chain_from_vectorstore_path(vectorstore_path)
-         if not qa_chain:
-             return None
-     return qa_chain
-
-
- @mcp.tool()
- def get_latest(n: int = None) -> str:
-     """
-     Return the most recent task(s) from the task buffer.
-
-     Parameters
-     ----------
-     n : int, optional
-         Number of most recent tasks to return. If None, return only the latest.
-
-     Returns
-     -------
-     str
-         JSON-encoded task(s).
-     """
-     ctx = mcp.get_context()
-     tasks = ctx.request_context.lifespan_context.tasks
-     if not tasks:
-         return "No tasks available."
-     if n is None:
-         return json.dumps(tasks[-1])
-     return json.dumps(tasks[-n])
-
-
- @mcp.tool()
- def check_liveness() -> str:
-     """
-     Confirm the agent is alive and responding.
-
-     Returns
-     -------
-     str
-         Liveness status string.
-     """
-     return f"I'm {mcp.name} and I'm ready!"
-
-
- @mcp.tool()
- def check_llm() -> str:
-     """
-     Check connectivity and response from the LLM backend.
-
-     Returns
-     -------
-     str
-         LLM response, formatted with MCP metadata.
-     """
-     messages = [base.UserMessage("Hi, are you working properly?")]
-
-     langchain_messages = convert_mcp_to_langchain(messages)
-     response = invoke_llm(langchain_messages)
-     result = add_preamble_to_response(response, mcp)
-
-     return result
-
-
- @mcp.tool()
- def ask_about_latest_task(question) -> str:
-     """
-     Ask a question specifically about the latest task in the buffer.
-
-     Parameters
-     ----------
-     question : str
-         A user-defined question to analyze the latest task.
-
-     Returns
-     -------
-     str
-         Response from the LLM based on the latest task.
-     """
-     ctx = mcp.get_context()
-     tasks = ctx.request_context.lifespan_context.task_summaries
-     if not tasks:
-         return "No tasks available."
-     task_data = tasks[-1]
-
-     messages = single_task_used_generated_prompt(task_data, question)
-
-     langchain_messages = convert_mcp_to_langchain(messages)
-
-     response = invoke_llm(langchain_messages)
-     result = add_preamble_to_response(response, mcp, task_data)
-     return result
-
-
- def main():
-     """
-     Start the MCP server.
-     """
-     f = Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False).start()
-     f.logger.info(f"This section's workflow_id={Flowcept.current_workflow_id}")
-     setattr(mcp, "workflow_id", f.current_workflow_id)
-     uvicorn.run(
-         mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
-     )
-
-
- if __name__ == "__main__":
-     main()
flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py (deleted; item 51 above)
@@ -1,186 +0,0 @@
- import inspect
- import json
- from typing import List, Union, Dict
-
- from langchain.chains.retrieval_qa.base import RetrievalQA
- from langchain_community.callbacks import get_openai_callback
- from langchain_core.language_models import LLM
- from langchain_core.messages import HumanMessage, AIMessage
-
- from flowcept.flowceptor.adapters.agents.agents_utils import build_llm_model
- from flowcept.instrumentation.task_capture import FlowceptTask
-
-
- def add_preamble_to_response(response, mcp, task_data=None):
-     """
-     Add workflow/task-related metadata as a preamble to the LLM response.
-
-     Parameters
-     ----------
-     response : str
-         The LLM response text.
-     mcp : Any
-         The agent or workflow object, expected to have an optional `workflow_id` attribute.
-     task_data : dict, optional
-         Dictionary containing task metadata such as `workflow_id` and `task_id`.
-
-     Returns
-     -------
-     str
-         The response string prefixed with workflow/task metadata.
-     """
-     preamb_obj = {}
-     if hasattr(mcp, "workflow_id"):
-         agent_id = getattr(mcp, "workflow_id")
-         preamb_obj["agent_id"] = agent_id
-     if task_data:
-         preamb_obj["workflow_id"] = task_data.get("workflow_id")
-         preamb_obj["task_id"] = task_data.get("task_id")
-     result = ""
-     if preamb_obj:
-         result = f"{json.dumps(preamb_obj)}\n\n"
-     result += f"Response:\n{response}"
-     return result
-
-
- def invoke_llm(messages: List[Union[HumanMessage, AIMessage]], llm: LLM = None, activity_id=None) -> str:
-     """
-     Invoke an LLM with a list of chat-style messages and return its response.
-
-     Parameters
-     ----------
-     messages : List[Union[HumanMessage, AIMessage]]
-         The list of messages forming the conversation history for the LLM.
-     llm : LLM, optional
-         An instance of a LangChain-compatible LLM. If None, a default model is built.
-     activity_id : str, optional
-         An optional identifier for the activity, used for Flowcept instrumentation.
-
-     Returns
-     -------
-     str
-         The LLM's text response.
-     """
-     if llm is None:
-         llm = build_llm_model()
-     if activity_id is None:
-         activity_id = inspect.stack()[1].function
-
-     used = {"messages": [{"role": msg.type, "content": msg.content} for msg in messages]}
-
-     llm_metadata = _extract_llm_metadata(llm)
-
-     with FlowceptTask(
-         activity_id=activity_id,
-         used=used,
-         custom_metadata={"llm_metadata": llm_metadata, "query_type": "llm_invoke"},
-         subtype="llm_query",
-     ) as t:
-         with get_openai_callback() as cb:
-             response = llm.invoke(messages)
-             generated = {
-                 "text_response": response,
-                 "total_tokens": cb.total_tokens,
-                 "prompt_tokens": cb.prompt_tokens,
-                 "completion_tokens": cb.completion_tokens,
-                 "cost": cb.total_cost,
-             }
-             t.end(generated)
-     return response
-
-
- def invoke_qa_question(qa_chain: RetrievalQA, query_str: str, activity_id=None) -> str:
-     """
-     Query a RetrievalQA chain with a given question and return the response.
-
-     Parameters
-     ----------
-     qa_chain : RetrievalQA
-         The QA chain object to invoke.
-     query_str : str
-         The question to ask the QA chain.
-     activity_id : str, optional
-         An optional identifier for the activity, used for Flowcept instrumentation.
-
-     Returns
-     -------
-     str
-         The textual result from the QA chain.
-     """
-     used = {"message": query_str}
-     qa_chain_metadata = _extract_qa_chain_metadata(qa_chain)
-     with FlowceptTask(
-         activity_id=activity_id,
-         used=used,
-         subtype="llm_query",
-         custom_metadata={"qa_chain_metadata": qa_chain_metadata, "query_type": "qa_chain"},
-     ) as t:
-         with get_openai_callback() as cb:
-             response = dict(qa_chain({"query": f"{query_str}"})) # TODO bug?
-             text_response = response.pop("result")
-             generated = {
-                 "response": response,
-                 "text_response": text_response,
-                 "total_tokens": cb.total_tokens,
-                 "prompt_tokens": cb.prompt_tokens,
-                 "completion_tokens": cb.completion_tokens,
-                 "cost": cb.total_cost,
-             }
-             t.end(generated)
-     return text_response
-
-
- def _extract_llm_metadata(llm: LLM) -> Dict:
-     """
-     Extract metadata from a LangChain LLM instance.
-
-     Parameters
-     ----------
-     llm : LLM
-         The language model instance.
-
-     Returns
-     -------
-     dict
-         Dictionary containing class name, module, model name, and configuration if available.
-     """
-     llm_metadata = {
-         "class_name": llm.__class__.__name__,
-         "module": llm.__class__.__module__,
-         "config": llm.dict() if hasattr(llm, "dict") else {},
-     }
-     return llm_metadata
-
-
- def _extract_qa_chain_metadata(qa_chain: RetrievalQA) -> Dict:
-     """
-     Extract metadata from a RetrievalQA chain, including LLM and retriever details.
-
-     Parameters
-     ----------
-     qa_chain : RetrievalQA
-         The QA chain to extract metadata from.
-
-     Returns
-     -------
-     dict
-         Metadata dictionary including QA chain class name, retriever details, and optionally LLM metadata.
-     """
-     retriever = getattr(qa_chain, "retriever", None)
-     retriever_metadata = {
-         "class_name": retriever.__class__.__name__ if retriever else None,
-         "module": retriever.__class__.__module__ if retriever else None,
-         "vectorstore_type": getattr(retriever, "vectorstore", None).__class__.__name__
-         if hasattr(retriever, "vectorstore")
-         else None,
-         "retriever_config": retriever.__dict__ if retriever else {},
-     }
-     metadata = {
-         "qa_chain_class": qa_chain.__class__.__name__,
-         "retriever": retriever_metadata,
-     }
-     llm = getattr(qa_chain, "llm", None)
-     if llm:
-         metadata["llm"] = _extract_llm_metadata(llm)
-
-     return metadata
flowcept/flowceptor/consumers/agent/flowcept_agent_context_manager.py (deleted; item 52 above)
@@ -1,145 +0,0 @@
- from dataclasses import dataclass
- from typing import Dict, List
-
- from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager, BaseAppContext
- from langchain.chains.retrieval_qa.base import BaseRetrievalQA
- from langchain_community.embeddings import HuggingFaceEmbeddings
-
- from flowcept.flowceptor.consumers.agent import client_agent
- from flowcept.flowceptor.consumers.agent.flowcept_qa_manager import FlowceptQAManager
- from flowcept.commons.task_data_preprocess import summarize_task
-
-
- @dataclass
- class FlowceptAppContext(BaseAppContext):
-     """
-     Context object for holding flowcept-specific state (e.g., tasks data) during the agent's lifecycle.
-
-     Attributes
-     ----------
-     task_summaries : List[Dict]
-         List of summarized task dictionaries.
-     critical_tasks : List[Dict]
-         List of critical task summaries with tags or anomalies.
-     qa_chain : BaseRetrievalQA
-         The QA chain used for question-answering over task summaries.
-     vectorstore_path : str
-         Path to the persisted vectorstore used by the QA system.
-     embedding_model : HuggingFaceEmbeddings
-         The embedding model used to generate vector representations for tasks.
-     """
-
-     task_summaries: List[Dict]
-     critical_tasks: List[Dict]
-     qa_chain: BaseRetrievalQA
-     vectorstore_path: str
-     embedding_model: HuggingFaceEmbeddings
-
-
- class FlowceptAgentContextManager(BaseAgentContextManager):
-     """
-     Manages agent context and operations for Flowcept's intelligent task monitoring.
-
-     This class extends BaseAgentContextManager and maintains a rolling buffer of task messages.
-     It summarizes and tags tasks, builds a QA index over them, and uses LLM tools to analyze
-     task batches periodically.
-
-     Attributes
-     ----------
-     context : FlowceptAppContext
-         Current application context holding task state and QA components.
-     msgs_counter : int
-         Counter tracking how many task messages have been processed.
-     context_size : int
-         Number of task messages to collect before triggering QA index building and LLM analysis.
-     qa_manager : FlowceptQAManager
-         Utility for constructing QA chains from task summaries.
-     """
-
-     def __init__(self):
-         super().__init__()
-         self.context: FlowceptAppContext = None
-         self.reset_context()
-         self.msgs_counter = 0
-         self.context_size = 5
-         self.qa_manager = FlowceptQAManager()
-
-     def message_handler(self, msg_obj: Dict):
-         """
-         Handle an incoming message and update context accordingly.
-
-         Parameters
-         ----------
-         msg_obj : Dict
-             The incoming message object.
-
-         Returns
-         -------
-         bool
-             True if the message was handled successfully.
-         """
-         msg_type = msg_obj.get("type", None)
-         if msg_type == "task":
-             self.msgs_counter += 1
-             self.logger.debug("Received task msg!")
-             self.context.tasks.append(msg_obj)
-
-             self.logger.debug(f"This is QA! {self.context.qa_chain}")
-
-             task_summary = summarize_task(msg_obj)
-             self.context.task_summaries.append(task_summary)
-             if len(task_summary.get("tags", [])):
-                 self.context.critical_tasks.append(task_summary)
-
-             if self.msgs_counter > 0 and self.msgs_counter % self.context_size == 0:
-                 self.build_qa_index()
-
-                 self.monitor_chunk()
-
-         return True
-
-     def monitor_chunk(self):
-         """
-         Perform LLM-based analysis on the current chunk of task messages and send the results.
-         """
-         self.logger.debug(f"Going to begin LLM job! {self.msgs_counter}")
-         result = client_agent.run_tool("analyze_task_chunk")
-         if len(result):
-             content = result[0].text
-             if content != "Error executing tool":
-                 msg = {"type": "flowcept_agent", "info": "monitor", "content": content}
-                 self._mq_dao.send_message(msg)
-                 self.logger.debug(str(content))
-             else:
-                 self.logger.error(content)
-
-     def build_qa_index(self):
-         """
-         Build a new QA index from the current list of task summaries.
-         """
-         self.logger.debug(f"Going to begin QA Build! {self.msgs_counter}")
-         try:
-             qa_chain_result = self.qa_manager.build_qa(docs=self.context.task_summaries)
-
-             self.context.qa_chain = qa_chain_result.get("qa_chain")
-             self.context.vectorstore_path = qa_chain_result.get("path")
-
-             self.logger.debug(f"Built QA! {self.msgs_counter}")
-             assert self.context.qa_chain is not None
-             self.logger.debug(f"This is QA! {self.context.qa_chain}")
-             self.logger.debug(f"This is QA path! {self.context.vectorstore_path}")
-         except Exception as e:
-             self.logger.exception(e)
-
-     def reset_context(self):
-         """
-         Reset the agent's context to a clean state, initializing a new QA setup.
-         """
-         self.context = FlowceptAppContext(
-             tasks=[],
-             task_summaries=[],
-             critical_tasks=[],
-             qa_chain=None,
-             vectorstore_path=None,
-             embedding_model=FlowceptQAManager.embedding_model,
-         )