flowcept: flowcept-0.8.9-py3-none-any.whl → flowcept-0.8.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. flowcept/cli.py +460 -0
  2. flowcept/commons/daos/keyvalue_dao.py +19 -23
  3. flowcept/commons/daos/mq_dao/mq_dao_base.py +29 -29
  4. flowcept/commons/daos/mq_dao/mq_dao_kafka.py +4 -3
  5. flowcept/commons/daos/mq_dao/mq_dao_mofka.py +4 -0
  6. flowcept/commons/daos/mq_dao/mq_dao_redis.py +38 -5
  7. flowcept/commons/daos/redis_conn.py +47 -0
  8. flowcept/commons/flowcept_dataclasses/task_object.py +36 -8
  9. flowcept/commons/settings_factory.py +2 -4
  10. flowcept/commons/task_data_preprocess.py +200 -0
  11. flowcept/commons/utils.py +1 -1
  12. flowcept/configs.py +11 -9
  13. flowcept/flowcept_api/flowcept_controller.py +30 -13
  14. flowcept/flowceptor/adapters/agents/__init__.py +1 -0
  15. flowcept/flowceptor/adapters/agents/agents_utils.py +89 -0
  16. flowcept/flowceptor/adapters/agents/flowcept_agent.py +292 -0
  17. flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py +186 -0
  18. flowcept/flowceptor/adapters/agents/prompts.py +51 -0
  19. flowcept/flowceptor/adapters/base_interceptor.py +17 -19
  20. flowcept/flowceptor/adapters/brokers/__init__.py +1 -0
  21. flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +132 -0
  22. flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +3 -3
  23. flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +3 -3
  24. flowcept/flowceptor/consumers/agent/__init__.py +1 -0
  25. flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +101 -0
  26. flowcept/flowceptor/consumers/agent/client_agent.py +48 -0
  27. flowcept/flowceptor/consumers/agent/flowcept_agent_context_manager.py +145 -0
  28. flowcept/flowceptor/consumers/agent/flowcept_qa_manager.py +112 -0
  29. flowcept/flowceptor/consumers/base_consumer.py +90 -0
  30. flowcept/flowceptor/consumers/document_inserter.py +138 -53
  31. flowcept/flowceptor/telemetry_capture.py +1 -1
  32. flowcept/instrumentation/task_capture.py +19 -9
  33. flowcept/version.py +1 -1
  34. {flowcept-0.8.9.dist-info → flowcept-0.8.11.dist-info}/METADATA +18 -6
  35. {flowcept-0.8.9.dist-info → flowcept-0.8.11.dist-info}/RECORD +39 -25
  36. flowcept-0.8.11.dist-info/entry_points.txt +2 -0
  37. resources/sample_settings.yaml +44 -23
  38. flowcept/flowceptor/adapters/zambeze/__init__.py +0 -1
  39. flowcept/flowceptor/adapters/zambeze/zambeze_dataclasses.py +0 -41
  40. flowcept/flowceptor/adapters/zambeze/zambeze_interceptor.py +0 -102
  41. {flowcept-0.8.9.dist-info → flowcept-0.8.11.dist-info}/WHEEL +0 -0
  42. {flowcept-0.8.9.dist-info → flowcept-0.8.11.dist-info}/licenses/LICENSE +0 -0
flowcept/flowceptor/adapters/agents/flowcept_agent.py (new file)
@@ -0,0 +1,292 @@
+ import json
+ import os
+ from typing import Dict, List
+
+ import uvicorn
+ from langchain.chains.retrieval_qa.base import RetrievalQA
+ from mcp.server.fastmcp import FastMCP
+ from mcp.server.fastmcp.prompts import base
+
+ from flowcept.configs import AGENT
+ from flowcept.flowcept_api.flowcept_controller import Flowcept
+ from flowcept.flowceptor.adapters.agents.agents_utils import (
+     convert_mcp_to_langchain,
+     convert_mcp_messages_to_plain_text,
+ )
+ from flowcept.flowceptor.adapters.agents.flowcept_llm_prov_capture import (
+     invoke_llm,
+     invoke_qa_question,
+     add_preamble_to_response,
+ )
+ from flowcept.flowceptor.adapters.agents.prompts import (
+     get_question_prompt,
+     BASE_MULTITASK_PROMPT,
+     BASE_SINGLETASK_PROMPT,
+ )
+ from flowcept.flowceptor.consumers.agent.flowcept_agent_context_manager import FlowceptAgentContextManager
+ from flowcept.flowceptor.consumers.agent.flowcept_qa_manager import FlowceptQAManager
+
+ os.environ["SAMBASTUDIO_URL"] = AGENT.get("llm_server_url")
+ os.environ["SAMBASTUDIO_API_KEY"] = AGENT.get("api_key")
+
+ agent_controller = FlowceptAgentContextManager()
+ mcp = FastMCP("FlowceptAgent", require_session=True, lifespan=agent_controller.lifespan)
+
+ #################################################
+ # PROMPTS
+ #################################################
+
+
+ @mcp.prompt()
+ def single_task_used_generated_prompt(task_data: Dict, question: str) -> list[base.Message]:
+     """
+     Generate a prompt for analyzing a single task's provenance and resource usage.
+
+     Parameters
+     ----------
+     task_data : dict
+         The task object containing provenance and telemetry fields.
+     question : str
+         A specific question to ask about the task.
+
+     Returns
+     -------
+     list of base.Message
+         The structured prompt messages for LLM analysis.
+     """
+     msgs = BASE_SINGLETASK_PROMPT.copy()
+     msgs.append(get_question_prompt(question))
+     msgs.append(base.UserMessage(f"This is the task object I need you to focus on: \n {task_data}\n"))
+     return msgs
+
+
+ @mcp.prompt()
+ def multi_task_summary_prompt(task_list: List[Dict]) -> List[base.Message]:
+     """
+     Generate a prompt for analyzing multiple task objects in a workflow.
+
+     Parameters
+     ----------
+     task_list : list of dict
+         A list of task objects with provenance and telemetry data.
+
+     Returns
+     -------
+     list of base.Message
+         The structured prompt messages for the LLM.
+     """
+     messages = BASE_MULTITASK_PROMPT.copy()
+     pretty_tasks = json.dumps(task_list, indent=2, default=str)
+     messages.append(base.UserMessage(f"These are the tasks I need you to reason about:\n\n{pretty_tasks}\n\n"))
+     return messages
+
+
+ @mcp.prompt()
+ def multi_task_qa_prompt(question: str) -> List[base.Message]:
+     """
+     Generate a prompt for asking a specific question about multiple tasks.
+
+     Parameters
+     ----------
+     question : str
+         The user's query about task data.
+
+     Returns
+     -------
+     list of base.Message
+         Prompt messages structured for the LLM.
+     """
+     messages = BASE_MULTITASK_PROMPT.copy()
+     messages.append(get_question_prompt(question))
+     return messages
+
+
+ #################################################
+ # TOOLS
+ #################################################
+
+
+ @mcp.tool()
+ def analyze_task_chunk() -> str:
+     """
+     Analyze a recent chunk of tasks using an LLM to detect patterns or anomalies.
+
+     Returns
+     -------
+     str
+         LLM-generated analysis of the selected task chunk.
+     """
+     LAST_K = 5  # TODO make this dynamic from config
+     ctx = mcp.get_context()
+     task_list = ctx.request_context.lifespan_context.task_summaries[:-LAST_K]
+     agent_controller.logger.debug(f"N Tasks = {len(task_list)}")
+     if not task_list:
+         return "No tasks available."
+
+     messages = multi_task_summary_prompt(task_list)
+     langchain_messages = convert_mcp_to_langchain(messages)
+     response = invoke_llm(langchain_messages)
+     result = add_preamble_to_response(response, mcp, task_data=None)
+     agent_controller.logger.debug(f"Result={result}")
+     return result
+
+
+ @mcp.tool()
+ def ask_about_tasks_buffer(question: str) -> str:
+     """
+     Use a QA chain to answer a question about the current task buffer.
+
+     Parameters
+     ----------
+     question : str
+         The question to ask about the buffered tasks.
+
+     Returns
+     -------
+     str
+         Answer from the QA chain or an error message.
+     """
+     ctx = mcp.get_context()
+     qa_chain = build_qa_chain_from_ctx(ctx)
+     if not qa_chain:
+         return "No tasks available."
+
+     messages = multi_task_qa_prompt(question)
+
+     try:
+         query_str = convert_mcp_messages_to_plain_text(messages)
+     except Exception as e:
+         agent_controller.logger.exception(e)
+         return f"An internal error happened: {e}"
+
+     response = invoke_qa_question(qa_chain, query_str=query_str)
+     agent_controller.logger.debug(f"Response={response}")
+     return response
+
+
+ def build_qa_chain_from_ctx(ctx) -> RetrievalQA:
+     """
+     Build or retrieve a QA chain from the current request context.
+
+     Parameters
+     ----------
+     ctx : RequestContext
+         The current MCP request context.
+
+     Returns
+     -------
+     RetrievalQA or None
+         A QA chain built from vectorstore metadata, or None if unavailable.
+     """
+     qa_chain = ctx.request_context.lifespan_context.qa_chain
+     if not qa_chain:
+         vectorstore_path = ctx.request_context.lifespan_context.vectorstore_path
+         if not vectorstore_path:
+             return None
+         agent_controller.logger.debug(f"Path: {vectorstore_path}")
+         qa_chain = FlowceptQAManager.build_qa_chain_from_vectorstore_path(vectorstore_path)
+         if not qa_chain:
+             return None
+     return qa_chain
+
+
+ @mcp.tool()
+ def get_latest(n: int = None) -> str:
+     """
+     Return the most recent task(s) from the task buffer.
+
+     Parameters
+     ----------
+     n : int, optional
+         Number of most recent tasks to return. If None, return only the latest.
+
+     Returns
+     -------
+     str
+         JSON-encoded task(s).
+     """
+     ctx = mcp.get_context()
+     tasks = ctx.request_context.lifespan_context.tasks
+     if not tasks:
+         return "No tasks available."
+     if n is None:
+         return json.dumps(tasks[-1])
+     return json.dumps(tasks[-n])
+
+
+ @mcp.tool()
+ def check_liveness() -> str:
+     """
+     Confirm the agent is alive and responding.
+
+     Returns
+     -------
+     str
+         Liveness status string.
+     """
+     return f"I'm {mcp.name} and I'm ready!"
+
+
+ @mcp.tool()
+ def check_llm() -> str:
+     """
+     Check connectivity and response from the LLM backend.
+
+     Returns
+     -------
+     str
+         LLM response, formatted with MCP metadata.
+     """
+     messages = [base.UserMessage("Hi, are you working properly?")]
+
+     langchain_messages = convert_mcp_to_langchain(messages)
+     response = invoke_llm(langchain_messages)
+     result = add_preamble_to_response(response, mcp)
+
+     return result
+
+
+ @mcp.tool()
+ def ask_about_latest_task(question) -> str:
+     """
+     Ask a question specifically about the latest task in the buffer.
+
+     Parameters
+     ----------
+     question : str
+         A user-defined question to analyze the latest task.
+
+     Returns
+     -------
+     str
+         Response from the LLM based on the latest task.
+     """
+     ctx = mcp.get_context()
+     tasks = ctx.request_context.lifespan_context.task_summaries
+     if not tasks:
+         return "No tasks available."
+     task_data = tasks[-1]
+
+     messages = single_task_used_generated_prompt(task_data, question)
+
+     langchain_messages = convert_mcp_to_langchain(messages)
+
+     response = invoke_llm(langchain_messages)
+     result = add_preamble_to_response(response, mcp, task_data)
+     return result
+
+
+ def main():
+     """
+     Start the MCP server.
+     """
+     f = Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False).start()
+     f.logger.info(f"This section's workflow_id={Flowcept.current_workflow_id}")
+     setattr(mcp, "workflow_id", f.current_workflow_id)
+     uvicorn.run(
+         mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
+     )
+
+
+ if __name__ == "__main__":
+     main()
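
Note (not part of the diff): main() above serves the registered tools over MCP's streamable HTTP transport via uvicorn. The sketch below shows one way a client could call those tools. It assumes the standard MCP Python SDK client API (streamablehttp_client, ClientSession) and FastMCP's default /mcp endpoint on the host/port taken from the AGENT settings; the URL and endpoint path are assumptions, not something stated in this diff, and the package also ships its own helper in flowcept/flowceptor/consumers/agent/client_agent.py.

import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client


async def ping_agent(url="http://localhost:8000/mcp"):  # host, port, and path are assumptions
    # Open a streamable-HTTP connection to the server started by main() above.
    async with streamablehttp_client(url) as (read_stream, write_stream, _):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            # "check_liveness" is one of the tools registered with @mcp.tool() above.
            result = await session.call_tool("check_liveness", arguments={})
            print(result.content)


asyncio.run(ping_agent())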
flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py (new file)
@@ -0,0 +1,186 @@
+ import inspect
+ import json
+ from typing import List, Union, Dict
+
+ from langchain.chains.retrieval_qa.base import RetrievalQA
+ from langchain_community.callbacks import get_openai_callback
+ from langchain_core.language_models import LLM
+ from langchain_core.messages import HumanMessage, AIMessage
+
+ from flowcept.flowceptor.adapters.agents.agents_utils import build_llm_model
+ from flowcept.instrumentation.task_capture import FlowceptTask
+
+
+ def add_preamble_to_response(response, mcp, task_data=None):
+     """
+     Add workflow/task-related metadata as a preamble to the LLM response.
+
+     Parameters
+     ----------
+     response : str
+         The LLM response text.
+     mcp : Any
+         The agent or workflow object, expected to have an optional `workflow_id` attribute.
+     task_data : dict, optional
+         Dictionary containing task metadata such as `workflow_id` and `task_id`.
+
+     Returns
+     -------
+     str
+         The response string prefixed with workflow/task metadata.
+     """
+     preamb_obj = {}
+     if hasattr(mcp, "workflow_id"):
+         agent_id = getattr(mcp, "workflow_id")
+         preamb_obj["agent_id"] = agent_id
+     if task_data:
+         preamb_obj["workflow_id"] = task_data.get("workflow_id")
+         preamb_obj["task_id"] = task_data.get("task_id")
+     result = ""
+     if preamb_obj:
+         result = f"{json.dumps(preamb_obj)}\n\n"
+     result += f"Response:\n{response}"
+     return result
+
+
+ def invoke_llm(messages: List[Union[HumanMessage, AIMessage]], llm: LLM = None, activity_id=None) -> str:
+     """
+     Invoke an LLM with a list of chat-style messages and return its response.
+
+     Parameters
+     ----------
+     messages : List[Union[HumanMessage, AIMessage]]
+         The list of messages forming the conversation history for the LLM.
+     llm : LLM, optional
+         An instance of a LangChain-compatible LLM. If None, a default model is built.
+     activity_id : str, optional
+         An optional identifier for the activity, used for Flowcept instrumentation.
+
+     Returns
+     -------
+     str
+         The LLM's text response.
+     """
+     if llm is None:
+         llm = build_llm_model()
+     if activity_id is None:
+         activity_id = inspect.stack()[1].function
+
+     used = {"messages": [{"role": msg.type, "content": msg.content} for msg in messages]}
+
+     llm_metadata = _extract_llm_metadata(llm)
+
+     with FlowceptTask(
+         activity_id=activity_id,
+         used=used,
+         custom_metadata={"llm_metadata": llm_metadata, "query_type": "llm_invoke"},
+         subtype="llm_query",
+     ) as t:
+         with get_openai_callback() as cb:
+             response = llm.invoke(messages)
+             generated = {
+                 "text_response": response,
+                 "total_tokens": cb.total_tokens,
+                 "prompt_tokens": cb.prompt_tokens,
+                 "completion_tokens": cb.completion_tokens,
+                 "cost": cb.total_cost,
+             }
+             t.end(generated)
+             return response
+
+
+ def invoke_qa_question(qa_chain: RetrievalQA, query_str: str, activity_id=None) -> str:
+     """
+     Query a RetrievalQA chain with a given question and return the response.
+
+     Parameters
+     ----------
+     qa_chain : RetrievalQA
+         The QA chain object to invoke.
+     query_str : str
+         The question to ask the QA chain.
+     activity_id : str, optional
+         An optional identifier for the activity, used for Flowcept instrumentation.
+
+     Returns
+     -------
+     str
+         The textual result from the QA chain.
+     """
+     used = {"message": query_str}
+     qa_chain_metadata = _extract_qa_chain_metadata(qa_chain)
+     with FlowceptTask(
+         activity_id=activity_id,
+         used=used,
+         subtype="llm_query",
+         custom_metadata={"qa_chain_metadata": qa_chain_metadata, "query_type": "qa_chain"},
+     ) as t:
+         with get_openai_callback() as cb:
+             response = dict(qa_chain({"query": f"{query_str}"}))  # TODO bug?
+             text_response = response.pop("result")
+             generated = {
+                 "response": response,
+                 "text_response": text_response,
+                 "total_tokens": cb.total_tokens,
+                 "prompt_tokens": cb.prompt_tokens,
+                 "completion_tokens": cb.completion_tokens,
+                 "cost": cb.total_cost,
+             }
+             t.end(generated)
+             return text_response
+
+
+ def _extract_llm_metadata(llm: LLM) -> Dict:
+     """
+     Extract metadata from a LangChain LLM instance.
+
+     Parameters
+     ----------
+     llm : LLM
+         The language model instance.
+
+     Returns
+     -------
+     dict
+         Dictionary containing class name, module, model name, and configuration if available.
+     """
+     llm_metadata = {
+         "class_name": llm.__class__.__name__,
+         "module": llm.__class__.__module__,
+         "config": llm.dict() if hasattr(llm, "dict") else {},
+     }
+     return llm_metadata
+
+
+ def _extract_qa_chain_metadata(qa_chain: RetrievalQA) -> Dict:
+     """
+     Extract metadata from a RetrievalQA chain, including LLM and retriever details.
+
+     Parameters
+     ----------
+     qa_chain : RetrievalQA
+         The QA chain to extract metadata from.
+
+     Returns
+     -------
+     dict
+         Metadata dictionary including QA chain class name, retriever details, and optionally LLM metadata.
+     """
+     retriever = getattr(qa_chain, "retriever", None)
+     retriever_metadata = {
+         "class_name": retriever.__class__.__name__ if retriever else None,
+         "module": retriever.__class__.__module__ if retriever else None,
+         "vectorstore_type": getattr(retriever, "vectorstore", None).__class__.__name__
+         if hasattr(retriever, "vectorstore")
+         else None,
+         "retriever_config": retriever.__dict__ if retriever else {},
+     }
+     metadata = {
+         "qa_chain_class": qa_chain.__class__.__name__,
+         "retriever": retriever_metadata,
+     }
+     llm = getattr(qa_chain, "llm", None)
+     if llm:
+         metadata["llm"] = _extract_llm_metadata(llm)
+
+     return metadata
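
Note (not part of the diff): invoke_llm above wraps every model call in a FlowceptTask, so the prompt ("used"), the response, token counts, and cost are captured as task provenance. A minimal usage sketch, assuming Flowcept's agent extras and MQ backend are configured and that the default model built by build_llm_model() from the AGENT settings is acceptable; the activity_id value is illustrative only.

from langchain_core.messages import HumanMessage

from flowcept.flowceptor.adapters.agents.flowcept_llm_prov_capture import invoke_llm

# The activity_id is a label chosen here for illustration; it becomes the
# captured task's activity_id in the provenance records.
answer = invoke_llm(
    [HumanMessage(content="Summarize the last workflow run.")],
    activity_id="adhoc_llm_query",
)
print(answer)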
flowcept/flowceptor/adapters/agents/prompts.py (new file)
@@ -0,0 +1,51 @@
+ from mcp.server.fastmcp.prompts import base
+
+ BASE_ROLE = (
+     "You are a helpful assistant analyzing provenance data from a large-scale workflow composed of multiple tasks."
+ )
+
+ DATA_SCHEMA_PROMPT = (
+     "A task object has its provenance: input data is stored in the 'used' field, output in the 'generated' field. "
+     "Tasks sharing the same 'workflow_id' belong to the same workflow execution trace. "
+     "Pay attention to the 'tags' field, as it may indicate critical tasks. "
+     "The 'telemetry_summary' field reports CPU, disk, memory, and network usage, along with 'duration_sec'. "
+     "Task placement is stored in the 'hostname' field."
+ )
+
+ QUESTION_PROMPT = "I am particularly more interested in the following question: %QUESTION%."
+
+
+ def get_question_prompt(question: str):
+     """Generates a user prompt with the given question filled in."""
+     return base.UserMessage(QUESTION_PROMPT.replace("%QUESTION%", question))
+
+
+ SINGLE_TASK_PROMPT = {
+     "role": f"{BASE_ROLE} You are focusing now on a particular task object which I will provide below.",
+     "data_schema": DATA_SCHEMA_PROMPT,
+     "job": (
+         "Your job is to analyze this single task. Find any anomalies, relationships, or correlations between input,"
+         " output, resource usage metrics, task duration, and task placement. "
+         "Correlations involving 'used' vs 'generated' data are especially important. "
+         "So are relationships between (used or generated) data and resource metrics. "
+         "Highlight outliers or critical information and give actionable insights or recommendations. "
+         "Explain what this task may be doing, using the data provided."
+     ),
+ }
+
+ MULTITASK_PROMPTS = {
+     "role": BASE_ROLE,
+     "data_schema": DATA_SCHEMA_PROMPT,
+     "job": (
+         "Your job is to analyze a list of task objects to identify patterns across tasks, anomalies, relationships,"
+         " or correlations between inputs, outputs, resource usage, duration, and task placement. "
+         "Correlations involving 'used' vs 'generated' data are especially important. "
+         "So are relationships between (used or generated) data and resource metrics. "
+         "Try to infer the purpose of the workflow. "
+         "Highlight outliers or critical tasks and give actionable insights or recommendations. "
+         "Use the data provided to justify your analysis."
+     ),
+ }
+
+ BASE_SINGLETASK_PROMPT = [base.UserMessage(SINGLE_TASK_PROMPT[k]) for k in ("role", "data_schema", "job")]
+ BASE_MULTITASK_PROMPT = [base.UserMessage(MULTITASK_PROMPTS[k]) for k in ("role", "data_schema", "job")]
flowcept/flowceptor/adapters/base_interceptor.py
@@ -9,7 +9,6 @@ from flowcept.commons.flowcept_dataclasses.workflow_object import (
  )
  from flowcept.configs import (
      ENRICH_MESSAGES,
-     INSTRUMENTATION,
  )
  from flowcept.commons.flowcept_logger import FlowceptLogger
  from flowcept.commons.daos.mq_dao.mq_dao_base import MQDao
@@ -29,8 +28,6 @@ from flowcept.flowceptor.telemetry_capture import TelemetryCapture
  class BaseInterceptor(object):
      """Base interceptor class."""

-     # KINDS_TO_NOT_EXPLICITLY_CONTROL = {"dask"}
-
      @staticmethod
      def build(kind: str) -> "BaseInterceptor":
          """Build the Interceptor."""
@@ -43,6 +40,11 @@ class BaseInterceptor(object):
              from flowcept.flowceptor.adapters.tensorboard.tensorboard_interceptor import TensorboardInterceptor

              return TensorboardInterceptor()
+
+         elif kind == "broker_mqtt":
+             from flowcept.flowceptor.adapters.brokers.mqtt_interceptor import MQTTBrokerInterceptor
+
+             return MQTTBrokerInterceptor()
          elif kind == "dask_worker":
              from flowcept.flowceptor.adapters.dask.dask_interceptor import DaskWorkerInterceptor

@@ -50,23 +52,15 @@ class BaseInterceptor(object):
          elif kind in "dask":
              # This is dask's client interceptor. We essentially use it to store the dask workflow.
              # That's why we don't need another special interceptor and we can reuse the instrumentation one.
-             return BaseInterceptor._build_instrumentation_interceptor()
-         elif kind == "instrumentation":
-             return BaseInterceptor._build_instrumentation_interceptor()
-         else:
-             raise NotImplementedError
+             from flowcept.flowceptor.adapters.instrumentation_interceptor import InstrumentationInterceptor

-     @staticmethod
-     def _build_instrumentation_interceptor():
-         # By using singleton, we lose the thread safety for the Interceptor, particularly, its MQ buffer.
-         # Since some use cases need threads, this allows disabling the singleton for more thread safety.
-         is_singleton = INSTRUMENTATION.get("singleton", True)
-         if is_singleton:
+             return InstrumentationInterceptor.get_instance()
+         elif kind == "instrumentation":
              from flowcept.flowceptor.adapters.instrumentation_interceptor import InstrumentationInterceptor

              return InstrumentationInterceptor.get_instance()
          else:
-             return BaseInterceptor(kind="instrumentation")
+             raise NotImplementedError

      def __init__(self, plugin_key=None, kind=None):
          self.logger = FlowceptLogger()
@@ -89,17 +83,21 @@ class BaseInterceptor(object):
          """Prepare a task."""
          raise NotImplementedError()

-     def start(self, bundle_exec_id) -> "BaseInterceptor":
+     def start(self, bundle_exec_id, check_safe_stops: bool = True) -> "BaseInterceptor":
          """Start an interceptor."""
          if not self.started:
              self._bundle_exec_id = bundle_exec_id
-             self._mq_dao.init_buffer(self._interceptor_instance_id, bundle_exec_id)
+             self._mq_dao.init_buffer(self._interceptor_instance_id, bundle_exec_id, check_safe_stops)
              self.started = True
          return self

-     def stop(self):
+     def stop(self, check_safe_stops: bool = True):
          """Stop an interceptor."""
-         self._mq_dao.stop(self._interceptor_instance_id, self._bundle_exec_id)
+         self._mq_dao.stop(
+             interceptor_instance_id=self._interceptor_instance_id,
+             check_safe_stops=check_safe_stops,
+             bundle_exec_id=self._bundle_exec_id,
+         )
          self.started = False

      def observe(self, *args, **kwargs):
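
Note (not part of the diff): the changes above add a "broker_mqtt" kind to BaseInterceptor.build() and thread a check_safe_stops flag through start()/stop() into the MQ DAO. A minimal sketch of the resulting call flow, based only on the signatures shown in this hunk; the bundle id is illustrative, and the MQTT interceptor still needs its broker settings from the adapter configuration (see resources/sample_settings.yaml in the files list).

from flowcept.flowceptor.adapters.base_interceptor import BaseInterceptor

interceptor = BaseInterceptor.build("broker_mqtt")  # returns an MQTTBrokerInterceptor
interceptor.start(bundle_exec_id="example-bundle", check_safe_stops=False)
# ... intercepted MQTT messages flow into the MQ buffer while started ...
interceptor.stop(check_safe_stops=False)

# Any other kind now raises NotImplementedError instead of silently falling back
# to a plain BaseInterceptor, e.g.:
# BaseInterceptor.build("unknown-kind")  -> NotImplementedError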
flowcept/flowceptor/adapters/brokers/__init__.py (new file)
@@ -0,0 +1 @@
+ """Brokers' adapters subpackage."""