flock-core 0.4.2__py3-none-any.whl → 0.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of flock-core might be problematic.

Files changed (38)
  1. flock/core/__init__.py +11 -0
  2. flock/core/flock.py +144 -42
  3. flock/core/flock_agent.py +117 -4
  4. flock/core/flock_evaluator.py +1 -1
  5. flock/core/flock_factory.py +290 -2
  6. flock/core/flock_module.py +101 -0
  7. flock/core/flock_registry.py +39 -2
  8. flock/core/flock_server_manager.py +136 -0
  9. flock/core/logging/telemetry.py +1 -1
  10. flock/core/mcp/__init__.py +1 -0
  11. flock/core/mcp/flock_mcp_server.py +614 -0
  12. flock/core/mcp/flock_mcp_tool_base.py +201 -0
  13. flock/core/mcp/mcp_client.py +658 -0
  14. flock/core/mcp/mcp_client_manager.py +201 -0
  15. flock/core/mcp/mcp_config.py +237 -0
  16. flock/core/mcp/types/__init__.py +1 -0
  17. flock/core/mcp/types/callbacks.py +86 -0
  18. flock/core/mcp/types/factories.py +111 -0
  19. flock/core/mcp/types/handlers.py +240 -0
  20. flock/core/mcp/types/types.py +157 -0
  21. flock/core/mcp/util/__init__.py +0 -0
  22. flock/core/mcp/util/helpers.py +23 -0
  23. flock/core/mixin/dspy_integration.py +45 -12
  24. flock/core/serialization/flock_serializer.py +52 -1
  25. flock/core/util/spliter.py +4 -0
  26. flock/evaluators/declarative/declarative_evaluator.py +4 -3
  27. flock/mcp/servers/sse/__init__.py +1 -0
  28. flock/mcp/servers/sse/flock_sse_server.py +139 -0
  29. flock/mcp/servers/stdio/__init__.py +1 -0
  30. flock/mcp/servers/stdio/flock_stdio_server.py +138 -0
  31. flock/mcp/servers/websockets/__init__.py +1 -0
  32. flock/mcp/servers/websockets/flock_websocket_server.py +119 -0
  33. flock/modules/performance/metrics_module.py +159 -1
  34. {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/METADATA +278 -64
  35. {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/RECORD +38 -18
  36. {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/WHEEL +0 -0
  37. {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/entry_points.txt +0 -0
  38. {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/licenses/LICENSE +0 -0
flock/evaluators/declarative/declarative_evaluator.py

@@ -52,7 +52,7 @@ class DeclarativeEvaluator(
     _lm_history: list = PrivateAttr(default_factory=list)
 
     async def evaluate(
-        self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
+        self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any], mcp_tools: list[Any] | None = None
     ) -> dict[str, Any]:
         """Evaluate using DSPy, with optional asynchronous streaming."""
         # --- Setup Signature and LM ---
@@ -84,6 +84,7 @@ class DeclarativeEvaluator(
                 _dspy_signature,
                 override_evaluator_type=self.config.override_evaluator_type,
                 tools=tools,
+                mcp_tools=mcp_tools,
                 kwargs=self.config.kwargs,
             )
         except Exception as setup_error:
@@ -106,7 +107,7 @@ class DeclarativeEvaluator(
                     "DSPy task could not be created or is not callable."
                 )
 
-            streaming_task = dspy.streamify(agent_task)
+            streaming_task = dspy.streamify(agent_task, is_async_program=True)
             stream_generator: Generator = streaming_task(**inputs)
             delta_content = ""
 
@@ -139,7 +140,7 @@ class DeclarativeEvaluator(
            logger.info(f"Evaluating agent '{agent.name}' without streaming.")
            try:
                # Ensure the call is awaited if the underlying task is async
-                result_obj = agent_task(**inputs)
+                result_obj = await agent_task.acall(**inputs)
                result_dict, cost, lm_history = self._process_result(
                    result_obj, inputs
                )
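
The new mcp_tools parameter is optional and defaults to None, so existing callers keep working. A minimal sketch of the extended call, assuming an already-configured DeclarativeEvaluator instance, a FlockAgent, and a list of discovered MCP tools (all variable names here are illustrative only):

# Hedged sketch: `evaluator`, `agent`, and `discovered_mcp_tools` are assumed
# to exist already; only the keyword argument `mcp_tools` is new in 0.4.5.
result = await evaluator.evaluate(
    agent=agent,
    inputs={"query": "Summarize the latest report"},
    tools=[],
    mcp_tools=discovered_mcp_tools,  # may also be omitted or passed as None
)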
flock/mcp/servers/sse/__init__.py

@@ -0,0 +1 @@
+"""Default SSE Server Implementation for Flock."""
flock/mcp/servers/sse/flock_sse_server.py

@@ -0,0 +1,139 @@
+"""This module provides the Flock SSE Server functionality."""
+
+import copy
+from contextlib import AbstractAsyncContextManager
+from typing import Any, Literal
+
+from anyio.streams.memory import (
+    MemoryObjectReceiveStream,
+    MemoryObjectSendStream,
+)
+from mcp.client.sse import sse_client
+from mcp.types import JSONRPCMessage
+from opentelemetry import trace
+from pydantic import Field
+
+from flock.core.logging.logging import get_logger
+from flock.core.mcp.flock_mcp_server import FlockMCPServerBase
+from flock.core.mcp.mcp_client import FlockMCPClientBase
+from flock.core.mcp.mcp_client_manager import FlockMCPClientManagerBase
+from flock.core.mcp.mcp_config import (
+    FlockMCPConfigurationBase,
+    FlockMCPConnectionConfigurationBase,
+)
+from flock.core.mcp.types.types import SseServerParameters
+
+logger = get_logger("mcp.sse.server")
+tracer = trace.get_tracer(__name__)
+
+
+class FlockSSEConnectionConfig(FlockMCPConnectionConfigurationBase):
+    """Concrete ConnectionConfig for an SSEClient."""
+
+    # The only things we need to override here are the concrete
+    # transport_type and connection_parameters fields.
+    transport_type: Literal["sse"] = Field(
+        default="sse", description="Use the sse transport type."
+    )
+
+    connection_parameters: SseServerParameters = Field(
+        ..., description="SSE Server Connection Parameters."
+    )
+
+
+class FlockSSEConfig(FlockMCPConfigurationBase):
+    """Configuration for SSE Clients."""
+
+    # The only thing we need to override here is the concrete
+    # connection config. The rest is generic enough to handle
+    # everything else.
+    connection_config: FlockSSEConnectionConfig = Field(
+        ..., description="Concrete SSE Connection Configuration."
+    )
+
+
+class FlockSSEClient(FlockMCPClientBase):
+    """Client for SSE Servers."""
+
+    config: FlockSSEConfig = Field(..., description="Client configuration.")
+
+    async def create_transport(
+        self,
+        params: SseServerParameters,
+        additional_params: dict[str, Any] | None = None,
+    ) -> AbstractAsyncContextManager[
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Return an async context manager whose __aenter__ method yields (read_stream, send_stream)."""
+        # Avoid modifying the config of the client as a side effect.
+        param_copy = copy.deepcopy(params)
+
+        if self.additional_params:
+            override_headers = bool(
+                self.additional_params.get("override_headers", False)
+            )
+            if "headers" in self.additional_params:
+                if override_headers:
+                    param_copy.headers = self.additional_params.get(
+                        "headers", params.headers
+                    )
+                else:
+                    param_copy.headers.update(
+                        self.additional_params.get("headers", {})
+                    )
+            if "read_timeout_seconds" in self.additional_params:
+                param_copy.timeout = self.additional_params.get(
+                    "read_timeout_seconds", params.timeout
+                )
+
+            if "sse_read_timeout" in self.additional_params:
+                param_copy.sse_read_timeout = self.additional_params.get(
+                    "sse_read_timeout",
+                    params.sse_read_timeout,
+                )
+            if "url" in self.additional_params:
+                param_copy.url = self.additional_params.get(
+                    "url",
+                    params.url,
+                )
+
+        return sse_client(
+            url=param_copy.url,
+            headers=param_copy.headers,
+            timeout=float(param_copy.timeout),
+            sse_read_timeout=float(param_copy.sse_read_timeout),
+        )
+
+
+class FlockSSEClientManager(FlockMCPClientManagerBase):
+    """Manager for handling SSE Clients."""
+
+    client_config: FlockSSEConfig = Field(
+        ..., description="Configuration for clients."
+    )
+
+    async def make_client(
+        self, additional_params: dict[str, Any]
+    ) -> FlockSSEClient:
+        """Create a new client instance."""
+        new_client = FlockSSEClient(
+            config=self.client_config, additional_params=additional_params
+        )
+        return new_client
+
+
+class FlockSSEServer(FlockMCPServerBase):
+    """Class which represents an MCP Server using the SSE Transport type."""
+
+    config: FlockSSEConfig = Field(..., description="Config for the server.")
+
+    async def initialize(self) -> FlockSSEClientManager:
+        """Called when initializing the server."""
+        client_manager = FlockSSEClientManager(
+            client_config=self.config,
+        )
+
+        return client_manager
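
Taken together, these classes form a complete SSE transport: the connection config pins the transport type, the client builds the sse_client transport, and the server's initialize() returns the client manager. A minimal wiring sketch, assuming SseServerParameters accepts the url/headers/timeout fields that create_transport reads above and that the base configuration classes require no fields beyond those shown (both assumptions, since their definitions are outside this diff):

# Hedged sketch -- required fields of the base config classes are assumed.
params = SseServerParameters(
    url="https://example.com/sse",           # assumed field, read by create_transport above
    headers={"Authorization": "Bearer ..."},  # placeholder token
    timeout=5,
    sse_read_timeout=300,
)
connection = FlockSSEConnectionConfig(connection_parameters=params)
config = FlockSSEConfig(connection_config=connection)
server = FlockSSEServer(config=config)
manager = await server.initialize()  # returns a FlockSSEClientManager
client = await manager.make_client(additional_params={"headers": {"X-Trace": "1"}})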
flock/mcp/servers/stdio/__init__.py

@@ -0,0 +1 @@
+"""Default Stdio Server Implementation for Flock."""
flock/mcp/servers/stdio/flock_stdio_server.py

@@ -0,0 +1,138 @@
+"""This module provides the default implementation for MCP servers using the stdio transport."""
+
+import copy
+from contextlib import AbstractAsyncContextManager
+from typing import Any, Literal
+
+from anyio.streams.memory import (
+    MemoryObjectReceiveStream,
+    MemoryObjectSendStream,
+)
+from mcp import stdio_client
+from mcp.types import JSONRPCMessage
+from opentelemetry import trace
+from pydantic import Field
+
+from flock.core.logging.logging import get_logger
+from flock.core.mcp.flock_mcp_server import FlockMCPServerBase
+from flock.core.mcp.mcp_client import FlockMCPClientBase
+from flock.core.mcp.mcp_client_manager import FlockMCPClientManagerBase
+from flock.core.mcp.mcp_config import (
+    FlockMCPConfigurationBase,
+    FlockMCPConnectionConfigurationBase,
+)
+from flock.core.mcp.types.types import StdioServerParameters
+
+logger = get_logger("mcp.stdio.server")
+tracer = trace.get_tracer(__name__)
+
+
+class FlockStdioConnectionConfig(FlockMCPConnectionConfigurationBase):
+    """Concrete ConnectionConfig for a StdioClient."""
+
+    # The only things we need to override here are the concrete
+    # transport_type and connection_parameters fields.
+    transport_type: Literal["stdio"] = Field(
+        default="stdio", description="Use the stdio transport type."
+    )
+
+    connection_parameters: StdioServerParameters = Field(
+        ...,
+        description="StdioServerParameters to be used for the stdio transport.",
+    )
+
+
+class FlockStdioConfig(FlockMCPConfigurationBase):
+    """Configuration for Stdio Clients."""
+
+    # The only thing we need to override here is the
+    # concrete connection config. The rest is generic
+    # enough to handle everything else.
+    connection_config: FlockStdioConnectionConfig = Field(
+        ..., description="Concrete Stdio Connection Configuration."
+    )
+
+
+class FlockStdioClient(FlockMCPClientBase):
+    """Client for Stdio Servers."""
+
+    config: FlockStdioConfig = Field(..., description="Client Configuration.")
+
+    async def create_transport(
+        self,
+        params: StdioServerParameters,
+        additional_params: dict[str, Any] | None = None,
+    ) -> AbstractAsyncContextManager[
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Return an async context manager whose __aenter__ method yields (read_stream, send_stream)."""
+        # additional_params take precedence over the passed config, as modules
+        # can influence how to connect to a stdio server.
+
+        # Avoid modifying the config of the client as a side effect.
+        param_copy = copy.deepcopy(params)
+
+        if additional_params:
+            # If present, modify server parameters based on certain keys.
+            if "command" in additional_params:
+                param_copy.command = additional_params.get(
+                    "command", params.command
+                )
+            if "args" in additional_params:
+                param_copy.args = additional_params.get("args", params.args)
+            if "env" in additional_params:
+                param_copy.env = additional_params.get("env", params.env)
+
+            if "cwd" in additional_params:
+                param_copy.cwd = additional_params.get("cwd", params.cwd)
+
+            if "encoding" in additional_params:
+                param_copy.encoding = additional_params.get(
+                    "encoding", params.encoding
+                )
+
+            if "encoding_error_handler" in additional_params:
+                param_copy.encoding_error_handler = additional_params.get(
+                    "encoding_error_handler", params.encoding_error_handler
+                )
+
+        # stdio_client already is an AsyncContextManager
+        return stdio_client(server=param_copy)
+
+
+# Not really needed but kept here as an example.
+class FlockStdioClientManager(FlockMCPClientManagerBase):
+    """Manager for handling Stdio Clients."""
+
+    client_config: FlockStdioConfig = Field(
+        ..., description="Configuration for clients."
+    )
+
+    async def make_client(
+        self, additional_params: dict[str, Any] | None = None
+    ):
+        """Create a new client instance with any additional parameters."""
+        new_client = FlockStdioClient(
+            config=self.client_config,
+            additional_params=additional_params,
+        )
+        return new_client
+
+
+class FlockMCPStdioServer(FlockMCPServerBase):
+    """Class which represents an MCP Server using the Stdio Transport type.
+
+    This means (most likely) that the server is a locally
+    executed script.
+    """
+
+    config: FlockStdioConfig = Field(..., description="Config for the server.")
+
+    async def initialize(self) -> FlockStdioClientManager:
+        """Called when initializing the server."""
+        client_manager = FlockStdioClientManager(client_config=self.config)
+
+        return client_manager
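
The stdio variant is the one most local MCP servers will use: the client spawns the configured command and speaks JSON-RPC over its stdin/stdout. A minimal wiring sketch, assuming StdioServerParameters takes the command/args/env fields that create_transport reads above and that the base config classes need nothing beyond what is shown here (assumptions, since those definitions live outside this diff):

# Hedged sketch -- required fields of the base config classes are assumed.
params = StdioServerParameters(
    command="python",
    args=["-m", "my_mcp_server"],   # hypothetical local MCP server module
    env={"LOG_LEVEL": "info"},
)
connection = FlockStdioConnectionConfig(connection_parameters=params)
config = FlockStdioConfig(connection_config=connection)
server = FlockMCPStdioServer(config=config)
manager = await server.initialize()
# Per-call overrides win over the stored config, e.g. a different working directory:
client = await manager.make_client(additional_params={"cwd": "/tmp"})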
flock/mcp/servers/websockets/__init__.py

@@ -0,0 +1 @@
+"""Default Websocket Server Implementation."""
flock/mcp/servers/websockets/flock_websocket_server.py

@@ -0,0 +1,119 @@
+"""This module provides the default implementation for MCP servers using the websocket transport."""
+
+import copy
+from contextlib import AbstractAsyncContextManager
+from typing import Any, Literal
+
+from anyio.streams.memory import (
+    MemoryObjectReceiveStream,
+    MemoryObjectSendStream,
+)
+from mcp.client.websocket import websocket_client
+from mcp.types import JSONRPCMessage
+from opentelemetry import trace
+from pydantic import Field
+
+from flock.core.logging.logging import get_logger
+from flock.core.mcp.flock_mcp_server import FlockMCPServerBase
+from flock.core.mcp.mcp_client import FlockMCPClientBase
+from flock.core.mcp.mcp_client_manager import FlockMCPClientManagerBase
+from flock.core.mcp.mcp_config import (
+    FlockMCPConfigurationBase,
+    FlockMCPConnectionConfigurationBase,
+)
+from flock.core.mcp.types.types import WebsocketServerParameters
+
+logger = get_logger("mcp.ws.server")
+tracer = trace.get_tracer(__name__)
+
+
+# Optional, to provide type hints.
+class FlockWSConnectionConfig(FlockMCPConnectionConfigurationBase):
+    """Concrete ConnectionConfig for a WS Client."""
+
+    # The only things we need to override here are the concrete
+    # transport_type and connection_parameters fields.
+    transport_type: Literal["websockets"] = Field(
+        default="websockets", description="Use the websockets transport type."
+    )
+
+    connection_parameters: WebsocketServerParameters = Field(
+        ...,
+        description="WebsocketServer parameters to be used for the websocket transport.",
+    )
+
+
+# Optional, to provide type hints.
+class FlockWSConfig(FlockMCPConfigurationBase):
+    """Configuration for Websocket clients."""
+
+    # The only thing we need to override here is the concrete
+    # connection config. The rest is generic enough to handle
+    # everything else. (This is just here so that type hints work for the
+    # rest of the implementation; we could omit this override entirely.)
+    connection_config: FlockWSConnectionConfig = Field(
+        ..., description="Concrete WS connection configuration"
+    )
+
+
+class FlockWSClient(FlockMCPClientBase):
+    """Client for Websocket servers."""
+
+    config: FlockWSConfig = Field(..., description="Client Configuration")
+
+    # This one we HAVE to specify. This tells Flock
+    # how to create the underlying connection.
+    async def create_transport(
+        self,
+        params: WebsocketServerParameters,
+        additional_params: dict[str, Any] | None = None,
+    ) -> AbstractAsyncContextManager[
+        tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Return an async context manager whose __aenter__ method yields a read_stream and a send_stream."""
+        # additional_params take precedence over the passed config, as modules
+        # can influence how to connect to a ws server.
+
+        # Avoid modifying the underlying config directly.
+        param_copy = copy.deepcopy(params)
+
+        if additional_params and "url" in additional_params:
+            # If present, then apply the changes in "url" to the create_transport logic.
+            param_copy.url = additional_params.get("url", params.url)
+
+        return websocket_client(
+            url=param_copy.url
+        )  # return the async context manager
+
+
+# Not really needed, but kept for type hints and as an example.
+class FlockWSClientManager(FlockMCPClientManagerBase):
+    """Manager for handling websocket clients."""
+
+    client_config: FlockWSConfig = Field(
+        ..., description="Configuration for clients."
+    )
+
+    async def make_client(self, additional_params=None):
+        """Create a new client instance."""
+        new_client = FlockWSClient(
+            config=self.client_config,
+            additional_params=additional_params,
+        )
+        return new_client
+
+
+class FlockWSServer(FlockMCPServerBase):
+    """Class which represents an MCP Server using the websocket transport type."""
+
+    config: FlockWSConfig = Field(..., description="Config for the server.")
+
+    # Specify the concrete type for the server.
+    async def initialize(self) -> FlockWSClientManager:
+        """Called when initializing the server."""
+        client_manager = FlockWSClientManager(client_config=self.config)
+
+        return client_manager
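
The websocket transport only needs a URL, so its wiring is the shortest of the three new transports. A minimal sketch, under the same assumption as above that the base config classes require no fields beyond those shown and that WebsocketServerParameters takes only the url field that create_transport reads:

# Hedged sketch -- WebsocketServerParameters is assumed to take only a url field.
connection = FlockWSConnectionConfig(
    connection_parameters=WebsocketServerParameters(url="wss://example.com/mcp")
)
server = FlockWSServer(config=FlockWSConfig(connection_config=connection))
manager = await server.initialize()
client = await manager.make_client()  # additional_params defaults to None here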
flock/modules/performance/metrics_module.py

@@ -15,6 +15,7 @@ from flock.core.context.context import FlockContext
 from flock.core.flock_agent import FlockAgent
 from flock.core.flock_module import FlockModule, FlockModuleConfig
 from flock.core.flock_registry import flock_component
+from flock.core.mcp.flock_mcp_server import FlockMCPServerBase
 
 
 class MetricPoint(BaseModel):
@@ -92,7 +93,10 @@ class MetricsModule(FlockModule):
         MetricsModule._INSTANCE = self
         self._metrics = defaultdict(list)
         self._start_time: float | None = None
+        self._server_start_time: float | None = None
         self._start_memory: int | None = None
+        self._server_start_memory: int | None = None
+        self._client_refreshs: int = 0
 
         # Set up storage
         if self.config.storage_type == "json":
@@ -258,7 +262,7 @@ class MetricsModule(FlockModule):
         # Get all unique metric names from files
         all_metrics = self._load_metrics_from_files()
 
-        for metric_name in all_metrics.keys():
+        for metric_name in all_metrics:
             stats = self.get_statistics(metric_name)
             if stats:  # Only include metrics that have data
                 summary["metrics"][metric_name] = stats
@@ -518,3 +522,157 @@ class MetricsModule(FlockModule):
         if instance is None:
             return  # silently ignore if module isn't active
         instance._record_metric(name, value, tags or {})
+
+    # --- MCP Server Lifecycle Hooks ---
+    async def on_server_error(
+        self, server: FlockMCPServerBase, error: Exception
+    ) -> None:
+        """Record server error metrics."""
+        self._record_metric(
+            "errors",
+            1,
+            {
+                "server": server.config.name,
+                "error_type": type(error).__name__,
+            },
+        )
+
+    async def on_pre_server_init(self, server: FlockMCPServerBase):
+        """Initialize metrics collection for server."""
+        self._server_start_time = time.time()
+
+        if self.config.collect_memory:
+            self._server_start_memory = psutil.Process().memory_info().rss
+            self._record_metric(
+                "server_memory",
+                self._server_start_memory,
+                {"server": server.config.name, "phase": "pre_init"},
+            )
+
+    async def on_post_server_init(self, server: FlockMCPServerBase):
+        """Collect metrics after server starts."""
+        if self.config.collect_memory:
+            checkpoint_memory = psutil.Process().memory_info().rss
+            self._record_metric(
+                "server_memory",
+                checkpoint_memory,
+                {"server": server.config.name, "phase": "post_init"},
+            )
+
+    async def on_pre_server_terminate(self, server: FlockMCPServerBase):
+        """Collect metrics before server terminates."""
+        if self.config.collect_memory:
+            checkpoint_memory = psutil.Process().memory_info().rss
+            self._record_metric(
+                "server_memory",
+                checkpoint_memory,
+                {"server": server.config.name, "phase": "pre_terminate"},
+            )
+
+    async def on_post_server_terminate(self, server: FlockMCPServerBase):
+        """Collect metrics after server terminates.
+
+        Clean up and final metric recording.
+        """
+        if self.config.storage_type == "json":
+            # Save aggregated metrics
+            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+            summary_file = os.path.join(
+                self.config.metrics_dir,
+                f"summary_{server.config.name}_{timestamp}.json",
+            )
+
+            # Calculate summary for all metrics
+            summary = {
+                "server": server.config.name,
+                "timestamp": timestamp,
+                "metrics": {},
+            }
+
+            # Get all unique metric names from files
+            all_metrics = self._load_metrics_from_files()
+
+            for metric_name in all_metrics:
+                stats = self.get_statistics(metric_name)
+                if stats:  # Only include metrics that have data
+                    summary["metrics"][metric_name] = stats
+            with open(summary_file, "w") as f:
+                json.dump(summary, f, indent=2)
+
+    async def on_pre_mcp_call(
+        self, server: FlockMCPServerBase, arguments: Any | None = None
+    ):
+        """Record pre-call metrics."""
+        if self.config.collect_cpu:
+            cpu_percent = psutil.Process().cpu_percent()
+            self._record_metric(
+                "cpu",
+                cpu_percent,
+                {"server": server.config.name, "phase": "pre_mcp_call"},
+            )
+        if self.config.collect_memory:
+            current_memory = psutil.Process().memory_info().rss
+            memory_diff = current_memory - self._server_start_memory
+            self._record_metric(
+                "memory",
+                memory_diff,
+                {"server": server.config.name, "phase": "pre_mcp_call"},
+            )
+
+        if isinstance(arguments, dict):
+            self._record_metric(
+                "arguments",
+                len(arguments),
+                {
+                    "server": server.config.name,
+                    "phase": "pre_mcp_call",
+                } | arguments,
+            )
+
+    async def on_post_mcp_call(
+        self, server: FlockMCPServerBase, result: Any | None = None
+    ):
+        """Record post-call metrics."""
+        if self.config.collect_timing and self._server_start_time:
+            latency = time.time() - self._server_start_time
+            self._record_metric(
+                "latency", latency, {"server": server.config.name}
+            )
+
+            # Check for alerts
+            if self._should_alert("latency", latency):
+                # In practice, you'd want to integrate with a proper alerting system
+                print(f"ALERT: High latency detected: {latency * 1000:.2f}ms")
+
+        if self.config.collect_cpu:
+            cpu_percent = psutil.Process().cpu_percent()
+            self._record_metric(
+                "cpu",
+                cpu_percent,
+                {"server": server.config.name, "phase": "post_mcp_call"},
+            )
+        if self.config.collect_memory:
+            current_memory = psutil.Process().memory_info().rss
+            memory_diff = current_memory - self._server_start_memory
+            self._record_metric(
+                "memory",
+                memory_diff,
+                {"server": server.config.name, "phase": "post_mcp_call"},
+            )
+
+    async def on_connect(
+        self, server: FlockMCPServerBase, additional_params: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Collect metrics during connect."""
+        # Track the refresh rate for clients
+        if "refresh_client" in additional_params and additional_params.get(
+            "refresh_client", False
+        ):
+            self._client_refreshs += 1
+            self._record_metric(
+                "client_refreshs",
+                self._client_refreshs,
+                {"server": server.config.name, "phase": "connect"},
+            )
+
+        return additional_params
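
These hooks extend the metrics module to the new MCP server lifecycle, tagging every metric with server.config.name. A sketch of how a caller might bracket an MCP tool call with them, assuming an active MetricsModule instance named metrics and a server object exposing config.name; how Flock wires these hooks in internally is not shown in this diff:

# Hedged sketch -- relies only on the hook signatures shown above.
await metrics.on_pre_mcp_call(server, arguments={"query": "ping"})
try:
    result = ...  # the actual MCP tool call happens here (elided)
    await metrics.on_post_mcp_call(server, result=result)
except Exception as exc:
    await metrics.on_server_error(server, exc)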