flock-core 0.4.512__py3-none-any.whl → 0.4.514__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of flock-core might be problematic.
- flock/core/execution/opik_executor.py +103 -0
- flock/core/flock_agent.py +1 -1
- flock/core/flock_factory.py +85 -2
- flock/core/interpreter/python_interpreter.py +87 -81
- flock/core/logging/logging.py +8 -0
- flock/core/mcp/flock_mcp_server.py +30 -4
- flock/core/mcp/flock_mcp_tool_base.py +1 -1
- flock/core/mcp/mcp_client.py +57 -28
- flock/core/mcp/mcp_client_manager.py +1 -1
- flock/core/mcp/mcp_config.py +245 -9
- flock/core/mcp/types/callbacks.py +3 -5
- flock/core/mcp/types/factories.py +12 -14
- flock/core/mcp/types/handlers.py +9 -12
- flock/core/mcp/types/types.py +205 -2
- flock/core/mixin/dspy_integration.py +1 -1
- flock/core/util/input_resolver.py +1 -1
- flock/mcp/servers/sse/flock_sse_server.py +21 -14
- flock/mcp/servers/streamable_http/__init__.py +0 -0
- flock/mcp/servers/streamable_http/flock_streamable_http_server.py +169 -0
- flock/mcp/servers/websockets/flock_websocket_server.py +3 -3
- flock/tools/code_tools.py +111 -0
- flock/webapp/app/api/execution.py +1 -1
- flock/webapp/app/main.py +1 -1
- {flock_core-0.4.512.dist-info → flock_core-0.4.514.dist-info}/METADATA +4 -1
- {flock_core-0.4.512.dist-info → flock_core-0.4.514.dist-info}/RECORD +29 -26
- /flock/core/util/{spliter.py → splitter.py} +0 -0
- {flock_core-0.4.512.dist-info → flock_core-0.4.514.dist-info}/WHEEL +0 -0
- {flock_core-0.4.512.dist-info → flock_core-0.4.514.dist-info}/entry_points.txt +0 -0
- {flock_core-0.4.512.dist-info → flock_core-0.4.514.dist-info}/licenses/LICENSE +0 -0
flock/core/execution/opik_executor.py
ADDED
@@ -0,0 +1,103 @@
+# src/flock/core/execution/evaluation_processor.py
+"""Contains the EvaluationProcessor class responsible for evaluating Flock agents
+against datasets using various metrics.
+"""
+
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Union,
+)
+
+from opik import Opik
+from pandas import DataFrame
+
+# Conditional pandas import
+try:
+    import pandas as pd
+
+    PANDAS_AVAILABLE = True
+except ImportError:
+    pd = None # type: ignore
+    PANDAS_AVAILABLE = False
+
+# Box for results
+from datasets import Dataset as HFDataset
+
+from flock.core.evaluation.utils import (
+    normalize_dataset,
+    # Import metric calculation/aggregation helpers
+)
+
+# Flock core imports
+from flock.core.logging.logging import get_logger
+
+if TYPE_CHECKING:
+    from flock.core.flock import Flock
+    from flock.core.flock_agent import FlockAgent
+    # Conditional types
+
+
+logger = get_logger("execution.opik")
+
+
+class OpikExecutor:
+    """Handles the evaluation of Flock agents against datasets."""
+
+    def __init__(self, flock_instance: "Flock"):
+        """Initializes the EvaluationProcessor.
+
+        Args:
+            flock_instance: The Flock instance this processor will use.
+        """
+        self.flock = flock_instance
+
+    async def evaluate_with_opik(
+        self,
+        dataset: str | Path | list[dict[str, Any]] | DataFrame | HFDataset,
+        start_agent: Union["FlockAgent", str],
+        input_mapping: dict[str, str],
+        answer_mapping: dict[str, str],) -> DataFrame | list[dict[str, Any]]:
+        """Evaluates the Flock's performance against a dataset asynchronously."""
+        logger.info(f"Evaluating Flock's performance against dataset: {dataset}")
+
+        # Evaluation task
+        def evaluation_task(dataset_item):
+            flock_result = self.flock.run(start_agent=start_agent, input=dataset_item, box_result=False)
+
+            result = {
+                "input": dataset_item.get("test"),
+                "output": flock_result.get("answer"),
+                "context": ["placeholder string"]
+            }
+
+            return result
+
+        start_agent_name = (
+            start_agent.name if hasattr(start_agent, "name") else start_agent
+        )
+        dataset_name = str(dataset)
+
+        # --- 1. Normalize Dataset ---
+        try:
+            df = normalize_dataset(dataset) # Uses helper
+            if df is None or df.empty:
+                raise ValueError(
+                    "Provided dataset is empty or could not be normalized."
+                )
+            logger.info(f"Normalized dataset with {len(df)} items.")
+        except Exception as e:
+            logger.error(
+                f"Failed to load or normalize dataset: {e}", exc_info=True
+            )
+            raise ValueError(f"Dataset processing failed: {e}") from e
+
+        logger.info(f"type(df): {type(df)}") # ➜ <class 'pandas.core.frame.DataFrame'>
+        logger.info(f"df.shape: {df.shape}") # e.g. (123456, N_COLUMNS+2)
+        logger.info(f"df['split'].value_counts(): {df['split'].value_counts()}")
+        logger.info(f"df['config'].unique(): {df['config'].unique()}")
+        client = Opik()
+        dataset = client.get_or_create_dataset(name=dataset_name)
+        dataset.insert_from_pandas(dataframe=df, ignore_keys=["source"])
+        logger.info(f"Imported dataset to Opik")
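The new module above mirrors a normalized dataset into Opik and runs the configured start agent per item. A minimal usage sketch based only on the signature shown above, assuming an already-configured Flock instance and Opik credentials available in the environment; the dataset id, agent name, and mappings are placeholders, not part of the flock-core API:

import asyncio

from flock.core.execution.opik_executor import OpikExecutor

async def main(flock):  # `flock` is an existing, fully configured Flock instance
    executor = OpikExecutor(flock)
    # evaluate_with_opik normalizes the dataset, imports it into an Opik
    # dataset named str(dataset), and runs the start agent for each item.
    await executor.evaluate_with_opik(
        dataset="hf-org/some-dataset",          # placeholder dataset id
        start_agent="my_agent",                 # placeholder agent name
        input_mapping={"query": "test"},        # placeholder column mappings
        answer_mapping={"answer": "expected"},
    )

# asyncio.run(main(flock))

Note that the normalized DataFrame is expected to carry "split" and "config" columns, since the method logs both unconditionally.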
flock/core/flock_agent.py
CHANGED
@@ -384,7 +384,7 @@ class FlockAgent(BaseModel, Serializable, DSPyIntegrationMixin, ABC):
                 )
             else:
                 logger.warning(
-                    f"No Server with name '{server}' registered! Skipping."
+                    f"No Server with name '{server.config.name}' registered! Skipping."
                 )
             mcp_tools = mcp_tools + server_tools
 
flock/core/flock_factory.py
CHANGED
@@ -5,7 +5,8 @@ from collections.abc import Callable
 from pathlib import Path
 from typing import Any, Literal
 
-
+import httpx
+from pydantic import AnyUrl, BaseModel, ConfigDict, Field, FileUrl
 
 from flock.core.config.scheduled_agent_config import ScheduledAgentConfig
 from flock.core.flock_agent import FlockAgent, SignatureType
@@ -24,6 +25,7 @@ from flock.core.mcp.types.types import (
     MCPRoot,
     SseServerParameters,
     StdioServerParameters,
+    StreamableHttpServerParameters,
     WebsocketServerParameters,
 )
 from flock.evaluators.declarative.declarative_evaluator import (
@@ -40,6 +42,11 @@ from flock.mcp.servers.stdio.flock_stdio_server import (
     FlockStdioConfig,
     FlockStdioConnectionConfig,
 )
+from flock.mcp.servers.streamable_http.flock_streamable_http_server import (
+    FlockStreamableHttpConfig,
+    FlockStreamableHttpConnectionConfig,
+    FlockStreamableHttpServer,
+)
 from flock.mcp.servers.websockets.flock_websocket_server import (
     FlockWSConfig,
     FlockWSConnectionConfig,
@@ -101,6 +108,44 @@ class FlockFactory:
             description="The text encoding error handler. See https://docs.python.org/3/library/codecs.html#codec-base-classes for explanations of possible values",
         )
 
+    class StreamableHttpParams(BaseModel):
+        """Factory-Params for Streamable Http Servers."""
+
+        url: str | AnyUrl = Field(
+            ...,
+            description="Url the server listens at."
+        )
+
+        headers: dict[str, Any] | None = Field(
+            default=None,
+            description="Additional Headers to pass to the client."
+        )
+
+        auth: httpx.Auth | None = Field(
+            default=None,
+            description="Httpx Auth Schema."
+        )
+
+        timeout_seconds: float | int = Field(
+            default=5,
+            description="Http Timeout in Seconds"
+        )
+
+        sse_read_timeout_seconds: float | int = Field(
+            default=60*5,
+            description="How many seconds to wait for server-sent events until closing the connection."
+        )
+
+        terminate_on_close: bool = Field(
+            default=True,
+            description="Whether or not to terminate the underlying connection on close."
+        )
+
+        model_config = ConfigDict(
+            arbitrary_types_allowed=True,
+            extra="allow",
+        )
+
     class SSEParams(BaseModel):
         """Factory-Params for SSE-Servers."""
 
@@ -123,6 +168,16 @@ class FlockFactory:
             description="How many seconds to wait for server-sent events until closing the connection. (connections will be automatically re-established.)",
         )
 
+        auth: httpx.Auth | None = Field(
+            default=None,
+            description="Httpx Auth Scheme."
+        )
+
+        model_config = ConfigDict(
+            arbitrary_types_allowed=True,
+            extra="allow",
+        )
+
     class WebsocketParams(BaseModel):
         """Factory-Params for Websocket Servers."""
 
@@ -134,7 +189,7 @@ class FlockFactory:
     @staticmethod
     def create_mcp_server(
         name: str,
-        connection_params: SSEParams | StdioParams | WebsocketParams,
+        connection_params: StreamableHttpParams | SSEParams | StdioParams | WebsocketParams,
         max_retries: int = 3,
         mount_points: list[str | MCPRoot] | None = None,
         timeout_seconds: int | float = 10,
@@ -176,6 +231,9 @@ class FlockFactory:
         if isinstance(connection_params, FlockFactory.WebsocketParams):
             server_kind = "websockets"
            concrete_server_cls = FlockWSServer
+        if isinstance(connection_params, FlockFactory.StreamableHttpParams):
+            server_kind = "streamable_http"
+            concrete_server_cls = FlockStreamableHttpServer
 
         # convert mount points.
         mounts: list[MCPRoot] = []
@@ -244,12 +302,37 @@ class FlockFactory:
                 caching_config=caching_config,
                 callback_config=callback_config,
             )
+        elif server_kind == "streamable_http":
+            # build streamable http config
+            connection_config = FlockStreamableHttpConnectionConfig(
+                max_retries=max_retries,
+                connection_parameters=StreamableHttpServerParameters(
+                    url=connection_params.url,
+                    headers=connection_params.headers,
+                    auth=connection_params.auth,
+                    timeout=connection_params.timeout_seconds,
+                    sse_read_timeout=connection_params.sse_read_timeout_seconds,
+                    terminate_on_close=connection_params.terminate_on_close,
+                ),
+                mount_points=mounts,
+                server_logging_level=server_logging_level,
+            )
+
+            server_config = FlockStreamableHttpConfig(
+                name=name,
+                connection_config=connection_config,
+                feature_config=feature_config,
+                caching_config=caching_config,
+                callback_config=callback_config,
+            )
+
         elif server_kind == "sse":
             # build sse config
             connection_config = FlockSSEConnectionConfig(
                 max_retries=max_retries,
                 connection_parameters=SseServerParameters(
                     url=connection_params.url,
+                    auth=connection_params.auth,
                     headers=connection_params.headers,
                     timeout=connection_params.timeout_seconds,
                     sse_read_timeout=connection_params.sse_read_timeout_seconds,
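Taken together, the factory hunks above register streamable HTTP as a fourth MCP transport next to stdio, SSE, and websockets. A minimal sketch of how the new StreamableHttpParams might be passed to FlockFactory.create_mcp_server, based only on the signature shown above and assuming the remaining keyword arguments keep their defaults; the server name, URL, and token are placeholders:

from flock.core.flock_factory import FlockFactory

# Placeholder name/URL/token; auth could instead be any httpx.Auth instance.
streamable_server = FlockFactory.create_mcp_server(
    name="my-streamable-http-server",
    connection_params=FlockFactory.StreamableHttpParams(
        url="http://localhost:8000/mcp",
        headers={"Authorization": "Bearer <token>"},
        timeout_seconds=10,
        sse_read_timeout_seconds=300,
        terminate_on_close=True,
    ),
)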
flock/core/interpreter/python_interpreter.py
CHANGED
@@ -37,6 +37,14 @@ class InterpreterError(ValueError):
 
     pass
 
+class BreakException(Exception):
+    """Signal a 'break' from the simulated loop."""
+    pass
+
+class ContinueException(Exception):
+    """Signal a 'continue' in the simulated loop."""
+    pass
+
 
 class PythonInterpreter:
     r"""A customized python interpreter to control the execution of
@@ -44,8 +52,6 @@ class PythonInterpreter:
     functions given in action space and import white list. It also supports
     fuzzy variable matching to receive uncertain input variable name.
 
-    [Documentation omitted for brevity]
-
     Args:
         action_space (Dict[str, Any]): A dictionary mapping action names to
             their corresponding functions or objects.
@@ -76,6 +82,9 @@ class PythonInterpreter:
             "enum",
             "json",
             "ast",
+            "numpy",
+            "sympy",
+            "pandas",
         ] # default imports
         self.verbose = verbose
 
@@ -95,50 +104,57 @@ class PythonInterpreter:
 
        [Documentation omitted for brevity]
        """
-        if state is not None:
-            self.state.update(state)
-        if fuzz_state is not None:
-            self.fuzz_state.update(fuzz_state)
-
        try:
-
-
-
-
-                f"Syntax error in code at line {e.lineno}: {error_line}\nError: {e}"
-            )
+            if state is not None:
+                self.state.update(state)
+            if fuzz_state is not None:
+                self.fuzz_state.update(fuzz_state)
 
-
-
-
+            try:
+                expression = ast.parse(code)
+            except SyntaxError as e:
+                error_line = code.splitlines()[e.lineno - 1]
+                self.log(
+                    f"[Interpreter] Syntax error in code at line {e.lineno}: {error_line}\nError: {e}")
+                return(
+                    f"Syntax error in code at line {e.lineno}: {error_line}\nError: {e}"
+                )
 
-
-        # Log the AST node being executed (using unparse if available)
+            result = None
            if self.verbose:
-
-            node_repr = ast.unparse(node)
-        except Exception:
-            node_repr = ast.dump(node)
-        self.log(f"[Interpreter] Executing node {idx}: {node_repr}")
+                self.log("[Interpreter] Starting code execution...")
 
-
-
-        except InterpreterError as e:
-            if not keep_state:
-                self.clear_state()
-            msg = f"Evaluation of the code stopped at node {idx}. See:\n{e}"
-            raise InterpreterError(msg)
-        if line_result is not None:
-            result = line_result
+            for idx, node in enumerate(expression.body):
+                # Log the AST node being executed (using unparse if available)
                if self.verbose:
-
+                    try:
+                        node_repr = ast.unparse(node)
+                    except Exception:
+                        node_repr = ast.dump(node)
+                    self.log(f"[Interpreter] Executing node {idx}: {node_repr}")
 
-
-
-
-
+                try:
+                    line_result = self._execute_ast(node)
+                except InterpreterError as e:
+                    if not keep_state:
+                        self.clear_state()
+                    msg = f"Evaluation of the code stopped at node {idx}. See:\n{e}"
+                    return msg
+                if line_result is not None:
+                    result = line_result
+                    if self.verbose:
+                        self.log(f"[Interpreter] Node {idx} result: {result}")
 
-
+            if self.verbose:
+                self.log("[Interpreter] Finished code execution.")
+            if not keep_state:
+                self.clear_state()
+
+            return result
+        except Exception as e:
+            self.log(
+                f"[Interpreter] Error during code execution: {e}")
+            return f"[Interpreter] Error during code execution: {e}"
 
     def clear_state(self) -> None:
         r"""Initialize :obj:`state` and :obj:`fuzz_state`"""
@@ -236,7 +252,7 @@ class PythonInterpreter:
         elif isinstance(expression, ast.Assert):
             return self._execute_assert(expression)
         else:
-
+            return(
                 f"{expression.__class__.__name__} is not supported."
             )
 
@@ -253,17 +269,17 @@ class PythonInterpreter:
             self.state[target.id] = value
         elif isinstance(target, ast.Tuple):
             if not isinstance(value, tuple):
-
+                return(
                     f"Expected type tuple, but got {value.__class__.__name__} instead."
                 )
             if len(target.elts) != len(value):
-
+                return(
                     f"Expected {len(target.elts)} values but got {len(value)}."
                 )
             for t, v in zip(target.elts, value):
                 self.state[self._execute_ast(t)] = v
         else:
-
+            return(
                 f"Unsupported variable type. Expected ast.Name or ast.Tuple, got {target.__class__.__name__} instead."
             )
 
@@ -297,7 +313,7 @@ class PythonInterpreter:
             isinstance(current_value, (int, float))
             and isinstance(increment_value, (int, float))
         ):
-
+            return(
                 f"Invalid types for augmented assignment: {type(current_value)}, {type(increment_value)}"
             )
         if isinstance(augassign.op, ast.Add):
@@ -309,7 +325,7 @@ class PythonInterpreter:
         elif isinstance(augassign.op, ast.Div):
             new_value = current_value / increment_value
         else:
-
+            return(
                 f"Augmented assignment operator {augassign.op} is not supported"
             )
         self._assign(augassign.target, new_value)
@@ -319,7 +335,7 @@ class PythonInterpreter:
         index = self._execute_ast(subscript.slice)
         value = self._execute_ast(subscript.value)
         if not isinstance(subscript.ctx, ast.Load):
-
+            return(
                 f"{subscript.ctx.__class__.__name__} is not supported for subscript."
             )
         if isinstance(value, (list, tuple)):
@@ -330,7 +346,7 @@ class PythonInterpreter:
         close_matches = difflib.get_close_matches(index, list(value.keys()))
         if len(close_matches) > 0:
             return value[close_matches[0]]
-
+        return(f"Could not index {value} with '{index}'.")
 
     def _execute_name(self, name: ast.Name):
         if name.id in dir(builtins):
@@ -340,7 +356,7 @@ class PythonInterpreter:
         elif isinstance(name.ctx, ast.Load):
             return self._get_value_from_state(name.id)
         else:
-
+            return(f"{name.ctx} is not supported.")
 
     def _execute_condition(self, condition):
         if isinstance(condition, ast.BoolOp):
@@ -355,12 +371,12 @@ class PythonInterpreter:
                 ]
                 return any(results)
             else:
-
+                return(
                     f"Boolean operator {condition.op} is not supported"
                 )
         elif isinstance(condition, ast.Compare):
             if len(condition.ops) > 1:
-
+                return(
                     "Cannot evaluate conditions with multiple operators"
                 )
             left = self._execute_ast(condition.left)
@@ -387,7 +403,7 @@ class PythonInterpreter:
             elif isinstance(comparator, ast.NotIn):
                 return left not in right
             else:
-
+                return("Unsupported comparison operator")
         elif isinstance(condition, ast.UnaryOp):
             return self._execute_unaryop(condition)
         elif isinstance(condition, ast.Name) or isinstance(condition, ast.Call):
@@ -395,7 +411,7 @@ class PythonInterpreter:
         elif isinstance(condition, ast.Constant):
             return bool(condition.value)
         else:
-
+            return(
                 f"Unsupported condition type: {type(condition).__name__}"
             )
 
@@ -428,7 +444,7 @@ class PythonInterpreter:
 
     def _execute_import_from(self, import_from: ast.ImportFrom):
         if import_from.module is None:
-
+            return('"from . import" is not supported.')
         for import_name in import_from.names:
             full_name = import_from.module + f".{import_name.name}"
             self._validate_import(full_name)
@@ -440,12 +456,6 @@ class PythonInterpreter:
        # We keep both as provided, but you may wish to consolidate these in your code.
 
     def _execute_for(self, for_statement: ast.For):
-        class BreakException(Exception):
-            pass
-
-        class ContinueException(Exception):
-            pass
-
         result = None
         try:
             for value in self._execute_ast(for_statement.iter):
@@ -462,12 +472,6 @@ class PythonInterpreter:
         return result
 
     def _execute_while(self, while_statement: ast.While):
-        class BreakException(Exception):
-            pass
-
-        class ContinueException(Exception):
-            pass
-
         result = None
         try:
             while self._execute_condition(while_statement.test):
@@ -540,7 +544,7 @@ class PythonInterpreter:
                 found_name = True
                 return
         if not found_name:
-
+            return(
                 f"It is not permitted to import modules "
                 f"than module white list (try to import {full_name})."
             )
@@ -577,7 +581,7 @@ class PythonInterpreter:
         elif isinstance(operator, ast.MatMult):
             return left @ right
         else:
-
+            return(f"Operator not supported: {operator}")
 
     def _execute_unaryop(self, unaryop: ast.UnaryOp):
         operand = self._execute_ast(unaryop.operand)
@@ -592,31 +596,33 @@ class PythonInterpreter:
         elif isinstance(operator, ast.Invert):
             return ~operand
         else:
-
+            return(f"Operator not supported: {operator}")
 
     def _execute_listcomp(self, comp: ast.ListComp):
-        return
-
-    def _execute_dictcomp(self, comp: ast.DictComp):
-        return {
-            self._execute_comp(comp.key, comp.generators): self._execute_comp(
-                comp.value, comp.generators
-            )
-        }
+        return self._execute_comp(comp.elt, comp.generators)
 
     def _execute_setcomp(self, comp: ast.SetComp):
-        return
+        return set(self._execute_comp(comp.elt, comp.generators))
+
+    def _execute_dictcomp(self, comp: ast.DictComp):
+        keys = self._execute_comp(comp.key, comp.generators)
+        values = self._execute_comp(comp.value, comp.generators)
+        return dict(zip(keys, values))
 
     def _execute_comp(self, elt, generators):
+        # Base-case: wrap the single element in a list so that
+        # callers can safely .extend() it.
         if not generators:
-            return self._execute_ast(elt)
+            return [self._execute_ast(elt)]
+
         gen = generators[0]
-
+        acc: list[Any] = []
         for value in self._execute_ast(gen.iter):
             self._assign(gen.target, value)
             if all(self._execute_condition(if_cond) for if_cond in gen.ifs):
-
-                return
+                acc.extend(self._execute_comp(elt, generators[1:]))
+        return acc
+
 
     def _execute_generatorexp(self, genexp: ast.GeneratorExp):
         def generator():
@@ -631,7 +637,7 @@ class PythonInterpreter:
         elif key in self.fuzz_state:
             return self.fuzz_state[key]
         else:
-
+            return(f"The variable `{key}` is not defined.")
 
 
 class TextPrompt(str):
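The comprehension hunks above replace stubs that returned None with a real recursive evaluation. The recursion only works because the base case wraps the single evaluated element in a list, which each outer generator level can extend. A standalone sketch of that accumulation pattern (plain Python, not the flock API; names are illustrative):

# expand() mirrors the shape of _execute_comp: no generators left -> wrap the
# element, otherwise bind the loop variable, filter on the ifs, and recurse.
def expand(elt, generators, bindings):
    if not generators:
        return [elt(bindings)]
    gen, rest = generators[0], generators[1:]
    acc = []
    for value in gen["iter"]:
        bindings[gen["target"]] = value
        if all(cond(bindings) for cond in gen["ifs"]):
            acc.extend(expand(elt, rest, bindings))
    return acc

# Equivalent of: [x * y for x in (1, 2) for y in (10, 20) if y > 10]
result = expand(
    lambda b: b["x"] * b["y"],
    [
        {"target": "x", "iter": (1, 2), "ifs": []},
        {"target": "y", "iter": (10, 20), "ifs": [lambda b: b["y"] > 10]},
    ],
    {},
)
assert result == [20, 40]

Note that the new _execute_dictcomp evaluates keys and values in two separate passes and zips them, which relies on both passes binding the generator variables in the same order.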
flock/core/logging/logging.py
CHANGED
@@ -68,6 +68,14 @@ COLOR_MAP = {
     "workflow": "cyan", # Color only
     "activities": "cyan",
     "context": "green",
+    "mcp.server": "blue",
+    "mcp.tool": "cyan",
+    "mcp.client_manager": "light-blue",
+    "mcp.client": "light-cyan",
+    "mcp.callback.logging": "white",
+    "mcp.callback.sampling": "pink",
+    "mcp.callback.root": "light-yellow",
+    "mcp.callback.message": "light-blue",
     # Components & Mechanisms
     "registry": "yellow", # Color only
     "serialization": "yellow",