flock-core 0.5.0b53 (flock_core-0.5.0b53-py3-none-any.whl) → 0.5.0b55 (flock_core-0.5.0b55-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic. Click here for more details.

@@ -2,8 +2,10 @@
2
2
 
3
3
  import functools
4
4
  import inspect
5
+ import json
5
6
 
6
7
  from opentelemetry import trace
8
+ from opentelemetry.trace import Status, StatusCode
7
9
 
8
10
  from flock.logging.logging import get_logger
9
11
 
@@ -12,44 +14,291 @@ logger = get_logger("tools")
12
14
  tracer = trace.get_tracer(__name__)
13
15
 
14
16
 
17
+ # Global trace filter configuration
18
+ class TraceFilterConfig:
19
+ """Configuration for filtering which operations get traced."""
20
+
21
+ def __init__(self):
22
+ self.services: set[str] | None = None # Whitelist: only trace these services
23
+ self.ignore_operations: set[str] = set() # Blacklist: never trace these operations
24
+
25
+ def should_trace(self, service: str, operation: str) -> bool:
26
+ """Check if an operation should be traced based on filters.
27
+
28
+ Args:
29
+ service: Service name (e.g., "Flock", "Agent")
30
+ operation: Full operation name (e.g., "Flock.publish")
31
+
32
+ Returns:
33
+ True if should trace, False otherwise
34
+ """
35
+ # Check blacklist first (highest priority)
36
+ if operation in self.ignore_operations:
37
+ return False
38
+
39
+ # Check whitelist if configured
40
+ if self.services is not None:
41
+ service_lower = service.lower()
42
+ if service_lower not in self.services:
43
+ return False
44
+
45
+ return True
46
+
47
+
48
+ # Global instance
49
+ _trace_filter_config = TraceFilterConfig()
50
+
51
+
52
+ def _serialize_value(value, max_depth=10, current_depth=0):
53
+ """Serialize a value to JSON-compatible format for span attributes.
54
+
55
+ Args:
56
+ value: The value to serialize
57
+ max_depth: Maximum recursion depth to prevent infinite loops
58
+ current_depth: Current recursion depth
59
+
60
+ Returns:
61
+ A JSON-serializable representation of the value
62
+ """
63
+ if current_depth >= max_depth:
64
+ return f"<max_depth_reached: {type(value).__name__}>"
65
+
66
+ try:
67
+ # Handle None
68
+ if value is None:
69
+ return None
70
+
71
+ # Handle primitives - these are already JSON-serializable
72
+ if isinstance(value, (str, int, float, bool)):
73
+ return value
74
+
75
+ # Handle lists/tuples
76
+ if isinstance(value, (list, tuple)):
77
+ return [_serialize_value(item, max_depth, current_depth + 1) for item in value]
78
+
79
+ # Handle dicts
80
+ if isinstance(value, dict):
81
+ return {
82
+ str(k): _serialize_value(v, max_depth, current_depth + 1) for k, v in value.items()
83
+ }
84
+
85
+ # Handle sets
86
+ if isinstance(value, set):
87
+ return [_serialize_value(item, max_depth, current_depth + 1) for item in value]
88
+
89
+ # For custom objects with __dict__, serialize their attributes
90
+ if hasattr(value, "__dict__"):
91
+ class_name = value.__class__.__name__
92
+ try:
93
+ obj_dict = {}
94
+ for k, v in value.__dict__.items():
95
+ # Skip private attributes and methods
96
+ if k.startswith("_"):
97
+ continue
98
+ # Skip methods and callables
99
+ if callable(v):
100
+ continue
101
+ try:
102
+ obj_dict[k] = _serialize_value(v, max_depth, current_depth + 1)
103
+ except Exception:
104
+ obj_dict[k] = f"<error serializing {k}>"
105
+
106
+ return {
107
+ "__class__": class_name,
108
+ "__module__": value.__class__.__module__,
109
+ **obj_dict,
110
+ }
111
+ except Exception as e:
112
+ return {"__class__": class_name, "__error__": str(e)}
113
+
114
+ # For objects with a useful string representation
115
+ result = str(value)
116
+ # If the string is too long (> 5000 chars), truncate it
117
+ if len(result) > 5000:
118
+ return result[:5000] + "... (string truncated at 5000 chars)"
119
+ return result
120
+
121
+ except Exception as e:
122
+ # If all else fails, return type information with error
123
+ return {"__type__": type(value).__name__, "__error__": str(e)}
124
+
125
+
126
+ def _extract_span_attributes(func, args, kwargs):
127
+ """Extract useful attributes from function arguments for OTEL spans.
128
+
129
+ Returns a dict of attributes and a display name for the span.
130
+ """
131
+ attributes = {}
132
+ span_name = func.__name__
133
+
134
+ # Try to get class name if this is a method
135
+ if args and hasattr(args[0], "__class__"):
136
+ obj = args[0]
137
+ class_name = obj.__class__.__name__
138
+ span_name = f"{class_name}.{func.__name__}"
139
+ attributes["class"] = class_name
140
+
141
+ # Extract agent-specific attributes
142
+ if hasattr(obj, "name"):
143
+ attributes["agent.name"] = str(obj.name)
144
+ if hasattr(obj, "description"):
145
+ attributes["agent.description"] = str(obj.description)[:200] # Truncate
146
+
147
+ # Extract context attributes (correlation_id, task_id)
148
+ for arg_name, arg_value in kwargs.items():
149
+ if arg_name == "ctx" and hasattr(arg_value, "correlation_id"):
150
+ if arg_value.correlation_id:
151
+ attributes["correlation_id"] = str(arg_value.correlation_id)
152
+ if hasattr(arg_value, "task_id"):
153
+ attributes["task_id"] = str(arg_value.task_id)
154
+
155
+ # Check positional args for Context
156
+ for arg in args[1:]: # Skip self
157
+ if hasattr(arg, "correlation_id"):
158
+ if arg.correlation_id:
159
+ attributes["correlation_id"] = str(arg.correlation_id)
160
+ if hasattr(arg, "task_id"):
161
+ attributes["task_id"] = str(arg.task_id)
162
+ break
163
+
164
+ # Add function metadata
165
+ attributes["function"] = func.__name__
166
+ attributes["module"] = func.__module__
167
+
168
+ # Capture input arguments (skip 'self' for methods)
169
+ try:
170
+ sig = inspect.signature(func)
171
+ bound_args = sig.bind(*args, **kwargs)
172
+ bound_args.apply_defaults()
173
+
174
+ # Serialize arguments
175
+ for param_name, param_value in bound_args.arguments.items():
176
+ # Skip 'self' and 'cls'
177
+ if param_name in ("self", "cls"):
178
+ continue
179
+ # Serialize the argument value to JSON-compatible format
180
+ serialized = _serialize_value(param_value)
181
+ # Convert to JSON string for OTEL attribute storage
182
+ try:
183
+ attributes[f"input.{param_name}"] = json.dumps(serialized, default=str)
184
+ except Exception:
185
+ # If JSON serialization fails, use string representation
186
+ attributes[f"input.{param_name}"] = str(serialized)
187
+ except Exception as e:
188
+ # If we can't capture inputs, just note that
189
+ attributes["input.error"] = str(e)
190
+
191
+ return attributes, span_name
192
+
193
+
15
194
  def traced_and_logged(func):
16
195
  """A decorator that wraps a function in an OpenTelemetry span.
17
196
 
18
- and logs its inputs,
19
- outputs, and exceptions. Supports both synchronous and asynchronous functions.
197
+ Creates proper parent-child span relationships and extracts relevant
198
+ attributes for observability in Grafana/Jaeger.
199
+
200
+ Automatically extracts:
201
+ - Agent name and description
202
+ - Correlation ID and task ID from Context
203
+ - Class and method names
204
+ - Exception information
205
+
206
+ Supports both synchronous and asynchronous functions.
20
207
  """
21
208
  if inspect.iscoroutinefunction(func):
22
209
 
23
210
  @functools.wraps(func)
24
211
  async def async_wrapper(*args, **kwargs):
25
- with tracer.start_as_current_span(func.__name__) as span:
26
- span.set_attribute("args", str(args))
27
- span.set_attribute("kwargs", str(kwargs))
212
+ attributes, span_name = _extract_span_attributes(func, args, kwargs)
213
+
214
+ # Check if we should trace this operation
215
+ service_name = span_name.split(".")[0] if "." in span_name else span_name
216
+ if not _trace_filter_config.should_trace(service_name, span_name):
217
+ # Skip tracing, just call the function
218
+ return await func(*args, **kwargs)
219
+
220
+ with tracer.start_as_current_span(span_name) as span:
221
+ # Set all extracted attributes
222
+ for key, value in attributes.items():
223
+ span.set_attribute(key, value)
224
+
28
225
  try:
29
226
  result = await func(*args, **kwargs)
30
- span.set_attribute("result", str(result))
31
- logger.debug(f"{func.__name__} executed successfully", result=result)
227
+
228
+ # Capture output value as JSON
229
+ try:
230
+ serialized_result = _serialize_value(result)
231
+ span.set_attribute(
232
+ "output.value", json.dumps(serialized_result, default=str)
233
+ )
234
+ except Exception as e:
235
+ span.set_attribute("output.value", str(result))
236
+ span.set_attribute("output.serialization_error", str(e))
237
+
238
+ # Set result type and metadata
239
+ if result is not None:
240
+ span.set_attribute("output.type", type(result).__name__)
241
+ if hasattr(result, "__len__"):
242
+ try:
243
+ span.set_attribute("output.length", len(result))
244
+ except TypeError:
245
+ pass
246
+
247
+ span.set_status(Status(StatusCode.OK))
248
+ logger.debug(f"{span_name} executed successfully")
32
249
  return result
250
+
33
251
  except Exception as e:
34
- logger.exception(f"Error in {func.__name__}", error=str(e))
252
+ span.set_status(Status(StatusCode.ERROR, str(e)))
35
253
  span.record_exception(e)
254
+ logger.exception(f"Error in {span_name}", error=str(e))
36
255
  raise
37
256
 
38
257
  return async_wrapper
39
258
 
40
259
  @functools.wraps(func)
41
260
  def wrapper(*args, **kwargs):
42
- with tracer.start_as_current_span(func.__name__) as span:
43
- span.set_attribute("args", str(args))
44
- span.set_attribute("kwargs", str(kwargs))
261
+ attributes, span_name = _extract_span_attributes(func, args, kwargs)
262
+
263
+ # Check if we should trace this operation
264
+ service_name = span_name.split(".")[0] if "." in span_name else span_name
265
+ if not _trace_filter_config.should_trace(service_name, span_name):
266
+ # Skip tracing, just call the function
267
+ return func(*args, **kwargs)
268
+
269
+ with tracer.start_as_current_span(span_name) as span:
270
+ # Set all extracted attributes
271
+ for key, value in attributes.items():
272
+ span.set_attribute(key, value)
273
+
45
274
  try:
46
275
  result = func(*args, **kwargs)
47
- span.set_attribute("result", str(result))
48
- logger.debug(f"{func.__name__} executed successfully", result=result)
276
+
277
+ # Capture output value as JSON
278
+ try:
279
+ serialized_result = _serialize_value(result)
280
+ span.set_attribute("output.value", json.dumps(serialized_result, default=str))
281
+ except Exception as e:
282
+ span.set_attribute("output.value", str(result))
283
+ span.set_attribute("output.serialization_error", str(e))
284
+
285
+ # Set result type and metadata
286
+ if result is not None:
287
+ span.set_attribute("output.type", type(result).__name__)
288
+ if hasattr(result, "__len__"):
289
+ try:
290
+ span.set_attribute("output.length", len(result))
291
+ except TypeError:
292
+ pass
293
+
294
+ span.set_status(Status(StatusCode.OK))
295
+ logger.debug(f"{span_name} executed successfully")
49
296
  return result
297
+
50
298
  except Exception as e:
51
- logger.exception(f"Error in {func.__name__}", error=str(e))
299
+ span.set_status(Status(StatusCode.ERROR, str(e)))
52
300
  span.record_exception(e)
301
+ logger.exception(f"Error in {span_name}", error=str(e))
53
302
  raise
54
303
 
55
304
  return wrapper
flock/orchestrator.py CHANGED
@@ -10,11 +10,14 @@ from contextlib import asynccontextmanager
10
10
  from typing import TYPE_CHECKING, Any
11
11
  from uuid import uuid4
12
12
 
13
+ from opentelemetry import trace
14
+ from opentelemetry.trace import Status, StatusCode
13
15
  from pydantic import BaseModel
14
16
 
15
17
  from flock.agent import Agent, AgentBuilder
16
18
  from flock.artifacts import Artifact
17
19
  from flock.helper.cli_helper import init_console
20
+ from flock.logging.auto_trace import AutoTracedMeta
18
21
  from flock.mcp import (
19
22
  FlockMCPClientManager,
20
23
  FlockMCPConfiguration,
@@ -48,7 +51,12 @@ class BoardHandle:
48
51
  return await self._orchestrator.store.list()
49
52
 
50
53
 
51
- class Flock:
54
+ class Flock(metaclass=AutoTracedMeta):
55
+ """Main orchestrator for blackboard-based agent coordination.
56
+
57
+ All public methods are automatically traced via OpenTelemetry.
58
+ """
59
+
52
60
  def _patch_litellm_proxy_imports(self) -> None:
53
61
  """Stub litellm proxy_server to avoid optional proxy deps when not used.
54
62
 
@@ -91,6 +99,14 @@ class Flock:
91
99
  self.max_agent_iterations: int = max_agent_iterations
92
100
  self._agent_iteration_count: dict[str, int] = {}
93
101
  self.is_dashboard: bool = False
102
+ # Unified tracing support
103
+ self._workflow_span = None
104
+ self._auto_workflow_enabled = os.getenv("FLOCK_AUTO_WORKFLOW_TRACE", "false").lower() in {
105
+ "true",
106
+ "1",
107
+ "yes",
108
+ "on",
109
+ }
94
110
  if not model:
95
111
  self.model = os.getenv("DEFAULT_MODEL")
96
112
 
@@ -214,6 +230,119 @@ class Flock:
214
230
 
215
231
  return self._mcp_manager
216
232
 
233
+ # Unified Tracing ------------------------------------------------------
234
+
235
+ @asynccontextmanager
236
+ async def traced_run(self, name: str = "workflow"):
237
+ """Context manager for wrapping an entire execution in a single unified trace.
238
+
239
+ This creates a parent span that encompasses all operations (publish, run_until_idle, etc.)
240
+ within the context, ensuring they all belong to the same trace_id for better observability.
241
+
242
+ Args:
243
+ name: Name for the workflow trace (default: "workflow")
244
+
245
+ Yields:
246
+ The workflow span for optional manual attribute setting
247
+
248
+ Examples:
249
+ # Explicit workflow tracing (recommended)
250
+ async with flock.traced_run("pizza_workflow"):
251
+ await flock.publish(pizza_idea)
252
+ await flock.run_until_idle()
253
+ # All operations now share the same trace_id!
254
+
255
+ # Custom attributes
256
+ async with flock.traced_run("data_pipeline") as span:
257
+ span.set_attribute("pipeline.version", "2.0")
258
+ await flock.publish(data)
259
+ await flock.run_until_idle()
260
+ """
261
+ tracer = trace.get_tracer(__name__)
262
+ with tracer.start_as_current_span(name) as span:
263
+ # Set workflow-level attributes
264
+ span.set_attribute("flock.workflow", True)
265
+ span.set_attribute("workflow.name", name)
266
+ span.set_attribute("workflow.flock_id", str(id(self)))
267
+
268
+ # Store span for nested operations to use
269
+ prev_workflow_span = self._workflow_span
270
+ self._workflow_span = span
271
+
272
+ try:
273
+ yield span
274
+ span.set_status(Status(StatusCode.OK))
275
+ except Exception as e:
276
+ span.set_status(Status(StatusCode.ERROR, str(e)))
277
+ span.record_exception(e)
278
+ raise
279
+ finally:
280
+ # Restore previous workflow span
281
+ self._workflow_span = prev_workflow_span
282
+
283
+ @staticmethod
284
+ def clear_traces(db_path: str = ".flock/traces.duckdb") -> dict[str, Any]:
285
+ """Clear all traces from the DuckDB database.
286
+
287
+ Useful for resetting debug sessions or cleaning up test data.
288
+
289
+ Args:
290
+ db_path: Path to the DuckDB database file (default: ".flock/traces.duckdb")
291
+
292
+ Returns:
293
+ Dictionary with operation results:
294
+ - deleted_count: Number of spans deleted
295
+ - success: Whether operation succeeded
296
+ - error: Error message if failed
297
+
298
+ Examples:
299
+ # Clear all traces
300
+ result = Flock.clear_traces()
301
+ print(f"Deleted {result['deleted_count']} spans")
302
+
303
+ # Custom database path
304
+ result = Flock.clear_traces(".flock/custom_traces.duckdb")
305
+
306
+ # Check if operation succeeded
307
+ if result['success']:
308
+ print("Traces cleared successfully!")
309
+ else:
310
+ print(f"Error: {result['error']}")
311
+ """
312
+ try:
313
+ from pathlib import Path
314
+
315
+ import duckdb
316
+
317
+ db_file = Path(db_path)
318
+ if not db_file.exists():
319
+ return {
320
+ "success": False,
321
+ "deleted_count": 0,
322
+ "error": f"Database file not found: {db_path}",
323
+ }
324
+
325
+ # Connect and clear
326
+ conn = duckdb.connect(str(db_file))
327
+ try:
328
+ # Get count before deletion
329
+ count_result = conn.execute("SELECT COUNT(*) FROM spans").fetchone()
330
+ deleted_count = count_result[0] if count_result else 0
331
+
332
+ # Delete all spans
333
+ conn.execute("DELETE FROM spans")
334
+
335
+ # Vacuum to reclaim space
336
+ conn.execute("VACUUM")
337
+
338
+ return {"success": True, "deleted_count": deleted_count, "error": None}
339
+
340
+ finally:
341
+ conn.close()
342
+
343
+ except Exception as e:
344
+ return {"success": False, "deleted_count": 0, "error": str(e)}
345
+
217
346
  # Runtime --------------------------------------------------------------
218
347
 
219
348
  async def run_until_idle(self) -> None:
flock/store.py CHANGED
@@ -5,7 +5,7 @@ from __future__ import annotations
5
5
 
6
6
  from asyncio import Lock
7
7
  from collections import defaultdict
8
- from typing import TYPE_CHECKING
8
+ from typing import TYPE_CHECKING, TypeVar
9
9
 
10
10
  from flock.registry import type_registry
11
11
 
@@ -17,6 +17,8 @@ if TYPE_CHECKING:
17
17
 
18
18
  from flock.artifacts import Artifact
19
19
 
20
+ T = TypeVar("T")
21
+
20
22
 
21
23
  class BlackboardStore:
22
24
  async def publish(self, artifact: Artifact) -> None:
@@ -31,6 +33,21 @@ class BlackboardStore:
31
33
  async def list_by_type(self, type_name: str) -> builtins.list[Artifact]:
32
34
  raise NotImplementedError
33
35
 
36
+ async def get_by_type(self, artifact_type: type[T]) -> builtins.list[T]:
37
+ """Get artifacts by Pydantic type, returning data already cast.
38
+
39
+ Args:
40
+ artifact_type: The Pydantic model class (e.g., BugAnalysis)
41
+
42
+ Returns:
43
+ List of data objects of the specified type (not Artifact wrappers)
44
+
45
+ Example:
46
+ bug_analyses = await store.get_by_type(BugAnalysis)
47
+ # Returns list[BugAnalysis] directly, no .data access needed
48
+ """
49
+ raise NotImplementedError
50
+
34
51
 
35
52
  class InMemoryBlackboardStore(BlackboardStore):
36
53
  """Simple in-memory implementation suitable for local dev and tests."""
@@ -58,6 +75,22 @@ class InMemoryBlackboardStore(BlackboardStore):
58
75
  canonical = type_registry.resolve_name(type_name)
59
76
  return list(self._by_type.get(canonical, []))
60
77
 
78
+ async def get_by_type(self, artifact_type: type[T]) -> builtins.list[T]:
79
+ """Get artifacts by Pydantic type, returning data already cast.
80
+
81
+ Args:
82
+ artifact_type: The Pydantic model class (e.g., BugAnalysis)
83
+
84
+ Returns:
85
+ List of data objects of the specified type (not Artifact wrappers)
86
+ """
87
+ async with self._lock:
88
+ # Get canonical name from the type
89
+ canonical = type_registry.resolve_name(artifact_type.__name__)
90
+ artifacts = self._by_type.get(canonical, [])
91
+ # Reconstruct Pydantic models from payload dictionaries
92
+ return [artifact_type(**artifact.payload) for artifact in artifacts] # type: ignore
93
+
61
94
  async def extend(self, artifacts: Iterable[Artifact]) -> None: # pragma: no cover - helper
62
95
  for artifact in artifacts:
63
96
  await self.publish(artifact)