flock-core 0.4.0b50__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

Potentially problematic release.

Files changed (36)
  1. flock/adapter/__init__.py +14 -0
  2. flock/adapter/azure_adapter.py +68 -0
  3. flock/adapter/chroma_adapter.py +73 -0
  4. flock/adapter/faiss_adapter.py +97 -0
  5. flock/adapter/pinecone_adapter.py +51 -0
  6. flock/adapter/vector_base.py +47 -0
  7. flock/cli/constants.py +1 -1
  8. flock/config.py +1 -1
  9. flock/core/context/context.py +20 -0
  10. flock/core/flock.py +71 -91
  11. flock/core/flock_agent.py +58 -3
  12. flock/core/flock_module.py +5 -0
  13. flock/core/util/cli_helper.py +1 -1
  14. flock/di.py +41 -0
  15. flock/modules/enterprise_memory/README.md +99 -0
  16. flock/modules/enterprise_memory/enterprise_memory_module.py +526 -0
  17. flock/modules/mem0/mem0_module.py +79 -16
  18. flock/modules/mem0_async/async_mem0_module.py +126 -0
  19. flock/modules/memory/memory_module.py +28 -8
  20. flock/modules/performance/metrics_module.py +24 -1
  21. flock/modules/zep/__init__.py +1 -0
  22. flock/modules/zep/zep_module.py +192 -0
  23. flock/webapp/app/api/execution.py +79 -2
  24. flock/webapp/app/chat.py +83 -3
  25. flock/webapp/app/services/sharing_models.py +38 -0
  26. flock/webapp/app/services/sharing_store.py +60 -1
  27. flock/webapp/static/css/chat.css +2 -0
  28. flock/webapp/templates/partials/_chat_messages.html +50 -4
  29. flock/webapp/templates/partials/_results_display.html +39 -0
  30. {flock_core-0.4.0b50.dist-info → flock_core-0.4.2.dist-info}/METADATA +5 -7
  31. {flock_core-0.4.0b50.dist-info → flock_core-0.4.2.dist-info}/RECORD +35 -24
  32. flock/modules/mem0graph/mem0_graph_module.py +0 -63
  33. /flock/modules/{mem0graph → mem0_async}/__init__.py +0 -0
  34. {flock_core-0.4.0b50.dist-info → flock_core-0.4.2.dist-info}/WHEEL +0 -0
  35. {flock_core-0.4.0b50.dist-info → flock_core-0.4.2.dist-info}/entry_points.txt +0 -0
  36. {flock_core-0.4.0b50.dist-info → flock_core-0.4.2.dist-info}/licenses/LICENSE +0 -0
flock/adapter/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from __future__ import annotations
+
+ """Adapter package for pluggable vector-store back-ends.
+
+ Importing the package will NOT import heavy third-party clients by default –
+ individual adapters are only loaded when referenced explicitly.
+ """
+
+ from .vector_base import VectorAdapter, VectorHit
+
+ __all__ = [
+     "VectorAdapter",
+     "VectorHit",
+ ]
flock/adapter/azure_adapter.py ADDED
@@ -0,0 +1,68 @@
+ from __future__ import annotations
+
+ from typing import Any
+
+ from .vector_base import VectorAdapter, VectorHit
+
+
+ class AzureSearchAdapter(VectorAdapter):
+     """Adapter for Azure Cognitive Search vector capabilities."""
+
+     def __init__(
+         self,
+         *,
+         endpoint: str,
+         key: str,
+         index_name: str,
+         embedding_field: str = "embedding",
+     ) -> None:
+         super().__init__()
+         try:
+             from azure.core.credentials import AzureKeyCredential
+             from azure.search.documents import SearchClient
+         except ImportError as e:
+             raise RuntimeError("azure-search-documents package is required for AzureSearchAdapter") from e
+
+         self._client = SearchClient(
+             endpoint=endpoint,
+             index_name=index_name,
+             credential=AzureKeyCredential(key),
+         )
+         self._embedding_field = embedding_field
+
+     # -----------------------------
+     def add(
+         self,
+         *,
+         id: str,
+         content: str,
+         embedding: list[float],
+         metadata: dict[str, Any] | None = None,
+     ) -> None:
+         document = {
+             "id": id,
+             "content": content,
+             self._embedding_field: embedding,
+             **(metadata or {}),
+         }
+         # Upload is sync but returns iterator; consume to check errors
+         list(self._client.upload_documents(documents=[document]))
+
+     def query(self, *, embedding: list[float], k: int) -> list[VectorHit]:
+         results = self._client.search(
+             search_text=None,
+             vector=embedding,
+             k=k,
+             vector_fields=self._embedding_field,
+         )
+         hits: list[VectorHit] = []
+         for doc in results:
+             hits.append(
+                 VectorHit(
+                     id=doc["id"],
+                     content=doc.get("content"),
+                     metadata={k: v for k, v in doc.items() if k not in ("id", "content", self._embedding_field, "@search.score")},
+                     score=doc["@search.score"],
+                 )
+             )
+         return hits
flock/adapter/chroma_adapter.py ADDED
@@ -0,0 +1,73 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Any
+
+ from .vector_base import VectorAdapter, VectorHit
+
+
+ class ChromaAdapter(VectorAdapter):
+     """Adapter for Chroma vector DB (local or HTTP)."""
+
+     def __init__(
+         self,
+         *,
+         collection: str = "flock_memories",
+         host: str | None = None,
+         port: int = 8000,
+         path: str | None = "./vector_store",
+     ) -> None:
+         super().__init__()
+         try:
+             import chromadb
+             from chromadb.config import Settings
+         except ImportError as e:
+             raise RuntimeError("chromadb is required for ChromaAdapter") from e
+
+         if host:
+             client = chromadb.HttpClient(host=host, port=port)
+         else:
+             p = Path(path or "./vector_store")
+             p.mkdir(parents=True, exist_ok=True)
+             client = chromadb.PersistentClient(settings=Settings(path=str(p)))
+
+         self._collection = client.get_or_create_collection(collection)
+
+     # -------------------------------
+     # VectorAdapter implementation
+     # -------------------------------
+     def add(
+         self,
+         *,
+         id: str,
+         content: str,
+         embedding: list[float],
+         metadata: dict[str, Any] | None = None,
+     ) -> None:
+         self._collection.add(
+             ids=[id],
+             documents=[content],
+             embeddings=[embedding],
+             metadatas=[metadata or {}],
+         )
+
+     def query(self, *, embedding: list[float], k: int) -> list[VectorHit]:
+         res = self._collection.query(
+             query_embeddings=[embedding],
+             n_results=k,
+             include=["documents", "metadatas", "distances", "ids"],
+         )
+         hits: list[VectorHit] = []
+         if res and res["ids"]:
+             for idx in range(len(res["ids"][0])):
+                 dist = res["distances"][0][idx]
+                 score = 1 - dist  # Convert L2 → similarity
+                 hits.append(
+                     VectorHit(
+                         id=res["ids"][0][idx],
+                         content=res["documents"][0][idx],
+                         metadata=res["metadatas"][0][idx],
+                         score=score,
+                     )
+                 )
+         return hits
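The lazy-import promise in the package docstring above rests on this structure: `chromadb` is imported inside `ChromaAdapter.__init__`, never at module import time. A quick illustration of the difference, as a sketch (assumes flock-core is installed; `chromadb` may or may not be):

import sys

import flock.adapter  # pulls in vector_base only

# Heavy client not imported yet (unless some other module already loaded it).
assert "chromadb" not in sys.modules

from flock.adapter.chroma_adapter import ChromaAdapter

# chromadb is imported only at construction time; without the optional
# dependency installed this raises the RuntimeError shown in the diff.
try:
    adapter = ChromaAdapter()
except RuntimeError as err:
    print(err)  # "chromadb is required for ChromaAdapter"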
flock/adapter/faiss_adapter.py ADDED
@@ -0,0 +1,97 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Any
+
+ import numpy as np
+
+ from .vector_base import VectorAdapter, VectorHit
+
+
+ class FAISSAdapter(VectorAdapter):
+     """Simple on-disk FAISS vector store.
+
+     Index is stored in `index_path` (flat L2). Metadata & content are kept in a
+     parallel JSONL file for quick prototyping; not optimised for massive scale.
+     """
+
+     def __init__(self, *, index_path: str = "./faiss.index") -> None:
+         super().__init__()
+         try:
+             import faiss  # type: ignore
+         except ImportError as e:
+             raise RuntimeError("faiss library is required for FAISSAdapter") from e
+
+         self._faiss = __import__("faiss")  # lazy alias
+         self._index_path = Path(index_path)
+         self._meta_path = self._index_path.with_suffix(".meta.jsonl")
+         self._metadata: dict[int, dict[str, Any]] = {}
+
+         if self._index_path.exists():
+             self._index = self._faiss.read_index(str(self._index_path))
+             # Load metadata
+             if self._meta_path.exists():
+                 import json
+
+                 with open(self._meta_path) as f:
+                     for line_no, line in enumerate(f):
+                         self._metadata[line_no] = json.loads(line)
+         else:
+             self._index = None  # created on first add
+
+     # -----------------------------
+     def _ensure_index(self, dim: int):
+         if self._index is None:
+             self._index = self._faiss.IndexFlatL2(dim)
+
+     def add(
+         self,
+         *,
+         id: str,
+         content: str,
+         embedding: list[float],
+         metadata: dict[str, Any] | None = None,
+     ) -> None:
+         import json
+
+         vec = np.array([embedding], dtype="float32")
+         self._ensure_index(vec.shape[1])
+         self._index.add(vec)
+         # Row id is current size - 1
+         row_id = self._index.ntotal - 1
+         self._metadata[row_id] = {
+             "id": id,
+             "content": content,
+             "metadata": metadata or {},
+         }
+         # Append metadata to file for persistence
+         self._meta_path.parent.mkdir(parents=True, exist_ok=True)
+         with open(self._meta_path, "a") as f:
+             f.write(json.dumps(self._metadata[row_id]) + "\n")
+         # Persist index lazily every 100 inserts
+         if row_id % 100 == 0:
+             self._faiss.write_index(self._index, str(self._index_path))
+
+     def query(self, *, embedding: list[float], k: int) -> list[VectorHit]:
+         if self._index is None or self._index.ntotal == 0:
+             return []
+         vec = np.array([embedding], dtype="float32")
+         distances, indices = self._index.search(vec, k)
+         hits: list[VectorHit] = []
+         for dist, idx in zip(distances[0], indices[0]):
+             if idx == -1:
+                 continue
+             meta = self._metadata.get(idx, {})
+             hits.append(
+                 VectorHit(
+                     id=meta.get("id", str(idx)),
+                     content=meta.get("content"),
+                     metadata=meta.get("metadata", {}),
+                     score=1 - float(dist),  # approximate similarity
+                 )
+             )
+         return hits
+
+     def close(self) -> None:
+         if self._index is not None:
+             self._faiss.write_index(self._index, str(self._index_path))
flock/adapter/pinecone_adapter.py ADDED
@@ -0,0 +1,51 @@
+ from __future__ import annotations
+
+ from typing import Any
+
+ from .vector_base import VectorAdapter, VectorHit
+
+
+ class PineconeAdapter(VectorAdapter):
+     """Adapter for Pinecone vector DB."""
+
+     def __init__(
+         self,
+         *,
+         api_key: str,
+         environment: str,
+         index: str,
+     ) -> None:
+         super().__init__()
+         try:
+             import pinecone
+         except ImportError as e:
+             raise RuntimeError("pinecone-client is required for PineconeAdapter") from e
+
+         pinecone.init(api_key=api_key, environment=environment)
+         self._index = pinecone.Index(index)
+
+     # -------------------------------
+     def add(
+         self,
+         *,
+         id: str,
+         content: str,
+         embedding: list[float],
+         metadata: dict[str, Any] | None = None,
+     ) -> None:
+         meta = {"content": content, **(metadata or {})}
+         self._index.upsert(vectors=[(id, embedding, meta)])
+
+     def query(self, *, embedding: list[float], k: int) -> list[VectorHit]:
+         res = self._index.query(vector=embedding, top_k=k, include_values=False, include_metadata=True)
+         hits: list[VectorHit] = []
+         for match in res.matches or []:
+             hits.append(
+                 VectorHit(
+                     id=match.id,
+                     content=match.metadata.get("content") if match.metadata else None,
+                     metadata={k: v for k, v in (match.metadata or {}).items() if k != "content"},
+                     score=match.score,
+                 )
+             )
+         return hits
flock/adapter/vector_base.py ADDED
@@ -0,0 +1,47 @@
+ from __future__ import annotations
+
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import Any
+
+
+ @dataclass
+ class VectorHit:
+     """Result object returned from vector search."""
+
+     id: str
+     content: str | None
+     metadata: dict[str, Any]
+     score: float  # similarity score (higher = more similar)
+
+
+ class VectorAdapter(ABC):
+     """Protocol for vector-store adapters."""
+
+     def __init__(self, **kwargs):
+         """Store-specific kwargs are passed through subclass constructor."""
+         super().__init__()
+
+     # ----------------------
+     # CRUD operations
+     # ----------------------
+     @abstractmethod
+     def add(
+         self,
+         *,
+         id: str,
+         content: str,
+         embedding: list[float],
+         metadata: dict[str, Any] | None = None,
+     ) -> None:  # pragma: no cover – interface
+         """Insert or upsert a single document."""
+
+     @abstractmethod
+     def query(
+         self, *, embedding: list[float], k: int
+     ) -> list[VectorHit]:  # pragma: no cover – interface
+         """Return top-k most similar hits."""
+
+     def close(self) -> None:  # Optional override
+         """Free resources / flush buffers."""
+         return
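vector_base.py fixes the contract all five adapters implement: keyword-only `add` and `query`, plus an optional `close`. A minimal usage sketch against that interface (the 3-dimensional embedding is a stand-in for a real embedding model's output, and a working chromadb install is assumed):

from flock.adapter import VectorAdapter, VectorHit
from flock.adapter.chroma_adapter import ChromaAdapter

# Any adapter satisfies the same interface; Chroma's local mode needs no server.
store: VectorAdapter = ChromaAdapter(collection="demo", path="./vector_store")

store.add(
    id="doc-1",
    content="hello world",
    embedding=[0.1, 0.2, 0.3],  # illustrative vector, not real model output
    metadata={"lang": "en"},
)

hits: list[VectorHit] = store.query(embedding=[0.1, 0.2, 0.3], k=5)
for hit in hits:
    print(hit.id, hit.score, hit.content)

store.close()  # no-op for Chroma; FAISSAdapter flushes its index here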
flock/cli/constants.py CHANGED
@@ -18,7 +18,7 @@ CLI_LOAD_FLOCK = "Load a *.flock file"
  CLI_THEME_BUILDER = "Theme builder"
  CLI_LOAD_EXAMPLE = "Load a example"
  CLI_SETTINGS = "Settings"
- CLI_NOTES = "'Hummingbird' release notes"
+ CLI_NOTES = "'Magpie' release notes"
  CLI_START_WEB_SERVER = "Start web server"
  CLI_REGISTRY_MANAGEMENT = "Registry management"
  CLI_EXIT = "Exit"
flock/config.py CHANGED
@@ -22,7 +22,7 @@ GITHUB_USERNAME = config("GITHUB_USERNAME", "")
  # -- Debugging and Logging Configurations --
  LOCAL_DEBUG = config("LOCAL_DEBUG", True)
  LOG_LEVEL = config("LOG_LEVEL", "DEBUG")
- LOGGING_DIR = config("LOGGING_DIR", "logs")
+ LOGGING_DIR = config("LOGGING_DIR", ".flock/logs")

  OTEL_SERVICE_NAME = config("OTL_SERVICE_NAME", "otel-flock")
  JAEGER_ENDPOINT = config(
flock/core/context/context.py CHANGED
@@ -192,3 +192,23 @@ class FlockContext(Serializable, BaseModel):

          converted = convert(data)
          return cls(**converted)
+
+     def resolve(self, svc_type):
+         """Resolve a service from the request-scoped DI container if present.
+
+         The bootstrap code is expected to store the active `ServiceProvider` from
+         `wd.di` in the context variable key ``di.container``. This helper
+         provides a convenient façade so that Flock components can simply call
+         ``context.resolve(SomeType)`` regardless of whether a container is
+         available. When the container is missing or the service cannot be
+         resolved, ``None`` is returned instead of raising to keep backward
+         compatibility.
+         """
+         container = self.get_variable("di.container")
+         if container is None:
+             return None
+         try:
+             return container.get_service(svc_type)
+         except Exception:
+             # Service not registered or other resolution error – fall back to None
+             return None
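Caller-side, the façade keeps component code free of container plumbing. A minimal sketch, assuming bootstrap code has already stored a `wd.di` `ServiceProvider` under the ``di.container`` key; the `Clock` service is hypothetical:

class Clock:
    """Hypothetical service type, registered with the container at bootstrap."""

    def now(self) -> str:
        return "2025-01-01T00:00:00Z"


def report_time(context) -> str:
    # `context` is a FlockContext. resolve() degrades to None when the
    # container is absent or Clock was never registered, so no try/except.
    clock = context.resolve(Clock)
    return clock.now() if clock is not None else "clock unavailable"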
flock/core/flock.py CHANGED
@@ -4,10 +4,11 @@
  from __future__ import annotations  # Ensure forward references work

  import asyncio
- import concurrent.futures  # Added import
+ import contextvars
  import os
  import uuid
- from collections.abc import Callable, Sequence
+ from collections.abc import Awaitable, Callable, Sequence
+ from concurrent.futures import ThreadPoolExecutor
  from pathlib import Path
  from typing import (
      TYPE_CHECKING,
@@ -16,6 +17,7 @@ from typing import (
      TypeVar,
  )

+ _R = TypeVar("_R")
  # Third-party imports
  from box import Box
  from temporalio import workflow
@@ -142,6 +144,28 @@ class Flock(BaseModel, Serializable):
          "ignored_types": (type(FlockRegistry),),
      }

+     def _run_sync(self, coro: Awaitable[_R]) -> _R:
+         """Execute *coro* synchronously.
+
+         * If no loop is running → ``asyncio.run``.
+         * Otherwise run ``asyncio.run`` inside a fresh thread **with**
+           context-vars propagation.
+         """
+         try:
+             asyncio.get_running_loop()
+         except RuntimeError:  # no loop → simple
+             return asyncio.run(coro)
+
+         # A loop is already running – Jupyter / ASGI / etc.
+         ctx = contextvars.copy_context()  # propagate baggage
+         with ThreadPoolExecutor(max_workers=1) as pool:
+             future = pool.submit(ctx.run, asyncio.run, coro)
+             try:
+                 return future.result()
+             finally:
+                 if not future.done():
+                     future.cancel()
+
      def __init__(
          self,
          name: str | None = None,
@@ -356,39 +380,19 @@
          run_id: str = "",
          box_result: bool = True,
          agents: list[FlockAgent] | None = None,
+         memo: dict[str, Any] | None = None
      ) -> Box | dict:
-         """Entry point for running an agent system synchronously."""
-         # Prepare the coroutine that needs to be run
-         coro = self.run_async(
-             start_agent=start_agent,
-             input=input,
-             context=context,
-             run_id=run_id,
-             box_result=box_result,  # run_async handles boxing
-             agents=agents,
-         )
-
-         try:
-             # Check if an event loop is already running in the current thread
-             loop = asyncio.get_running_loop()
-         except RuntimeError:
-             # No event loop is running in the current thread.
-             # We can safely use asyncio.run() to create a new loop,
-             # run the coroutine, and close the loop.
-             return asyncio.run(coro)
-         else:
-             # An event loop is already running in the current thread.
-             # Calling loop.run_until_complete() or asyncio.run() here would raise an error.
-             # To run the async code and wait for its result synchronously,
-             # we execute it in a separate thread with its own event loop.
-             logger.debug(
-                 "Flock.run called in a context with an existing event loop. "
-                 "Running async task in a separate thread to avoid event loop conflict."
+         return self._run_sync(
+             self.run_async(
+                 start_agent=start_agent,
+                 input=input,
+                 context=context,
+                 run_id=run_id,
+                 box_result=box_result,
+                 agents=agents,
+                 memo=memo,
              )
-         with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
-             future = executor.submit(asyncio.run, coro)
-             # Block and wait for the result from the other thread
-             return future.result()
+         )


      async def run_async(
@@ -602,35 +606,24 @@
          hide_columns: list[str] | None = None,
          delimiter: str = ",",
      ) -> list[Box | dict | None | Exception]:
-         """Synchronous wrapper for run_batch_async."""
-         coro = self.run_batch_async(
-             start_agent=start_agent,
-             batch_inputs=batch_inputs,
-             input_mapping=input_mapping,
-             static_inputs=static_inputs,
-             parallel=parallel,
-             max_workers=max_workers,
-             use_temporal=use_temporal,
-             box_results=box_results,
-             return_errors=return_errors,
-             silent_mode=silent_mode,
-             write_to_csv=write_to_csv,
-             hide_columns=hide_columns,
-             delimiter=delimiter,
+         return self._run_sync(
+             self.run_batch_async(
+                 start_agent=start_agent,
+                 batch_inputs=batch_inputs,
+                 input_mapping=input_mapping,
+                 static_inputs=static_inputs,
+                 parallel=parallel,
+                 max_workers=max_workers,
+                 use_temporal=use_temporal,
+                 box_results=box_results,
+                 return_errors=return_errors,
+                 silent_mode=silent_mode,
+                 write_to_csv=write_to_csv,
+                 hide_columns=hide_columns,
+                 delimiter=delimiter,
+             )
          )

-         try:
-             loop = asyncio.get_running_loop()
-         except RuntimeError:
-             return asyncio.run(coro)
-         else:
-             logger.debug(
-                 "Flock.run_batch called in a context with an existing event loop. "
-                 "Running async task in a separate thread."
-             )
-             with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
-                 future = executor.submit(asyncio.run, coro)
-                 return future.result()

      # --- Evaluation (Delegation) ---
      async def evaluate_async(
@@ -704,38 +697,25 @@
          silent_mode: bool = False,
          metadata_columns: list[str] | None = None,
      ) -> DataFrame | list[dict[str, Any]]:  # type: ignore
-         """Synchronous wrapper for evaluate_async."""
-         coro = self.evaluate_async(
-             dataset=dataset,
-             start_agent=start_agent,
-             input_mapping=input_mapping,
-             answer_mapping=answer_mapping,
-             metrics=metrics,
-             metric_configs=metric_configs,
-             static_inputs=static_inputs,
-             parallel=parallel,
-             max_workers=max_workers,
-             use_temporal=use_temporal,
-             error_handling=error_handling,
-             output_file=output_file,
-             return_dataframe=return_dataframe,
-             silent_mode=silent_mode,
-             metadata_columns=metadata_columns,
-         )
-
-         try:
-             loop = asyncio.get_running_loop()
-         except RuntimeError:
-             return asyncio.run(coro)
-         else:
-             logger.debug(
-                 "Flock.evaluate called in a context with an existing event loop. "
-                 "Running async task in a separate thread."
+         return self._run_sync(
+             self.evaluate_async(
+                 dataset=dataset,
+                 start_agent=start_agent,
+                 input_mapping=input_mapping,
+                 answer_mapping=answer_mapping,
+                 metrics=metrics,
+                 metric_configs=metric_configs,
+                 static_inputs=static_inputs,
+                 parallel=parallel,
+                 max_workers=max_workers,
+                 use_temporal=use_temporal,
+                 error_handling=error_handling,
+                 output_file=output_file,
+                 return_dataframe=return_dataframe,
+                 silent_mode=silent_mode,
+                 metadata_columns=metadata_columns,
              )
-         with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
-             future = executor.submit(asyncio.run, coro)
-             return future.result()
-
+     )
      # --- Server & CLI Starters (Delegation) ---
      def start_api(
          self,
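This refactor collapses three copies of the run-inside-or-outside-an-event-loop dance (`run`, `run_batch`, `evaluate`) into the single `_run_sync` helper. The pattern in isolation, as a runnable sketch with generic names:

import asyncio
import contextvars
from collections.abc import Awaitable
from concurrent.futures import ThreadPoolExecutor
from typing import TypeVar

_R = TypeVar("_R")

def run_sync(coro: Awaitable[_R]) -> _R:
    """Run a coroutine to completion from synchronous code.

    Outside any event loop, asyncio.run() suffices. Inside a running loop
    (Jupyter, ASGI handlers), asyncio.run() would raise RuntimeError, so the
    coroutine runs on a fresh loop in a worker thread, with contextvars
    copied so request-scoped state survives the thread hop.
    """
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return asyncio.run(coro)
    ctx = contextvars.copy_context()
    with ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(ctx.run, asyncio.run, coro).result()

async def _demo() -> int:
    await asyncio.sleep(0)
    return 42

print(run_sync(_demo()))  # 42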
flock/core/flock_agent.py CHANGED
@@ -335,9 +335,64 @@ class FlockAgent(BaseModel, Serializable, DSPyIntegrationMixin, ABC):
          # For now, assume evaluator handles tool resolution if necessary
          registered_tools = self.tools

-         result = await self.evaluator.evaluate(
-             self, current_inputs, registered_tools
-         )
+         # --------------------------------------------------
+         # Optional DI middleware pipeline
+         # --------------------------------------------------
+         container = None
+         if self.context is not None:
+             container = self.context.get_variable("di.container")
+
+         # If a MiddlewarePipeline is registered in DI, wrap the evaluator
+         result: dict[str, Any] | None = None
+
+         if container is not None:
+             try:
+                 from wd.di.middleware import (
+                     MiddlewarePipeline,
+                 )
+
+                 pipeline: MiddlewarePipeline | None = None
+                 try:
+                     pipeline = container.get_service(MiddlewarePipeline)
+                 except Exception:
+                     pipeline = None
+
+                 if pipeline is not None:
+                     # Build execution chain where the evaluator is the terminal handler
+
+                     async def _final_handler():
+                         return await self.evaluator.evaluate(
+                             self, current_inputs, registered_tools
+                         )
+
+                     idx = 0
+
+                     async def _invoke_next():
+                         nonlocal idx
+
+                         if idx < len(pipeline._middleware):
+                             mw = pipeline._middleware[idx]
+                             idx += 1
+                             return await mw(self.context, _invoke_next)  # type: ignore[arg-type]
+                         return await _final_handler()
+
+                     # Execute pipeline
+                     result = await _invoke_next()
+                 else:
+                     # No pipeline registered, direct evaluation
+                     result = await self.evaluator.evaluate(
+                         self, current_inputs, registered_tools
+                     )
+             except ImportError:
+                 # wd.di not installed – fall back
+                 result = await self.evaluator.evaluate(
+                     self, current_inputs, registered_tools
+                 )
+         else:
+             # No DI container – standard execution
+             result = await self.evaluator.evaluate(
+                 self, current_inputs, registered_tools
+             )
      except Exception as eval_error:
          logger.error(
              "Error during evaluate",