flock-core 0.4.519__py3-none-any.whl → 0.5.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (103)
  1. flock/cli/manage_agents.py +3 -3
  2. flock/components/__init__.py +28 -0
  3. flock/components/evaluation/__init__.py +9 -0
  4. flock/components/evaluation/declarative_evaluation_component.py +198 -0
  5. flock/components/routing/__init__.py +15 -0
  6. flock/{routers/conditional/conditional_router.py → components/routing/conditional_routing_component.py} +60 -49
  7. flock/components/routing/default_routing_component.py +103 -0
  8. flock/components/routing/llm_routing_component.py +208 -0
  9. flock/components/utility/__init__.py +15 -0
  10. flock/{modules/enterprise_memory/enterprise_memory_module.py → components/utility/memory_utility_component.py} +195 -173
  11. flock/{modules/performance/metrics_module.py → components/utility/metrics_utility_component.py} +101 -86
  12. flock/{modules/output/output_module.py → components/utility/output_utility_component.py} +49 -49
  13. flock/core/__init__.py +2 -8
  14. flock/core/agent/__init__.py +16 -0
  15. flock/core/agent/flock_agent_components.py +104 -0
  16. flock/core/agent/flock_agent_execution.py +101 -0
  17. flock/core/agent/flock_agent_integration.py +147 -0
  18. flock/core/agent/flock_agent_lifecycle.py +177 -0
  19. flock/core/agent/flock_agent_serialization.py +378 -0
  20. flock/core/component/__init__.py +15 -0
  21. flock/core/{flock_module.py → component/agent_component_base.py} +136 -35
  22. flock/core/component/evaluation_component_base.py +56 -0
  23. flock/core/component/routing_component_base.py +75 -0
  24. flock/core/component/utility_component_base.py +69 -0
  25. flock/core/config/flock_agent_config.py +49 -2
  26. flock/core/evaluation/utils.py +1 -1
  27. flock/core/execution/evaluation_executor.py +1 -1
  28. flock/core/flock.py +137 -483
  29. flock/core/flock_agent.py +151 -1018
  30. flock/core/flock_factory.py +94 -73
  31. flock/core/{flock_registry.py → flock_registry.py.backup} +3 -17
  32. flock/core/logging/logging.py +1 -0
  33. flock/core/mcp/flock_mcp_server.py +42 -37
  34. flock/core/mixin/dspy_integration.py +5 -5
  35. flock/core/orchestration/__init__.py +18 -0
  36. flock/core/orchestration/flock_batch_processor.py +94 -0
  37. flock/core/orchestration/flock_evaluator.py +113 -0
  38. flock/core/orchestration/flock_execution.py +288 -0
  39. flock/core/orchestration/flock_initialization.py +125 -0
  40. flock/core/orchestration/flock_server_manager.py +65 -0
  41. flock/core/orchestration/flock_web_server.py +117 -0
  42. flock/core/registry/__init__.py +39 -0
  43. flock/core/registry/agent_registry.py +69 -0
  44. flock/core/registry/callable_registry.py +139 -0
  45. flock/core/registry/component_discovery.py +142 -0
  46. flock/core/registry/component_registry.py +64 -0
  47. flock/core/registry/config_mapping.py +64 -0
  48. flock/core/registry/decorators.py +137 -0
  49. flock/core/registry/registry_hub.py +202 -0
  50. flock/core/registry/server_registry.py +57 -0
  51. flock/core/registry/type_registry.py +86 -0
  52. flock/core/serialization/flock_serializer.py +33 -30
  53. flock/core/serialization/serialization_utils.py +28 -25
  54. flock/core/util/input_resolver.py +29 -2
  55. flock/platform/docker_tools.py +3 -3
  56. flock/tools/markdown_tools.py +1 -2
  57. flock/tools/text_tools.py +1 -2
  58. flock/webapp/app/main.py +9 -5
  59. flock/workflow/activities.py +59 -84
  60. flock/workflow/activities_unified.py +230 -0
  61. flock/workflow/agent_execution_activity.py +6 -6
  62. flock/workflow/flock_workflow.py +1 -1
  63. {flock_core-0.4.519.dist-info → flock_core-0.5.0b1.dist-info}/METADATA +4 -4
  64. {flock_core-0.4.519.dist-info → flock_core-0.5.0b1.dist-info}/RECORD +67 -68
  65. flock/core/flock_evaluator.py +0 -60
  66. flock/core/flock_router.py +0 -83
  67. flock/evaluators/__init__.py +0 -1
  68. flock/evaluators/declarative/__init__.py +0 -1
  69. flock/evaluators/declarative/declarative_evaluator.py +0 -194
  70. flock/evaluators/memory/memory_evaluator.py +0 -90
  71. flock/evaluators/test/test_case_evaluator.py +0 -38
  72. flock/evaluators/zep/zep_evaluator.py +0 -59
  73. flock/modules/__init__.py +0 -1
  74. flock/modules/assertion/__init__.py +0 -1
  75. flock/modules/assertion/assertion_module.py +0 -286
  76. flock/modules/callback/__init__.py +0 -1
  77. flock/modules/callback/callback_module.py +0 -91
  78. flock/modules/enterprise_memory/README.md +0 -99
  79. flock/modules/mem0/__init__.py +0 -1
  80. flock/modules/mem0/mem0_module.py +0 -126
  81. flock/modules/mem0_async/__init__.py +0 -1
  82. flock/modules/mem0_async/async_mem0_module.py +0 -126
  83. flock/modules/memory/__init__.py +0 -1
  84. flock/modules/memory/memory_module.py +0 -429
  85. flock/modules/memory/memory_parser.py +0 -125
  86. flock/modules/memory/memory_storage.py +0 -736
  87. flock/modules/output/__init__.py +0 -1
  88. flock/modules/performance/__init__.py +0 -1
  89. flock/modules/zep/__init__.py +0 -1
  90. flock/modules/zep/zep_module.py +0 -192
  91. flock/routers/__init__.py +0 -1
  92. flock/routers/agent/__init__.py +0 -1
  93. flock/routers/agent/agent_router.py +0 -236
  94. flock/routers/agent/handoff_agent.py +0 -58
  95. flock/routers/default/__init__.py +0 -1
  96. flock/routers/default/default_router.py +0 -80
  97. flock/routers/feedback/feedback_router.py +0 -114
  98. flock/routers/list_generator/list_generator_router.py +0 -166
  99. flock/routers/llm/__init__.py +0 -1
  100. flock/routers/llm/llm_router.py +0 -365
  101. {flock_core-0.4.519.dist-info → flock_core-0.5.0b1.dist-info}/WHEEL +0 -0
  102. {flock_core-0.4.519.dist-info → flock_core-0.5.0b1.dist-info}/entry_points.txt +0 -0
  103. {flock_core-0.4.519.dist-info → flock_core-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
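
The bulk of this release is the refactor visible in the renames above: the old flock/modules/* and flock/routers/* trees collapse into a unified flock/components/* package, FlockModule/FlockModuleConfig give way to component base classes under flock/core/component/, and the monolithic flock_registry is split into flock/core/registry/. As a rough illustration of the new surface, here is a minimal custom utility component. This is a sketch inferred solely from the import paths and hook signatures in the memory_utility_component.py diff below; the WordCount* names and the config field are hypothetical, and Field is assumed to come from pydantic as in the config models shown in the diff.

from typing import Any

from pydantic import Field  # assumption: Field is pydantic's, as used by the config models below

from flock.core.component.agent_component_base import AgentComponentConfig
from flock.core.component.utility_component_base import UtilityComponentBase
from flock.core.context.context import FlockContext
from flock.core.registry import flock_component


class WordCountConfig(AgentComponentConfig):
    """Config for this example component (hypothetical)."""

    enabled: bool = Field(default=True, description="Toggle the component on/off")


@flock_component(config_class=WordCountConfig)
class WordCountUtilityComponent(UtilityComponentBase):
    """Toy utility component that annotates results with a word count."""

    config: WordCountConfig = Field(default_factory=WordCountConfig)

    async def on_post_evaluate(
        self,
        agent: Any,  # FlockAgent; typed loosely to keep the sketch import-light
        inputs: dict[str, Any],
        context: FlockContext | None = None,
        result: dict[str, Any] | None = None,
    ) -> dict[str, Any] | None:
        # Hook signature mirrors MemoryUtilityComponent.on_post_evaluate below.
        if result is not None and self.config.enabled:
            result["word_count"] = sum(len(str(v).split()) for v in result.values())
        return result

The full diff for the migrated memory component follows.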
--- flock/modules/enterprise_memory/enterprise_memory_module.py
+++ flock/components/utility/memory_utility_component.py
@@ -1,8 +1,7 @@
-from __future__ import annotations
-
-"""Enterprise-grade memory module for Flock.
+# src/flock/components/utility/memory_utility_component.py
+"""Enterprise-grade memory utility component for Flock using unified component architecture.
 
-This module persists:
+This component persists:
   • vector embeddings in a Chroma collection (or any collection that
     implements the same API)
   • a concept graph in Neo4j/Memgraph (Cypher-compatible)
@@ -11,12 +10,14 @@ It follows the same life-cycle callbacks as the standard MemoryModule but
 is designed for large-scale, concurrent deployments.
 """
 
+from __future__ import annotations
+
 import asyncio
 import json
 import time
 import uuid
 from pathlib import Path
-from typing import Any, Literal
+from typing import TYPE_CHECKING, Any, Literal
 
 from neo4j import AsyncGraphDatabase
 from opentelemetry import trace
@@ -27,31 +28,32 @@ from flock.adapter.azure_adapter import AzureSearchAdapter
 from flock.adapter.chroma_adapter import ChromaAdapter
 from flock.adapter.faiss_adapter import FAISSAdapter
 from flock.adapter.pinecone_adapter import PineconeAdapter
-
-# Adapter imports
 from flock.adapter.vector_base import VectorAdapter
+from flock.core.component.agent_component_base import AgentComponentConfig
+from flock.core.component.utility_component_base import UtilityComponentBase
 from flock.core.context.context import FlockContext
-from flock.core.flock_agent import FlockAgent
-from flock.core.flock_module import FlockModule, FlockModuleConfig
-from flock.core.flock_registry import flock_component
+from flock.core.registry import flock_component
 from flock.core.logging.logging import get_logger
-from flock.modules.performance.metrics_module import MetricsModule
 
-logger = get_logger("enterprise_memory")
+# Conditional import for MetricsUtilityComponent to avoid circular imports
+if TYPE_CHECKING:
+    from flock.components.utility.metrics_utility_component import (
+        MetricsUtilityComponent,
+    )
+    from flock.core.flock_agent import FlockAgent
+
+logger = get_logger("components.utility.memory")
 tracer = trace.get_tracer(__name__)
 
 
-# ---------------------------------------------------------------------------
-# Configuration
-# ---------------------------------------------------------------------------
-class EnterpriseMemoryModuleConfig(FlockModuleConfig):
-    """Configuration for EnterpriseMemoryModule."""
+class MemoryUtilityConfig(AgentComponentConfig):
+    """Configuration for MemoryUtilityComponent."""
 
     # ---------------------
     # Vector store settings
     # ---------------------
 
-    vector_backend: Literal["chroma", "pinecone", "azure"] = Field(
+    vector_backend: Literal["chroma", "pinecone", "azure", "faiss"] = Field(
         default="chroma",
         description="Which vector backend to use (chroma | pinecone | azure)",
     )
@@ -68,29 +70,48 @@ class EnterpriseMemoryModuleConfig(FlockModuleConfig):
         default=None,
         description="If provided, connect to a remote Chroma HTTP server at this host",
     )
-    chroma_port: int = Field(default=8000, description="Remote Chroma HTTP port")
+    chroma_port: int = Field(
+        default=8000, description="Remote Chroma HTTP port"
+    )
 
     # --- Pinecone ---
-    pinecone_api_key: str | None = Field(default=None, description="Pinecone API key")
-    pinecone_env: str | None = Field(default=None, description="Pinecone environment")
-    pinecone_index: str | None = Field(default=None, description="Pinecone index name")
+    pinecone_api_key: str | None = Field(
+        default=None, description="Pinecone API key"
+    )
+    pinecone_env: str | None = Field(
+        default=None, description="Pinecone environment"
+    )
+    pinecone_index: str | None = Field(
+        default=None, description="Pinecone index name"
+    )
 
     # --- Azure Cognitive Search ---
-    azure_search_endpoint: str | None = Field(default=None, description="Azure search endpoint (https://<service>.search.windows.net)")
-    azure_search_key: str | None = Field(default=None, description="Azure search admin/key")
-    azure_search_index_name: str | None = Field(default=None, description="Azure search index name")
+    azure_search_endpoint: str | None = Field(
+        default=None,
+        description="Azure search endpoint (https://<service>.search.windows.net)",
+    )
+    azure_search_key: str | None = Field(
+        default=None, description="Azure search admin/key"
+    )
+    azure_search_index_name: str | None = Field(
+        default=None, description="Azure search index name"
+    )
 
     # Graph DB (Neo4j / Memgraph) settings
     cypher_uri: str = Field(
         default="bolt://localhost:7687", description="Bolt URI for the graph DB"
     )
     cypher_username: str = Field(default="neo4j", description="Username for DB")
-    cypher_password: str = Field(default="password", description="Password for DB")
+    cypher_password: str = Field(
+        default="password", description="Password for DB"
+    )
 
     similarity_threshold: float = Field(
         default=0.5, description="Cosine-similarity threshold for retrieval"
     )
-    max_results: int = Field(default=10, description="Maximum retrieved memories")
+    max_results: int = Field(
+        default=10, description="Maximum retrieved memories"
+    )
     number_of_concepts_to_extract: int = Field(
         default=3, description="Number of concepts extracted per chunk"
    )
@@ -109,16 +130,17 @@ class EnterpriseMemoryModuleConfig(FlockModuleConfig):
     )
 
 
-# ---------------------------------------------------------------------------
-# Storage Abstraction
-# ---------------------------------------------------------------------------
-class EnterpriseMemoryStore:
-    """Persistence layer that wraps Chroma + Cypher graph."""
+class MemoryStore:
+    """Persistence layer that wraps vector store + Cypher graph."""
 
-    def __init__(self, cfg: EnterpriseMemoryModuleConfig, metrics_module: MetricsModule | None = None):
+    def __init__(
+        self,
+        cfg: MemoryUtilityConfig,
+        metrics_component: MetricsUtilityComponent | None = None,
+    ):
         self.cfg = cfg
-        # Metrics module (DI-resolved or fallback)
-        self._metrics = metrics_module or MetricsModule  # can be either instance or class exposing .record
+        # Metrics component (DI-resolved or fallback)
+        self._metrics = metrics_component
         # Lazy initialise expensive resources
         self._embedding_model: SentenceTransformer | None = None
         self._adapter: VectorAdapter | None = None
@@ -127,15 +149,16 @@ class EnterpriseMemoryStore:
         self._write_lock = asyncio.Lock()
         self._concept_cache: set[str] | None = None  # names of known concepts
 
-    # ---------------------------------------------------------------------
-    # Connections
-    # ---------------------------------------------------------------------
     def _ensure_embedding_model(self) -> SentenceTransformer:
         if self._embedding_model is None:
             logger.debug("Loading embedding model 'all-MiniLM-L6-v2'")
-            with tracer.start_as_current_span("memory.load_embedding_model") as span:
+            with tracer.start_as_current_span(
+                "memory.load_embedding_model"
+            ) as span:
                 try:
-                    self._embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
+                    self._embedding_model = SentenceTransformer(
+                        "all-MiniLM-L6-v2"
+                    )
                     span.set_attribute("model", "all-MiniLM-L6-v2")
                 except Exception as e:
                     span.record_exception(e)
@@ -183,9 +206,6 @@ class EnterpriseMemoryStore:
         )
         return self._driver
 
-    # ---------------------------------------------------------------------
-    # Public API
-    # ---------------------------------------------------------------------
     async def add_entry(
         self,
         content: str,
@@ -194,7 +214,8 @@
     ) -> str:
         """Store a chunk in both vector store and graph DB and return its id."""
         with tracer.start_as_current_span("memory.add_entry") as span:
-            span.set_attribute("entry_id", str(uuid.uuid4()))
+            entry_id = str(uuid.uuid4())
+            span.set_attribute("entry_id", entry_id)
 
             # Embed
             embedding = self._ensure_embedding_model().encode(content).tolist()
@@ -207,7 +228,7 @@
             start_t = time.perf_counter()
             try:
                 adapter.add(
-                    id=span.get_attribute("entry_id"),
+                    id=entry_id,
                     content=content,
                     embedding=embedding,
                     metadata=metadata,
@@ -217,18 +238,22 @@
                 raise
             finally:
                 elapsed = (time.perf_counter() - start_t) * 1000  # ms
-                self._metrics.record(
-                    "memory_add_latency_ms",
-                    elapsed,
-                    {"backend": self.cfg.vector_backend},
-                )
+                if self._metrics:
+                    self._metrics.record(
+                        "memory_add_latency_ms",
+                        elapsed,
+                        {"backend": self.cfg.vector_backend},
+                    )
 
-        # Schedule graph writes (batched)
-        async with self._write_lock:
-            self._pending_writes.append((span.get_attribute("entry_id"), {"concepts": concepts}))
-            if self.cfg.save_interval and len(self._pending_writes) >= self.cfg.save_interval:
-                await self._flush_pending_graph_writes()
-        return span.get_attribute("entry_id")
+            # Schedule graph writes (batched)
+            async with self._write_lock:
+                self._pending_writes.append((entry_id, {"concepts": concepts}))
+                if (
+                    self.cfg.save_interval
+                    and len(self._pending_writes) >= self.cfg.save_interval
+                ):
+                    await self._flush_pending_graph_writes()
+            return entry_id
 
     async def search(
         self, query_text: str, threshold: float, k: int
@@ -247,9 +272,10 @@
             search_start = time.perf_counter()
             vector_hits = adapter.query(embedding=embedding, k=k)
             search_elapsed = (time.perf_counter() - search_start) * 1000
-            self._metrics.record(
-                "memory_search_hits", len(vector_hits), {"backend": backend}
-            )
+            if self._metrics:
+                self._metrics.record(
+                    "memory_search_hits", len(vector_hits), {"backend": backend}
+                )
             for hit in vector_hits:
                 if hit.score < threshold:
                     continue
@@ -263,14 +289,14 @@
                 )
 
             span.set_attribute("results_count", len(results))
-            self._metrics.record(
-                "memory_search_latency_ms", search_elapsed, {"backend": backend}
-            )
+            if self._metrics:
+                self._metrics.record(
+                    "memory_search_latency_ms",
+                    search_elapsed,
+                    {"backend": backend},
+                )
             return results
 
-    # ------------------------------------------------------------------
-    # Graph persistence helpers
-    # ------------------------------------------------------------------
     async def _flush_pending_graph_writes(self):
         """Commit queued node/edge creations to the Cypher store."""
         if not self._pending_writes:
@@ -304,6 +330,7 @@
         """Generate and save a PNG of the concept graph."""
         try:
             import matplotlib
+
             matplotlib.use("Agg")
             import matplotlib.pyplot as plt
             import networkx as nx
@@ -312,7 +339,10 @@
                 "MATCH (c1:Concept)<-[:MENTIONS]-(:Memory)-[:MENTIONS]->(c2:Concept) "
                 "RETURN DISTINCT c1.name AS source, c2.name AS target"
             )
-            edges = [(r["source"], r["target"]) for r in await records.values("source", "target")]
+            edges = [
+                (r["source"], r["target"])
+                for r in await records.values("source", "target")
+            ]
             if not edges:
                 return
 
@@ -321,7 +351,9 @@
 
             pos = nx.spring_layout(G, k=0.4)
             plt.figure(figsize=(12, 9), dpi=100)
-            nx.draw_networkx_nodes(G, pos, node_color="#8fa8d6", node_size=500, edgecolors="white")
+            nx.draw_networkx_nodes(
+                G, pos, node_color="#8fa8d6", node_size=500, edgecolors="white"
+            )
             nx.draw_networkx_edges(G, pos, alpha=0.5, width=1.5)
             nx.draw_networkx_labels(G, pos, font_size=8)
             plt.axis("off")
@@ -335,6 +367,52 @@
         except Exception as e:
             logger.warning("Failed to export concept graph image: %s", e)
 
+    async def _deduplicate_concepts(self, new_concepts: set[str]) -> set[str]:
+        """Return a set of concept names that merges with existing ones to avoid duplicates.
+
+        Strategy: case-insensitive equality first, then fuzzy match via difflib with cutoff 0.85.
+        """
+        await self._ensure_concept_cache()
+        assert self._concept_cache is not None
+
+        from difflib import get_close_matches
+
+        unified: set[str] = set()
+        for concept in new_concepts:
+            # Exact (case-insensitive) match
+            lower = concept.lower()
+            exact = next(
+                (c for c in self._concept_cache if c.lower() == lower), None
+            )
+            if exact:
+                unified.add(exact)
+                continue
+
+            # Fuzzy match (>=0.85 similarity)
+            close = get_close_matches(
+                concept, list(self._concept_cache), n=1, cutoff=0.85
+            )
+            if close:
+                unified.add(close[0])
+                continue
+
+            # No match – treat as new
+            unified.add(concept)
+            self._concept_cache.add(concept)
+        return unified
+
+    async def _ensure_concept_cache(self):
+        if self._concept_cache is not None:
+            return
+        driver = self._ensure_graph_driver()
+        async with driver.session() as session:
+            records = await session.run(
+                "MATCH (c:Concept) RETURN c.name AS name"
+            )
+            self._concept_cache = {
+                r["name"] for r in await records.values("name")
+            }
+
     async def close(self):
         if self._pending_writes:
             await self._flush_pending_graph_writes()
@@ -344,77 +422,51 @@ class EnterpriseMemoryStore:
             self._adapter.close()
 
 
-# ---------------------------------------------------------------------------
-# Module
-# ---------------------------------------------------------------------------
-@flock_component(config_class=EnterpriseMemoryModuleConfig)
-class EnterpriseMemoryModule(FlockModule):
-    """Enterprise-ready memory module using real datastores."""
+@flock_component(config_class=MemoryUtilityConfig)
+class MemoryUtilityComponent(UtilityComponentBase):
+    """Enterprise-ready memory utility component using real datastores."""
 
-    name: str = "enterprise_memory"
-    config: EnterpriseMemoryModuleConfig = Field(default_factory=EnterpriseMemoryModuleConfig)
-
-    _store: EnterpriseMemoryStore | None = None
-    _container: Any | None = None  # DI container if supplied
-    _metrics_module: MetricsModule | None = None
+    config: MemoryUtilityConfig = Field(
+        default_factory=MemoryUtilityConfig,
+        description="Memory configuration",
+    )
 
-    # ----------------------------------------------------------
-    # DI-enabled constructor
-    # ----------------------------------------------------------
     def __init__(
         self,
-        name: str = "enterprise_memory",
-        config: EnterpriseMemoryModuleConfig | None = None,
-        *,
-        container: object | None = None,
-        **kwargs,
+        name: str = "memory",
+        config: MemoryUtilityConfig | None = None,
+        **data,
     ):
-        """Create a new EnterpriseMemoryModule instance.
-
-        Parameters
-        ----------
-        container : ServiceProvider | None
-            Optional DI container used to resolve shared services. When
-            provided, the module will attempt to resolve
-            :class:`flock.modules.performance.metrics_module.MetricsModule` from
-            it. Falling back to the global singleton when not available keeps
-            backward-compatibility.
-        """
-        from wd.di.container import (
-            ServiceProvider,  # Local import to avoid hard dependency if wd.di is absent
-        )
-
         if config is None:
-            config = EnterpriseMemoryModuleConfig()
+            config = MemoryUtilityConfig()
+        super().__init__(name=name, config=config, **data)
 
-        super().__init__(name=name, config=config, **kwargs)
+        self._store: MemoryStore | None = None
+        self._metrics_component: MetricsUtilityComponent | None = None
 
-        self._container = container if isinstance(container, ServiceProvider) else None
-
-        # Attempt to resolve MetricsModule via DI, then via FlockModule registry
-        resolved_metrics: MetricsModule | None = None
-        if self._container is not None:
-            try:
-                resolved_metrics = self._container.get_service(MetricsModule)
-            except Exception:
-                resolved_metrics = None
-
-        if resolved_metrics is None:
-            resolved_metrics = MetricsModule._INSTANCE
+    def _get_metrics_component(self) -> MetricsUtilityComponent | None:
+        """Try to get the metrics component from the singleton if available."""
+        try:
+            from flock.components.utility.metrics_utility_component import (
+                MetricsUtilityComponent,
+            )
 
-        self._metrics_module = resolved_metrics
+            return MetricsUtilityComponent._INSTANCE
+        except ImportError:
+            return None
 
-    # ----------------------------------------------------------
-    # Life-cycle hooks
-    # ----------------------------------------------------------
     async def on_initialize(
         self,
         agent: FlockAgent,
         inputs: dict[str, Any],
        context: FlockContext | None = None,
     ) -> None:
-        self._store = EnterpriseMemoryStore(self.config, self._metrics_module)
-        logger.info("EnterpriseMemoryModule initialised", agent=agent.name)
+        """Initialize the memory store."""
+        self._metrics_component = self._get_metrics_component()
+        self._store = MemoryStore(self.config, self._metrics_component)
+        logger.info(
+            "MemoryUtilityComponent initialised for agent: %s", agent.name
+        )
 
     async def on_pre_evaluate(
         self,
@@ -422,6 +474,7 @@ class EnterpriseMemoryModule(FlockModule):
         inputs: dict[str, Any],
         context: FlockContext | None = None,
     ) -> dict[str, Any]:
+        """Retrieve relevant memories and inject into inputs."""
         if not self._store:
             return inputs
         try:
@@ -434,10 +487,15 @@
             if matches:
                 inputs = {**inputs, "context": matches}
                 # Advertise new input key to DSPy signature if needed
-                if isinstance(agent.input, str) and "context:" not in agent.input:
+                if (
+                    isinstance(agent.input, str)
+                    and "context:" not in agent.input
+                ):
                     agent.input += ", context: list | retrieved memories"
         except Exception as e:
-            logger.warning("Enterprise memory retrieval failed: %s", e, agent=agent.name)
+            logger.warning(
+                "Memory retrieval failed for agent %s: %s", agent.name, e
+            )
         return inputs
 
     async def on_post_evaluate(
@@ -447,35 +505,40 @@
         context: FlockContext | None = None,
         result: dict[str, Any] | None = None,
     ) -> dict[str, Any] | None:
+        """Store new memories from inputs and results."""
         if not self._store:
             return result
         try:
-            full_text = json.dumps(inputs) + (json.dumps(result) if result else "")
+            full_text = json.dumps(inputs) + (
+                json.dumps(result) if result else ""
+            )
             concepts = await self._extract_concepts(agent, full_text)
             if self._store:
                 concepts = await self._store._deduplicate_concepts(concepts)
                 await self._store.add_entry(full_text, concepts)
         except Exception as e:
-            logger.warning("Enterprise memory store failed: %s", e, agent=agent.name)
+            logger.warning(
+                "Memory store failed for agent %s: %s", agent.name, e
+            )
         return result
 
     async def on_terminate(
         self,
         agent: FlockAgent,
         inputs: dict[str, Any],
-        result: dict[str, Any],
         context: FlockContext | None = None,
+        result: dict[str, Any] | None = None,
    ) -> None:
+        """Clean up memory store resources."""
         if self._store:
             await self._store.close()
 
-    # ----------------------------------------------------------
-    # Helpers (mostly copied from original module but simplified)
-    # ----------------------------------------------------------
-    async def _extract_concepts(self, agent: FlockAgent, text: str) -> set[str]:
+    async def _extract_concepts(
+        self, agent: FlockAgent, text: str
+    ) -> set[str]:
         """Use the LLM to extract concept tokens."""
         concept_signature = agent.create_dspy_signature_class(
-            f"{agent.name}_concept_extractor_enterprise",
+            f"{agent.name}_concept_extractor_memory",
             "Extract key concepts from text",
            "text: str | Input text -> concepts: list[str] | key concepts lower case",
         )
@@ -483,44 +546,3 @@ class EnterpriseMemoryModule(FlockModule):
         predictor = agent._select_task(concept_signature, "Completion")
         res = predictor(text=text)
         return set(getattr(res, "concepts", []))
-
-    # --------------------------------------------------------------
-    # Concept helpers
-    # --------------------------------------------------------------
-    async def _ensure_concept_cache(self):
-        if self._concept_cache is not None:
-            return
-        driver = self._ensure_graph_driver()
-        async with driver.session() as session:
-            records = await session.run("MATCH (c:Concept) RETURN c.name AS name")
-            self._concept_cache = {r["name"] for r in await records.values("name")}
-
-    async def _deduplicate_concepts(self, new_concepts: set[str]) -> set[str]:
-        """Return a set of concept names that merges with existing ones to avoid duplicates.
-
-        Strategy: case-insensitive equality first, then fuzzy match via difflib with cutoff 0.85.
-        """
-        await self._ensure_concept_cache()
-        assert self._concept_cache is not None
-
-        from difflib import get_close_matches
-
-        unified: set[str] = set()
-        for concept in new_concepts:
-            # Exact (case-insensitive) match
-            lower = concept.lower()
-            exact = next((c for c in self._concept_cache if c.lower() == lower), None)
-            if exact:
-                unified.add(exact)
-                continue
-
-            # Fuzzy match (>=0.85 similarity)
-            close = get_close_matches(concept, list(self._concept_cache), n=1, cutoff=0.85)
-            if close:
-                unified.add(close[0])
-                continue
-
-            # No match – treat as new
-            unified.add(concept)
-            self._concept_cache.add(concept)
-        return unified
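
A note on the concept-deduplication logic that moved from the module onto the store: the strategy, per the docstring above, is case-insensitive exact match first, then a difflib fuzzy match at cutoff 0.85, and otherwise the concept is treated as new. The standalone sketch below exercises just that strategy with plain stdlib code; the deduplicate helper and the sample concept names are ours, not part of the package.

from difflib import get_close_matches


def deduplicate(known: set[str], new_concepts: set[str]) -> set[str]:
    """Merge new concept names into a known set, reusing near-duplicates."""
    unified: set[str] = set()
    for concept in new_concepts:
        # 1) Exact match, ignoring case: reuse the canonical spelling.
        lower = concept.lower()
        exact = next((c for c in known if c.lower() == lower), None)
        if exact:
            unified.add(exact)
            continue
        # 2) Fuzzy match at >= 0.85 similarity: merge into the close neighbour.
        close = get_close_matches(concept, list(known), n=1, cutoff=0.85)
        if close:
            unified.add(close[0])
            continue
        # 3) Otherwise it is a genuinely new concept.
        unified.add(concept)
        known.add(concept)
    return unified


known = {"vector database", "graph traversal"}
print(sorted(deduplicate(known, {"Vector Database", "graph traversals", "embeddings"})))
# ['embeddings', 'graph traversal', 'vector database']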