flock-core 0.4.520__py3-none-any.whl → 0.5.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of flock-core might be problematic.
Files changed (103)
  1. flock/cli/manage_agents.py +3 -3
  2. flock/components/__init__.py +28 -0
  3. flock/components/evaluation/__init__.py +9 -0
  4. flock/components/evaluation/declarative_evaluation_component.py +198 -0
  5. flock/components/routing/__init__.py +15 -0
  6. flock/{routers/conditional/conditional_router.py → components/routing/conditional_routing_component.py} +60 -49
  7. flock/components/routing/default_routing_component.py +103 -0
  8. flock/components/routing/llm_routing_component.py +208 -0
  9. flock/components/utility/__init__.py +15 -0
  10. flock/{modules/enterprise_memory/enterprise_memory_module.py → components/utility/memory_utility_component.py} +195 -173
  11. flock/{modules/performance/metrics_module.py → components/utility/metrics_utility_component.py} +101 -86
  12. flock/{modules/output/output_module.py → components/utility/output_utility_component.py} +49 -49
  13. flock/core/__init__.py +2 -8
  14. flock/core/agent/__init__.py +16 -0
  15. flock/core/agent/flock_agent_components.py +104 -0
  16. flock/core/agent/flock_agent_execution.py +101 -0
  17. flock/core/agent/flock_agent_integration.py +147 -0
  18. flock/core/agent/flock_agent_lifecycle.py +177 -0
  19. flock/core/agent/flock_agent_serialization.py +378 -0
  20. flock/core/component/__init__.py +15 -0
  21. flock/core/{flock_module.py → component/agent_component_base.py} +136 -35
  22. flock/core/component/evaluation_component_base.py +56 -0
  23. flock/core/component/routing_component_base.py +75 -0
  24. flock/core/component/utility_component_base.py +69 -0
  25. flock/core/config/flock_agent_config.py +49 -2
  26. flock/core/evaluation/utils.py +1 -1
  27. flock/core/execution/evaluation_executor.py +1 -1
  28. flock/core/flock.py +137 -483
  29. flock/core/flock_agent.py +151 -1018
  30. flock/core/flock_factory.py +94 -73
  31. flock/core/{flock_registry.py → flock_registry.py.backup} +3 -17
  32. flock/core/logging/logging.py +1 -0
  33. flock/core/mcp/flock_mcp_server.py +42 -37
  34. flock/core/mixin/dspy_integration.py +5 -5
  35. flock/core/orchestration/__init__.py +18 -0
  36. flock/core/orchestration/flock_batch_processor.py +94 -0
  37. flock/core/orchestration/flock_evaluator.py +113 -0
  38. flock/core/orchestration/flock_execution.py +288 -0
  39. flock/core/orchestration/flock_initialization.py +125 -0
  40. flock/core/orchestration/flock_server_manager.py +65 -0
  41. flock/core/orchestration/flock_web_server.py +117 -0
  42. flock/core/registry/__init__.py +39 -0
  43. flock/core/registry/agent_registry.py +69 -0
  44. flock/core/registry/callable_registry.py +139 -0
  45. flock/core/registry/component_discovery.py +142 -0
  46. flock/core/registry/component_registry.py +64 -0
  47. flock/core/registry/config_mapping.py +64 -0
  48. flock/core/registry/decorators.py +137 -0
  49. flock/core/registry/registry_hub.py +202 -0
  50. flock/core/registry/server_registry.py +57 -0
  51. flock/core/registry/type_registry.py +86 -0
  52. flock/core/serialization/flock_serializer.py +33 -30
  53. flock/core/serialization/serialization_utils.py +28 -25
  54. flock/core/util/input_resolver.py +29 -2
  55. flock/platform/docker_tools.py +3 -3
  56. flock/tools/markdown_tools.py +1 -2
  57. flock/tools/text_tools.py +1 -2
  58. flock/webapp/app/main.py +9 -5
  59. flock/workflow/activities.py +59 -84
  60. flock/workflow/activities_unified.py +230 -0
  61. flock/workflow/agent_execution_activity.py +6 -6
  62. flock/workflow/flock_workflow.py +1 -1
  63. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/METADATA +2 -2
  64. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/RECORD +67 -68
  65. flock/core/flock_evaluator.py +0 -60
  66. flock/core/flock_router.py +0 -83
  67. flock/evaluators/__init__.py +0 -1
  68. flock/evaluators/declarative/__init__.py +0 -1
  69. flock/evaluators/declarative/declarative_evaluator.py +0 -194
  70. flock/evaluators/memory/memory_evaluator.py +0 -90
  71. flock/evaluators/test/test_case_evaluator.py +0 -38
  72. flock/evaluators/zep/zep_evaluator.py +0 -59
  73. flock/modules/__init__.py +0 -1
  74. flock/modules/assertion/__init__.py +0 -1
  75. flock/modules/assertion/assertion_module.py +0 -286
  76. flock/modules/callback/__init__.py +0 -1
  77. flock/modules/callback/callback_module.py +0 -91
  78. flock/modules/enterprise_memory/README.md +0 -99
  79. flock/modules/mem0/__init__.py +0 -1
  80. flock/modules/mem0/mem0_module.py +0 -126
  81. flock/modules/mem0_async/__init__.py +0 -1
  82. flock/modules/mem0_async/async_mem0_module.py +0 -126
  83. flock/modules/memory/__init__.py +0 -1
  84. flock/modules/memory/memory_module.py +0 -429
  85. flock/modules/memory/memory_parser.py +0 -125
  86. flock/modules/memory/memory_storage.py +0 -736
  87. flock/modules/output/__init__.py +0 -1
  88. flock/modules/performance/__init__.py +0 -1
  89. flock/modules/zep/__init__.py +0 -1
  90. flock/modules/zep/zep_module.py +0 -192
  91. flock/routers/__init__.py +0 -1
  92. flock/routers/agent/__init__.py +0 -1
  93. flock/routers/agent/agent_router.py +0 -236
  94. flock/routers/agent/handoff_agent.py +0 -58
  95. flock/routers/default/__init__.py +0 -1
  96. flock/routers/default/default_router.py +0 -80
  97. flock/routers/feedback/feedback_router.py +0 -114
  98. flock/routers/list_generator/list_generator_router.py +0 -166
  99. flock/routers/llm/__init__.py +0 -1
  100. flock/routers/llm/llm_router.py +0 -365
  101. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/WHEEL +0 -0
  102. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/entry_points.txt +0 -0
  103. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
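
Taken together, the renames above amount to a restructuring: the 0.4.x flock/modules/, flock/routers/, and flock/evaluators/ packages are removed, their roles move into flock/components/ and flock/core/component/, and the monolithic flock/core/flock_registry.py (kept only as flock_registry.py.backup) is replaced by the flock/core/registry/ package. As a hedged illustration of the import impact, the 0.4.520 imports below appear verbatim in the removed files later in this diff, while the 0.5.0b1 locations are only inferred from the renamed paths above and their exported names are not confirmed here:

# Imports used by the files removed in this release (0.4.520):
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.flock_registry import flock_component

# Probable new homes in 0.5.0b1, inferred from the file renames only
# (exported names are assumptions, not shown in this diff):
#   flock/core/flock_module.py    -> flock/core/component/agent_component_base.py
#   flock/core/flock_registry.py  -> flock/core/registry/ (decorators.py, registry_hub.py, ...)
#   flock/modules/..., flock/routers/..., flock/evaluators/... -> flock/components/...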
flock/modules/mem0_async/async_mem0_module.py
@@ -1,126 +0,0 @@
- from typing import Any
-
- # from mem0.client.main import AsyncMemoryClient, MemoryClient
- # from mem0.memory.main import AsyncMemory
- from mem0 import AsyncMemory, AsyncMemoryClient
- from pydantic import Field
-
- from flock.core.context.context import FlockContext
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_module import FlockModule, FlockModuleConfig
- from flock.core.flock_registry import flock_component
- from flock.core.logging.logging import get_logger
-
- logger = get_logger("module.mem0")
-
-
- config = {
-     "vector_store": {
-         "provider": "chroma",
-         "config": {
-             "collection_name": "flock_memory",
-             "path": ".flock/memory",
-         }
-     }
- }
-
-
- class AsyncMem0ModuleConfig(FlockModuleConfig):
-     top_k: int = Field(default=10, description="Number of memories to retrieve")
-     user_id: str = Field(default="flock", description="User ID the memories will be associated with")
-     agent_id: str = Field(default="flock", description="Agent ID the memories will be associated with")
-     memory_input_key: str | None = Field(default=None, description="Input key to use for memory, if none the description of the agent will be used")
-     api_key: str | None = Field(default=None, description="API key for mem0 Platform")
-     config: dict[str, Any] = Field(default=config, description="Configuration for mem0")
-
-
- @flock_component(config_class=AsyncMem0ModuleConfig)
- class AsyncMem0Module(FlockModule):
-
-     name: str = "mem0"
-     config: AsyncMem0ModuleConfig = AsyncMem0ModuleConfig()
-
-
-     def __init__(self, name, config: AsyncMem0ModuleConfig) -> None:
-         global memory
-         """Initialize Mem0 module."""
-         super().__init__(name=name, config=config)
-         logger.debug("Initializing Mem0 module")
-
-
-
-
-     def dict_to_str_repr(self,d: dict) -> str:
-         return repr(d)
-
-
-     async def on_post_evaluate(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         context: FlockContext | None = None,
-         result: dict[str, Any] | None = None,
-     ) -> dict[str, Any]:
-         if self.config.api_key:
-             memory = AsyncMemoryClient(api_key=self.config.api_key)
-         else:
-             memory = await AsyncMemory.from_config(config_dict=self.config.config)
-
-         agent_id = self.config.agent_id if self.config.agent_id else agent.name
-
-         # get the result without the inputs
-         filtered_result = {k: v for k, v in result.items() if k not in inputs}
-         # get the inputs without memory
-         filtered_inputs = {k: v for k, v in inputs.items() if k not in [self.config.memory_input_key]}
-
-         # add memories about the user inputs
-         added_user_memory = await memory.add(self.dict_to_str_repr(filtered_inputs), user_id=self.config.user_id)
-         logger.info(f"Added caller memory: {added_user_memory}")
-
-         # add memories about the agent result
-         added_agent_memory = await memory.add(self.dict_to_str_repr(filtered_result), agent_id=agent_id)
-         logger.info(f"Added agent memory: {added_agent_memory}")
-
-
-         return result
-
-     async def on_pre_evaluate(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         context: FlockContext | None = None,
-     ) -> dict[str, Any]:
-         if self.config.api_key:
-             memory = AsyncMemoryClient(api_key=self.config.api_key)
-         else:
-             memory = await AsyncMemory.from_config(config_dict=self.config.config)
-
-         message = self.dict_to_str_repr(inputs)
-         agent_id = self.config.agent_id if self.config.agent_id else agent.name
-
-         relevant_agent_memories = await memory.search(query=message, agent_id=agent_id, limit=self.config.top_k)
-         logger.info(f"Relevant agent memories: {relevant_agent_memories}")
-
-         relevant_user_memories = await memory.search(query=message, user_id=self.config.user_id, limit=self.config.top_k)
-         logger.info(f"Relevant user memories: {relevant_user_memories}")
-
-         if relevant_agent_memories or relevant_user_memories:
-             memories_str = ''
-             if "results" in relevant_agent_memories:
-                 memories_str = "\n".join(f"- {entry['memory']}" for entry in relevant_agent_memories["results"])
-             else:
-                 memories_str = "\n".join(f"- {entry}" for entry in relevant_agent_memories)
-
-             if "results" in relevant_user_memories:
-                 memories_str = memories_str + "\n" + "\n".join(f"- {entry['memory']}" for entry in relevant_user_memories["results"])
-             else:
-                 memories_str = memories_str + "\n" + "\n".join(f"- {entry}" for entry in relevant_user_memories)
-
-             if memories_str:
-                 if self.config.memory_input_key:
-                     inputs[self.config.memory_input_key] = memories_str
-                 else:
-                     agent.description = agent.description + "\n\n Memories:" + memories_str
-
-
-         return inputs
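
To make the removal above concrete: AsyncMem0Module hooked mem0-backed memory into the 0.4.x module lifecycle, searching agent- and user-scoped memories in on_pre_evaluate (injecting them either under memory_input_key or into the agent description) and writing the filtered inputs and results back in on_post_evaluate. A minimal construction sketch against this now-removed API, using only names visible in the diff (how the module was attached to a FlockAgent is not shown here):

from flock.modules.mem0_async.async_mem0_module import (
    AsyncMem0Module,
    AsyncMem0ModuleConfig,
)

config = AsyncMem0ModuleConfig(
    top_k=5,                      # memories retrieved per search
    user_id="alice",              # user-scoped memories
    agent_id="support_agent",     # agent-scoped memories
    memory_input_key="memories",  # retrieved memories are injected under this input key
    # api_key="m0-...",           # if set, the hosted mem0 Platform is used instead of the local chroma store
)
module = AsyncMem0Module(name="mem0", config=config)
# Attaching the module to an agent happened through the 0.4.x module mechanism,
# which this release replaces with the new components packages listed above.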
flock/modules/__init__.py
@@ -1 +0,0 @@
- # Package for modules
flock/modules/memory/memory_module.py
@@ -1,429 +0,0 @@
- import json
- import uuid
- from datetime import datetime
- from typing import Any, Literal
-
- from pydantic import Field
-
- from flock.core.context.context import FlockContext
-
- # if TYPE_CHECKING:
- # from flock.core import FlockAgent
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_module import FlockModule, FlockModuleConfig
- from flock.core.flock_registry import flock_component
- from flock.core.logging.logging import get_logger
- from flock.modules.memory.memory_parser import MemoryMappingParser
- from flock.modules.memory.memory_storage import FlockMemoryStore, MemoryEntry
-
- logger = get_logger("memory")
-
-
- class MemoryModuleConfig(FlockModuleConfig):
-     """Configuration for the MemoryModule.
-
-     This class defines the configuration for the MemoryModule.
-     """
-
-     folder_path: str = Field(
-         default=".flock/memory/",
-         description="Directory where memory file and concept graph will be saved",
-     )
-     concept_graph_file: str = Field(
-         default="concept_graph.png",
-         description="Base filename for the concept graph image",
-     )
-     file_path: str | None = Field(
-         default="agent_memory.json", description="Path to save memory file"
-     )
-     memory_mapping: str | None = Field(
-         default=None, description="Memory mapping configuration"
-     )
-     similarity_threshold: float = Field(
-         default=0.5, description="Threshold for semantic similarity"
-     )
-     max_length: int = Field(
-         default=1000, description="Max length of memory entry before splitting"
-     )
-     save_after_update: bool = Field(
-         default=True, description="Whether to save memory after each update"
-     )
-     splitting_mode: Literal["summary", "semantic", "characters", "none"] = (
-         Field(default="characters", description="Mode to split memory content")
-     )
-     enable_read_only_mode: bool = Field(
-         default=False, description="Whether to enable read only mode"
-     )
-     enable_write_only_mode: bool = Field(
-         default=False, description="Whether to enable write only mode"
-     )
-     number_of_concepts_to_extract: int = Field(
-         default=3, description="Number of concepts to extract from the memory"
-     )
-     memory_input_key: str | None = Field(default=None, description="Input key to use for memory, if none the description of the agent will be used")
-
-
-
- @flock_component(config_class=MemoryModuleConfig)
- class MemoryModule(FlockModule):
-     """Module that adds memory capabilities to a Flock agent."""
-
-     name: str = "memory"
-     config: MemoryModuleConfig = Field(
-         default_factory=MemoryModuleConfig,
-         description="Memory module configuration",
-     )
-     memory_store: FlockMemoryStore | None = None
-     memory_ops: list[Any] = []
-
-     def __init__(self, name: str, config: MemoryModuleConfig):
-         super().__init__(name=name, config=config)
-         self.memory_store = FlockMemoryStore.load_from_file(
-             self.get_memory_filename(name)
-         )
-         self.memory_ops = (
-             MemoryMappingParser().parse(self.config.memory_mapping)
-             if self.config.memory_mapping
-             else [{"type": "semantic"}]
-         )
-
-     async def on_initialize(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         context: FlockContext | None = None,
-     ) -> None:
-         """Initialize memory store if needed."""
-         if not self.memory_store:
-             self.memory_store = FlockMemoryStore.load_from_file(
-                 self.get_memory_filename(self.name)
-             )
-             self.memory_ops = (
-                 MemoryMappingParser().parse(self.config.memory_mapping)
-                 if self.config.memory_mapping
-                 else [{"type": "semantic"}]
-             )
-         logger.debug(f"Initialized memory module for agent {agent.name}")
-
-     async def on_pre_evaluate(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         context: FlockContext | None = None,
-     ) -> dict[str, Any]:
-         """Check memory before evaluation."""
-         if not self.memory_store:
-             return inputs
-
-         if self.config.enable_write_only_mode:
-             return inputs
-
-         inputs = await self.search_memory(agent, inputs)
-
-         if "context" in inputs:
-             agent.input = (
-                 agent.input + ", context: list | context with more information"
-             )
-
-         return inputs
-
-     def get_memory_filename(self, module_name: str) -> str:
-         """Generate the full file path for the memory file."""
-         folder = self.config.folder_path
-         if not folder.endswith(("/", "\\")):
-             folder += "/"
-         import os
-
-         if not os.path.exists(folder):
-             os.makedirs(folder, exist_ok=True)
-         # Determine base filename and extension from file_path config
-         if self.config.file_path:
-             file_name = self.config.file_path.rsplit("/", 1)[-1].rsplit(
-                 "\\", 1
-             )[-1]
-             if "." in file_name:
-                 base, ext = file_name.rsplit(".", 1)
-                 ext = f".{ext}"
-             else:
-                 base, ext = file_name, ""
-         else:
-             base, ext = "agent_memory", ".json"
-         return f"{folder}{module_name}_{base}{ext}"
-
-     def get_concept_graph_filename(self, module_name: str) -> str:
-         """Generate the full file path for the concept graph image."""
-         folder = self.config.folder_path
-         if not folder.endswith(("/", "\\")):
-             folder += "/"
-         import os
-
-         if not os.path.exists(folder):
-             os.makedirs(folder, exist_ok=True)
-         # Use timestamp to create a unique filename
-         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
-         if self.config.concept_graph_file:
-             file_name = self.config.concept_graph_file.rsplit("/", 1)[
-                 -1
-             ].rsplit("\\", 1)[-1]
-             if "." in file_name:
-                 base, ext = file_name.rsplit(".", 1)
-                 ext = f".{ext}"
-             else:
-                 base, ext = file_name, ""
-         else:
-             base, ext = "concept_graph", ".png"
-         return f"{folder}{module_name}_{base}_{timestamp}{ext}"
-
-     async def search_memory(
-         self, agent: FlockAgent, query: dict[str, Any]
-     ) -> dict[str, Any]:
-         """Search memory for the query."""
-         if not self.memory_store:
-             # No memory store loaded – just return the untouched input
-             return query
-
-         try:
-             input_text = json.dumps(query)
-             query_embedding = self.memory_store.compute_embedding(input_text)
-             concepts = await self._extract_concepts(
-                 agent, input_text, self.config.number_of_concepts_to_extract
-             )
-
-             memory_results = []
-             for op in self.memory_ops:
-                 if op["type"] == "semantic":
-                     semantic_results = self.memory_store.retrieve(
-                         query_embedding,
-                         concepts,
-                         similarity_threshold=self.config.similarity_threshold,
-                     )
-                     memory_results.extend(semantic_results)
-                 elif op["type"] == "exact":
-                     exact_results = self.memory_store.exact_match(query)
-                     memory_results.extend(exact_results)
-
-             context: list[dict[str, Any]] = []
-             if memory_results:
-                 for result in memory_results:
-                     context.append(
-                         {"content": result.content, "concepts": result.concepts}
-                     )
-
-                 logger.debug(
-                     f"Found {len(memory_results)} relevant memories",
-                     agent=agent.name,
-                 )
-             query["context"] = context
-
-             return query
-
-         except Exception as e:
-             logger.warning(f"Memory retrieval failed: {e}", agent=agent.name)
-             return query
-
-     async def add_to_memory(
-         self, agent: FlockAgent, data: dict[str, Any]
-     ) -> None:
-         """Add data to memory."""
-         if not self.memory_store:
-             return
-
-         try:
-             chunks = await self._get_chunks(agent, data, None)
-             await self._store_chunks(agent, chunks)
-         except Exception as e:
-             logger.warning(f"Memory storage failed: {e}", agent=agent.name)
-
-     async def on_post_evaluate(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         context: FlockContext | None = None,
-         result: dict[str, Any] | None = None,
-     ) -> dict[str, Any]:
-         """Store results in memory after evaluation."""
-         if not self.memory_store:
-             return result
-
-         if self.config.enable_read_only_mode:
-             return result
-
-         try:
-             chunks = await self._get_chunks(agent, inputs, result)
-             await self._store_chunks(agent, chunks)
-         except Exception as e:
-             logger.warning(f"Memory storage failed: {e}", agent=agent.name)
-
-         return result
-
-     async def on_terminate(
-         self,
-         agent: Any,
-         inputs: dict[str, Any],
-         result: dict[str, Any],
-         context: FlockContext | None = None,
-     ) -> None:
-         """Save memory store if configured."""
-         if self.config.save_after_update and self.memory_store:
-             self.save_memory()
-
-     async def _extract_concepts(
-         self, agent: FlockAgent, text: str, number_of_concepts: int = 3
-     ) -> set[str]:
-         """Extract concepts using the agent's LLM capabilities."""
-         existing_concepts = set()
-         if self.memory_store and self.memory_store.concept_graph:
-             existing_concepts = set(
-                 self.memory_store.concept_graph.graph.nodes()
-             )
-
-         input_signature = "text: str | Text to analyze"
-         if existing_concepts:
-             input_signature += ", existing_concepts: list[str] | Already known concepts that might apply"
-
-         concept_signature = agent.create_dspy_signature_class(
-             f"{agent.name}_concept_extractor",
-             "Extract key concepts from text",
-             f"{input_signature} -> concepts: list[str] | Max {number_of_concepts} key concepts all lower case",
-         )
-
-         agent._configure_language_model(agent.model, True, 0.0, 8192)
-         predictor = agent._select_task(concept_signature, "Completion")
-         result_obj = predictor(
-             text=text,
-             existing_concepts=list(existing_concepts)
-             if existing_concepts
-             else None,
-         )
-         concept_list = getattr(result_obj, "concepts", [])
-         return set(concept_list)
-
-     async def _summarize_mode(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         result: dict[str, Any],
-     ) -> str:
-         """Extract information chunks using summary mode."""
-         split_signature = agent.create_dspy_signature_class(
-             f"{agent.name}_splitter",
-             "Extract a list of potentially needed data and information for future reference",
-             """
-             content: str | The content to split
-             -> chunks: list[str] | List of data and information for future reference
-             """,
-         )
-         agent._configure_language_model(agent.model, True, 0.0, 8192)
-         splitter = agent._select_task(split_signature, "Completion")
-         full_text = json.dumps(inputs) + json.dumps(result)
-         split_result = splitter(content=full_text)
-         return "\n".join(split_result.chunks)
-
-     async def _semantic_splitter_mode(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         result: dict[str, Any],
-     ) -> list[str]:
-         """Extract information chunks using semantic mode."""
-         split_signature = agent.create_dspy_signature_class(
-             f"{self.name}_splitter",
-             "Split content into meaningful, self-contained chunks",
-             """
-             content: str | The content to split
-             -> chunks: list[dict[str,str]] | List of chunks as key-value pairs - keys are a short title and values are the chunk content
-             """,
-         )
-         agent._configure_language_model(agent.model, True, 0.0, 8192)
-         splitter = agent._select_task(split_signature, "Completion")
-         full_text = json.dumps(inputs) + (json.dumps(result) if result else "")
-         split_result = splitter(content=full_text)
-         # Flatten list[dict] into list[str] of "title: content" strings to
-         # keep downstream storage logic simple and type-safe.
-         flattened: list[str] = []
-         for chunk in split_result.chunks:
-             if isinstance(chunk, dict):
-                 flattened.extend([f"{k}: {v}" for k, v in chunk.items()])
-             else:
-                 flattened.append(str(chunk))
-         return flattened
-
-     async def _character_splitter_mode(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         result: dict[str, Any],
-     ) -> list[str]:
-         """Extract information chunks by splitting text into fixed character lengths."""
-         full_text = json.dumps(inputs) + (json.dumps(result) if result else "")
-         return [
-             full_text[i : i + self.config.max_length]
-             for i in range(0, len(full_text), self.config.max_length)
-         ]
-
-     async def _get_chunks(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         result: dict[str, Any] | None,
-     ) -> str | list[str]:
-         """Get memory chunks based on the configured splitting mode."""
-         mode = self.config.splitting_mode
-         if mode == "semantic":
-             return await self._semantic_splitter_mode(agent, inputs, result)
-         elif mode == "summary":
-             return await self._summarize_mode(agent, inputs, result)
-         elif mode == "characters":
-             return await self._character_splitter_mode(agent, inputs, result)
-         elif mode == "none":
-             return (
-                 json.dumps(inputs) + json.dumps(result)
-                 if result
-                 else json.dumps(inputs)
-             )
-         else:
-             raise ValueError(f"Unknown splitting mode: {mode}")
-
-     async def _store_chunk(self, agent: FlockAgent, chunk: str) -> None:
-         """Store a single chunk in memory."""
-         chunk_concepts = await self._extract_concepts(
-             agent, chunk, self.config.number_of_concepts_to_extract
-         )
-         entry = MemoryEntry(
-             id=str(uuid.uuid4()),
-             content=chunk,
-             embedding=self.memory_store.compute_embedding(chunk).tolist(),
-             concepts=chunk_concepts,
-             timestamp=datetime.now(),
-         )
-         self.memory_store.add_entry(entry)
-         if self.config.save_after_update:
-             self.save_memory()
-         logger.debug(
-             "Stored interaction in memory",
-             agent=agent.name,
-             entry_id=entry.id,
-             concepts=chunk_concepts,
-         )
-
-     async def _store_chunks(
-         self, agent: FlockAgent, chunks: str | list[str]
-     ) -> None:
-         """Store chunks (single or multiple) in memory."""
-         if isinstance(chunks, str):
-             await self._store_chunk(agent, chunks)
-         elif isinstance(chunks, list):
-             # Avoid tqdm in async context – simple for-loop is safer.
-             for chunk in chunks:
-                 await self._store_chunk(agent, chunk)
-
-     def save_memory(self) -> None:
-         """Save memory store to file."""
-         if self.memory_store and self.config.file_path:
-             json_str = self.memory_store.model_dump_json()
-             filename = self.get_memory_filename(self.name)
-             with open(filename, "w") as file:
-                 file.write(json_str)
-             self.memory_store.concept_graph.save_as_image(
-                 self.get_concept_graph_filename(self.name)
-             )
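
Similarly, the removed MemoryModule wired a local FlockMemoryStore into the agent lifecycle: on_pre_evaluate runs the parsed memory operations (semantic and/or exact) and attaches matches as a context input, on_post_evaluate splits the inputs and result according to splitting_mode, extracts concepts with the agent's LLM, and stores the chunks, while on_terminate persists the store. A configuration sketch limited to fields and constructors shown above (attaching the module to an agent is outside this diff):

from flock.modules.memory.memory_module import MemoryModule, MemoryModuleConfig

memory_config = MemoryModuleConfig(
    folder_path=".flock/memory/",
    file_path="agent_memory.json",
    similarity_threshold=0.5,
    splitting_mode="semantic",            # one of "summary", "semantic", "characters", "none"
    number_of_concepts_to_extract=3,
    save_after_update=True,
    memory_mapping="query -> memory.semantic(threshold=0.7)",
)
memory_module = MemoryModule(name="memory", config=memory_config)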
flock/modules/memory/memory_parser.py
@@ -1,125 +0,0 @@
- """Parser for memory mapping declarations into executable operations."""
-
- import re
- from typing import Any
-
- from flock.modules.memory.memory_storage import (
-     CombineOperation,
-     EnrichOperation,
-     ExactOperation,
-     FilterOperation,
-     MemoryOperation,
-     MemoryScope,
-     SemanticOperation,
-     SortOperation,
- )
-
-
- class MemoryMappingParser:
-     """Parses memory mapping declarations into executable operations."""
-
-     def parse(self, mapping: str) -> list[MemoryOperation]:
-         """Parse a memory mapping string into operations.
-
-         Example mappings:
-             "topic -> memory.semantic(threshold=0.9) | memory.exact -> output"
-             "query -> memory.semantic(scope='global') | memory.filter(recency='7d') | memory.sort(by='relevance')"
-         """
-         operations = []
-         stages = [s.strip() for s in mapping.split("|")]
-
-         for stage in stages:
-             if "->" not in stage:
-                 continue
-
-             inputs, op_spec = stage.split("->")
-             inputs = [i.strip() for i in inputs.split(",")]
-
-             if "memory." in op_spec:
-                 # Extract operation name and parameters
-                 match = re.match(r"memory\.(\w+)(?:\((.*)\))?", op_spec.strip())
-                 if not match:
-                     continue
-
-                 op_name, params_str = match.groups()
-                 params = self._parse_params(params_str or "")
-
-                 # Create appropriate operation object
-                 if op_name == "semantic":
-                     operation = SemanticOperation(
-                         threshold=params.get("threshold", 0.8),
-                         scope=params.get("scope", MemoryScope.BOTH),
-                         max_results=params.get("max_results", 10),
-                     )
-                 elif op_name == "exact":
-                     operation = ExactOperation(
-                         keys=inputs, scope=params.get("scope", MemoryScope.BOTH)
-                     )
-                 elif op_name == "enrich":
-                     operation = EnrichOperation(
-                         tools=params.get("tools", []),
-                         strategy=params.get("strategy", "comprehensive"),
-                         scope=params.get("scope", MemoryScope.BOTH),
-                     )
-                 elif op_name == "filter":
-                     operation = FilterOperation(
-                         recency=params.get("recency"),
-                         relevance=params.get("relevance"),
-                         metadata=params.get("metadata", {}),
-                     )
-                 elif op_name == "sort":
-                     operation = SortOperation(
-                         by=params.get("by", "relevance"),
-                         ascending=params.get("ascending", False),
-                     )
-                 elif op_name == "combine":
-                     operation = CombineOperation(
-                         weights=params.get(
-                             "weights", {"semantic": 0.7, "exact": 0.3}
-                         )
-                     )
-
-                 operations.append(operation)
-
-         return operations
-
-     def _parse_params(self, params_str: str) -> dict[str, Any]:
-         """Parse parameters string into a dictionary.
-
-         Handles:
-         - Quoted strings: threshold='high'
-         - Numbers: threshold=0.9
-         - Lists: tools=['web_search', 'extract_numbers']
-         - Dictionaries: weights={'semantic': 0.7, 'exact': 0.3}
-         """
-         if not params_str:
-             return {}
-
-         params = {}
-         # Split on commas not inside brackets or quotes
-         param_pairs = re.findall(
-             r"""
-             (?:[^,"]|"[^"]*"|'[^']*')+ # Match everything except comma, or quoted strings
-             """,
-             params_str,
-             re.VERBOSE,
-         )
-
-         for pair in param_pairs:
-             if "=" not in pair:
-                 continue
-             key, value = pair.split("=", 1)
-             key = key.strip()
-             value = value.strip()
-
-             # Try to evaluate the value (for lists, dicts, numbers)
-             try:
-                 # Safely evaluate the value
-                 value = eval(value, {"__builtins__": {}}, {})
-             except:
-                 # If evaluation fails, treat as string
-                 value = value.strip("'\"")
-
-             params[key] = value
-
-         return params
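
Finally, MemoryMappingParser is what turned the memory_mapping string from MemoryModuleConfig into operation objects: the mapping is split into stages on "|", and each stage that contains both "->" and a "memory.<op>(...)" spec is matched by regex and converted into one of the operation classes, with parameters parsed from the argument list via a restricted eval. A small sketch against this removed API, based on the parse() logic and the docstring examples above:

from flock.modules.memory.memory_parser import MemoryMappingParser

parser = MemoryMappingParser()
ops = parser.parse("query -> memory.semantic(threshold=0.9, max_results=5)")
# Per the branches above, this yields a single SemanticOperation with
# threshold=0.9, max_results=5, and the default scope MemoryScope.BOTH.
# Stages without an explicit "-> memory.<op>" part are skipped by parse().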