flock-core 0.4.520__py3-none-any.whl → 0.5.0b1__py3-none-any.whl

This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of flock-core might be problematic.

Files changed (103)
  1. flock/cli/manage_agents.py +3 -3
  2. flock/components/__init__.py +28 -0
  3. flock/components/evaluation/__init__.py +9 -0
  4. flock/components/evaluation/declarative_evaluation_component.py +198 -0
  5. flock/components/routing/__init__.py +15 -0
  6. flock/{routers/conditional/conditional_router.py → components/routing/conditional_routing_component.py} +60 -49
  7. flock/components/routing/default_routing_component.py +103 -0
  8. flock/components/routing/llm_routing_component.py +208 -0
  9. flock/components/utility/__init__.py +15 -0
  10. flock/{modules/enterprise_memory/enterprise_memory_module.py → components/utility/memory_utility_component.py} +195 -173
  11. flock/{modules/performance/metrics_module.py → components/utility/metrics_utility_component.py} +101 -86
  12. flock/{modules/output/output_module.py → components/utility/output_utility_component.py} +49 -49
  13. flock/core/__init__.py +2 -8
  14. flock/core/agent/__init__.py +16 -0
  15. flock/core/agent/flock_agent_components.py +104 -0
  16. flock/core/agent/flock_agent_execution.py +101 -0
  17. flock/core/agent/flock_agent_integration.py +147 -0
  18. flock/core/agent/flock_agent_lifecycle.py +177 -0
  19. flock/core/agent/flock_agent_serialization.py +378 -0
  20. flock/core/component/__init__.py +15 -0
  21. flock/core/{flock_module.py → component/agent_component_base.py} +136 -35
  22. flock/core/component/evaluation_component_base.py +56 -0
  23. flock/core/component/routing_component_base.py +75 -0
  24. flock/core/component/utility_component_base.py +69 -0
  25. flock/core/config/flock_agent_config.py +49 -2
  26. flock/core/evaluation/utils.py +1 -1
  27. flock/core/execution/evaluation_executor.py +1 -1
  28. flock/core/flock.py +137 -483
  29. flock/core/flock_agent.py +151 -1018
  30. flock/core/flock_factory.py +94 -73
  31. flock/core/{flock_registry.py → flock_registry.py.backup} +3 -17
  32. flock/core/logging/logging.py +1 -0
  33. flock/core/mcp/flock_mcp_server.py +42 -37
  34. flock/core/mixin/dspy_integration.py +5 -5
  35. flock/core/orchestration/__init__.py +18 -0
  36. flock/core/orchestration/flock_batch_processor.py +94 -0
  37. flock/core/orchestration/flock_evaluator.py +113 -0
  38. flock/core/orchestration/flock_execution.py +288 -0
  39. flock/core/orchestration/flock_initialization.py +125 -0
  40. flock/core/orchestration/flock_server_manager.py +65 -0
  41. flock/core/orchestration/flock_web_server.py +117 -0
  42. flock/core/registry/__init__.py +39 -0
  43. flock/core/registry/agent_registry.py +69 -0
  44. flock/core/registry/callable_registry.py +139 -0
  45. flock/core/registry/component_discovery.py +142 -0
  46. flock/core/registry/component_registry.py +64 -0
  47. flock/core/registry/config_mapping.py +64 -0
  48. flock/core/registry/decorators.py +137 -0
  49. flock/core/registry/registry_hub.py +202 -0
  50. flock/core/registry/server_registry.py +57 -0
  51. flock/core/registry/type_registry.py +86 -0
  52. flock/core/serialization/flock_serializer.py +33 -30
  53. flock/core/serialization/serialization_utils.py +28 -25
  54. flock/core/util/input_resolver.py +29 -2
  55. flock/platform/docker_tools.py +3 -3
  56. flock/tools/markdown_tools.py +1 -2
  57. flock/tools/text_tools.py +1 -2
  58. flock/webapp/app/main.py +9 -5
  59. flock/workflow/activities.py +59 -84
  60. flock/workflow/activities_unified.py +230 -0
  61. flock/workflow/agent_execution_activity.py +6 -6
  62. flock/workflow/flock_workflow.py +1 -1
  63. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/METADATA +2 -2
  64. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/RECORD +67 -68
  65. flock/core/flock_evaluator.py +0 -60
  66. flock/core/flock_router.py +0 -83
  67. flock/evaluators/__init__.py +0 -1
  68. flock/evaluators/declarative/__init__.py +0 -1
  69. flock/evaluators/declarative/declarative_evaluator.py +0 -194
  70. flock/evaluators/memory/memory_evaluator.py +0 -90
  71. flock/evaluators/test/test_case_evaluator.py +0 -38
  72. flock/evaluators/zep/zep_evaluator.py +0 -59
  73. flock/modules/__init__.py +0 -1
  74. flock/modules/assertion/__init__.py +0 -1
  75. flock/modules/assertion/assertion_module.py +0 -286
  76. flock/modules/callback/__init__.py +0 -1
  77. flock/modules/callback/callback_module.py +0 -91
  78. flock/modules/enterprise_memory/README.md +0 -99
  79. flock/modules/mem0/__init__.py +0 -1
  80. flock/modules/mem0/mem0_module.py +0 -126
  81. flock/modules/mem0_async/__init__.py +0 -1
  82. flock/modules/mem0_async/async_mem0_module.py +0 -126
  83. flock/modules/memory/__init__.py +0 -1
  84. flock/modules/memory/memory_module.py +0 -429
  85. flock/modules/memory/memory_parser.py +0 -125
  86. flock/modules/memory/memory_storage.py +0 -736
  87. flock/modules/output/__init__.py +0 -1
  88. flock/modules/performance/__init__.py +0 -1
  89. flock/modules/zep/__init__.py +0 -1
  90. flock/modules/zep/zep_module.py +0 -192
  91. flock/routers/__init__.py +0 -1
  92. flock/routers/agent/__init__.py +0 -1
  93. flock/routers/agent/agent_router.py +0 -236
  94. flock/routers/agent/handoff_agent.py +0 -58
  95. flock/routers/default/__init__.py +0 -1
  96. flock/routers/default/default_router.py +0 -80
  97. flock/routers/feedback/feedback_router.py +0 -114
  98. flock/routers/list_generator/list_generator_router.py +0 -166
  99. flock/routers/llm/__init__.py +0 -1
  100. flock/routers/llm/llm_router.py +0 -365
  101. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/WHEEL +0 -0
  102. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/entry_points.txt +0 -0
  103. {flock_core-0.4.520.dist-info → flock_core-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
flock/core/flock_router.py DELETED
@@ -1,83 +0,0 @@
- """Base router class for the Flock framework."""
-
- from abc import ABC, abstractmethod
- from typing import Any, Literal
-
- from pydantic import BaseModel, Field
-
- from flock.core.context.context import FlockContext
-
-
- class HandOffRequest(BaseModel):
-     """Base class for handoff returns."""
-
-     next_agent: str = Field(default="", description="Next agent to invoke")
-     # match = use the output fields of the current agent that also exists as input field of the next agent
-     # add = add the output of the current agent to the input of the next agent
-     output_to_input_merge_strategy: Literal["match", "add"] = Field(
-         default="match"
-     )
-     add_input_fields: list[str] | None = Field(
-         default=None,
-         description="List of input fields to add to the next agent",
-     )
-     add_output_fields: list[str] | None = Field(
-         default=None,
-         description="List of output fields to add to the next agent",
-     )
-     add_description: str | None = Field(
-         default=None, description="Add this description to the next agent"
-     )
-     override_next_agent: Any | None = Field(
-         default=None,
-         description="Override the next agent to hand off to",
-     )
-     override_context: FlockContext | None = Field(
-         default=None, description="Override context parameters"
-     )
-
-
- class FlockRouterConfig(BaseModel):
-     """Configuration for a router.
-
-     This class defines the configuration parameters for a router.
-     Subclasses can extend this to add additional parameters.
-     """
-
-     enabled: bool = Field(
-         default=True, description="Whether the router is enabled"
-     )
-     # agents: list[str] | None = Field(
-     #     default=None,
-     #     description="List of agents to choose from",
-     # )
-
-
- class FlockRouter(BaseModel, ABC):
-     """Base class for all routers.
-
-     A router is responsible for determining the next agent in a workflow
-     based on the current agent's output.
-     """
-
-     name: str = Field(..., description="Name of the router")
-     config: FlockRouterConfig = Field(default_factory=FlockRouterConfig)
-
-     @abstractmethod
-     async def route(
-         self,
-         current_agent: Any,
-         result: dict[str, Any],
-         context: FlockContext,
-     ) -> HandOffRequest:
-         """Determine the next agent to hand off to based on the current agent's output.
-
-         Args:
-             current_agent: The agent that just completed execution
-             result: The output from the current agent
-             context: The global execution context
-
-         Returns:
-             A HandOff object containing the next agent and input data
-         """
-         pass
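For orientation, a minimal sketch of how the removed FlockRouter base class above was typically subclassed before 0.5.0b1. Only FlockRouter, HandOffRequest, FlockContext, the import paths, and the route() signature are taken from the deleted file; the PriorityRouter class, its escalation logic, and the "needs_review"/"reviewer_agent" names are hypothetical.

from typing import Any

from flock.core.context.context import FlockContext
from flock.core.flock_router import FlockRouter, HandOffRequest


class PriorityRouter(FlockRouter):
    """Hypothetical router: hands off to a reviewer agent when the result asks for it."""

    async def route(
        self,
        current_agent: Any,
        result: dict[str, Any],
        context: FlockContext,
    ) -> HandOffRequest:
        # "needs_review" is an invented output field, used purely for illustration.
        if result.get("needs_review"):
            return HandOffRequest(
                next_agent="reviewer_agent",
                output_to_input_merge_strategy="match",
            )
        # next_agent="" is the field's default; treating it as "no handoff" is an assumption here.
        return HandOffRequest(next_agent="")

In 0.5.0b1 this responsibility appears to move to the routing components under flock/components/routing/ (see the files-changed list above).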
@@ -1 +0,0 @@
- # Package for modules
@@ -1 +0,0 @@
- # Package for modules
flock/evaluators/declarative/declarative_evaluator.py DELETED
@@ -1,194 +0,0 @@
- from collections.abc import Generator
- from typing import Any
-
- from temporalio import workflow
-
- with workflow.unsafe.imports_passed_through():
-     import dspy
-
- from pydantic import Field, PrivateAttr
-
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
- from flock.core.flock_registry import flock_component
- from flock.core.logging.logging import get_logger
- from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
- from flock.core.mixin.prompt_parser import PromptParserMixin
-
- logger = get_logger("evaluators.declarative")
-
-
- class DeclarativeEvaluatorConfig(FlockEvaluatorConfig):
-     """Configuration for the DeclarativeEvaluator."""
-
-     override_evaluator_type: str | None = None
-     model: str | None = "openai/gpt-4o"
-     use_cache: bool = True
-     temperature: float = 0.0
-     max_tokens: int = 4096
-     max_retries: int = 3
-     max_tool_calls: int = 10
-     stream: bool = Field(
-         default=False,
-         description="Enable streaming output from the underlying DSPy program.",
-     )
-     include_thought_process: bool = Field(
-         default=False,
-         description="Include the thought process in the output.",
-     )
-     kwargs: dict[str, Any] = Field(default_factory=dict)
-
-
- @flock_component(config_class=DeclarativeEvaluatorConfig)
- class DeclarativeEvaluator(
-     FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin
- ):
-     """Evaluator that uses DSPy for generation."""
-
-     config: DeclarativeEvaluatorConfig = Field(
-         default_factory=DeclarativeEvaluatorConfig,
-         description="Evaluator configuration",
-     )
-
-     _cost: float = PrivateAttr(default=0.0)
-     _lm_history: list = PrivateAttr(default_factory=list)
-
-     # def __init__(self, name: str, config: DeclarativeEvaluatorConfig) -> None:
-     #     super().__init__(name=name, config=config)
-     #     self._configure_language_model(
-     #         model=config.model,
-     #         use_cache=config.use_cache,
-     #         temperature=config.temperature,
-     #         max_tokens=config.max_tokens,
-     #     )
-
-     async def evaluate(
-         self,
-         agent: FlockAgent,
-         inputs: dict[str, Any],
-         tools: list[Any],
-         mcp_tools: list[Any] | None = None,
-     ) -> dict[str, Any]:
-         """Evaluate using DSPy, with optional asynchronous streaming."""
-         # --- Setup Signature and LM ---
-
-         with dspy.context(
-             lm=dspy.LM(
-                 model=self.config.model or agent.model,
-                 cache=self.config.use_cache,
-                 temperature=self.config.temperature,
-                 max_tokens=self.config.max_tokens,
-                 num_retries=self.config.max_retries,
-             )
-         ):
-             try:
-                 from rich.console import Console
-
-                 console = Console()
-                 _dspy_signature = self.create_dspy_signature_class(
-                     agent.name,
-                     agent.description,
-                     f"{agent.input} -> {agent.output}",
-                 )
-                 # --- Get output field names ---
-                 # dspy.Signature holds fields in .output_fields attribute
-                 output_field_names = list(_dspy_signature.output_fields.keys())
-                 if not output_field_names:
-                     logger.warning(
-                         f"DSPy signature for agent '{agent.name}' has no defined output fields. Streaming might not produce text."
-                     )
-                 # -----------------------------
-
-                 agent_task = self._select_task(
-                     _dspy_signature,
-                     override_evaluator_type=self.config.override_evaluator_type,
-                     tools=tools,
-                     max_tool_calls=self.config.max_tool_calls,
-                     mcp_tools=mcp_tools,
-                     kwargs=self.config.kwargs,
-                 )
-             except Exception as setup_error:
-                 logger.error(
-                     f"Error setting up DSPy task for agent '{agent.name}': {setup_error}",
-                     exc_info=True,
-                 )
-                 raise RuntimeError(
-                     f"DSPy task setup failed: {setup_error}"
-                 ) from setup_error
-
-             # --- Conditional Evaluation (Stream vs No Stream) ---
-             if self.config.stream:
-                 logger.info(
-                     f"Evaluating agent '{agent.name}' with async streaming."
-                 )
-                 if not callable(agent_task):
-                     logger.error("agent_task is not callable, cannot stream.")
-                     raise TypeError(
-                         "DSPy task could not be created or is not callable."
-                     )
-
-                 streaming_task = dspy.streamify(
-                     agent_task, is_async_program=True
-                 )
-                 stream_generator: Generator = streaming_task(**inputs)
-                 delta_content = ""
-
-                 console.print("\n")
-                 async for chunk in stream_generator:
-                     if (
-                         hasattr(chunk, "choices")
-                         and chunk.choices
-                         and hasattr(chunk.choices[0], "delta")
-                         and chunk.choices[0].delta
-                         and hasattr(chunk.choices[0].delta, "content")
-                     ):
-                         delta_content = chunk.choices[0].delta.content
-
-                     if delta_content:
-                         console.print(delta_content, end="")
-
-                 result_dict, cost, lm_history = self._process_result(
-                     chunk, inputs
-                 )
-                 self._cost = cost
-                 self._lm_history = lm_history
-
-                 console.print("\n")
-                 return self.filter_thought_process(
-                     result_dict, self.config.include_thought_process
-                 )
-
-             else:  # Non-streaming path
-                 logger.info(
-                     f"Evaluating agent '{agent.name}' without streaming."
-                 )
-                 try:
-                     # Ensure the call is awaited if the underlying task is async
-                     result_obj = await agent_task.acall(**inputs)
-                     result_dict, cost, lm_history = self._process_result(
-                         result_obj, inputs
-                     )
-                     self._cost = cost
-                     self._lm_history = lm_history
-                     return self.filter_thought_process(
-                         result_dict, self.config.include_thought_process
-                     )
-                 except Exception as e:
-                     logger.error(
-                         f"Error during non-streaming evaluation for agent '{agent.name}': {e}",
-                         exc_info=True,
-                     )
-                     raise RuntimeError(f"Evaluation failed: {e}") from e
-
-     def filter_thought_process(
-         self, result_dict: dict[str, Any], include_thought_process: bool
-     ) -> dict[str, Any]:
-         """Filter out thought process from the result dictionary."""
-         if include_thought_process:
-             return result_dict
-         else:
-             return {
-                 k: v
-                 for k, v in result_dict.items()
-                 if not (k.startswith("reasoning") or k.startswith("trajectory"))
-             }
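A hedged sketch of how the removed DeclarativeEvaluator was instantiated and configured. The class name, config fields, import path, and evaluate() signature come from the deleted file above; the field values, the "my_agent" variable, and the wiring into an agent are placeholders.

from flock.evaluators.declarative.declarative_evaluator import (
    DeclarativeEvaluator,
    DeclarativeEvaluatorConfig,
)

# Placeholder values; streaming and thought-process filtering are the two
# behaviours the config above exposes.
evaluator = DeclarativeEvaluator(
    name="declarative_eval",
    config=DeclarativeEvaluatorConfig(
        model="openai/gpt-4o",
        stream=False,                   # True prints streamed deltas via rich, as shown above
        include_thought_process=False,  # drops "reasoning*"/"trajectory*" keys from the result
    ),
)

# evaluate() is async and expects the agent plus its input dict and tool list, e.g.:
# result = await evaluator.evaluate(agent=my_agent, inputs={"query": "..."}, tools=[])

In 0.5.0b1 this evaluator appears to be replaced by flock/components/evaluation/declarative_evaluation_component.py (see the files-changed list above).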
flock/evaluators/memory/memory_evaluator.py DELETED
@@ -1,90 +0,0 @@
- from typing import Any, Literal
-
- from pydantic import Field
-
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
- from flock.core.flock_registry import flock_component
- from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
- from flock.core.mixin.prompt_parser import PromptParserMixin
- from flock.modules.memory.memory_module import MemoryModule, MemoryModuleConfig
-
-
- class MemoryEvaluatorConfig(FlockEvaluatorConfig):
-     folder_path: str = Field(
-         default="concept_memory/",
-         description="Directory where memory file and concept graph will be saved",
-     )
-     concept_graph_file: str = Field(
-         default="concept_graph.png",
-         description="Base filename for the concept graph image",
-     )
-
-     file_path: str | None = Field(
-         default="agent_memory.json", description="Path to save memory file"
-     )
-     memory_mapping: str | None = Field(
-         default=None, description="Memory mapping configuration"
-     )
-     similarity_threshold: float = Field(
-         default=0.5, description="Threshold for semantic similarity"
-     )
-     max_length: int = Field(
-         default=1000, description="Max length of memory entry before splitting"
-     )
-     save_after_update: bool = Field(
-         default=True, description="Whether to save memory after each update"
-     )
-     splitting_mode: Literal["summary", "semantic", "characters", "none"] = (
-         Field(default="none", description="Mode to split memory content")
-     )
-     enable_read_only_mode: bool = Field(
-         default=False, description="Whether to enable read only mode"
-     )
-     number_of_concepts_to_extract: int = Field(
-         default=3, description="Number of concepts to extract from the memory"
-     )
-
-
- @flock_component(config_class=MemoryEvaluatorConfig)
- class MemoryEvaluator(FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin):
-     """Evaluator that uses DSPy for generation."""
-
-     config: MemoryEvaluatorConfig = Field(
-         default_factory=MemoryEvaluatorConfig,
-         description="Evaluator configuration",
-     )
-
-     async def evaluate(
-         self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
-     ) -> dict[str, Any]:
-         """Simple evaluator that uses a memory concept graph.
-
-         if inputs contain "query", it searches memory for the query and returns the facts.
-         if inputs contain "data", it adds the data to memory
-         """
-         result = {}
-         memory_module = MemoryModule(
-             name=self.name,
-             config=MemoryModuleConfig(
-                 folder_path=self.config.folder_path,
-                 concept_graph_file=self.config.concept_graph_file,
-                 file_path=self.config.file_path,
-                 memory_mapping=self.config.memory_mapping,
-                 similarity_threshold=self.config.similarity_threshold,
-                 max_length=self.config.max_length,
-                 save_after_update=self.config.save_after_update,
-                 splitting_mode=self.config.splitting_mode,
-                 enable_read_only_mode=self.config.enable_read_only_mode,
-                 number_of_concepts_to_extract=self.config.number_of_concepts_to_extract,
-             ),
-         )
-
-         if "query" in inputs:
-             facts = await memory_module.search_memory(agent, inputs)
-             result = {"facts": facts}
-
-         if "data" in inputs:
-             await memory_module.add_to_memory(agent, inputs)
-             result = {"message": "Data added to memory"}
-         return result
flock/evaluators/test/test_case_evaluator.py DELETED
@@ -1,38 +0,0 @@
- from typing import Any
-
- from pydantic import Field
-
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
- from flock.core.flock_registry import flock_component
- from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
-
-
- class TestCaseEvaluatorConfig(FlockEvaluatorConfig):
-     """Configuration for the TestCaseEvaluator."""
-
-     pass
-
-
- @flock_component(config_class=TestCaseEvaluatorConfig)
- class TestCaseEvaluator(FlockEvaluator, DSPyIntegrationMixin):
-     """Evaluator for test cases."""
-
-     config: TestCaseEvaluatorConfig = Field(
-         default_factory=TestCaseEvaluatorConfig,
-         description="Evaluator configuration",
-     )
-
-     async def evaluate(
-         self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
-     ) -> dict[str, Any]:
-         _dspy_signature = self.create_dspy_signature_class(
-             agent.name,
-             agent.description,
-             f"{agent.input} -> {agent.output}",
-         )
-         output_field_names = list(_dspy_signature.output_fields.keys())
-         result = {}
-         for output_field_name in output_field_names:
-             result[output_field_name] = "Test Result"
-         return result
flock/evaluators/zep/zep_evaluator.py DELETED
@@ -1,59 +0,0 @@
- from typing import Any
-
- from pydantic import Field
-
- from flock.core.flock_agent import FlockAgent
- from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
- from flock.core.flock_registry import flock_component
- from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
- from flock.core.mixin.prompt_parser import PromptParserMixin
- from flock.modules.zep.zep_module import ZepModule, ZepModuleConfig
-
-
- class ZepEvaluatorConfig(FlockEvaluatorConfig):
-     zep_url: str = "http://localhost:8000"
-     zep_api_key: str = "apikey"
-     min_fact_rating: float = Field(
-         default=0.7, description="Minimum rating for facts to be considered"
-     )
-
-
- @flock_component(config_class=ZepEvaluatorConfig)
- class ZepEvaluator(FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin):
-     """Evaluator that uses DSPy for generation."""
-
-     config: ZepEvaluatorConfig = Field(
-         default_factory=ZepEvaluatorConfig,
-         description="Evaluator configuration",
-     )
-
-     async def evaluate(
-         self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
-     ) -> dict[str, Any]:
-         """Simple evaluator that uses Zep.
-
-         if inputs contain "query", it searches memory for the query and returns the facts.
-         if inputs contain "data", it adds the data to memory
-         """
-         result = {}
-         zep = ZepModule(
-             name=self.name,
-             config=ZepModuleConfig(
-                 zep_api_key=self.config.zep_api_key,
-                 zep_url=self.config.zep_url,
-                 min_fact_rating=self.config.min_fact_rating,
-                 enable_read=True,
-                 enable_write=True,
-             ),
-         )
-         client = zep.get_client()
-         if "query" in inputs:
-             query = inputs["query"]
-             facts = zep.search_memory(query, client)
-             result = {"facts": facts}
-
-         if "data" in inputs:
-             data = inputs["data"]
-             zep.add_to_memory(data, client)
-             result = {"message": "Data added to memory"}
-         return result
flock/modules/__init__.py DELETED
@@ -1 +0,0 @@
- # Package for modules
@@ -1 +0,0 @@
- # Package for modules