flock-core 0.1.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic.

Files changed (48)
  1. flock/__init__.py +4 -0
  2. flock/agents/__init__.py +3 -0
  3. flock/agents/batch_agent.py +175 -0
  4. flock/agents/declarative_agent.py +166 -0
  5. flock/agents/loop_agent.py +178 -0
  6. flock/agents/trigger_agent.py +191 -0
  7. flock/agents/user_agent.py +230 -0
  8. flock/app/components/__init__.py +14 -0
  9. flock/app/components/charts/agent_workflow.py +14 -0
  10. flock/app/components/charts/core_architecture.py +14 -0
  11. flock/app/components/charts/tool_system.py +14 -0
  12. flock/app/components/history_grid.py +168 -0
  13. flock/app/components/history_grid_alt.py +189 -0
  14. flock/app/components/sidebar.py +19 -0
  15. flock/app/components/theme.py +9 -0
  16. flock/app/components/util.py +18 -0
  17. flock/app/hive_app.py +118 -0
  18. flock/app/html/d3.html +179 -0
  19. flock/app/modules/__init__.py +12 -0
  20. flock/app/modules/about.py +17 -0
  21. flock/app/modules/agent_detail.py +70 -0
  22. flock/app/modules/agent_list.py +59 -0
  23. flock/app/modules/playground.py +322 -0
  24. flock/app/modules/settings.py +96 -0
  25. flock/core/__init__.py +7 -0
  26. flock/core/agent.py +150 -0
  27. flock/core/agent_registry.py +162 -0
  28. flock/core/config/declarative_agent_config.py +0 -0
  29. flock/core/context.py +279 -0
  30. flock/core/context_vars.py +6 -0
  31. flock/core/flock.py +208 -0
  32. flock/core/handoff/handoff_base.py +12 -0
  33. flock/core/logging/__init__.py +18 -0
  34. flock/core/logging/error_handler.py +84 -0
  35. flock/core/logging/formatters.py +122 -0
  36. flock/core/logging/handlers.py +117 -0
  37. flock/core/logging/logger.py +107 -0
  38. flock/core/serializable.py +206 -0
  39. flock/core/tools/basic_tools.py +98 -0
  40. flock/workflow/activities.py +115 -0
  41. flock/workflow/agent_activities.py +26 -0
  42. flock/workflow/temporal_setup.py +37 -0
  43. flock/workflow/workflow.py +53 -0
  44. flock_core-0.1.1.dist-info/METADATA +449 -0
  45. flock_core-0.1.1.dist-info/RECORD +48 -0
  46. flock_core-0.1.1.dist-info/WHEEL +4 -0
  47. flock_core-0.1.1.dist-info/entry_points.txt +2 -0
  48. flock_core-0.1.1.dist-info/licenses/LICENSE +21 -0
flock/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # Initialize error handling through our custom handler
+ from flock.core.logging import error_handler
+
+ error_handler.install()
flock/agents/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from flock.agents.declarative_agent import DeclarativeAgent
+
+ __all__ = ["DeclarativeAgent"]
flock/agents/batch_agent.py ADDED
@@ -0,0 +1,175 @@
+ import asyncio
+ import uuid
+ from typing import Any
+
+ from pydantic import Field
+
+ from flock.core.context import FlockContext
+ from flock.core.logging import flock_logger, live_update_handler, performance_handler
+
+ from .declarative_agent import DeclarativeAgent
+
+
+ class BatchAgent(DeclarativeAgent):
+     """A DeclarativeAgent that processes an iterable input in batches.
+
+     Additional Attributes:
+         iter_input: The key in the FlockContext that holds the iterable (a list).
+         batch_size: The number of items per batch.
+
+     For each batch, the agent's input dictionary is built from the FlockContext with the
+     value for the iter_input key overridden by the current batch. The outputs across batches
+     are then aggregated.
+     """
+
+     iter_input: str = Field(default="", description="Key of the iterable input (must be a list in the FlockContext)")
+     batch_size: int = Field(default=1, description="Batch size (number of items per batch)")
+
+     async def run(self, context: FlockContext) -> dict:
+         """Run the BatchAgent locally by partitioning the iterable and aggregating the results."""
+         try:
+             with performance_handler.track_time("batch_preparation"):
+                 flock_logger.info(f"Starting batch processing for agent: {self.name}")
+                 iterable = context.get_variable(self.iter_input)
+                 if not isinstance(iterable, list):
+                     error_msg = f"Expected a list for key '{self.iter_input}' in context."
+                     flock_logger.error(error_msg)
+                     return {"error": error_msg}
+
+                 # Partition the iterable into batches
+                 batches: list[list[Any]] = [
+                     iterable[i : i + self.batch_size] for i in range(0, len(iterable), self.batch_size)
+                 ]
+                 num_batches = len(batches)
+                 flock_logger.info(
+                     "Prepared batches",
+                     total_items=len(iterable),
+                     batch_size=self.batch_size,
+                     num_batches=num_batches,
+                 )
+
+             # Process batches with progress tracking
+             with live_update_handler.progress_tracker(f"Processing {num_batches} batches") as update_progress:
+                 tasks = []
+                 for i, batch in enumerate(batches):
+                     flock_logger.debug(f"Creating task for batch {i + 1}/{num_batches}", batch_size=len(batch))
+                     tasks.append(self._evaluate(context, input_overrides={self.iter_input: batch}))
+                     update_progress((i + 1) * 100 / num_batches)
+
+                 with performance_handler.track_time("batch_processing"):
+                     batch_results = await asyncio.gather(*tasks)
+                     flock_logger.success(f"Completed processing {num_batches} batches")
+
+             # Aggregate the outputs
+             with performance_handler.track_time("result_aggregation"):
+                 flock_logger.info("Aggregating batch results")
+                 output_keys = self._parse_keys(self.output)
+                 aggregated = {key: [] for key in output_keys}
+                 for i, res in enumerate(batch_results):
+                     flock_logger.debug(f"Aggregating results from batch {i + 1}/{num_batches}")
+                     for key in output_keys:
+                         aggregated[key].append(res.get(key))
+                 aggregated["batch_results"] = batch_results
+                 flock_logger.success("Successfully aggregated all batch results")
+             return aggregated
+
+         except Exception as e:
+             flock_logger.error(
+                 "Batch processing failed",
+                 error=str(e),
+                 agent=self.name,
+                 iter_input=self.iter_input,
+             )
+             raise
+
+     async def run_temporal(self, context: FlockContext) -> dict:
+         """Run the BatchAgent via Temporal.
+
+         For each batch, the agent's evaluation is performed as a separate Temporal activity.
+         The results are then aggregated.
+         """
+         try:
+             with performance_handler.track_time("temporal_setup"):
+                 flock_logger.info(f"Starting temporal batch processing for agent: {self.name}")
+
+                 from temporalio.client import Client
+
+                 from flock.workflow.agent_activities import run_declarative_agent_activity
+                 from flock.workflow.temporal_setup import run_activity
+
+                 # Connect to Temporal
+                 flock_logger.info("Connecting to Temporal service...")
+                 client = await Client.connect("localhost:7233", namespace="default")
+
+             # Validate and prepare input
+             iterable = context.get_variable(self.iter_input)
+             if not isinstance(iterable, list):
+                 error_msg = f"Expected a list for key '{self.iter_input}' in context."
+                 flock_logger.error(error_msg)
+                 return {"error": error_msg}
+
+             # Partition into batches
+             batches: list[list[Any]] = [
+                 iterable[i : i + self.batch_size] for i in range(0, len(iterable), self.batch_size)
+             ]
+             num_batches = len(batches)
+             flock_logger.info(
+                 "Prepared batches for temporal processing",
+                 total_items=len(iterable),
+                 batch_size=self.batch_size,
+                 num_batches=num_batches,
+             )
+
+             # Process batches with status updates
+             with live_update_handler.update_workflow_status(
+                 self.name, "Running", {"phase": "batch_processing", "total_batches": num_batches}
+             ):
+                 tasks = []
+                 for i, batch in enumerate(batches):
+                     flock_logger.debug(f"Creating temporal task for batch {i + 1}/{num_batches}", batch_size=len(batch))
+                     # Prepare context for this batch
+                     new_state = context.state.copy()
+                     new_state[self.iter_input] = batch
+                     context_data = {
+                         "state": new_state,
+                         "history": [],  # you might choose to pass along history if needed
+                         "agent_definitions": [],
+                     }
+                     agent_data = self.dict()
+                     task_id = f"{self.name}_{uuid.uuid4().hex[:4]}"
+
+                     # Create temporal activity task
+                     tasks.append(
+                         run_activity(
+                             client,
+                             task_id,
+                             run_declarative_agent_activity,
+                             {"agent_data": agent_data, "context_data": context_data},
+                         )
+                     )
+
+                 with performance_handler.track_time("temporal_batch_processing"):
+                     batch_results = await asyncio.gather(*tasks)
+                     flock_logger.success(f"Completed temporal processing of {num_batches} batches")
+
+             # Aggregate the outputs
+             with performance_handler.track_time("temporal_result_aggregation"):
+                 flock_logger.info("Aggregating temporal batch results")
+                 output_keys = self._parse_keys(self.output)
+                 aggregated = {key: [] for key in output_keys}
+                 for i, res in enumerate(batch_results):
+                     flock_logger.debug(f"Aggregating results from temporal batch {i + 1}/{num_batches}")
+                     for key in output_keys:
+                         aggregated[key].append(res.get(key))
+                 aggregated["batch_results"] = batch_results
+                 flock_logger.success("Successfully aggregated all temporal batch results")
+             return aggregated
+
+         except Exception as e:
+             flock_logger.error(
+                 "Temporal batch processing failed",
+                 error=str(e),
+                 agent=self.name,
+                 iter_input=self.iter_input,
+             )
+             raise
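
For orientation, here is a minimal usage sketch of BatchAgent. It is not part of the package: the name/model constructor fields are inferred from the self.name/self.model references above, the FlockContext constructor and the direct context.state write are assumptions (this diff only shows reads via get_variable), and the model id is a placeholder.

import asyncio

from flock.agents.batch_agent import BatchAgent
from flock.core.context import FlockContext

agent = BatchAgent(
    name="summarizer",            # assumed Agent field
    model="openai/gpt-4o-mini",   # placeholder model id
    input="documents",
    output="summary",
    iter_input="documents",
    batch_size=2,
)

context = FlockContext()          # assumed default constructor
context.state["documents"] = ["doc a", "doc b", "doc c"]  # assumed state write

result = asyncio.run(agent.run(context))
# result["summary"] collects one value per batch (two batches here);
# result["batch_results"] keeps the raw per-batch output dicts.
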
flock/agents/declarative_agent.py ADDED
@@ -0,0 +1,166 @@
+ from collections.abc import Callable
+ from typing import Any
+
+ import dspy  # your dspy package for LM, Predict, ReAct, etc.
+ from pydantic import Field
+
+ from flock.core.agent import Agent
+ from flock.core.context import FlockContext
+ from flock.core.logging import flock_logger, performance_handler
+
+
+ class DeclarativeAgent(Agent):
+     """An agent that evaluates declarative inputs.
+
+     Attributes:
+         input: A comma-separated list of input keys.
+             If a key is not found in the incoming FlockContext and only one input is expected,
+             then the value of "init_input" is used.
+         output: A comma-separated list of output keys (any type annotations after ":" are ignored).
+         tools: An optional list of callables (tools) that the agent is allowed to use.
+         use_cache: Whether to cache agent results.
+     """
+
+     input: str = Field(description="Comma-separated input keys (e.g., 'blog_idea' or 'url, context')")
+     output: str = Field(description="Comma-separated output keys (e.g., 'title, headers')")
+     tools: list[Callable[..., Any]] | None = Field(default=None, description="Tools the agent is allowed to use")
+     use_cache: bool = Field(default=False, description="Whether to use the cache for this agent")
+
+     def _parse_keys(self, keys_str: str) -> list[str]:
+         """Split a comma-separated string and strip any type annotations.
+         For example, "a, b: list[str]" becomes ["a", "b"].
+         """
+         keys = []
+         for part in keys_str.split(","):
+             part = part.strip()
+             if not part:
+                 continue
+             # Remove any type annotation (everything after a colon)
+             if ":" in part:
+                 key = part.split(":", 1)[0].strip()
+             else:
+                 key = part
+             keys.append(key)
+         return keys
+
+     def _build_input(self, context: FlockContext) -> dict:
+         """Build the dictionary of inputs for the agent based on its input specification.
+
+         For each key in the agent's input string:
+         - If the key is "context" (case-insensitive), pass the entire FlockContext.
+         - Otherwise, use context.get_variable() to fetch the value.
+         - If the key is not found and only one key is expected, default to context["init_input"].
+         """
+         input_keys = self._parse_keys(self.input)
+         inputs = {}
+         for key in input_keys:
+             if key.lower() == "context":
+                 inputs[key] = context
+             else:
+                 value = context.get_variable(key)
+                 if value is None and len(input_keys) == 1:
+                     value = context.get_variable("init_input")
+                 inputs[key] = value
+         return inputs
+
+     def _configure_task(self):
+         """Configure the dspy language model and choose a task constructor.
+         If tools are provided, ReAct is used; otherwise, Predict.
+         """
+         with performance_handler.track_time("model_configuration"):
+             flock_logger.debug(f"Configuring {self.model} for {'ReAct' if self.tools else 'Predict'} task")
+             lm = dspy.LM(self.model)
+             dspy.configure(lm=lm)
+             return dspy.ReAct if self.tools else dspy.Predict
+
+     async def _evaluate(self, context: FlockContext, input_overrides: dict | None = None) -> dict:
+         """Evaluate the agent by:
+         1. Building an input dictionary from the FlockContext (merging any overrides),
+         2. Building a prompt like "input1, input2 -> output1, output2",
+         3. Instantiating and executing the dspy task (Predict or ReAct),
+         4. Returning the resulting dictionary.
+         """
+         try:
+             # Build inputs
+             with performance_handler.track_time("input_preparation"):
+                 inputs = self._build_input(context)
+                 if input_overrides:
+                     inputs.update(input_overrides)
+                 input_keys = self._parse_keys(self.input)
+                 output_keys = self._parse_keys(self.output)
+                 prompt = f"{', '.join(input_keys)} -> {', '.join(output_keys)}"
+                 flock_logger.debug("Prepared inputs", inputs=inputs, prompt=prompt)
+
+             # Configure and execute task
+             with performance_handler.track_time("task_execution"):
+                 task_constructor = self._configure_task()
+                 if self.tools:
+                     flock_logger.info("Creating ReAct task with tools", num_tools=len(self.tools))
+                     agent_task = task_constructor(prompt, tools=self.tools)
+                 else:
+                     flock_logger.info("Creating Predict task")
+                     agent_task = task_constructor(prompt)
+
+                 flock_logger.info("Executing task...")
+                 result = agent_task(**inputs).toDict()
+
+             # Process result
+             for key in input_keys:
+                 result.setdefault(key, inputs.get(key))
+             flock_logger.success("Task completed successfully", output_keys=list(result.keys()))
+             return result
+
+         except Exception as e:
+             flock_logger.error(
+                 "Task execution failed",
+                 error=str(e),
+                 agent=self.name,
+                 inputs=inputs,
+                 prompt=prompt,
+             )
+             raise
+
+     async def run(self, context: FlockContext) -> dict:
+         """Run the agent on the provided FlockContext (locally)."""
+         flock_logger.info(f"Running agent locally: {self.name}")
+         return await self._evaluate(context)
+
+     async def run_temporal(self, context: FlockContext) -> dict:
+         """Run the agent via Temporal by serializing its parameters and the FlockContext and
+         calling a dedicated Temporal activity.
+         """
+         from temporalio.client import Client
+
+         from flock.workflow.agent_activities import run_declarative_agent_activity
+         from flock.workflow.temporal_setup import run_activity
+
+         try:
+             with performance_handler.track_time("temporal_setup"):
+                 # Connect to Temporal (adjust the host/namespace as needed)
+                 flock_logger.info("Connecting to Temporal...")
+                 client = await Client.connect("localhost:7233", namespace="default")
+
+             # Serialize the FlockContext and agent
+             flock_logger.debug("Serializing context and agent data")
+             context_data = {
+                 "state": context.state,
+                 "history": [record.__dict__ for record in context.history],
+                 "agent_definitions": [definition.__dict__ for definition in context.agent_definitions],
+             }
+             agent_data = self.dict()
+
+             # Run the activity
+             with performance_handler.track_time("temporal_activity"):
+                 flock_logger.info(f"Starting Temporal activity: {self.name}")
+                 result = await run_activity(
+                     client,
+                     self.name,
+                     run_declarative_agent_activity,
+                     {"agent_data": agent_data, "context_data": context_data},
+                 )
+             flock_logger.success("Temporal activity completed successfully")
+             return result
+
+         except Exception as e:
+             flock_logger.error(f"Temporal execution failed: {e}", agent=self.name)
+             raise
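
Under the hood, DeclarativeAgent reduces its input/output strings to a plain dspy signature: _parse_keys("title, headers: list[str]") yields ["title", "headers"], and _evaluate builds the string "blog_idea -> title, headers". A standalone sketch of the equivalent raw dspy calls (the model id is a placeholder and a reachable LM is assumed):

import dspy

lm = dspy.LM("openai/gpt-4o-mini")  # placeholder model id
dspy.configure(lm=lm)

# The same signature string DeclarativeAgent would build for
# input="blog_idea", output="title, headers: list[str]"
task = dspy.Predict("blog_idea -> title, headers")
result = task(blog_idea="Batching LLM calls with Flock")
print(result.toDict())  # e.g. {"title": ..., "headers": ...}
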
flock/agents/loop_agent.py ADDED
@@ -0,0 +1,178 @@
+ from collections.abc import Callable
+ from typing import Any
+
+ from pydantic import Field
+
+ from flock.core.agent import Agent
+ from flock.core.context import FlockContext
+ from flock.core.logging import flock_logger, live_update_handler, performance_handler
+
+
+ class LoopAgent(Agent):
+     """An agent that executes its logic in a loop until a termination condition is met.
+
+     Attributes:
+         input: Input domain for the agent
+         output: Output types for the agent
+         tools: Tools the agent is allowed to use
+         max_iterations: Maximum number of iterations before forced termination
+         termination_condition: Optional callable that determines when to stop the loop
+     """
+
+     input: str = Field(default="", description="Input domain for the agent")
+     output: str = Field(default="", description="Output types for the agent")
+     tools: list[Callable] | None = Field(default=None, description="Tools the agent is allowed to use")
+     max_iterations: int = Field(default=10, description="Maximum number of iterations")
+     termination_condition: Callable[[dict[str, Any]], bool] | None = Field(
+         default=None, description="Optional function to determine loop termination"
+     )
+
+     async def _process_iteration(self, context: FlockContext, iteration: int) -> dict[str, Any]:
+         """Process a single iteration of the loop."""
+         try:
+             with performance_handler.track_time(f"iteration_{iteration}"):
+                 flock_logger.debug(f"Processing iteration {iteration}", agent=self.name)
+                 # Here you would implement the actual iteration logic
+                 # For now, we'll just return a simple result
+                 return {"iteration": iteration, "status": "completed"}
+         except Exception as e:
+             flock_logger.error(
+                 f"Error in iteration {iteration}",
+                 error=str(e),
+                 agent=self.name,
+             )
+             raise
+
+     def _should_continue(self, result: dict[str, Any], iteration: int) -> bool:
+         """Determine if the loop should continue."""
+         if iteration + 1 >= self.max_iterations:
+             flock_logger.warning(
+                 "Maximum iterations reached",
+                 max_iterations=self.max_iterations,
+                 agent=self.name,
+             )
+             return False
+
+         if self.termination_condition:
+             should_terminate = self.termination_condition(result)
+             if should_terminate:
+                 flock_logger.info(
+                     "Termination condition met",
+                     iteration=iteration,
+                     agent=self.name,
+                 )
+             return not should_terminate
+
+         return True
+
+     async def run(self, context: FlockContext) -> dict[str, Any]:
+         """Run the agent in a loop until the termination condition is met or max iterations reached."""
+         try:
+             flock_logger.info(f"Starting loop agent: {self.name}")
+             results = []
+             iteration = 0
+
+             with live_update_handler.progress_tracker("Loop Progress") as update_progress:
+                 while True:
+                     # Update progress based on iteration count
+                     progress = min((iteration + 1) * 100 / self.max_iterations, 100)
+                     update_progress(progress)
+
+                     # Process iteration with status tracking
+                     with live_update_handler.update_activity_status(
+                         f"{self.name}_iteration_{iteration}",
+                         f"Iteration {iteration + 1}",
+                         "Running",
+                         {"max_iterations": self.max_iterations},
+                     ):
+                         result = await self._process_iteration(context, iteration)
+                         results.append(result)
+
+                     # Check termination conditions
+                     if not self._should_continue(result, iteration):
+                         break
+
+                     iteration += 1
+
+             flock_logger.success(
+                 "Loop completed successfully",
+                 total_iterations=iteration + 1,
+                 agent=self.name,
+             )
+             return {
+                 "iterations": iteration + 1,
+                 "results": results,
+                 "final_result": results[-1] if results else None,
+             }
+
+         except Exception as e:
+             flock_logger.error(
+                 "Loop execution failed",
+                 error=str(e),
+                 agent=self.name,
+                 iteration=iteration,
+             )
+             raise
+
+     async def run_temporal(self, context: FlockContext) -> dict[str, Any]:
+         """Run the loop agent via Temporal."""
+         try:
+             from temporalio.client import Client
+
+             from flock.workflow.agent_activities import run_agent_activity
+             from flock.workflow.temporal_setup import run_activity
+
+             with performance_handler.track_time("temporal_setup"):
+                 flock_logger.info(f"Starting temporal loop agent: {self.name}")
+                 client = await Client.connect("localhost:7233", namespace="default")
+
+             results = []
+             iteration = 0
+
+             with live_update_handler.update_workflow_status(
+                 self.name, "Running", {"phase": "loop_execution", "max_iterations": self.max_iterations}
+             ):
+                 while True:
+                     # Process iteration as a temporal activity
+                     with performance_handler.track_time(f"temporal_iteration_{iteration}"):
+                         context_data = {
+                             "state": context.state,
+                             "history": [record.__dict__ for record in context.history],
+                             "agent_definitions": [definition.__dict__ for definition in context.agent_definitions],
+                         }
+                         agent_data = self.dict()
+
+                         flock_logger.info(f"Starting temporal iteration {iteration + 1}")
+                         result = await run_activity(
+                             client,
+                             f"{self.name}_iteration_{iteration}",
+                             run_agent_activity,
+                             {"agent_data": agent_data, "context_data": context_data},
+                         )
+                         results.append(result)
+
+                     # Check termination conditions
+                     if not self._should_continue(result, iteration):
+                         break
+
+                     iteration += 1
+
+             flock_logger.success(
+                 "Temporal loop completed successfully",
+                 total_iterations=iteration + 1,
+                 agent=self.name,
+             )
+             return {
+                 "iterations": iteration + 1,
+                 "results": results,
+                 "final_result": results[-1] if results else None,
+             }
+
+         except Exception as e:
+             flock_logger.error(
+                 "Temporal loop execution failed",
+                 error=str(e),
+                 agent=self.name,
+                 iteration=iteration,
+             )
+             raise
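
Finally, a hedged sketch of wiring a termination_condition: the predicate receives each iteration's result dict (here the stub produced by _process_iteration) and the loop stops when it returns True or max_iterations is hit. The name/model fields are assumed Agent constructor fields and the model id is a placeholder.

import asyncio

from flock.agents.loop_agent import LoopAgent
from flock.core.context import FlockContext

agent = LoopAgent(
    name="poller",                # assumed Agent field
    model="openai/gpt-4o-mini",   # placeholder model id
    max_iterations=5,
    termination_condition=lambda r: r.get("status") == "completed",
)

# With the stub _process_iteration above, the first result already has
# status "completed", so the loop ends after one iteration.
result = asyncio.run(agent.run(FlockContext()))  # assumed default constructor
print(result["iterations"], result["final_result"])
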