flock-core 0.4.0b25__py3-none-any.whl → 0.4.0b27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of flock-core might be problematic.

@@ -0,0 +1,228 @@
+ """Defines granular Temporal activities for executing a single agent
+ and determining the next agent in a Flock workflow.
+ """
+
+ from collections.abc import Callable
+
+ from opentelemetry import trace
+ from temporalio import activity
+
+ # Third-party imports only within activity functions if needed, or pass context
+ # For core flock types, import directly
+ from flock.core.context.context import FlockContext
+ from flock.core.context.context_vars import FLOCK_MODEL
+ from flock.core.flock_agent import FlockAgent  # Import concrete class if needed
+ from flock.core.flock_registry import get_registry
+ from flock.core.flock_router import HandOffRequest
+ from flock.core.logging.logging import get_logger
+ from flock.core.util.input_resolver import resolve_inputs
+
+ logger = get_logger("agent_activity")  # Using a distinct logger category
+ tracer = trace.get_tracer(__name__)
+ registry = get_registry()  # Get registry instance once
+
+
+ @activity.defn
+ async def execute_single_agent(agent_name: str, context: FlockContext) -> dict:
+     """Executes a single specified agent and returns its result.
+
+     Args:
+         agent_name: The name of the agent to execute.
+         context: The current FlockContext (passed from the workflow).
+
+     Returns:
+         The raw result dictionary from the agent's execution.
+
+     Raises:
+         ValueError: If the agent is not found in the registry.
+         Exception: Propagates exceptions from agent execution for Temporal retries.
+     """
+     with tracer.start_as_current_span("execute_single_agent") as span:
+         span.set_attribute("agent.name", agent_name)
+         logger.info("Executing single agent", agent=agent_name)
+
+         agent = registry.get_agent(agent_name)
+         if not agent:
+             logger.error("Agent not found in registry", agent=agent_name)
+             # Raise error for Temporal to potentially retry/fail the activity
+             raise ValueError(f"Agent '{agent_name}' not found in registry.")
+
+         # Set agent's context reference (transient, for this execution)
+         agent.context = context
+
+         # Ensure model is set (using context value if needed)
+         # Consider if this should be done once when agent is added or workflow starts
+         if agent.model is None:
+             agent_model = context.get_variable(FLOCK_MODEL)
+             if agent_model:
+                 agent.set_model(agent_model)
+                 logger.debug(
+                     f"Set model for agent '{agent_name}' from context: {agent_model}"
+                 )
+
+         # Resolve agent-specific callables if necessary
+         # This might be better handled in the workflow before the loop starts
+         # or when agents are initially loaded. Assuming it's handled elsewhere for now.
+         # agent.resolve_callables(context=context)
+
+         # Resolve inputs for this specific agent run
+         previous_agent_name = (
+             context.get_last_agent_name()
+         )  # Relies on context method
+         logger.debug(
+             f"Resolving inputs for {agent_name} with previous agent {previous_agent_name}"
+         )
+         agent_inputs = resolve_inputs(agent.input, context, previous_agent_name)
+         span.add_event(
+             "resolved inputs", attributes={"inputs": str(agent_inputs)}
+         )
+
+         try:
+             # Execute just this agent
+             result = await agent.run_async(agent_inputs)
+             # Avoid logging potentially large results directly to span attributes
+             result_str = str(result)
+             span.set_attribute("result.type", type(result).__name__)
+             span.set_attribute(
+                 "result.preview",
+                 result_str[:500] + ("..." if len(result_str) > 500 else ""),
+             )
+             logger.info("Single agent execution completed", agent=agent_name)
+             return result
+         except Exception as e:
+             logger.error(
+                 "Single agent execution failed",
+                 agent=agent_name,
+                 error=str(e),
+                 exc_info=True,
+             )
+             span.record_exception(e)
+             # Re-raise the exception for Temporal to handle based on retry policy
+             raise
+
+
+ @activity.defn
+ async def determine_next_agent(
+     current_agent_name: str, result: dict, context: FlockContext
+ ) -> dict | None:
+     """Determines the next agent using the current agent's handoff router.
+
+     Args:
+         current_agent_name: The name of the agent that just ran.
+         result: The result produced by the current agent.
+         context: The current FlockContext.
+
+     Returns:
+         A dictionary representing the HandOffRequest (serialized via model_dump),
+         or None if no handoff occurs or router doesn't specify a next agent.
+
+     Raises:
+         ValueError: If the current agent cannot be found.
+         Exception: Propagates exceptions from router execution for Temporal retries.
+     """
+     with tracer.start_as_current_span("determine_next_agent") as span:
+         span.set_attribute("agent.name", current_agent_name)
+         logger.info("Determining next agent after", agent=current_agent_name)
+
+         agent = registry.get_agent(current_agent_name)
+         if not agent:
+             logger.error(
+                 "Agent not found for routing", agent=current_agent_name
+             )
+             raise ValueError(
+                 f"Agent '{current_agent_name}' not found for routing."
+             )
+
+         if not agent.handoff_router:
+             logger.info(
+                 "No handoff router defined for agent", agent=current_agent_name
+             )
+             span.add_event("no_router")
+             return None  # Indicate no handoff
+
+         logger.debug(
+             f"Using router {agent.handoff_router.__class__.__name__}",
+             agent=agent.name,
+         )
+         try:
+             # Execute the routing logic
+             handoff_data: (
+                 HandOffRequest | Callable
+             ) = await agent.handoff_router.route(agent, result, context)
+
+             # Handle callable handoff functions - This is complex in distributed systems.
+             # Consider if this pattern should be supported or if routing should always
+             # return serializable data directly. Executing arbitrary code from context
+             # within an activity can have side effects and security implications.
+             # Assuming for now it MUST return HandOffRequest or structure convertible to it.
+             if callable(handoff_data):
+                 logger.warning(
+                     "Callable handoff detected - executing function.",
+                     agent=agent.name,
+                 )
+                 # Ensure context is available if the callable needs it
+                 try:
+                     handoff_data = handoff_data(
+                         context, result
+                     )  # Potential side effects
+                     if not isinstance(handoff_data, HandOffRequest):
+                         logger.error(
+                             "Handoff function did not return a HandOffRequest object.",
+                             agent=agent.name,
+                         )
+                         raise TypeError(
+                             "Handoff function must return a HandOffRequest object."
+                         )
+                 except Exception as e:
+                     logger.error(
+                         "Handoff function execution failed",
+                         agent=agent.name,
+                         error=str(e),
+                         exc_info=True,
+                     )
+                     span.record_exception(e)
+                     raise  # Propagate error
+
+             # Ensure we have a HandOffRequest object after potentially calling function
+             if not isinstance(handoff_data, HandOffRequest):
+                 logger.error(
+                     "Router returned unexpected type",
+                     type=type(handoff_data).__name__,
+                     agent=agent.name,
+                 )
+                 raise TypeError(
+                     f"Router for agent '{agent.name}' did not return a HandOffRequest object."
+                 )
+
+             # Ensure agent instance is converted to name for serialization across boundaries
+             if isinstance(handoff_data.next_agent, FlockAgent):
+                 handoff_data.next_agent = handoff_data.next_agent.name
+
+             # If router logic determines no further agent, return None
+             if not handoff_data.next_agent:
+                 logger.info("Router determined no next agent", agent=agent.name)
+                 span.add_event("no_next_agent_from_router")
+                 return None
+
+             logger.info(
+                 "Handoff determined",
+                 next_agent=handoff_data.next_agent,
+                 agent=agent.name,
+             )
+             span.set_attribute("next_agent", handoff_data.next_agent)
+             # Return the serializable HandOffRequest data using Pydantic's export method
+             return handoff_data.model_dump(
+                 mode="json"
+             )  # Ensure JSON-serializable
+
+         except Exception as e:
+             # Catch potential errors during routing execution
+             logger.error(
+                 "Router execution failed",
+                 agent=agent.name,
+                 error=str(e),
+                 exc_info=True,
+             )
+             span.record_exception(e)
+             # Let Temporal handle the activity failure based on retry policy
+             raise
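
The activity above only constrains routers through the call `await agent.handoff_router.route(agent, result, context)`, which must yield a `HandOffRequest` (or a callable producing one). As a minimal sketch of that contract, a static router might look like the following; the real `FlockRouter` base class and any `HandOffRequest` fields beyond `next_agent` and `override_context` are not shown in this diff, so the class shape and constructor call are assumptions.

```python
# Illustrative sketch only, not part of the package. Assumes HandOffRequest is a
# Pydantic model exposing a `next_agent: str` field, consistent with the
# model_dump()/model_validate() calls and attribute access in the diff above.
from flock.core.flock_router import HandOffRequest


class StaticHandoffRouter:
    """Hypothetical router that always hands off to one fixed agent."""

    def __init__(self, next_agent_name: str):
        self.next_agent_name = next_agent_name

    async def route(self, agent, result: dict, context) -> HandOffRequest:
        # Same signature as the call site in determine_next_agent above.
        return HandOffRequest(next_agent=self.next_agent_name)
```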
@@ -0,0 +1,225 @@
+ from datetime import timedelta
+ from typing import Any
+
+ from temporalio import workflow
+
+ # Import activities from the new file
+ with workflow.unsafe.imports_passed_through():
+     from flock.core.context.context import AgentDefinition, FlockContext
+     from flock.core.context.context_vars import FLOCK_CURRENT_AGENT
+     from flock.core.flock_router import HandOffRequest
+     from flock.core.logging.logging import get_logger
+     from flock.workflow.agent_execution_activity import (
+         determine_next_agent,
+         execute_single_agent,
+     )
+     from flock.workflow.temporal_config import (
+         TemporalActivityConfig,
+         TemporalRetryPolicyConfig,
+     )
+
+
+ logger = get_logger("workflow")
+
+
+ @workflow.defn
+ class FlockWorkflow:
+     # No need for __init__ storing context anymore if passed to run
+
+     @workflow.run
+     async def run(self, workflow_args: dict[str, Any]) -> dict:
+         # --- Workflow Initialization ---
+         # Arguments are packed into a single dictionary
+         context_dict = workflow_args["context_dict"]
+         default_retry_config_dict = workflow_args["default_retry_config_dict"]
+
+         # Deserialize context and default retry config
+         context = FlockContext.from_dict(context_dict)
+         default_retry_config = TemporalRetryPolicyConfig.model_validate(
+             default_retry_config_dict
+         )
+
+         context.workflow_id = workflow.info().workflow_id
+         context.workflow_timestamp = workflow.info().start_time.strftime(
+             "%Y-%m-%d %H:%M:%S"
+         )
+
+         current_agent_name = context.get_variable(FLOCK_CURRENT_AGENT)
+         final_result = None
+         previous_agent_name = (
+             None  # Keep track of the agent that called the current one
+         )
+
+         logger.info(
+             "Starting workflow execution",
+             workflow_id=context.workflow_id,
+             start_time=context.workflow_timestamp,
+             initial_agent=current_agent_name,
+         )
+
+         try:
+             while current_agent_name:
+                 logger.info(
+                     "Executing agent activity", agent=current_agent_name
+                 )
+
+                 # --- Determine Activity Settings ---
+                 agent_def: AgentDefinition | None = (
+                     context.get_agent_definition(current_agent_name)
+                 )
+                 agent_activity_config: TemporalActivityConfig | None = None
+                 final_retry_config = (
+                     default_retry_config  # Start with the workflow default
+                 )
+
+                 if agent_def and agent_def.agent_data.get(
+                     "temporal_activity_config"
+                 ):
+                     try:
+                         agent_activity_config = (
+                             TemporalActivityConfig.model_validate(
+                                 agent_def.agent_data["temporal_activity_config"]
+                             )
+                         )
+                         logger.debug(
+                             f"Loaded agent-specific temporal config for {current_agent_name}"
+                         )
+                     except Exception as e:
+                         logger.warn(
+                             f"Failed to validate agent temporal config for {current_agent_name}: {e}. Using defaults."
+                         )
+
+                 # Layering logic: Agent config overrides workflow default config
+                 activity_task_queue = (
+                     workflow.info().task_queue
+                 )  # Default to workflow task queue
+                 activity_timeout = timedelta(
+                     minutes=5
+                 )  # Fallback default timeout
+
+                 if agent_activity_config:
+                     activity_task_queue = (
+                         agent_activity_config.task_queue or activity_task_queue
+                     )
+                     activity_timeout = (
+                         agent_activity_config.start_to_close_timeout
+                         or activity_timeout
+                     )
+                     if agent_activity_config.retry_policy:
+                         final_retry_config = agent_activity_config.retry_policy
+
+                 # Convert config to actual Temporal object
+                 final_retry_policy = final_retry_config.to_temporalio_policy()
+
+                 logger.debug(
+                     f"Final activity settings for {current_agent_name}: "
+                     f"queue='{activity_task_queue}', timeout={activity_timeout}, "
+                     f"retries={final_retry_policy.maximum_attempts}"
+                 )
+
+                 # --- Execute the current agent activity ---
+                 agent_result = await workflow.execute_activity(
+                     execute_single_agent,
+                     args=[current_agent_name, context],
+                     task_queue=activity_task_queue,  # Use determined task queue
+                     start_to_close_timeout=activity_timeout,  # Use determined timeout
+                     retry_policy=final_retry_policy,  # Use determined retry policy
+                 )
+
+                 # Record the execution in the context history
+                 # Note: The 'called_from' is the agent *before* this one
+                 context.record(
+                     agent_name=current_agent_name,
+                     data=agent_result,
+                     timestamp=workflow.now().isoformat(),  # Use deterministic workflow time
+                     hand_off=None,  # Will be updated if handoff occurs
+                     called_from=previous_agent_name,  # Pass the correct previous agent
+                 )
+
+                 final_result = agent_result  # Store the result of the last successful agent
+
+                 logger.info(
+                     "Determining next agent activity",
+                     current_agent=current_agent_name,
+                 )
+                 # --- Determine the next agent activity (using workflow defaults for now) ---
+                 # We could apply similar config logic to determine_next_agent if needed
+                 handoff_data_dict = await workflow.execute_activity(
+                     determine_next_agent,
+                     args=[current_agent_name, agent_result, context],
+                     # Using sensible defaults, but could be configured via workflow_config?
+                     start_to_close_timeout=timedelta(minutes=1),
+                     retry_policy=default_retry_config.to_temporalio_policy(),  # Use default retry
+                 )
+
+                 # Update previous agent name for the next loop iteration
+                 previous_agent_name = current_agent_name
+
+                 if handoff_data_dict:
+                     logger.debug(
+                         "Handoff data received", data=handoff_data_dict
+                     )
+                     # Deserialize handoff data back into Pydantic model for easier access
+                     handoff_request = HandOffRequest.model_validate(
+                         handoff_data_dict
+                     )
+
+                     # Update context based on handoff overrides
+                     if handoff_request.override_context:
+                         context.state.update(handoff_request.override_context)
+                         logger.info("Context updated based on handoff override")
+
+                     # Update the last record's handoff information
+                     if context.history:
+                         context.history[-1].hand_off = handoff_data_dict
+
+                     # Set the next agent
+                     current_agent_name = handoff_request.next_agent
+                     if current_agent_name:
+                         context.set_variable(
+                             FLOCK_CURRENT_AGENT, current_agent_name
+                         )
+                         logger.info("Next agent set", agent=current_agent_name)
+                     else:
+                         logger.info(
+                             "Handoff requested termination (no next agent)"
+                         )
+                         break  # Exit loop if router explicitly returned no next agent
+
+                 else:
+                     # No handoff data returned (no router or router returned None)
+                     logger.info("No handoff occurred, workflow terminating.")
+                     current_agent_name = None  # End the loop
+
+             # --- Workflow Completion ---
+             logger.success(
+                 "Workflow completed successfully",
+                 final_agent=previous_agent_name,
+             )
+             context.set_variable(
+                 "flock.result",
+                 {
+                     "result": final_result,  # Return the last agent's result
+                     "success": True,
+                 },
+             )
+             return final_result  # Return the actual result of the last agent
+
+         except Exception as e:
+             # Catch exceptions from activities (e.g., after retries fail)
+             # or workflow logic errors
+             logger.exception("Workflow execution failed", error=str(e))
+             context.set_variable(
+                 "flock.result",
+                 {
+                     "result": f"Workflow failed: {e}",
+                     "success": False,
+                 },
+             )
+             # It's often better to let Temporal record the failure status
+             # by re-raising the exception rather than returning a custom error dict.
+             # However, returning the context might be useful for debugging.
+             # Consider re-raising: raise
+             return context.model_dump(
+                 mode="json"
+             )  # Return context state on failure
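
`FlockWorkflow.run` expects a single dictionary with `context_dict` and `default_retry_config_dict` keys. A hedged sketch of how a caller might assemble those arguments and start the workflow with the standard `temporalio` client follows; `FlockContext.to_dict()`, the workflow id, and starting the workflow by name are assumptions made for illustration, since only `FlockContext.from_dict()` appears in this diff.

```python
# Hypothetical launch sketch, not part of the package. Assumes FlockContext
# provides a to_dict() counterpart to the from_dict() used inside run().
from temporalio.client import Client

from flock.workflow.temporal_config import TemporalRetryPolicyConfig


async def start_flock_workflow(context) -> dict:
    client = await Client.connect("localhost:7233")  # address used by create_temporal_client()
    workflow_args = {
        "context_dict": context.to_dict(),  # assumption: inverse of FlockContext.from_dict()
        "default_retry_config_dict": TemporalRetryPolicyConfig().model_dump(mode="json"),
    }
    # execute_workflow() is the standard temporalio client call; referring to the
    # workflow by name avoids importing the workflow class here.
    return await client.execute_workflow(
        "FlockWorkflow",
        workflow_args,
        id="flock-run-example",    # hypothetical workflow id
        task_queue="flock-queue",  # default queue from TemporalWorkflowConfig
    )
```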
@@ -0,0 +1,96 @@
+ # src/flock/config/temporal_config.py
+
+ """Pydantic models for configuring Temporal execution settings."""
+
+ from __future__ import annotations
+
+ from datetime import timedelta
+ from typing import TYPE_CHECKING
+
+ # Conditionally import for type hinting only
+ if TYPE_CHECKING:
+     from temporalio.common import RetryPolicy
+
+ # Note: Importing temporalio types directly into config models can complicate serialization
+ # if these models are meant to be purely data containers (e.g., for YAML/JSON).
+ # We define the structure and provide a helper method to convert to the actual Temporal object.
+ # Be careful if using workflow/activity decorators directly on methods within these config models.
+ from pydantic import BaseModel, Field
+
+
+ class TemporalRetryPolicyConfig(BaseModel):
+     """Configuration parameters for Temporal Retry Policies."""
+
+     initial_interval: timedelta = Field(
+         default=timedelta(seconds=1),
+         description="Initial delay before the first retry.",
+     )
+     backoff_coefficient: float = Field(
+         default=2.0, description="Multiplier for the delay between retries."
+     )
+     maximum_interval: timedelta | None = Field(
+         default=timedelta(seconds=100),
+         description="Maximum delay between retries.",
+     )
+     maximum_attempts: int = Field(
+         default=3,
+         description="Maximum number of retry attempts (0 means no retries after first failure).",
+     )
+     non_retryable_error_types: list[str] = Field(
+         default_factory=list,
+         description="List of error type names (strings) that should not be retried.",
+     )
+
+     # Helper to convert to actual Temporalio object when needed (e.g., in workflow/executor)
+     def to_temporalio_policy(self) -> RetryPolicy:
+         # Import locally to avoid making temporalio a hard dependency of the config module itself
+         # The type hint RetryPolicy is now available due to TYPE_CHECKING block
+         from temporalio.common import RetryPolicy
+
+         return RetryPolicy(
+             initial_interval=self.initial_interval,
+             backoff_coefficient=self.backoff_coefficient,
+             maximum_interval=self.maximum_interval,
+             maximum_attempts=self.maximum_attempts,
+             non_retryable_error_types=self.non_retryable_error_types,
+         )
+
+
+ class TemporalWorkflowConfig(BaseModel):
+     """Configuration specific to Temporal Workflow Execution for a Flock."""
+
+     task_queue: str = Field(
+         default="flock-queue",
+         description="Default task queue for the workflow execution.",
+     )
+     workflow_execution_timeout: timedelta | None = Field(
+         default=None,  # Default to no timeout (Temporal server default)
+         description="Total time limit for the workflow execution.",
+     )
+     workflow_run_timeout: timedelta | None = Field(
+         default=None,  # Default to no timeout (Temporal server default)
+         description="Time limit for a single workflow run attempt.",
+     )
+     # Default retry policy for activities if not specified per-agent
+     default_activity_retry_policy: TemporalRetryPolicyConfig = Field(
+         default_factory=TemporalRetryPolicyConfig,
+         description="Default retry policy applied to activities if not overridden by the agent.",
+     )
+
+
+ class TemporalActivityConfig(BaseModel):
+     """Configuration specific to Temporal Activity Execution (per Agent)."""
+
+     task_queue: str | None = Field(
+         default=None,
+         description="Specific task queue for this agent's activity execution (overrides workflow default).",
+     )
+     start_to_close_timeout: timedelta | None = Field(
+         default=timedelta(minutes=5),  # Default to 5 minutes
+         description="Time limit for a single activity attempt.",
+     )
+     retry_policy: TemporalRetryPolicyConfig | None = Field(
+         default=None,
+         description="Specific retry policy for this activity (overrides workflow default).",
+     )
+     # Other timeouts like schedule_to_start, heartbeat_timeout could be added here if needed
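
These models layer per-agent activity settings over workflow-wide defaults, mirroring the precedence logic in the workflow loop above. A short sketch of how they compose (imported via the `flock.workflow.temporal_config` path used by the workflow file; the header comment in this file suggests the source may live elsewhere):

```python
# Composition sketch; field names match the Pydantic models defined above.
from datetime import timedelta

from flock.workflow.temporal_config import (
    TemporalActivityConfig,
    TemporalRetryPolicyConfig,
    TemporalWorkflowConfig,
)

# Workflow-level defaults: activities retry up to 5 times with exponential backoff.
workflow_config = TemporalWorkflowConfig(
    task_queue="flock-queue",
    default_activity_retry_policy=TemporalRetryPolicyConfig(
        initial_interval=timedelta(seconds=2),
        backoff_coefficient=2.0,
        maximum_attempts=5,
    ),
)

# Per-agent override: a long-running agent gets its own (hypothetical) queue,
# a longer timeout, and a single attempt. In the workflow loop, any value set
# here wins over the workflow default.
slow_agent_activity = TemporalActivityConfig(
    task_queue="flock-slow-queue",
    start_to_close_timeout=timedelta(minutes=30),
    retry_policy=TemporalRetryPolicyConfig(maximum_attempts=1),
)

# Converted to the real temporalio object right before the activity call.
retry_policy = slow_agent_activity.retry_policy.to_temporalio_policy()
```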
@@ -1,4 +1,3 @@
- import asyncio
  import uuid
  
  from temporalio.client import Client
@@ -6,19 +5,42 @@ from temporalio.worker import Worker
  
  
  async def create_temporal_client() -> Client:
+     # Consider making the address configurable
      client = await Client.connect("localhost:7233")
      return client
  
  
- async def setup_worker(workflow, activity) -> Client:
-     worker_client = await create_temporal_client()
-     worker = Worker(worker_client, task_queue="flock-queue", workflows=[workflow], activities=[activity])
-     asyncio.create_task(worker.run())
-     await asyncio.sleep(1)
+ async def setup_worker(
+     client: Client, task_queue: str, workflow: type, activities: list
+ ) -> Worker:
+     """Creates and configures a worker instance, but does not run it.
+
+     Args:
+         client: The Temporal client to associate with the worker.
+         task_queue: The task queue the worker should listen on.
+         workflow: The workflow class definition.
+         activities: A list of activity functions.
+
+     Returns:
+         A configured Worker instance.
+     """
+     # Creates and configures the worker instance
+     worker = Worker(
+         client,
+         task_queue=task_queue,
+         workflows=[workflow],
+         activities=activities,
+     )
+     return worker  # Return the configured worker instance
  
  
  async def run_worker(client: Client, task_queue: str, workflows, activities):
-     worker = Worker(client, task_queue=task_queue, workflows=workflows, activities=activities)
+     worker = Worker(
+         client,
+         task_queue=task_queue,
+         workflows=workflows,
+         activities=activities,
+     )
      await worker.run()
  
  
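
With `setup_worker` now returning a configured `Worker` instead of spawning a background task, a dedicated worker process is expected to run the worker itself (for example via `run_worker`). A sketch of such an entry point wiring up the workflow class and the two activities from this diff follows; the `FlockWorkflow` import path, address, and queue name are assumptions.

```python
# Hypothetical dedicated-worker entry point, not part of the package. The
# activity import path matches the workflow file above; the FlockWorkflow
# module path is assumed and not shown in this diff.
import asyncio

from temporalio.client import Client
from temporalio.worker import Worker

from flock.workflow.agent_execution_activity import (
    determine_next_agent,
    execute_single_agent,
)
from flock.workflow.flock_workflow import FlockWorkflow  # assumed module path


async def main() -> None:
    client = await Client.connect("localhost:7233")  # same address as create_temporal_client()
    worker = Worker(
        client,
        task_queue="flock-queue",  # default queue from TemporalWorkflowConfig
        workflows=[FlockWorkflow],
        activities=[execute_single_agent, determine_next_agent],
    )
    await worker.run()  # blocks until the process is stopped


if __name__ == "__main__":
    asyncio.run(main())
```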
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flock-core
- Version: 0.4.0b25
+ Version: 0.4.0b27
  Summary: Declarative LLM Orchestration at Scale
  Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
  License-File: LICENSE
@@ -216,13 +216,39 @@ if __name__ == "__main__":
  
  ## 🐤 New in Flock 0.4.0 `Magpie` 🐤
  
- ### REST API - Deploy Flock Agents as REST API Endpoints
+ Version 0.4.0 brings significant enhancements focused on usability, deployment, and robustness:
  
- ### Web UI - Test Flock Agents in the Browser
  
- ### CLI Tool - Manage Flock Agents via the Command Line
+ ### 🚀 REST API - Deploy Flock Agents as REST API Endpoints
  
- ### Serialization - Share, Deploy, and Run Flock Agents by human readable yaml files
+ Easily deploy your Flock agents as scalable REST API endpoints. Interact with your agent workflows via standard HTTP requests.
+
+ ### 🖥️ Web UI - Test Flock Agents in the Browser
+
+ Test and interact with your Flock agents directly in your browser through an integrated web interface.
+
+ ### ⌨️ CLI Tool - Manage Flock Agents via the Command Line
+
+ Manage Flock configurations, run agents, and inspect results directly from your command line.
+
+ ### 💾 Enhanced Serialization - Share, Deploy, and Run Flock Agents by human readable yaml files
+
+ Define and share entire Flock configurations, including agents and components, using human-readable YAML files. Load flocks directly from these files for easy deployment and versioning.
+
+ ### ⏱️ Robust Temporal Integration
+
+ Flock 0.4.0 introduces first-class support for Temporal.io, enabling you to build truly production-grade, reliable, and scalable agent workflows. Move beyond simple local execution and leverage Temporal's power for:
+
+ * **Fault Tolerance:** Workflows automatically resume from the last successful step after failures.
+ * **Retries:** Configure automatic retries for activities (like LLM calls or tool usage) with exponential backoff.
+ * **Scalability:** Distribute workflow and activity execution across multiple worker processes using Task Queues.
+ * **Observability:** Gain deep insights into workflow execution history via the Temporal UI.
+
+ Flock makes this easy with:
+
+ * **Declarative Configuration:** Define Temporal timeouts, retry policies, and task queues directly within your `Flock` and `FlockAgent` configurations (YAML or Python).
+ * **Correct Patterns:** Uses Temporal's recommended granular activity execution for better control and visibility.
+ * **Clear Worker Separation:** Provides guidance and flags for running dedicated Temporal workers, separating development convenience from production best practices.
  
  ### ✨ Utility: @flockclass Hydrator
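
The "Declarative Configuration" bullet in the README hunk above corresponds to the only lookup path the workflow actually reads: `agent_def.agent_data["temporal_activity_config"]`, which is validated into a `TemporalActivityConfig` before each activity call. How `agent_data` is populated (YAML or Python) is not shown in this diff, so the dictionary below is only a sketch of a value that would round-trip through that validation.

```python
# Hypothetical per-agent entry; the key name is the one read by FlockWorkflow,
# but the surrounding structure of agent_data is an assumption.
from datetime import timedelta

from flock.workflow.temporal_config import (
    TemporalActivityConfig,
    TemporalRetryPolicyConfig,
)

agent_data = {
    "temporal_activity_config": TemporalActivityConfig(
        start_to_close_timeout=timedelta(minutes=10),
        retry_policy=TemporalRetryPolicyConfig(maximum_attempts=2),
    ).model_dump(mode="json"),  # JSON mode keeps the dict serializable for Temporal payloads
}
```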