flock-core 0.4.0b26__py3-none-any.whl → 0.4.0b27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic; see the registry's advisory page for more details.

@@ -20,7 +20,7 @@ class AgentRunRecord(BaseModel):
20
20
  data: dict[str, Any] = Field(default_factory=dict)
21
21
  timestamp: str = Field(default="")
22
22
  hand_off: dict | None = Field(default_factory=dict)
23
- called_from: str = Field(default="")
23
+ called_from: str | None = Field(default=None)
24
24
 
25
25
 
26
26
  class AgentDefinition(BaseModel):
@@ -132,6 +132,15 @@ class FlockContext(Serializable, BaseModel):
132
132
  def get_agent_definition(self, agent_name: str) -> AgentDefinition | None:
133
133
  return self.agent_definitions.get(agent_name)
134
134
 
135
+ def get_last_agent_name(self) -> str | None:
136
+ """Returns the name of the agent from the most recent history record."""
137
+ if not self.history:
138
+ return None
139
+ last_record = self.history[-1]
140
+ # The 'called_from' field in the *next* record is the previous agent.
141
+ # However, to get the name of the *last executed agent*, we look at the 'agent' field.
142
+ return last_record.agent
143
+
135
144
  def add_agent_definition(
136
145
  self, agent_type: type, agent_name: str, agent_data: Any
137
146
  ) -> None:
@@ -1,10 +1,23 @@
1
1
  # src/your_package/core/execution/temporal_executor.py
2
2
 
3
+ import asyncio # Import asyncio
4
+ from typing import TYPE_CHECKING, Any
5
+
6
+ from temporalio.worker import Worker # Import Worker
7
+
8
+ if TYPE_CHECKING:
9
+ from flock.core.flock import Flock # Import Flock for type hinting
10
+
3
11
  from flock.core.context.context import FlockContext
4
12
  from flock.core.context.context_vars import FLOCK_RUN_ID
5
13
  from flock.core.logging.logging import get_logger
6
- from flock.workflow.activities import (
7
- run_agent, # Activity function used in Temporal
14
+ from flock.workflow.agent_execution_activity import (
15
+ determine_next_agent,
16
+ execute_single_agent,
17
+ )
18
+ from flock.workflow.temporal_config import (
19
+ TemporalRetryPolicyConfig,
20
+ TemporalWorkflowConfig,
8
21
  )
9
22
  from flock.workflow.temporal_setup import create_temporal_client, setup_worker
10
23
 
@@ -12,14 +25,18 @@ logger = get_logger("flock")
12
25
 
13
26
 
14
27
  async def run_temporal_workflow(
28
+ flock_instance: "Flock", # Accept Flock instance
15
29
  context: FlockContext,
16
30
  box_result: bool = True,
31
+ memo: dict[str, Any] | None = None, # Add memo argument
17
32
  ) -> dict:
18
33
  """Execute the agent workflow via Temporal for robust, distributed processing.
19
34
 
20
35
  Args:
36
+ flock_instance: The Flock instance.
21
37
  context: The FlockContext instance with state and history.
22
38
  box_result: If True, wraps the result in a Box for nicer display.
39
+ memo: Optional dictionary of metadata to attach to the Temporal workflow.
23
40
 
24
41
  Returns:
25
42
  A dictionary containing the workflow result.
@@ -29,29 +46,121 @@ async def run_temporal_workflow(
29
46
  FlockWorkflow, # Your workflow class
30
47
  )
31
48
 
32
- logger.info("Setting up Temporal workflow")
33
- await setup_worker(workflow=FlockWorkflow, activity=run_agent)
49
+ # Get workflow config from Flock instance or use defaults
50
+ wf_config = flock_instance.temporal_config or TemporalWorkflowConfig()
51
+
34
52
  logger.debug("Creating Temporal client")
35
53
  flock_client = await create_temporal_client()
36
- workflow_id = context.get_variable(FLOCK_RUN_ID)
37
- logger.info("Executing Temporal workflow", workflow_id=workflow_id)
38
- context_dict = context.model_dump()
39
- result = await flock_client.execute_workflow(
40
- FlockWorkflow.run,
41
- context_dict,
42
- id=workflow_id,
43
- task_queue="flock-queue",
44
- )
45
54
 
46
- agent_name = context.get_variable("FLOCK_CURRENT_AGENT")
47
- logger.debug("Formatting Temporal result", agent=agent_name)
55
+ # Determine if we need to manage an in-process worker
56
+ start_worker_locally = flock_instance.temporal_start_in_process_worker
57
+
58
+ # Setup worker instance
59
+ worker: Worker | None = None
60
+ worker_task: asyncio.Task | None = None
61
+
62
+ if start_worker_locally:
63
+ logger.info(
64
+ f"Setting up temporary in-process worker for task queue '{wf_config.task_queue}'"
65
+ )
66
+ worker = await setup_worker(
67
+ flock_client, # Pass the client
68
+ wf_config.task_queue, # Pass the task queue
69
+ FlockWorkflow,
70
+ [execute_single_agent, determine_next_agent],
71
+ )
72
+
73
+ # Run the worker in the background
74
+ worker_task = asyncio.create_task(worker.run())
75
+ logger.info("Temporal worker started in background.")
76
+
77
+ # Allow worker time to start polling (heuristic for local testing)
78
+ await asyncio.sleep(2)
79
+ else:
80
+ logger.info(
81
+ "Skipping in-process worker startup. Assuming dedicated workers are running."
82
+ )
83
+
84
+ try:
85
+ workflow_id = context.get_variable(FLOCK_RUN_ID)
86
+ logger.info(
87
+ "Executing Temporal workflow",
88
+ workflow_id=workflow_id,
89
+ task_queue=wf_config.task_queue,
90
+ )
91
+
92
+ # Prepare the single workflow argument dictionary
93
+ workflow_args_dict = {
94
+ "context_dict": context.model_dump(mode="json"),
95
+ "default_retry_config_dict": (
96
+ wf_config.default_activity_retry_policy.model_dump(
97
+ mode="json"
98
+ )
99
+ if wf_config.default_activity_retry_policy
100
+ else TemporalRetryPolicyConfig().model_dump(mode="json")
101
+ ),
102
+ }
103
+
104
+ # Start the workflow using start_workflow
105
+ handle = await flock_client.start_workflow(
106
+ FlockWorkflow.run,
107
+ # Pass the single dictionary as the only element in the args list
108
+ args=[workflow_args_dict],
109
+ id=workflow_id,
110
+ task_queue=wf_config.task_queue,
111
+ # Corrected timeout argument names
112
+ execution_timeout=wf_config.workflow_execution_timeout,
113
+ run_timeout=wf_config.workflow_run_timeout,
114
+ memo=memo or {}, # Pass memo if provided
115
+ )
116
+
117
+ logger.info(
118
+ "Workflow started, awaiting result...", workflow_id=handle.id
119
+ )
120
+ # Await the result from the handle
121
+ result = await handle.result()
122
+ logger.info("Workflow result received.")
123
+
124
+ agent_name = context.get_variable("FLOCK_CURRENT_AGENT")
125
+ logger.debug("Formatting Temporal result", agent=agent_name)
48
126
 
49
- if box_result:
50
- from box import Box
127
+ if box_result:
128
+ from box import Box
51
129
 
52
- logger.debug("Boxing Temporal result")
53
- return Box(result)
54
- return result
130
+ logger.debug("Boxing Temporal result")
131
+ return Box(result)
132
+ return result
133
+ except Exception as e:
134
+ logger.error(
135
+ "Error during Temporal workflow execution or result retrieval",
136
+ error=e,
137
+ )
138
+ raise e # Re-raise the exception after logging
139
+ finally:
140
+ # Ensure worker is shut down regardless of success or failure
141
+ if (
142
+ start_worker_locally
143
+ and worker
144
+ and worker_task
145
+ and not worker_task.done()
146
+ ):
147
+ logger.info("Shutting down temporal worker...")
148
+ await worker.shutdown() # Await the shutdown coroutine
149
+ try:
150
+ await asyncio.wait_for(
151
+ worker_task, timeout=10.0
152
+ ) # Wait for task to finish
153
+ logger.info("Temporal worker shut down gracefully.")
154
+ except asyncio.TimeoutError:
155
+ logger.warning(
156
+ "Temporal worker shutdown timed out. Cancelling task."
157
+ )
158
+ worker_task.cancel()
159
+ except Exception as shutdown_err:
160
+ logger.error(
161
+ f"Error during worker shutdown: {shutdown_err}",
162
+ exc_info=True,
163
+ )
55
164
  except Exception as e:
56
165
  logger.error("Error executing Temporal workflow", error=e)
57
166
  raise e
flock/core/flock.py CHANGED
@@ -39,6 +39,7 @@ from flock.core.flock_evaluator import FlockEvaluator
39
39
  from flock.core.logging.logging import LOGGERS, get_logger, get_module_loggers
40
40
  from flock.core.serialization.serializable import Serializable
41
41
  from flock.core.util.cli_helper import init_console
42
+ from flock.workflow.temporal_config import TemporalWorkflowConfig
42
43
 
43
44
  # Import FlockAgent using TYPE_CHECKING to avoid circular import at runtime
44
45
  if TYPE_CHECKING:
@@ -102,6 +103,16 @@ class Flock(BaseModel, Serializable):
102
103
  default=True,
103
104
  description="If True, show the Flock banner on console interactions.",
104
105
  )
106
+ # --- Temporal Configuration (Optional) ---
107
+ temporal_config: TemporalWorkflowConfig | None = Field(
108
+ default=None,
109
+ description="Optional Temporal settings specific to the workflow execution for this Flock.",
110
+ )
111
+ # --- Temporal Dev/Test Setting ---
112
+ temporal_start_in_process_worker: bool = Field(
113
+ default=True,
114
+ description="If True (default) and enable_temporal=True, start a temporary in-process worker for development/testing convenience. Set to False when using dedicated workers.",
115
+ )
105
116
  # Internal agent storage - not part of the Pydantic model for direct serialization
106
117
  _agents: dict[str, FlockAgent]
107
118
  _start_agent_name: str | None = None # For potential pre-configuration
@@ -122,6 +133,8 @@ class Flock(BaseModel, Serializable):
122
133
  enable_temporal: bool = False,
123
134
  enable_logging: bool | list[str] = False,
124
135
  agents: list[FlockAgent] | None = None,
136
+ temporal_config: TemporalWorkflowConfig | None = None,
137
+ temporal_start_in_process_worker: bool = True,
125
138
  **kwargs,
126
139
  ):
127
140
  """Initialize the Flock orchestrator."""
@@ -136,6 +149,8 @@ class Flock(BaseModel, Serializable):
136
149
  enable_temporal=enable_temporal,
137
150
  enable_logging=enable_logging,
138
151
  show_flock_banner=show_flock_banner,
152
+ temporal_config=temporal_config,
153
+ temporal_start_in_process_worker=temporal_start_in_process_worker,
139
154
  **kwargs,
140
155
  )
141
156
 
@@ -311,6 +326,7 @@ class Flock(BaseModel, Serializable):
311
326
  run_id: str = "",
312
327
  box_result: bool = True,
313
328
  agents: list[FlockAgent] | None = None,
329
+ memo: dict[str, Any] | None = None,
314
330
  ) -> Box | dict:
315
331
  """Entry point for running an agent system asynchronously."""
316
332
  # Import here to allow forward reference resolution
@@ -348,7 +364,17 @@ class Flock(BaseModel, Serializable):
348
364
 
349
365
  # Check if start_agent is in agents
350
366
  if start_agent_name not in self._agents:
351
- raise ValueError(f"Start agent '{start_agent_name}' not found.")
367
+ # Try loading from registry if not found locally yet
368
+ reg_agent = FlockRegistry.get_agent(start_agent_name)
369
+ if reg_agent:
370
+ self.add_agent(reg_agent)
371
+ logger.info(
372
+ f"Loaded start agent '{start_agent_name}' from registry."
373
+ )
374
+ else:
375
+ raise ValueError(
376
+ f"Start agent '{start_agent_name}' not found locally or in registry."
377
+ )
352
378
 
353
379
  run_input = input if input is not None else self._start_input
354
380
  effective_run_id = run_id or f"flockrun_{uuid.uuid4().hex[:8]}"
@@ -394,6 +420,15 @@ class Flock(BaseModel, Serializable):
394
420
  agent_data=agent_dict_repr, # Pass the serialized dict
395
421
  )
396
422
 
423
+ # Add temporal config to context if enabled
424
+ if self.enable_temporal and self.temporal_config:
425
+ # Store the workflow config dict for the executor/workflow to use
426
+ # Using a specific key to avoid potential clashes in state
427
+ run_context.set_variable(
428
+ "flock.temporal_workflow_config",
429
+ self.temporal_config.model_dump(mode="json"),
430
+ )
431
+
397
432
  logger.info(
398
433
  "Starting agent execution",
399
434
  agent=start_agent_name,
@@ -406,8 +441,15 @@ class Flock(BaseModel, Serializable):
406
441
  run_context, box_result=False
407
442
  )
408
443
  else:
444
+ # Pass the Flock instance itself to the executor
445
+ # so it can access the temporal_config directly if needed
446
+ # This avoids putting potentially large/complex config objects
447
+ # directly into the context state that gets passed around.
409
448
  result = await run_temporal_workflow(
410
- run_context, box_result=False
449
+ self, # Pass the Flock instance
450
+ run_context,
451
+ box_result=False,
452
+ memo=memo,
411
453
  )
412
454
 
413
455
  span.set_attribute("result.type", str(type(result)))
@@ -668,6 +710,7 @@ class Flock(BaseModel, Serializable):
668
710
  # Import locally to prevent circular imports at module level if structure is complex
669
711
  from flock.core.serialization.flock_serializer import FlockSerializer
670
712
 
713
+ # Assuming FlockSerializer handles the nested temporal_config serialization
671
714
  return FlockSerializer.serialize(self, path_type=path_type)
672
715
 
673
716
  @classmethod
@@ -676,6 +719,7 @@ class Flock(BaseModel, Serializable):
676
719
  # Import locally
677
720
  from flock.core.serialization.flock_serializer import FlockSerializer
678
721
 
722
+ # Assuming FlockSerializer handles the nested temporal_config deserialization
679
723
  return FlockSerializer.deserialize(cls, data)
680
724
 
681
725
  # --- Static Method Loader (Delegates to loader module) ---