flock-core 0.3.6__py3-none-any.whl → 0.3.10__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of flock-core might be problematic.

flock/routers/llm/llm_router.py (new file)

@@ -0,0 +1,363 @@
+ """LLM-based router implementation for the Flock framework."""
+
+ import json
+ from typing import Any
+
+ import litellm
+
+ from flock.core.context.context import FlockContext
+ from flock.core.flock_agent import FlockAgent
+ from flock.core.flock_router import (
+     FlockRouter,
+     FlockRouterConfig,
+     HandOffRequest,
+ )
+ from flock.core.logging.logging import get_logger
+
+ logger = get_logger("llm_router")
+
+
+ class LLMRouterConfig(FlockRouterConfig):
+     """Configuration for the LLM router.
+
+     This class extends FlockRouterConfig with parameters specific to the LLM router.
+     """
+
+     temperature: float = 0.2
+     max_tokens: int = 500
+     confidence_threshold: float = 0.5
+     prompt: str = ""
+
+
+ class LLMRouter(FlockRouter):
+     """Router that uses an LLM to determine the next agent in a workflow.
+
+     This class is responsible for:
+     1. Analyzing available agents in the registry
+     2. Using an LLM to score each agent's suitability as the next step
+     3. Selecting the highest-scoring agent
+     4. Creating a HandOff object with the selected agent
+     """
+
+     def __init__(
+         self,
+         name: str = "llm_router",
+         config: LLMRouterConfig | None = None,
+     ):
+         """Initialize the LLMRouter.
+
+         Args:
+             registry: The agent registry containing all available agents
+             name: The name of the router
+             config: The router configuration
+         """
+         logger.info(f"Initializing LLM Router '{name}'")
+         super().__init__(name=name, config=config or LLMRouterConfig(name=name))
+         logger.debug(
+             "LLM Router configuration",
+             temperature=self.config.temperature,
+             max_tokens=self.config.max_tokens,
+         )
+
+     async def route(
+         self,
+         current_agent: FlockAgent,
+         result: dict[str, Any],
+         context: FlockContext,
+     ) -> HandOffRequest:
+         """Determine the next agent to hand off to based on the current agent's output.
+
+         Args:
+             current_agent: The agent that just completed execution
+             result: The output from the current agent
+             context: The global execution context
+
+         Returns:
+             A HandOff object containing the next agent and input data
+         """
+         logger.info(
+             f"Routing from agent '{current_agent.name}'",
+             current_agent=current_agent.name,
+         )
+         logger.debug("Current agent result", result=result)
+
+         agent_definitions = context.agent_definitions
+         # Get all available agents from the registry
+         available_agents = self._get_available_agents(
+             agent_definitions, current_agent.name
+         )
+         logger.debug(
+             "Available agents for routing",
+             count=len(available_agents),
+             agents=[a.agent_data["name"] for a in available_agents],
+         )
+
+         if not available_agents:
+             logger.warning(
+                 "No available agents for routing",
+                 current_agent=current_agent.name,
+             )
+             return HandOffRequest(
+                 next_agent="", override_next_agent={}, override_context=None
+             )
+
+         # Use LLM to determine the best next agent
+         next_agent_name, score = await self._select_next_agent(
+             current_agent, result, available_agents
+         )
+         logger.info(
+             "Agent selection result",
+             next_agent=next_agent_name,
+             score=score,
+         )
+
+         if not next_agent_name or score < self.config.confidence_threshold:
+             logger.warning(
+                 "No suitable next agent found",
+                 best_score=score,
+             )
+             return HandOffRequest(
+                 next_agent="", override_next_agent={}, override_context=None
+             )
+
+         # Get the next agent from the registry
+         next_agent = agent_definitions.get(next_agent_name)
+         if not next_agent:
+             logger.error(
+                 "Selected agent not found in registry",
+                 agent_name=next_agent_name,
+             )
+             return HandOffRequest(
+                 next_agent="", override_next_agent={}, override_context=None
+             )
+
+         # Create input for the next agent
+
+         logger.success(
+             f"Successfully routed to agent '{next_agent_name}'",
+             score=score,
+             from_agent=current_agent.name,
+         )
+         return HandOffRequest(
+             next_agent=next_agent_name,
+             hand_off_mode="add",
+             override_next_agent=None,
+             override_context=None,
+         )
+
+     def _get_available_agents(
+         self, agent_definitions: dict[str, Any], current_agent_name: str
+     ) -> list[FlockAgent]:
+         """Get all available agents except the current one.
+
+         Args:
+             current_agent_name: Name of the current agent to exclude
+
+         Returns:
+             List of available agents
+         """
+         logger.debug(
+             "Getting available agents",
+             total_agents=len(agent_definitions),
+             current_agent=current_agent_name,
+         )
+         agents = []
+         for agent in agent_definitions:
+             if agent != current_agent_name:
+                 agents.append(agent_definitions.get(agent))
+         return agents
+
+     async def _select_next_agent(
+         self,
+         current_agent: FlockAgent,
+         result: dict[str, Any],
+         available_agents: list[FlockAgent],
+     ) -> tuple[str, float]:
+         """Use an LLM to select the best next agent.
+
+         Args:
+             current_agent: The agent that just completed execution
+             result: The output from the current agent
+             available_agents: List of available agents to choose from
+
+         Returns:
+             Tuple of (selected_agent_name, confidence_score)
+         """
+         logger.debug(
+             "Selecting next agent",
+             current_agent=current_agent.name,
+             available_count=len(available_agents),
+         )
+
+         # Prepare the prompt for the LLM
+         prompt = self._create_selection_prompt(
+             current_agent, result, available_agents
+         )
+         logger.debug("Generated selection prompt", prompt_length=len(prompt))
+
+         try:
+             logger.info(
+                 "Calling LLM for agent selection",
+                 model=current_agent.model,
+                 temperature=self.config.temperature,
+             )
+             # Call the LLM to get the next agent
+             response = await litellm.acompletion(
+                 model=current_agent.model,
+                 messages=[{"role": "user", "content": prompt}],
+                 temperature=self.config.temperature
+                 if isinstance(self.config, LLMRouterConfig)
+                 else 0.2,
+                 max_tokens=self.config.max_tokens
+                 if isinstance(self.config, LLMRouterConfig)
+                 else 500,
+             )
+
+             content = response.choices[0].message.content
+             # Parse the response to get the agent name and score
+             try:
+                 # extract the json object from the response
+                 content = content.split("```json")[1].split("```")[0]
+                 data = json.loads(content)
+                 next_agent = data.get("next_agent", "")
+                 score = float(data.get("score", 0))
+                 reasoning = data.get("reasoning", "")
+                 logger.info(
+                     "Successfully parsed LLM response",
+                     next_agent=next_agent,
+                     score=score,
+                     reasoning=reasoning,
+                 )
+                 return next_agent, score
+             except (json.JSONDecodeError, ValueError) as e:
+                 logger.error(
+                     "Failed to parse LLM response",
+                     error=str(e),
+                     raw_response=content,
+                 )
+                 logger.debug("Attempting fallback parsing")
+
+                 # Fallback: try to extract the agent name from the text
+                 for agent in available_agents:
+                     if agent.agent_data["name"] in content:
+                         logger.info(
+                             "Found agent name in response using fallback",
+                             agent=agent.agent_data["name"],
+                         )
+                         return agent.agent_data[
+                             "name"
+                         ], 0.6  # Default score for fallback
+
+                 return "", 0.0
+
+         except Exception as e:
+             logger.error(
+                 "Error calling LLM for agent selection",
+                 error=str(e),
+                 current_agent=current_agent.name,
+             )
+             return "", 0.0
+
+     def _create_selection_prompt(
+         self,
+         current_agent: FlockAgent,
+         result: dict[str, Any],
+         available_agents: list[FlockAgent],
+     ) -> str:
+         """Create a prompt for the LLM to select the next agent.
+
+         Args:
+             current_agent: The agent that just completed execution
+             result: The output from the current agent
+             available_agents: List of available agents to choose from
+
+         Returns:
+             Prompt string for the LLM
+         """
+         # Format the current agent's output
+         result_str = json.dumps(result, indent=2)
+
+         # Format the available agents' information
+         agents_info = []
+         for agent in available_agents:
+             agent_info = {
+                 "name": agent.agent_data["name"],
+                 "description": agent.agent_data["description"]
+                 if agent.agent_data["description"]
+                 else "",
+                 "input": agent.agent_data["input"],
+                 "output": agent.agent_data["output"],
+             }
+             agents_info.append(agent_info)
+
+         agents_str = json.dumps(agents_info, indent=2)
+
+         # Create the prompt
+         if self.config.prompt:
+             prompt = self.config.prompt
+         else:
+             prompt = f"""
+ You are a workflow router that determines the next agent to execute in a multi-agent system.
+
+ CURRENT AGENT:
+ Name: {current_agent.name}
+ Description: {current_agent.description}
+ Input: {current_agent.input}
+ Output: {current_agent.output}
+
+ CURRENT AGENT'S OUTPUT:
+ {result_str}
+
+ AVAILABLE AGENTS:
+ {agents_str}
+
+ Based on the current agent's output and the available agents, determine which agent should be executed next.
+ Consider the following:
+ 1. Which agent's input requirements best match the current agent's output?
+ 2. Which agent's purpose and description make it the most logical next step?
+ 3. Which agent would provide the most value in continuing the workflow?
+
+ Respond with a JSON object containing:
+ 1. "next_agent": The name of the selected agent
+ 2. "score": A confidence score between 0 and 1 indicating how suitable this agent is
+ 3. "reasoning": A brief explanation of why this agent was selected
+
+ If no agent is suitable, set "next_agent" to an empty string and "score" to 0.
+
+ JSON Response:
+ """
+         return prompt
+
+     def _create_next_input(
+         self,
+         current_agent: FlockAgent,
+         result: dict[str, Any],
+         next_agent: FlockAgent,
+     ) -> dict[str, Any]:
+         """Create the input for the next agent, including the previous agent's output.
+
+         Args:
+             current_agent: The agent that just completed execution
+             result: The output from the current agent
+             next_agent: The next agent to execute
+
+         Returns:
+             Input dictionary for the next agent
+         """
+         # Start with an empty input
+         next_input = {}
+
+         # Add a special field for the previous agent's output
+         next_input["previous_agent_output"] = {
+             "agent_name": current_agent.name,
+             "result": result,
+         }
+
+         # Try to map the current agent's output to the next agent's input
+         # This is a simple implementation that could be enhanced with more sophisticated mapping
+         for key in result:
+             # If the next agent expects this key, add it directly
+             if key in next_agent.input:
+                 next_input[key] = result[key]
+
+         return next_input
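
For orientation only (not part of the released diff), a minimal usage sketch of the new router. It assumes an existing FlockAgent instance named my_agent, and relies only on what is visible in this diff: the flock.routers.llm.llm_router module path listed in RECORD, the LLMRouterConfig fields above, and the agent.handoff_router attribute consulted by the reworked activities.py below.

    from flock.routers.llm.llm_router import LLMRouter, LLMRouterConfig

    # Configure how readily the router hands off; the values shown mirror
    # the defaults declared on LLMRouterConfig above.
    router = LLMRouter(
        name="llm_router",
        config=LLMRouterConfig(
            name="llm_router",
            temperature=0.2,
            max_tokens=500,
            confidence_threshold=0.5,
        ),
    )

    # `my_agent` is a hypothetical, already-constructed FlockAgent.
    # activities.py checks `agent.handoff_router` after each run and awaits
    # its route(agent, result, context) call to pick the next agent.
    my_agent.handoff_router = router

At run time the router asks the agent's model for a JSON object with "next_agent", "score", and "reasoning" fields and only hands off when the score reaches confidence_threshold; otherwise it returns an empty next_agent and the chain ends.
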

flock/workflow/activities.py

@@ -6,8 +6,9 @@ from opentelemetry import trace
  from temporalio import activity

  from flock.core.context.context import FlockContext
- from flock.core.context.context_vars import FLOCK_CURRENT_AGENT
- from flock.core.flock_agent import FlockAgent, HandOff
+ from flock.core.context.context_vars import FLOCK_CURRENT_AGENT, FLOCK_MODEL
+ from flock.core.flock_agent import FlockAgent
+ from flock.core.flock_router import HandOffRequest
  from flock.core.logging.logging import get_logger
  from flock.core.registry.agent_registry import Registry
  from flock.core.util.input_resolver import resolve_inputs
@@ -34,6 +35,8 @@ async def run_agent(context: FlockContext) -> dict:
          logger.info("Starting agent chain", initial_agent=current_agent_name)

          agent = registry.get_agent(current_agent_name)
+         if agent.model is None or agent.evaluator.config.model is None:
+             agent.set_model(context.get_variable(FLOCK_MODEL))
          agent.resolve_callables(context=context)
          if not agent:
              logger.error("Agent not found", agent=current_agent_name)
@@ -74,49 +77,83 @@ async def run_agent(context: FlockContext) -> dict:
                      exec_span.record_exception(e)
                      raise

-             # If there is no handoff, record the result and finish.
-             if not agent.hand_off:
-                 context.record(
-                     agent.name,
-                     result,
-                     timestamp=datetime.now().isoformat(),
-                     hand_off=None,
-                     called_from=previous_agent_name,
-                 )
+             # Determine the next agent using the handoff router if available
+             handoff_data = HandOffRequest()
+
+             if agent.handoff_router:
                  logger.info(
-                     "No handoff defined, completing chain", agent=agent.name
+                     f"Using handoff router: {agent.handoff_router.__class__.__name__}",
+                     agent=agent.name,
                  )
-                 iter_span.add_event("chain completed")
-                 return result
-
-             # Determine the next agent.
-             handoff_data = HandOff()
-             if callable(agent.hand_off):
-                 logger.debug("Executing handoff function", agent=agent.name)
                  try:
-                     handoff_data = agent.hand_off(context, result)
-                     if isinstance(handoff_data.next_agent, FlockAgent):
+                     # Route to the next agent
+                     handoff_data = await agent.handoff_router.route(
+                         agent, result, context
+                     )
+
+                     if callable(handoff_data):
+                         logger.debug(
+                             "Executing handoff function", agent=agent.name
+                         )
+                         try:
+                             handoff_data = handoff_data(context, result)
+                             if isinstance(
+                                 handoff_data.next_agent, FlockAgent
+                             ):
+                                 handoff_data.next_agent = (
+                                     handoff_data.next_agent.name
+                                 )
+                         except Exception as e:
+                             logger.error(
+                                 "Handoff function error {} {}",
+                                 agent=agent.name,
+                                 error=str(e),
+                             )
+                             iter_span.record_exception(e)
+                             return {"error": f"Handoff function error: {e}"}
+                     elif isinstance(handoff_data.next_agent, FlockAgent):
                          handoff_data.next_agent = (
                              handoff_data.next_agent.name
                          )
+
+                     if not handoff_data.next_agent:
+                         logger.info(
+                             "Router found no suitable next agent",
+                             agent=agent.name,
+                         )
+                         context.record(
+                             agent.name,
+                             result,
+                             timestamp=datetime.now().isoformat(),
+                             hand_off=None,
+                             called_from=previous_agent_name,
+                         )
+                         logger.info("Completing chain", agent=agent.name)
+                         iter_span.add_event("chain completed")
+                         return result
                  except Exception as e:
                      logger.error(
-                         "Handoff function error",
-                         agent=agent.name,
-                         error=str(e),
+                         "Router error {} {}",
+                         agent.name,
+                         str(e),
                      )
                      iter_span.record_exception(e)
-                     return {"error": f"Handoff function error: {e}"}
-             elif isinstance(agent.hand_off, str | FlockAgent):
-                 handoff_data.next_agent = (
-                     agent.hand_off
-                     if isinstance(agent.hand_off, str)
-                     else agent.hand_off.name
-                 )
+                     return {"error": f"Router error: {e}"}
              else:
-                 logger.error("Unsupported hand_off type", agent=agent.name)
-                 iter_span.add_event("unsupported hand_off type")
-                 return {"error": "Unsupported hand_off type."}
+                 # No router, so no handoff
+                 logger.info(
+                     "No handoff router defined, completing chain",
+                     agent=agent.name,
+                 )
+                 context.record(
+                     agent.name,
+                     result,
+                     timestamp=datetime.now().isoformat(),
+                     hand_off=None,
+                     called_from=previous_agent_name,
+                 )
+                 iter_span.add_event("chain completed")
+                 return result

              # Record the agent run in the context.
              context.record(
@@ -127,10 +164,15 @@ async def run_agent(context: FlockContext) -> dict:
                  called_from=previous_agent_name,
              )
              previous_agent_name = agent.name
+             previous_agent_output = agent.output
+             if handoff_data.override_context:
+                 context.update(handoff_data.override_context)

              # Prepare the next agent.
              try:
                  agent = registry.get_agent(handoff_data.next_agent)
+                 if handoff_data.hand_off_mode == "add":
+                     agent.input = previous_agent_output + ", " + agent.input
                  agent.resolve_callables(context=context)
                  if not agent:
                      logger.error(
@@ -147,6 +189,7 @@ async def run_agent(context: FlockContext) -> dict:
                      }

                  context.set_variable(FLOCK_CURRENT_AGENT, agent.name)
+
                  logger.info("Handing off to next agent", next=agent.name)
                  iter_span.set_attribute("next.agent", agent.name)
              except Exception as e:
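
The reworked control flow above only assumes that whatever sits on agent.handoff_router exposes an async route(current_agent, result, context) method returning a HandOffRequest, and that HandOffRequest's remaining fields have defaults (it is constructed with no arguments above). A minimal custom router, sketched under those assumptions and under the further assumption that FlockRouter can be subclassed by overriding route() alone; the StaticRouter class and the "summary_agent" name are illustrative, not part of the package:

    from typing import Any

    from flock.core.context.context import FlockContext
    from flock.core.flock_agent import FlockAgent
    from flock.core.flock_router import FlockRouter, HandOffRequest


    class StaticRouter(FlockRouter):
        """Hypothetical router that always hands off to one fixed agent."""

        async def route(
            self,
            current_agent: FlockAgent,
            result: dict[str, Any],
            context: FlockContext,
        ) -> HandOffRequest:
            # hand_off_mode="add" makes activities.py prepend the previous
            # agent's output signature to the next agent's input.
            return HandOffRequest(next_agent="summary_agent", hand_off_mode="add")
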
flock_core-0.3.6.dist-info/METADATA → flock_core-0.3.10.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flock-core
- Version: 0.3.6
+ Version: 0.3.10
  Summary: Declarative LLM Orchestration at Scale
  Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
  License-File: LICENSE
@@ -30,17 +30,17 @@ Requires-Dist: python-box>=7.3.2
  Requires-Dist: python-decouple>=3.8
  Requires-Dist: questionary>=2.1.0
  Requires-Dist: rich>=13.9.4
- Requires-Dist: sentence-transformers>=3.4.1
  Requires-Dist: temporalio>=1.9.0
  Requires-Dist: tiktoken>=0.8.0
  Requires-Dist: toml>=0.10.2
  Requires-Dist: tqdm>=4.67.1
  Requires-Dist: uvicorn>=0.34.0
  Requires-Dist: zep-python>=2.0.2
- Provides-Extra: all-tools
- Requires-Dist: docling>=2.18.0; extra == 'all-tools'
- Requires-Dist: markdownify>=0.14.1; extra == 'all-tools'
- Requires-Dist: tavily-python>=0.5.0; extra == 'all-tools'
+ Provides-Extra: all
+ Requires-Dist: docling>=2.18.0; extra == 'all'
+ Requires-Dist: markdownify>=0.14.1; extra == 'all'
+ Requires-Dist: sentence-transformers>=3.4.1; extra == 'all'
+ Requires-Dist: tavily-python>=0.5.0; extra == 'all'
  Provides-Extra: tools
  Requires-Dist: markdownify>=0.14.1; extra == 'tools'
  Requires-Dist: tavily-python>=0.5.0; extra == 'tools'

flock_core-0.3.6.dist-info/RECORD → flock_core-0.3.10.dist-info/RECORD

@@ -8,17 +8,18 @@ flock/cli/load_examples.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
  flock/cli/load_flock.py,sha256=3JdECvt5X7uyOG2vZS3-Zk5C5SI_84_QZjcsB3oJmfA,932
  flock/cli/load_release_notes.py,sha256=qFcgUrMddAE_TP6x1P-6ZywTUjTknfhTDW5LTxtg1yk,599
  flock/cli/settings.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
- flock/cli/assets/release_notes.md,sha256=K-upUm5vuUuRSSU2FkMdgDfai_YlDk_vTCp0s4s2WO0,3419
+ flock/cli/assets/release_notes.md,sha256=-RuE-G9Sn8z1LWEdr9iqjuQN7N1K_JMaCzHYoyLR42U,4793
  flock/core/__init__.py,sha256=mPlvKc0SxC2qCvSlgYeP_7EyV8ptmdn24NO8mlQoCSo,559
- flock/core/flock.py,sha256=1LPMblsvT90Na35LXx0w3Us66yIaTzsokL7lF5fsVX8,19228
- flock/core/flock_agent.py,sha256=RzKX0GRrRJz16YbQFheMo8TqJPXOZSHWNloTbp35zwI,12229
+ flock/core/flock.py,sha256=Qch-6Z8XgLGzl-u5dbcXOOZYCqj40LjcAT0o22KrRE4,19263
+ flock/core/flock_agent.py,sha256=MydN_A-oXKPJIJArHS61XSQnUjDktZovPkRuZOBokN4,11857
  flock/core/flock_api.py,sha256=SKQVKgFCaNCqHtwvIcksnpqG6ajHodVhs3oaKUw-d8c,7192
  flock/core/flock_evaluator.py,sha256=j7riJj_KsWoBnKmLiGp-U0CRhxDyJbgEdLGN26tfKm8,1588
- flock/core/flock_factory.py,sha256=vyDq0eyFT4MyE_n2JyNU7YaFx2ljmjSDmZ07OIsmIOE,2694
- flock/core/flock_module.py,sha256=VWFlBiY2RHZLTlGYfcchuT41M3m_JrZcmzw07u7KayM,2581
+ flock/core/flock_factory.py,sha256=nh0tK5UzEPWP5EmFrRvhsAeaZEvaPG5e0tkYkHpTjy0,2606
+ flock/core/flock_module.py,sha256=3DmxOc39gQS-tiJcgUCjMaLr8QDDJR4acV_M76Xcf6I,2602
+ flock/core/flock_router.py,sha256=A5GaxcGvtiFlRLHBTW7okh5RDm3BdKam2uXvRHRaj7k,2187
  flock/core/context/context.py,sha256=AW0qKIAkgZucVroGsulrPVPc4WmWuqWIrVPHf2qaOLI,6380
- flock/core/context/context_manager.py,sha256=qMySVny_dbTNLh21RHK_YT0mNKIOrqJDZpi9ZVdBsxU,1103
- flock/core/context/context_vars.py,sha256=0Hn6fM2iNc0_jIIU0B7KX-K2o8qXqtZ5EYtwujETQ7U,272
+ flock/core/context/context_manager.py,sha256=FANSWa6DEhdhtZ7t_9Gza0v80UdpoDOhHbfVOccmjkA,1181
+ flock/core/context/context_vars.py,sha256=zYTMi9b6mNSSEHowEQUOTpEDurmAjaUcyBCgfKY6-cU,300
  flock/core/execution/local_executor.py,sha256=rnIQvaJOs6zZORUcR3vvyS6LPREDJTjaygl_Db0M8ao,952
  flock/core/execution/temporal_executor.py,sha256=OF_uXgQsoUGp6U1ZkcuaidAEKyH7XDtbfrtdF10XQ_4,1675
  flock/core/interpreter/python_interpreter.py,sha256=RaUMZuufsKBNQ4FAeSaOgUuxzs8VYu5TgUUs-xwaxxM,26376
@@ -41,21 +42,29 @@ flock/core/serialization/secure_serializer.py,sha256=n5-zRvvXddgJv1FFHsaQ2wuYdL3
  flock/core/serialization/serializable.py,sha256=SymJ0YrjBx48mOBItYSqoRpKuzIc4vKWRS6ScTzre7s,2573
  flock/core/tools/basic_tools.py,sha256=fI9r81_ktRiRhNLwT-jSJ9rkjl28LC1ZfL-njnno2iw,4761
  flock/core/tools/dev_tools/github.py,sha256=a2OTPXS7kWOVA4zrZHynQDcsmEi4Pac5MfSjQOLePzA,5308
- flock/core/util/cli_helper.py,sha256=IOl9r4cz_MJv_Bp5R8dhHX8f-unAqA9vDS6-0E90Vzk,49813
+ flock/core/util/cli_helper.py,sha256=QSpP10WRNcjXzVFwpTQA8lSBy7707Qlv7uCit1XjUms,49808
  flock/core/util/hydrator.py,sha256=6qNwOwCZB7r6y25BZ--0PGofrAlfMaXbDKFQeP5NLts,11196
  flock/core/util/input_resolver.py,sha256=g9vDPdY4OH-G7qjas5ksGEHueokHGFPMoLOvC-ngeLo,5984
  flock/evaluators/declarative/declarative_evaluator.py,sha256=f8ldgZZp94zC4CoGzBufKvbvtckCGBe9EHTOoAZfZK0,1695
  flock/evaluators/natural_language/natural_language_evaluator.py,sha256=6nVEeh8_uwv_h-d3FWlA0GbzDzRtdhvxCGKirHtyvOU,2012
- flock/evaluators/zep/zep_evaluator.py,sha256=hEHQdgIwGsbC4ci9RvtdA2k7f4M0yznIok4v4XltNwg,1885
+ flock/evaluators/zep/zep_evaluator.py,sha256=9NOELl7JAuUcx_FQrxY6b-_vN3MjwDyW7ZppPIGeCFc,1954
  flock/modules/callback/callback_module.py,sha256=hCCw-HNYjK4aHnUQfvw26ZP1Q_jdlKb9kDh3BHzbCQA,2916
  flock/modules/memory/memory_module.py,sha256=2grdmvw7FJWZvz0IjgASbDPCfyS1w4gWkRzOWtK7BFM,8214
  flock/modules/memory/memory_parser.py,sha256=2S7CmVEsm22gD7-MiFj4318FTg8wd_jB-RKMwXI14WM,4369
  flock/modules/memory/memory_storage.py,sha256=CNcLDMmvv0x7Z3YMKr6VveS_VCa7rKPw8l2d-XgqokA,27246
  flock/modules/output/output_module.py,sha256=_Hid4ycGEl14m7GEsVGE9wp88SYkQ3eq_x4avUQcTWI,6985
- flock/modules/performance/metrics_module.py,sha256=K5z5bizIjA4ZEUjBk5ShwTR9ZElR-Vmqa7H38dJ3z_0,16735
+ flock/modules/performance/metrics_module.py,sha256=JsLIVs-2PZ_A8GyYLNVBsNXdSFyrVid3YGd0fu4HXyM,16404
  flock/modules/zep/zep_module.py,sha256=BIJ5K-hg2bLeJmGKoDcVY1rVN7_0yYETiSaVrO-gtMI,5830
  flock/platform/docker_tools.py,sha256=fpA7-6rJBjPOUBLdQP4ny2QPgJ_042nmqRn5GtKnoYw,1445
  flock/platform/jaeger_install.py,sha256=MyOMJQx4TQSMYvdUJxfiGSo3YCtsfkbNXcAcQ9bjETA,2898
+ flock/routers/__init__.py,sha256=w9uL34Auuo26-q_EGlE8Z9iHsw6S8qutTAH_ZI7pn7M,39
+ flock/routers/agent/__init__.py,sha256=0ZOYpR8BMnR5iCGfcUiv99g7aT_g13xvm2Shl-XzybY,65
+ flock/routers/agent/agent_router.py,sha256=9s3AwcBqpyhpPXOTqyMSVtS8Bcme1RDdqSUfWIqEBfc,8139
+ flock/routers/agent/handoff_agent.py,sha256=p-0XEPXIyv1T3DGAhhXg2SYXmrwEaJ5pnuLgRSvbiZg,1903
+ flock/routers/default/__init__.py,sha256=DOatGX_aE2DWvf55a0Tv7qDK05QFD-hL3sm7g58hmLU,61
+ flock/routers/default/default_router.py,sha256=D9TCAAeNfzt3Se6QduGO2TmZ6038XlQLV6Y1u5IGI-0,2232
+ flock/routers/llm/__init__.py,sha256=OV89ebq8RPWZwCJTS2_P46Q0yKD_03rwq_fBOsETd08,63
+ flock/routers/llm/llm_router.py,sha256=3WXUK2TqZENYXSFb7o_WtpONq0SsebaZZpytCRr1daw,12217
  flock/themes/3024-day.toml,sha256=uOVHqEzSyHx0WlUk3D0lne4RBsNBAPCTy3C58yU7kEY,667
  flock/themes/3024-night.toml,sha256=qsXUwd6ZYz6J-R129_Ao2TKlvvK60svhZJJjB5c8Tfo,1667
  flock/themes/aardvark-blue.toml,sha256=5ZgsxP3pWLPN3yJ2Wd9ErCo7fy_VJpIfje4kriDKlqo,1667
@@ -393,12 +402,12 @@ flock/themes/zenburned.toml,sha256=UEmquBbcAO3Zj652XKUwCsNoC2iQSlIh-q5c6DH-7Kc,1
  flock/themes/zenwritten-dark.toml,sha256=To5l6520_3UqAGiEumpzGWsHhXxqu9ThrMildXKgIO0,1669
  flock/themes/zenwritten-light.toml,sha256=G1iEheCPfBNsMTGaVpEVpDzYBHA_T-MV27rolUYolmE,1666
  flock/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- flock/workflow/activities.py,sha256=2zcYyDoCuYs9oQbnhLjCzBUdEi7d5IEIemKJ7TV_B8w,6932
+ flock/workflow/activities.py,sha256=JDfcmn99k5UTN3QNm5hAdn_eRjWRYhWSIw1U0kMOAh4,9014
  flock/workflow/agent_activities.py,sha256=NhBZscflEf2IMfSRa_pBM_TRP7uVEF_O0ROvWZ33eDc,963
  flock/workflow/temporal_setup.py,sha256=VWBgmBgfTBjwM5ruS_dVpA5AVxx6EZ7oFPGw4j3m0l0,1091
  flock/workflow/workflow.py,sha256=I9MryXW_bqYVTHx-nl2epbTqeRy27CAWHHA7ZZA0nAk,1696
- flock_core-0.3.6.dist-info/METADATA,sha256=-TPh-D8HF2et5uEyksY7uwhl9Fgc0b_RRf0PMe1B2hg,20494
- flock_core-0.3.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- flock_core-0.3.6.dist-info/entry_points.txt,sha256=rWaS5KSpkTmWySURGFZk6PhbJ87TmvcFQDi2uzjlagQ,37
- flock_core-0.3.6.dist-info/licenses/LICENSE,sha256=iYEqWy0wjULzM9GAERaybP4LBiPeu7Z1NEliLUdJKSc,1072
- flock_core-0.3.6.dist-info/RECORD,,
+ flock_core-0.3.10.dist-info/METADATA,sha256=1i1xwFc1ogzx9IcQVrmWplHzId-MTk5B-55smWOhp8E,20487
+ flock_core-0.3.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ flock_core-0.3.10.dist-info/entry_points.txt,sha256=rWaS5KSpkTmWySURGFZk6PhbJ87TmvcFQDi2uzjlagQ,37
+ flock_core-0.3.10.dist-info/licenses/LICENSE,sha256=iYEqWy0wjULzM9GAERaybP4LBiPeu7Z1NEliLUdJKSc,1072
+ flock_core-0.3.10.dist-info/RECORD,,