fastworkflow 2.15.13__py3-none-any.whl → 2.16.0__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

@@ -148,11 +148,20 @@ class ResponseGenerator:
             for_agents=is_agent_mode,
         )

+        # Check if the NLU pipeline stage is intent detection
+        success = False
+        nlu_pipeline_stage = workflow.context.get(
+            "NLU_Pipeline_Stage",
+            fastworkflow.NLUPipelineStage.INTENT_DETECTION)
+        if nlu_pipeline_stage == fastworkflow.NLUPipelineStage.INTENT_DETECTION:
+            success = True
+
         return fastworkflow.CommandOutput(
             workflow_id=workflow.id,
             command_responses=[
                 fastworkflow.CommandResponse(
                     response=response,
+                    success=success
                 )
             ]
         )
@@ -67,7 +67,10 @@ class ResponseGenerator:
         if cnp_output.command_name == 'ErrorCorrection/you_misunderstood':
             workflow_context["NLU_Pipeline_Stage"] = NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION
             workflow_context["command"] = command
-        else:
+        elif (
+            nlu_pipeline_stage == fastworkflow.NLUPipelineStage.INTENT_DETECTION or
+            cnp_output.command_name == 'ErrorCorrection/abort'
+        ):
             workflow.end_command_processing()
         workflow.context = workflow_context

@@ -76,9 +79,13 @@ class ResponseGenerator:
                 command=command,
             )
             command_output = CommandExecutor.perform_action(workflow, startup_action)
-            command_output.command_responses[0].artifacts["command_handled"] = True
-            # Set the additional attributes
-            command_output.command_name = cnp_output.command_name
+            if (
+                nlu_pipeline_stage == fastworkflow.NLUPipelineStage.INTENT_DETECTION or
+                cnp_output.command_name == 'ErrorCorrection/abort'
+            ):
+                command_output.command_responses[0].artifacts["command_handled"] = True
+                # Set the additional attributes
+                command_output.command_name = cnp_output.command_name
             return command_output

         if nlu_pipeline_stage in {
@@ -138,7 +145,9 @@ class ResponseGenerator:
         workflow.context = workflow_context

         command_name = cnp_output.command_name
-        extractor = ParameterExtraction(workflow, app_workflow, command_name, command)
+        # Use the preserved original command (with parameters) if available
+        preserved_command = f'{command_name}: {workflow.context.get("command", command)}'
+        extractor = ParameterExtraction(workflow, app_workflow, command_name, preserved_command)
         pe_output = extractor.extract()
         if not pe_output.parameters_are_valid:
             return CommandOutput(
@@ -161,7 +170,7 @@ class ResponseGenerator:
                 CommandResponse(
                     response="",
                     artifacts={
-                        "command": command,
+                        "command": preserved_command,
                         "command_name": command_name,
                         "cmd_parameters": pe_output.cmd_parameters,
                     },
@@ -136,7 +136,7 @@ class CommandNamePrediction:
             NLUPipelineStage.INTENT_AMBIGUITY_CLARIFICATION,
             NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION
         ) and not command_name:
-            command_name = "what_can_i_do"
+            command_name = "what can i do?"

         if not command_name or command_name == "wildcard":
             fully_qualified_command_name=None
@@ -35,7 +35,7 @@ class ChatWorker(Thread):
         """Process messages for the root workflow"""
         try:
             self.chat_session._status = SessionStatus.RUNNING
-            workflow = ChatSession.get_active_workflow()
+            workflow = self.chat_session.get_active_workflow()
             logger.debug(f"Started root workflow {workflow.id}")

             # Run the workflow loop
@@ -44,39 +44,34 @@ class ChatWorker(Thread):
         finally:
             self.chat_session._status = SessionStatus.STOPPED
             # Ensure workflow is popped if thread terminates unexpectedly
-            if ChatSession.get_active_workflow() is not None:
-                ChatSession.pop_active_workflow()
+            if self.chat_session.get_active_workflow() is not None:
+                self.chat_session.pop_active_workflow()

 class ChatSession:
-    _workflow_stack_lock = Lock()
-    _workflow_stack: ClassVar[deque[fastworkflow.Workflow]] = deque() # Stack of workflow objects
-
-    @classmethod
-    def get_active_workflow(cls) -> Optional[fastworkflow.Workflow]:
+    def get_active_workflow(self) -> Optional[fastworkflow.Workflow]:
         """Get the currently active workflow (top of stack)"""
-        with cls._workflow_stack_lock:
-            return cls._workflow_stack[-1] if cls._workflow_stack else None
+        with self._workflow_stack_lock:
+            return self._workflow_stack[-1] if self._workflow_stack else None

-    @classmethod
-    def push_active_workflow(cls, workflow: fastworkflow.Workflow) -> None:
-        with cls._workflow_stack_lock:
-            cls._workflow_stack.append(workflow)
-            logger.debug(f"Workflow stack: {[w.id for w in cls._workflow_stack]}")
+    def push_active_workflow(self, workflow: fastworkflow.Workflow) -> None:
+        """Push a workflow onto this session's stack"""
+        with self._workflow_stack_lock:
+            self._workflow_stack.append(workflow)
+            logger.debug(f"Workflow stack: {[w.id for w in self._workflow_stack]}")

-    @classmethod
-    def pop_active_workflow(cls) -> Optional[fastworkflow.Workflow]:
-        with cls._workflow_stack_lock:
-            if not cls._workflow_stack:
+    def pop_active_workflow(self) -> Optional[fastworkflow.Workflow]:
+        """Pop a workflow from this session's stack"""
+        with self._workflow_stack_lock:
+            if not self._workflow_stack:
                 return None
-            workflow = cls._workflow_stack.pop()
-            logger.debug(f"Workflow stack after pop: {[w.id for w in cls._workflow_stack]}")
+            workflow = self._workflow_stack.pop()
+            logger.debug(f"Workflow stack after pop: {[w.id for w in self._workflow_stack]}")
             return workflow

-    @classmethod
-    def clear_workflow_stack(cls) -> None:
-        """Clear the entire workflow stack"""
-        with cls._workflow_stack_lock:
-            cls._workflow_stack.clear()
+    def clear_workflow_stack(self) -> None:
+        """Clear the entire workflow stack for this session"""
+        with self._workflow_stack_lock:
+            self._workflow_stack.clear()
             logger.debug("Workflow stack cleared")

     def stop_workflow(self) -> None:
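
For readers skimming the hunk above: the workflow stack and its lock move from class attributes to per-instance attributes, so concurrent ChatSession objects stop sharing one global stack. A minimal, self-contained sketch of that pattern, with simplified names that are not the library's actual class:

from collections import deque
from threading import Lock
from typing import Optional


class SessionStack:
    """Illustrative only: each instance owns its own stack and lock."""

    def __init__(self) -> None:
        self._stack: deque = deque()   # per-instance state, not a ClassVar
        self._lock = Lock()            # guards this instance's stack only

    def push(self, workflow: object) -> None:
        with self._lock:
            self._stack.append(workflow)

    def pop(self) -> Optional[object]:
        with self._lock:
            return self._stack.pop() if self._stack else None

    def top(self) -> Optional[object]:
        with self._lock:
            return self._stack[-1] if self._stack else None


a, b = SessionStack(), SessionStack()
a.push("workflow-1")
print(a.top())  # 'workflow-1'
print(b.top())  # None -- b's stack is independent of a's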
@@ -95,7 +90,7 @@ class ChatSession:
             logger.warning("Chat worker thread did not terminate within timeout")

         # Clear the workflow stack
-        ChatSession.clear_workflow_stack()
+        self.clear_workflow_stack()

         # Reset status to stopped
         self._status = SessionStatus.STOPPED
@@ -116,6 +111,10 @@ class ChatSession:
        A chat session can run multiple workflows that share the same message queues.
        Use start_workflow() to start a specific workflow within this session.
        """
+        # Create instance-level workflow stack (supports nested workflows within this session)
+        self._workflow_stack: deque[fastworkflow.Workflow] = deque()
+        self._workflow_stack_lock = Lock()
+
        # Create queues for user messages and command outputs
        self._user_message_queue = Queue()
        self._command_output_queue = Queue()
@@ -197,7 +196,7 @@ class ChatSession:

        # Check if we need to stop the current workflow
        # Stop if this is a new root workflow (no parent, keep_alive=True)
-        current_workflow = ChatSession.get_active_workflow()
+        current_workflow = self.get_active_workflow()
        if (current_workflow and
            parent_workflow_id is None and
            self._keep_alive):
@@ -244,7 +243,7 @@ class ChatSession:
        self._status = SessionStatus.STARTING

        # Push this workflow as active
-        ChatSession.push_active_workflow(workflow)
+        self.push_active_workflow(workflow)

        # Initialize workflow tool agent if in agent mode
        # This must happen after pushing the workflow to the stack
@@ -304,12 +303,12 @@ class ChatSession:

    @property
    def workflow_is_complete(self) -> bool:
-        workflow = ChatSession.get_active_workflow()
+        workflow = self.get_active_workflow()
        return workflow.is_complete if workflow else True

    @workflow_is_complete.setter
    def workflow_is_complete(self, value: bool) -> None:
-        if workflow := ChatSession.get_active_workflow():
+        if workflow := self.get_active_workflow():
            workflow.is_complete = value

    @property
@@ -317,21 +316,22 @@ class ChatSession:
        """Return the conversation history."""
        return self._conversation_history

-    def clear_conversation_history(self, trace_filename_suffix: Optional[str] = None) -> None:
+    # def clear_conversation_history(self, trace_filename_suffix: Optional[str] = None) -> None:
+    def clear_conversation_history(self) -> None:
        """
        Clear the conversation history.
        This resets the conversation history to an empty state.
        """
        self._conversation_history = dspy.History(messages=[])
        # Filename for conversation traces
-        if trace_filename_suffix:
-            self._conversation_traces_file_name: str = (
-                f"conversation_traces_{trace_filename_suffix}"
-            )
-        else:
-            self._conversation_traces_file_name: str = (
-                f"conversation_traces_{datetime.now().strftime('%m_%d_%Y:%H_%M_%S')}.jsonl"
-            )
+        # if trace_filename_suffix:
+        #     self._conversation_traces_file_name: str = (
+        #         f"conversation_traces_{trace_filename_suffix}"
+        #     )
+        # else:
+        #     self._conversation_traces_file_name: str = (
+        #         f"conversation_traces_{datetime.now().strftime('%m_%d_%Y:%H_%M_%S')}.jsonl"
+        #     )

    def _run_workflow_loop(self) -> Optional[fastworkflow.CommandOutput]:
        """
@@ -341,7 +341,7 @@ class ChatSession:
        - All outputs (success or failure) are sent to queue during processing
        """
        last_output = None
-        workflow = ChatSession.get_active_workflow()
+        workflow = self.get_active_workflow()

        try:
            # Handle startup command/action
@@ -385,7 +385,7 @@ class ChatSession:

        finally:
            self._status = SessionStatus.STOPPED
-            ChatSession.pop_active_workflow()
+            self.pop_active_workflow()
            logger.debug(f"Workflow {workflow.id if workflow else 'unknown'} completed")

        return None
@@ -401,7 +401,7 @@ class ChatSession:
    # def _process_mcp_tool_call(self, message: str) -> fastworkflow.CommandOutput:
    #     # sourcery skip: class-extract-method, extract-method
    #     """Process an MCP tool call message"""
-    #     workflow = ChatSession.get_active_workflow()
+    #     workflow = self.get_active_workflow()

    #     try:
    #         # Parse JSON message
@@ -429,7 +429,7 @@ class ChatSession:
    #             self.command_output_queue.put(command_output)

    #     # Flush on successful or failed tool call – state may have changed.
-    #     if workflow := ChatSession.get_active_workflow():
+    #     if workflow := self.get_active_workflow():
    #         workflow.flush()

    #     return command_output
@@ -500,11 +500,15 @@ class ChatSession:
        if os.path.exists("action.jsonl"):
            with open("action.jsonl", "r", encoding="utf-8") as f:
                actions = [json.loads(line) for line in f if line.strip()]
-            conversation_summary = self._extract_conversation_summary(message, actions, result_text)
+            conversation_summary, conversation_traces = self._extract_conversation_summary(message, actions, result_text)
            command_response.artifacts["conversation_summary"] = conversation_summary

            self.conversation_history.messages.append(
-                {f"conversation {len(self.conversation_history.messages) + 1}": conversation_summary}
+                {
+                    "conversation summary": conversation_summary,
+                    "conversation_traces": conversation_traces,
+                    "feedback": None  # Initialize feedback slot for this turn
+                }
            )

        command_output = fastworkflow.CommandOutput(
@@ -514,11 +518,11 @@ class ChatSession:

        # Put output in queue (following same pattern as _process_message)
        if (not command_output.success or self._keep_alive) and \
-           self.command_output_queue:
+            self.command_output_queue:
            self.command_output_queue.put(command_output)

        # Persist workflow state changes
-        if workflow := ChatSession.get_active_workflow():
+        if workflow := self.get_active_workflow():
            workflow.flush()

        return command_output
@@ -529,20 +533,77 @@ class ChatSession:
        # command_output = self.profile_invoke_command(message)

        command_output = self._CommandExecutor.invoke_command(self, message)
+
+        # Record assistant mode trace to action.jsonl (similar to agent mode in workflow_agent.py)
+        # This ensures assistant commands are captured even when interspersed with agent commands
+        response_text = ""
+        if command_output.command_responses:
+            response_text = command_output.command_responses[0].response or ""
+
+        # Convert parameters to dict if it's a Pydantic model or other complex object
+        params = command_output.command_parameters or {}
+        if hasattr(params, 'model_dump'):
+            params = params.model_dump()
+        elif hasattr(params, 'dict'):
+            params = params.dict()
+
+        record = {
+            "command": message,
+            "command_name": command_output.command_name or "",
+            "parameters": params,
+            "response": response_text
+        }
+
+        self.conversation_history.messages.append(
+            {
+                "conversation summary": "assistant_mode_command",
+                "conversation_traces": json.dumps(record),
+                "feedback": None  # Initialize feedback slot for this turn
+            }
+        )
+
        if (not command_output.success or self._keep_alive) and \
            self.command_output_queue:
            self.command_output_queue.put(command_output)

        # Persist workflow state changes lazily accumulated during message processing.
-        if workflow := ChatSession.get_active_workflow():
+        if workflow := self.get_active_workflow():
            workflow.flush()

        return command_output

    def _process_action(self, action: fastworkflow.Action) -> fastworkflow.CommandOutput:
        """Process a startup action"""
-        workflow = ChatSession.get_active_workflow()
+        workflow = self.get_active_workflow()
        command_output = self._CommandExecutor.perform_action(workflow, action)
+
+        # Record action trace to action.jsonl
+        response_text = ""
+        if command_output.command_responses:
+            response_text = command_output.command_responses[0].response or ""
+
+        # Convert parameters to dict if it's a Pydantic model or other complex object
+        params = action.parameters or {}
+        if hasattr(params, 'model_dump'):
+            params = params.model_dump()
+        elif hasattr(params, 'dict'):
+            params = params.dict()
+
+        record = {
+            "command": "process_action",
+            "command_name": action.command_name,
+            "parameters": params,
+            "response": response_text
+        }
+
+        self.conversation_history.messages.append(
+            {
+                "conversation summary": "process_action command",
+                "conversation_traces": json.dumps(record),
+                "feedback": None  # Initialize feedback slot for this turn
+            }
+        )
+
        if (not command_output.success or self._keep_alive) and \
            self.command_output_queue:
            self.command_output_queue.put(command_output)
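
Both added blocks above normalise command parameters the same way before they are passed to json.dumps(). A standalone sketch of that fallback, with an illustrative helper name and model that are not part of the package: Pydantic v2 models expose model_dump(), older v1 models expose dict(), and anything else is passed through.

from pydantic import BaseModel


def to_plain_dict(params):
    """Best-effort conversion of command parameters to a JSON-serialisable dict."""
    if hasattr(params, "model_dump"):   # Pydantic v2 model
        return params.model_dump()
    if hasattr(params, "dict"):         # Pydantic v1 model
        return params.dict()
    return params or {}                 # already a dict, or None


class ExampleParams(BaseModel):         # hypothetical parameter model
    workitem_id: str
    priority: int = 1


print(to_plain_dict(ExampleParams(workitem_id="W-1")))  # {'workitem_id': 'W-1', 'priority': 1}
print(to_plain_dict(None))                              # {}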
@@ -573,15 +634,16 @@ class ChatSession:
            user_query: str, workflow_actions: list[dict[str, str]], final_agent_response: str) -> str:
        """
        Summarizes conversation based on original user query, workflow actions and agentt response.
+        Returns the conversation summary and the log entry
        """
        # Lets log everything to a file called action_log.jsonl, if it exists
-        log_entry = {
+        conversation_traces = {
            "user_query": user_query,
            "agent_workflow_interactions": workflow_actions,
            "final_agent_response": final_agent_response
        }
-        with open(self._conversation_traces_file_name, "a", encoding="utf-8") as f:
-            f.write(json.dumps(log_entry) + "\n")
+        # with open(self._conversation_traces_file_name, "a", encoding="utf-8") as f:
+        #     f.write(json.dumps(log_entry) + "\n")

        class ConversationSummarySignature(dspy.Signature):
            """
@@ -601,7 +663,7 @@ class ChatSession:
            user_query=user_query,
            workflow_actions=workflow_actions,
            final_agent_response=final_agent_response)
-        return prediction.conversation_summary
+        return prediction.conversation_summary, json.dumps(conversation_traces)


    def profile_invoke_command(self, message: str):
@@ -56,7 +56,7 @@ class CommandExecutor(CommandExecutorInterface):
        command_name = command_output.command_responses[0].artifacts["command_name"]
        input_obj = command_output.command_responses[0].artifacts["cmd_parameters"]

-        workflow = ChatSession.get_active_workflow()
+        workflow = chat_session.get_active_workflow()
        workflow_name = workflow.folderpath.split('/')[-1]
        context = workflow.current_command_context_displayname

@@ -3,6 +3,7 @@ LLM_PARAM_EXTRACTION=mistral/mistral-small-latest
 LLM_RESPONSE_GEN=mistral/mistral-small-latest
 LLM_PLANNER=mistral/mistral-small-latest
 LLM_AGENT=mistral/mistral-small-latest
+LLM_CONVERSATION_STORE=mistral/mistral-small-latest

 SPEEDDICT_FOLDERNAME=___workflow_contexts
 SYNTHETIC_UTTERANCE_GEN_NUMOF_PERSONAS=4
@@ -3,4 +3,5 @@ LITELLM_API_KEY_SYNDATA_GEN=<API KEY for synthetic data generation model>
 LITELLM_API_KEY_PARAM_EXTRACTION=<API KEY for parameter extraction model>
 LITELLM_API_KEY_RESPONSE_GEN=<API KEY for response generation model>
 LITELLM_API_KEY_PLANNER=<API KEY for the agent's task planner model>
-LITELLM_API_KEY_AGENT=<API KEY for the agent model>
+LITELLM_API_KEY_AGENT=<API KEY for the agent model>
+LITELLM_API_KEY_CONVERSATION_STORE=<API KEY for conversation topic/summary generation model>
@@ -41,7 +41,7 @@ class FastWorkflowMCPServer:
        NOT_FOUND = fastworkflow.get_env_var('NOT_FOUND')

        # Get available commands from workflow
-        workflow = fastworkflow.ChatSession.get_active_workflow()
+        workflow = fastworkflow.chat_session.get_active_workflow()
        workflow_folderpath = workflow.folderpath
        # Use cached routing definition instead of rebuilding every time
        routing = RoutingRegistry.get_definition(workflow_folderpath)
@@ -147,7 +147,7 @@ class FastWorkflowMCPServer:
            arguments=arguments
        )

-        workflow = fastworkflow.ChatSession.get_active_workflow()
+        workflow = fastworkflow.chat_session.get_active_workflow()
        # Execute using MCP-compliant method
        return CommandExecutor.perform_mcp_tool_call(
            workflow,
@@ -203,7 +203,7 @@ class FastWorkflowMCPServer:

        Falls back to the first available path if the active context is none.
        """
-        workflow = fastworkflow.ChatSession.get_active_workflow()
+        workflow = fastworkflow.chat_session.get_active_workflow()
        return workflow.current_command_context_name

@@ -174,8 +174,9 @@ def _ask_user_tool(clarification_request: str, chat_session_obj: fastworkflow.Ch
    Note that using the wrong command name can produce missing information errors. Double-check with the missing_information_guidance_tool to verify that the correct command name is being used
    """
    command_output = fastworkflow.CommandOutput(
-        command_responses=[fastworkflow.CommandResponse(response=clarification_request)]
-    )
+        command_responses=[fastworkflow.CommandResponse(response=clarification_request)],
+        workflow_name = chat_session_obj.get_active_workflow().folderpath.split('/')[-1]
+    )
    chat_session_obj.command_output_queue.put(command_output)

    user_query = chat_session_obj.user_message_queue.get()
@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: fastworkflow
-Version: 2.15.13
+Version: 2.16.0
 Summary: A framework for rapidly building large-scale, deterministic, interactive workflows with a fault-tolerant, conversational UX
 License: Apache-2.0
 Keywords: fastworkflow,ai,workflow,llm,openai
 Author: Dhar Rawal
 Author-email: drawal@radiantlogic.com
-Requires-Python: >=3.11
+Requires-Python: >=3.11,<3.14
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
@@ -581,6 +581,7 @@ This single command will generate the `greet.py` command, `get_properties` and `
 | `LLM_RESPONSE_GEN` | LiteLLM model string for response generation | `run` | `mistral/mistral-small-latest` |
 | `LLM_PLANNER` | LiteLLM model string for the agent's task planner | `run` (agent mode) | `mistral/mistral-small-latest` |
 | `LLM_AGENT` | LiteLLM model string for the DSPy agent | `run` (agent mode) | `mistral/mistral-small-latest` |
+| `LLM_CONVERSATION_STORE` | LiteLLM model string for conversation topic/summary generation | FastAPI service | `mistral/mistral-small-latest` |
 | `NOT_FOUND` | Placeholder value for missing parameters during extraction | Always | `"NOT_FOUND"` |
 | `MISSING_INFORMATION_ERRMSG` | Error message prefix for missing parameters | Always | `"Missing required..."` |
 | `INVALID_INFORMATION_ERRMSG` | Error message prefix for invalid parameters | Always | `"Invalid information..."` |
@@ -594,6 +595,7 @@ This single command will generate the `greet.py` command, `get_properties` and `
 | `LITELLM_API_KEY_RESPONSE_GEN`| API key for the `LLM_RESPONSE_GEN` model | `run` | *required* |
 | `LITELLM_API_KEY_PLANNER`| API key for the `LLM_PLANNER` model | `run` (agent mode) | *required* |
 | `LITELLM_API_KEY_AGENT`| API key for the `LLM_AGENT` model | `run` (agent mode) | *required* |
+| `LITELLM_API_KEY_CONVERSATION_STORE`| API key for the `LLM_CONVERSATION_STORE` model | FastAPI service | *required* |

 > [!tip]
 > The example workflows are configured to use Mistral's models by default. You can get a free API key from [Mistral AI](https://mistral.ai) that works with the `mistral-small-latest` model.
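
The two new rows above follow the existing model/key pairing. As a rough sketch of how they might be resolved at runtime, assuming the fastworkflow.get_env_var helper that appears elsewhere in this diff (its use for these particular variables is an assumption, and the env files are presumed to have been loaded via fastworkflow.init first):

import fastworkflow

# Illustrative only: look up the conversation-store model string and its API key
# the same way the other LLM_* / LITELLM_API_KEY_* pairs are looked up.
conversation_store_model = fastworkflow.get_env_var("LLM_CONVERSATION_STORE")
conversation_store_api_key = fastworkflow.get_env_var("LITELLM_API_KEY_CONVERSATION_STORE")

print(conversation_store_model)  # e.g. mistral/mistral-small-latest (from fastworkflow.env)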
@@ -7,12 +7,12 @@ fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/ab
 fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py,sha256=VHfhwlqc1ceG9P_wL8Fl7dpJA2UlcSrcXhz7zZU9NpA,2517
 fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/go_up.py,sha256=K526OAf5ks95SwqVdRNVxLM_AWDfA1qXbkNYq0dANwg,1889
 fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/reset_context.py,sha256=xvInu6uDw0YRUHVXNyTZphSr75f8QiQgFwDtv7SlE9o,1346
-fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py,sha256=Fw8tsk3wyCujf8nBfUgPDxnTP9c2IE513FzqAWGm8pU,6216
+fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py,sha256=9I9Y1VRvmiqft7vkz_8gGgCYTesMzBvP58wef3sjLZM,6593
 fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_is_current_context.py,sha256=S5RQLr62Q2MnKU85nw4IW_ueAK_FXvhcY9gXajFxujg,1464
 fastworkflow/_workflows/command_metadata_extraction/_commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py,sha256=TphAB_rwR7giCx00hovGZ2p9Qh-q1VAOZQNJF0DjRJY,7287
+fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py,sha256=Sqpc2hwM-DgmsqiHu3OoOuqo3XnHLkFlmyYCJA8nj_8,7843
 fastworkflow/_workflows/command_metadata_extraction/command_context_model.json,sha256=zGWBweQSmFf7WsfR_F2DE7AJ8S8-q7F9ZbvyccysJJI,117
-fastworkflow/_workflows/command_metadata_extraction/intent_detection.py,sha256=A0vzyHGMtBEWCzfTU_I9tnRx2Byu-ElJgfMugRlXkpA,14542
+fastworkflow/_workflows/command_metadata_extraction/intent_detection.py,sha256=Ci0Cut_rH8wpUlLEsTMK-OhT3AO2nKgJmXQYIsoTzJw,14543
 fastworkflow/_workflows/command_metadata_extraction/parameter_extraction.py,sha256=MgNkPgA05E1-LSw9pNKDlXdsAphulYNhuDeTTqk5dBY,13686
 fastworkflow/build/__main__.py,sha256=NtedkZfM56qoEJ5vQECSURbE8AMTfwHN3tAZyZoWabk,15905
 fastworkflow/build/ast_class_extractor.py,sha256=F9OG4stkp7w3kadKqxMm8h3ZDSp_zg6mwcrKMl_XqdI,13527
@@ -35,11 +35,11 @@ fastworkflow/build/navigator_stub_generator.py,sha256=_DSvHC6r1xWQiFHtUgPhI51nQf
 fastworkflow/build/pydantic_model_generator.py,sha256=oNyoANyUWBpHG-fE3tGL911RNvDzQXjxAm0ssvuXUH4,1854
 fastworkflow/build/utterance_generator.py,sha256=UrtkF0wyAZ1hiFitHX0g8w7Wh-D0leLCrP1aUACSfHo,299
 fastworkflow/cache_matching.py,sha256=OoB--1tO6-O4BKCuCrUbB0CkUr76J62K4VAf6MShi-w,7984
-fastworkflow/chat_session.py,sha256=Nm0RZyaCxiFODlyh4s2fWY8uBQVwYbUVl7ThgJvdWwA,29044
+fastworkflow/chat_session.py,sha256=p8n2rQx3mhZ_DoRb14sRbp0PGKidMS6H2wwC1TNPxnM,31592
 fastworkflow/cli.py,sha256=li9OFT05sxqz4BZJc9byKAeTmomjLfsWMVuy0OiRGSs,18953
 fastworkflow/command_context_model.py,sha256=nWxLP3TR7WJr3yWCedqcdFOxo_kwae_mS3VRN2cOmK8,13437
 fastworkflow/command_directory.py,sha256=aJ6UQCwevfF11KbcQB2Qz6mQ7Kj91pZtvHmQY6JFnao,29030
-fastworkflow/command_executor.py,sha256=UGM6JpOoZOYR3cbLOOLN3oziwNvUH-Cm-d1XFRzbW7k,8456
+fastworkflow/command_executor.py,sha256=WTSrukv6UDQfWUDSNleIQ1TxwDnAQIKIimh4sQVwnig,8457
 fastworkflow/command_interfaces.py,sha256=PWIKlcp0G8nmYl0vkrg1o6QzJL0pxXkfrn1joqTa0eU,460
 fastworkflow/command_metadata_api.py,sha256=KtidE3PM9HYfY-nmEXZ8Y4nnaw2qn23p_gvwFVT3F8Y,39770
 fastworkflow/command_routing.py,sha256=R7194pcY0d2VHzmCu9ALacm1UvNuIRIvTn8mLp-EZIM,17219
@@ -50,8 +50,8 @@ fastworkflow/examples/extended_workflow_example/_commands/generate_report.py,sha
 fastworkflow/examples/extended_workflow_example/_commands/startup.py,sha256=V5Q29148SvXw6i3i0pKTuNWsv2xnkUMsHHuzt1ndxro,1028
 fastworkflow/examples/extended_workflow_example/simple_workflow_template.json,sha256=A-dAl5iD9ehdMGGn05O2Kjwq6ZetqQjAGzlM1st0K9U,1237
 fastworkflow/examples/extended_workflow_example/workflow_inheritance_model.json,sha256=TBk272pqfyRKzm4T-I6_nGfbcdmEzjwon7kFPWtgyhw,81
-fastworkflow/examples/fastworkflow.env,sha256=FNx41woSsG7ljcwI6H6Y3RCMw6mXiImcq8QMnqeqSao,623
-fastworkflow/examples/fastworkflow.passwords.env,sha256=CJemqCpj8HeSznTFUfCSc6I5r8noXuRWKfkrHaslD7E,424
+fastworkflow/examples/fastworkflow.env,sha256=mLI1fWqkzjcp9uzfHw81mlOx4JFb8Ch_TBy8dX1Dsok,675
+fastworkflow/examples/fastworkflow.passwords.env,sha256=9bI62EokFWT_YPcO0UAvO1ZTG2wM76Jbe5cKE7_KTRg,517
 fastworkflow/examples/hello_world/_commands/README.md,sha256=pYOTGqVx41ZIuNc6hPTEJzNcMQ2Vwx3PN74ifSlayvU,1297
 fastworkflow/examples/hello_world/_commands/add_two_numbers.py,sha256=0lFGK1llT6u6fByvzCDPdegjY6gWcerM2cvxVSo7lIw,2232
 fastworkflow/examples/hello_world/_commands/context_inheritance_model.json,sha256=RBNvo1WzZ4oRRq0W9-hknpT7T8If536DEMBg9hyq_4o,2
@@ -140,14 +140,11 @@ fastworkflow/examples/simple_workflow_template/application/__init__.py,sha256=47
 fastworkflow/examples/simple_workflow_template/application/workitem.py,sha256=Sm-QoX-EZvynkNf7uO3dViZF2VZqUlr6PAZZ7yjQEfk,40197
 fastworkflow/examples/simple_workflow_template/simple_workflow_template.json,sha256=A-dAl5iD9ehdMGGn05O2Kjwq6ZetqQjAGzlM1st0K9U,1237
 fastworkflow/examples/simple_workflow_template/startup_action.json,sha256=gj0-B4CqTYCs8OwHKhTu95H4uZbLsDf1th06IFfNXVs,75
-fastworkflow/mcp_server.py,sha256=f6vqHiG-cuMpeoeRY-mvsFxApmQ28cAJFfMtqoJYy5k,8864
+fastworkflow/mcp_server.py,sha256=NxbLSKf2MA4lAHVcm6ZfiVuOjVO6IeV5Iw17wImFbxQ,8867
 fastworkflow/model_pipeline_training.py,sha256=P_9wrYSfJVSYCTu8VEPkgXJ16eH58LLCK4rCRbRFAVg,46740
 fastworkflow/refine/__main__.py,sha256=bDLpPNMcdp8U4EFnMdjxx1sPDQCZuEJoBURr2KebTng,3398
 fastworkflow/run/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fastworkflow/run/__main__.py,sha256=e3gTjcQwYuEz12LZIdL5w48aMRJ-42MdlyapINj7XWk,12430
-fastworkflow/run_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fastworkflow/run_agent/__main__.py,sha256=japXWziVDHdLl7Hk-jzwcYkImVY4j4k9SodMhiRK3ow,11986
-fastworkflow/run_agent/agent_module.py,sha256=XbOfX2K2CwmIRT45YtoROCN4en9Ud8gMQh2kGEhqw_A,8012
 fastworkflow/train/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fastworkflow/train/__main__.py,sha256=AeGja42d0QhslQkxvDVigIluxxL7DYLdQPXYFOKQ7QA,8536
 fastworkflow/train/generate_synthetic.py,sha256=sTDk-E5ewkS4o-0LJeofiEv4uXGpqdGcFRYKY_Yf36Y,5322
@@ -169,10 +166,10 @@ fastworkflow/utils/react.py,sha256=HubwmM4H9UzLaLaeIkJseKCNMjyrOXvMZz-8sw4ycCE,1
 fastworkflow/utils/signatures.py,sha256=QOLX3j-AJkRWIkDhogbhxQo8MIt668xIKwd4SWiS2LY,31734
 fastworkflow/utils/startup_progress.py,sha256=9icSdnpFAxzIq0sUliGpNaH0Efvrt5lDtGfURV5BD98,3539
 fastworkflow/workflow.py,sha256=37gn7e3ct-gdGw43zS6Ab_ADoJJBO4eJW2PywfUpjEg,18825
-fastworkflow/workflow_agent.py,sha256=pwS-tZUsLFlOCmVxtmWzZ1b7iFmtlHBcCy3N0Hv06TA,16254
+fastworkflow/workflow_agent.py,sha256=-RXoHXH-vrEh6AWC6iYAwwR9CvaRynYuu-KrzOPCJbg,16348
 fastworkflow/workflow_inheritance_model.py,sha256=Pp-qSrQISgPfPjJVUfW84pc7HLmL2evuq0UVIYR51K0,7974
-fastworkflow-2.15.13.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-fastworkflow-2.15.13.dist-info/METADATA,sha256=Pb-rVYTCwlvQMEMxgJ-orQPFGO30UE_Vk6u6OTJhLYk,30066
-fastworkflow-2.15.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-fastworkflow-2.15.13.dist-info/entry_points.txt,sha256=m8HqoPzCyaZLAx-V5X8MJgw3Lx3GiPDlxNEZ7K-Gb-U,54
-fastworkflow-2.15.13.dist-info/RECORD,,
+fastworkflow-2.16.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+fastworkflow-2.16.0.dist-info/METADATA,sha256=Mno8E51bhYTFQ4P7YRzmd71rjOBm_niCOKnFsTGmJNs,30336
+fastworkflow-2.16.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+fastworkflow-2.16.0.dist-info/entry_points.txt,sha256=m8HqoPzCyaZLAx-V5X8MJgw3Lx3GiPDlxNEZ7K-Gb-U,54
+fastworkflow-2.16.0.dist-info/RECORD,,
@@ -1,294 +0,0 @@
-import argparse
-import contextlib
-import json
-import os
-import queue
-import time
-import threading
-from typing import Optional
-from dotenv import dotenv_values
-from queue import Empty
-
-import dspy
-
-
-# Instantiate a global console for consistent styling
-console = None
-
-def check_workflow_trained(workflow_path: str) -> bool:
-    """
-    Check if a workflow has been trained by looking for the tiny_ambiguous_threshold.json file
-    in the ___command_info/global folder.
-
-    Args:
-        workflow_path: Path to the workflow folder
-
-    Returns:
-        bool: True if the workflow appears to be trained, False otherwise
-    """
-    # Path to the global command info directory
-    global_cmd_info_path = os.path.join(workflow_path, "___command_info", "global")
-
-    # Path to the tiny_ambiguous_threshold.json file
-    threshold_file_path = os.path.join(global_cmd_info_path, "tiny_ambiguous_threshold.json")
-
-    # Check if the file exists
-    return os.path.exists(threshold_file_path)
-
-def main():
-    # Third-party CLI prettification libraries
-    from rich.console import Console
-    from rich.panel import Panel
-    from rich.table import Table
-    from rich.text import Text
-    from rich.console import Group
-    from prompt_toolkit import PromptSession
-    from prompt_toolkit.patch_stdout import patch_stdout
-
-
-    import fastworkflow
-    from fastworkflow.utils import dspy_utils
-    from fastworkflow.command_executor import CommandExecutor
-    from .agent_module import initialize_dspy_agent
-
-    # Progress bar helper
-    from fastworkflow.utils.startup_progress import StartupProgress
-
-    # Instantiate a global console for consistent styling
-    global console
-    console = Console()
-    prompt_session = PromptSession("User > ")
-
-    def _build_artifact_table(artifacts: dict[str, str]) -> Table:
-        """Return a rich.Table representation for artifact key-value pairs."""
-        table = Table(show_header=True, header_style="bold cyan", box=None)
-        table.add_column("Name", style="cyan", overflow="fold")
-        table.add_column("Value", style="white", overflow="fold")
-        for name, value in artifacts.items():
-            table.add_row(str(name), str(value))
-        return table
-
-    def print_command_output(command_output):
-        """Pretty-print workflow output using rich panels and tables."""
-        for command_response in command_output.command_responses:
-            workflow_id = "UnknownSession"
-            with contextlib.suppress(Exception):
-                workflow = fastworkflow.ChatSession.get_active_workflow()
-                workflow_id = workflow.id if workflow else "UnknownSession"
-
-            # Collect body elements for the panel content
-            body_renderables = []
-
-            if command_response.response:
-                body_renderables.append(Text(command_response.response, style="green"))
-
-            if command_response.artifacts:
-                body_renderables.extend(
-                    (
-                        Text("Artifacts", style="bold cyan"),
-                        _build_artifact_table(command_response.artifacts),
-                    )
-                )
-            if command_response.next_actions:
-                actions_table = Table(show_header=False, box=None)
-                for act in command_response.next_actions:
-                    actions_table.add_row(Text(str(act), style="blue"))
-                body_renderables.extend(
-                    (Text("Next Actions", style="bold blue"), actions_table)
-                )
-            if command_response.recommendations:
-                rec_table = Table(show_header=False, box=None)
-                for rec in command_response.recommendations:
-                    rec_table.add_row(Text(str(rec), style="magenta"))
-                body_renderables.extend(
-                    (Text("Recommendations", style="bold magenta"), rec_table)
-                )
-
-            panel_title = f"[bold yellow]Workflow {workflow_id}[/bold yellow]"
-            # Group all renderables together
-            group = Group(*body_renderables)
-            # Use the group in the panel
-            panel = Panel.fit(group, title=panel_title, border_style="green")
-            console.print(panel)
-
-    parser = argparse.ArgumentParser(description="AI Assistant for workflow processing")
-    parser.add_argument("workflow_path", help="Path to the workflow folder")
-    parser.add_argument("env_file_path", help="Path to the environment file")
-    parser.add_argument("passwords_file_path", help="Path to the passwords file")
-    parser.add_argument(
-        "--context_file_path", help="Optional context file path", default=""
-    )
-    parser.add_argument(
-        "--startup_command", help="Optional startup command", default=""
-    )
-    parser.add_argument(
-        "--startup_action", help="Optional startup action", default=""
-    )
-    parser.add_argument(
-        "--keep_alive", help="Optional keep_alive", default=True
-    )
-    parser.add_argument(
-        "--project_folderpath", help="Optional path to project folder containing application code", default=None
-    )
-
-    args = parser.parse_args()
-
-    if not os.path.isdir(args.workflow_path):
-        console.print(f"[bold red]Error:[/bold red] The specified workflow path '{args.workflow_path}' is not a valid directory.")
-        exit(1)
-
-    console.print(Panel(f"Running fastWorkflow: [bold]{args.workflow_path}[/bold]", title="[bold green]fastworkflow[/bold green]", border_style="green"))
-    console.print("[bold green]Tip:[/bold green] Type 'exit' to quit the application.")
-
-    # ------------------------------------------------------------------
-    # Startup progress bar ------------------------------------------------
-    # ------------------------------------------------------------------
-    command_info_root = os.path.join(args.workflow_path, "___command_info")
-    subdir_count = 0
-    if os.path.isdir(command_info_root):
-        subdir_count = len([d for d in os.listdir(command_info_root) if os.path.isdir(os.path.join(command_info_root, d))])
-
-    StartupProgress.begin(total=3)
-
-    StartupProgress.advance("Imported fastworkflow modules")
-
-    env_vars = {
-        **dotenv_values(args.env_file_path),
-        **dotenv_values(args.passwords_file_path)
-    }
-    StartupProgress.advance("fastworkflow.init complete")
-
-    fastworkflow.init(env_vars=env_vars)
-
-    LLM_AGENT = fastworkflow.get_env_var("LLM_AGENT")
-    if not LLM_AGENT:
-        console.print("[bold red]Error:[/bold red] DSPy Language Model not provided. Set LLM_AGENT environment variable.")
-        exit(1)
-
-    # Check if the workflow has been trained
-    if not check_workflow_trained(args.workflow_path):
-        # Extract workflow name for the error message
-        workflow_name = os.path.basename(args.workflow_path)
-        console.print(Panel(
-            f"To train this workflow, run:\n"
-            f"[bold white]fastworkflow train {args.workflow_path}[/bold white]",
-            title="[bold red]Workflow '{workflow_name}' has not been trained[/bold red]",
-            border_style="red"
-        ))
-        exit(1)
-
-    # this could be None
-    lm = dspy_utils.get_lm("LLM_AGENT", "LITELLM_API_KEY_AGENT")
-
-    startup_action: Optional[fastworkflow.Action] = None
-    if args.startup_action:
-        with open(args.startup_action, 'r') as file:
-            startup_action_dict = json.load(file)
-        startup_action = fastworkflow.Action(**startup_action_dict)
-
-    context_dict = None
-    if args.context_file_path:
-        with open(args.context_file_path, 'r') as file:
-            context_dict = json.load(file)
-
-    # Create the chat session in agent mode
-    fastworkflow.chat_session = fastworkflow.ChatSession(run_as_agent=True)
-
-    # Start the workflow within the chat session
-    fastworkflow.chat_session.start_workflow(
-        args.workflow_path,
-        workflow_context=context_dict,
-        startup_command=args.startup_command,
-        startup_action=startup_action,
-        keep_alive=args.keep_alive,
-        project_folderpath=args.project_folderpath
-    )
-
-    StartupProgress.advance("ChatSession ready")
-    StartupProgress.end()
-
-    try:
-        react_agent = initialize_dspy_agent(fastworkflow.chat_session)
-    except (EnvironmentError, RuntimeError) as e:
-        console.print(f"[bold red]Failed to initialize DSPy agent:[/bold red] {e}")
-        exit(1)
-
-    with contextlib.suppress(queue.Empty):
-        if command_output := fastworkflow.chat_session.command_output_queue.get(
-            timeout=0.1
-        ):
-            console.print(Panel("Startup Command Output", border_style="dim"))
-            print_command_output(command_output)
-            console.print(Panel("End Startup Command Output", border_style="dim"))
-
-    while True:
-        if not args.keep_alive and fastworkflow.chat_session.workflow_is_complete:
-            console.print("[blue]Workflow complete and keep_alive is false. Exiting...[/blue]")
-            break
-
-        with patch_stdout():
-            user_input_str = prompt_session.prompt()
-        if user_input_str.lower() == "exit":
-            console.print("[blue]User requested exit. Exiting...[/blue]")
-            break
-
-        try:
-            # Use a thread-safe way to store the agent response
-            agent_response_container = {"response": None, "error": None}
-
-            # Function to run agent processing in a separate thread
-            def process_agent_query():
-                try:
-                    with dspy.context(lm=lm):
-                        agent_response_container["response"] = react_agent(user_query=user_input_str)
-                except Exception as e:
-                    agent_response_container["error"] = e
-
-            # Start processing thread
-            agent_thread = threading.Thread(target=process_agent_query)
-            agent_thread.daemon = True
-            agent_thread.start()
-
-            # Queues used by the agent to request user clarification
-            from fastworkflow.run_agent.agent_module import (
-                clarification_request_queue,
-                clarification_response_queue,
-            )
-
-            with console.status("[bold cyan]Processing command...[/bold cyan]", spinner="dots") as status:
-                counter = 0
-                while agent_thread.is_alive():
-                    # Handle any number of clarification requests
-                    with contextlib.suppress(Empty):
-                        while True:
-                            prompt_text = clarification_request_queue.get_nowait()
-                            # Stop spinner so prompt renders cleanly
-                            status.stop()
-                            console.print(f"[bold yellow]Agent -> User> {prompt_text}[/bold yellow]")
-                            user_answer = prompt_session.prompt("User > ")
-                            clarification_response_queue.put(user_answer)
-                    time.sleep(0.3)
-                    counter += 1
-                    if counter % 3 == 0:
-                        status.update(
-                            f"[bold cyan]Processing command... ({counter // 3}s)[/bold cyan]"
-                        )
-
-            # Agent finished work
-            agent_thread.join()
-
-            # Check for errors or display response
-            if agent_response_container["error"]:
-                raise agent_response_container["error"]
-
-            if agent_response_container["response"]:
-                console.print(Panel(agent_response_container["response"].final_answer,
-                    title="[bold green]Agent Response[/bold green]",
-                    border_style="green"))
-
-        except Exception as e:  # pylint: disable=broad-except
-            console.print(f"[bold red]Agent Error:[/bold red] An error occurred during agent processing: {e}")
-
-if __name__ == "__main__":
-    print("Loading fastWorkflow...\n")
-    main()
@@ -1,194 +0,0 @@
1
- # fastworkflow/run_agent/agent_module.py
2
- """
3
- High-level planning agent module for fastWorkflow.
4
- Uses the integrated workflow tool agent from ChatSession.
5
- """
6
- import functools
7
- import os
8
- from queue import Queue
9
- from typing import Any, Optional, List, Dict
10
-
11
- import dspy
12
- from colorama import Fore, Style
13
-
14
- import fastworkflow
15
- from fastworkflow.mcp_server import FastWorkflowMCPServer
16
-
17
- # Queues used to synchronise clarification requests between the agent thread
18
- # (where `_ask_user_tool` is executed) and the main thread that owns the TTY.
19
- clarification_request_queue: Queue[str] = Queue()
20
- clarification_response_queue: Queue[str] = Queue()
21
-
22
-
23
- # DSPy Signature for the High-Level Planning Agent
24
- class PlanningAgentSignature(dspy.Signature):
25
- """
26
- Create a minimal step based todo list based only on the commands in the user query
27
- Then, execute the plan for building the final answer using the WorkflowAssistant tool.
28
- Double-check that all the tasks in the todo list have been completed before returning the final answer.
29
- """
30
- user_query = dspy.InputField(desc="The user's full input or question.")
31
- final_answer = dspy.OutputField(desc="The agent's comprehensive response to the user after interacting with the workflow.")
32
-
33
-
34
- def _format_workflow_output_for_agent(command_output: Any) -> str:
35
- """
36
- Formats the structured CommandOutput from the workflow into a single string for the agent.
37
- Handles both regular command responses and MCP tool results.
38
- """
39
- # Check if this is an MCP result converted to CommandOutput
40
- if hasattr(command_output, '_mcp_source'):
41
- return _format_mcp_result_for_agent(command_output._mcp_source)
42
-
43
- # Otherwise use existing logic for regular command responses
44
- output_parts = []
45
- if not hasattr(command_output, 'command_responses') or not command_output.command_responses:
46
- return "Workflow produced no command responses or the response structure is unexpected."
47
-
48
- for command_response in command_output.command_responses:
49
- if response_text := getattr(command_response, 'response', None):
50
- output_parts.append(f"{response_text}")
51
-
52
- artifacts = getattr(command_response, 'artifacts', {})
53
- output_parts.extend(
54
- f"Artifact: {artifact_name}={artifact_value}"
55
- for artifact_name, artifact_value in artifacts.items()
56
- )
57
- next_actions = getattr(command_response, 'next_actions', [])
58
- output_parts.extend(f"Next Action: {action}" for action in next_actions)
59
-
60
- recommendations = getattr(command_response, 'recommendations', [])
61
- output_parts.extend(
62
- f"Recommendation: {recommendation}"
63
- for recommendation in recommendations
64
- )
65
-
66
- if not output_parts:
67
- return "Workflow executed but produced no specific output, actions, or recommendations."
68
- return "\n".join(output_parts)
69
-
70
-
71
- def _format_mcp_result_for_agent(mcp_result) -> str:
72
- """Format MCPToolResult specifically for agent consumption"""
73
- if mcp_result.isError:
74
- return f"Error: {mcp_result.content[0].text}"
75
- else:
76
- return mcp_result.content[0].text
77
-
78
-
79
- def _build_assistant_tool_documentation(available_tools: List[Dict]) -> str:
80
- """Build simplified tool documentation for the main agent's WorkflowAssistant tool."""
81
-
82
- # Guidance for the MAIN AGENT on how to call WorkflowAssistant
83
- main_agent_guidance = """
84
- Use the WorkflowAssistant to interact with a suite of underlying tools to assist the user.
85
- It takes a natural language query as input and delegates to an internal agent
86
- that will try to understand the request, select the most appropriate tool, and execute it.
87
- In normal mode: Example tool_args: {"tool_request": "<A single tool request with tool description and all required input parameter names and values>"}
88
- If workflow assistant reports back with parameter extraction errors: Example tool_args: {"tool_request": "<A strictly comma delimited list of just the requested parameter values>"}
89
-
90
- Available tools that WorkflowAssistant can access:
91
- """
92
-
93
- tool_docs = []
94
- for tool_def in available_tools:
95
- tool_name = tool_def['name']
96
- tool_desc = tool_def['description'].split("\n")[0]
97
-
98
- # Main agent does not need the detailed input schema, only name, description and parameters.
99
- tool_docs.append(
100
- f"\nTool Name: \"{tool_name}\""
101
- f"\nDescription: {tool_desc}"
102
- f"\nRequired Parameters: {tool_def['inputSchema']['required']}"
103
- )
104
-
105
- return main_agent_guidance + "\n".join(tool_docs)
106
-
107
-
108
- def _execute_workflow_command_tool_with_delegation(tool_request: str,
109
- *,
110
- chat_session: fastworkflow.ChatSession,
111
- **kwargs) -> str:
112
- """
113
- Delegate tool requests to the workflow via queues.
114
- This is used by the high-level planning agent in run_agent.
115
- """
116
- print(f"{Fore.CYAN}{Style.BRIGHT}Agent -> Workflow>{Style.RESET_ALL}{Fore.CYAN} {tool_request}{Style.RESET_ALL}")
117
-
118
- if extras := " ".join(f"{k}={v}" for k, v in kwargs.items()):
119
- # Send the request through the user message queue
120
- chat_session.user_message_queue.put(f"{tool_request} {extras}")
121
- else:
122
- # Send the request through the user message queue
123
- chat_session.user_message_queue.put(tool_request)
124
-
125
- # Get the response from the command output queue
126
- command_output = chat_session.command_output_queue.get()
127
-
128
- # Format the output for the agent
129
- result = _format_workflow_output_for_agent(command_output)
130
-
131
- print(f"{Fore.BLUE}{Style.BRIGHT}Workflow -> Agent>{Style.RESET_ALL}{Fore.BLUE} {result.replace(os.linesep, ' ')}{Style.RESET_ALL}")
132
- return result
133
-
134
-
135
- def _ask_user_tool(prompt: str) -> str:
136
- """Request extra information from the human user.
137
-
138
- Because the agent runs in a background thread, we cannot read from
139
- `stdin` here. Instead we send the prompt to the main thread via a
140
- queue and block until the main thread puts the user's answer in the
141
- response queue.
142
- """
143
- # Send the prompt to the main thread
144
- clarification_request_queue.put(prompt)
145
-
146
- return clarification_response_queue.get()
147
-
148
-
149
- def initialize_dspy_agent(chat_session: fastworkflow.ChatSession, max_iters: int = 25):
150
- """
151
- Configures and returns a high-level DSPy ReAct planning agent.
152
- The workflow tool agent is already integrated in the ChatSession.
153
-
154
- Args:
155
- chat_session: ChatSession instance (should be in agent mode)
156
- max_iters: Maximum iterations for the ReAct agent
157
-
158
- Raises:
159
- EnvironmentError: If LLM_AGENT is not set.
160
- """
161
- # Get available tools for documentation
162
- mcp_server = FastWorkflowMCPServer(chat_session)
163
- available_tools = mcp_server.list_tools()
164
-
165
- # WorkflowAssistant Tool - delegates to the integrated workflow tool agent
166
- _workflow_assistant_partial_func = functools.partial(
167
- _execute_workflow_command_tool_with_delegation,
168
- chat_session=chat_session
169
- )
170
- # Set the docstring for the partial object
171
- _workflow_assistant_partial_func.__doc__ = _build_assistant_tool_documentation(available_tools)
172
-
173
- workflow_assistant_instance = dspy.Tool(
174
- name="WorkflowAssistant",
175
- func=_workflow_assistant_partial_func
176
- )
177
-
178
- # AskUser Tool
179
- _ask_user_tool.__doc__ = (
180
- "Use this tool to get information from the user. "
181
- "Use it as the last resort if information is not available via any of the other tools. "
182
- "Args: prompt (str): A clear specific request with helpful context based on the information already gathered."
183
- )
184
-
185
- ask_user_instance = dspy.Tool(
186
- name="AskUser",
187
- func=_ask_user_tool
188
- )
189
-
190
- return dspy.ReAct(
191
- PlanningAgentSignature,
192
- tools=[workflow_assistant_instance, ask_user_instance],
193
- max_iters=max_iters,
194
- )