fastworkflow 2.15.5__py3-none-any.whl → 2.17.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py +1 -1
- fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py +16 -2
- fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py +27 -570
- fastworkflow/_workflows/command_metadata_extraction/intent_detection.py +360 -0
- fastworkflow/_workflows/command_metadata_extraction/parameter_extraction.py +411 -0
- fastworkflow/chat_session.py +379 -206
- fastworkflow/cli.py +80 -165
- fastworkflow/command_context_model.py +73 -7
- fastworkflow/command_executor.py +14 -5
- fastworkflow/command_metadata_api.py +106 -6
- fastworkflow/examples/fastworkflow.env +2 -1
- fastworkflow/examples/fastworkflow.passwords.env +2 -1
- fastworkflow/examples/retail_workflow/_commands/exchange_delivered_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/find_user_id_by_email.py +6 -5
- fastworkflow/examples/retail_workflow/_commands/modify_pending_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/return_delivered_order_items.py +13 -2
- fastworkflow/examples/retail_workflow/_commands/transfer_to_human_agents.py +1 -1
- fastworkflow/intent_clarification_agent.py +131 -0
- fastworkflow/mcp_server.py +3 -3
- fastworkflow/run/__main__.py +33 -40
- fastworkflow/run_fastapi_mcp/README.md +373 -0
- fastworkflow/run_fastapi_mcp/__main__.py +1300 -0
- fastworkflow/run_fastapi_mcp/conversation_store.py +391 -0
- fastworkflow/run_fastapi_mcp/jwt_manager.py +341 -0
- fastworkflow/run_fastapi_mcp/mcp_specific.py +103 -0
- fastworkflow/run_fastapi_mcp/redoc_2_standalone_html.py +40 -0
- fastworkflow/run_fastapi_mcp/utils.py +517 -0
- fastworkflow/train/__main__.py +1 -1
- fastworkflow/utils/chat_adapter.py +99 -0
- fastworkflow/utils/python_utils.py +4 -4
- fastworkflow/utils/react.py +258 -0
- fastworkflow/utils/signatures.py +338 -139
- fastworkflow/workflow.py +1 -5
- fastworkflow/workflow_agent.py +185 -133
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/METADATA +16 -18
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/RECORD +40 -30
- fastworkflow/run_agent/__main__.py +0 -294
- fastworkflow/run_agent/agent_module.py +0 -194
- /fastworkflow/{run_agent → run_fastapi_mcp}/__init__.py +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/LICENSE +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/WHEEL +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/entry_points.txt +0 -0
fastworkflow/chat_session.py
CHANGED
|
@@ -8,6 +8,8 @@ import contextlib
|
|
|
8
8
|
import uuid
|
|
9
9
|
from pathlib import Path
|
|
10
10
|
import os
|
|
11
|
+
import time
|
|
12
|
+
from datetime import datetime
|
|
11
13
|
|
|
12
14
|
import dspy
|
|
13
15
|
|
|
@@ -16,7 +18,6 @@ from fastworkflow.utils.logging import logger
|
|
|
16
18
|
from fastworkflow.utils import dspy_utils
|
|
17
19
|
from fastworkflow.model_pipeline_training import CommandRouter
|
|
18
20
|
from fastworkflow.utils.startup_progress import StartupProgress
|
|
19
|
-
from fastworkflow.command_metadata_api import CommandMetadataAPI
|
|
20
21
|
|
|
21
22
|
|
|
22
23
|
class SessionStatus(Enum):
|
|
@@ -35,7 +36,7 @@ class ChatWorker(Thread):
|
|
|
35
36
|
"""Process messages for the root workflow"""
|
|
36
37
|
try:
|
|
37
38
|
self.chat_session._status = SessionStatus.RUNNING
|
|
38
|
-
workflow =
|
|
39
|
+
workflow = self.chat_session.get_active_workflow()
|
|
39
40
|
logger.debug(f"Started root workflow {workflow.id}")
|
|
40
41
|
|
|
41
42
|
# Run the workflow loop
|
|
@@ -44,39 +45,34 @@ class ChatWorker(Thread):
|
|
|
44
45
|
finally:
|
|
45
46
|
self.chat_session._status = SessionStatus.STOPPED
|
|
46
47
|
# Ensure workflow is popped if thread terminates unexpectedly
|
|
47
|
-
if
|
|
48
|
-
|
|
48
|
+
if self.chat_session.get_active_workflow() is not None:
|
|
49
|
+
self.chat_session.pop_active_workflow()
|
|
49
50
|
|
|
50
51
|
class ChatSession:
|
|
51
|
-
|
|
52
|
-
_workflow_stack: ClassVar[deque[fastworkflow.Workflow]] = deque() # Stack of workflow objects
|
|
53
|
-
|
|
54
|
-
@classmethod
|
|
55
|
-
def get_active_workflow(cls) -> Optional[fastworkflow.Workflow]:
|
|
52
|
+
def get_active_workflow(self) -> Optional[fastworkflow.Workflow]:
|
|
56
53
|
"""Get the currently active workflow (top of stack)"""
|
|
57
|
-
with
|
|
58
|
-
return
|
|
54
|
+
with self._workflow_stack_lock:
|
|
55
|
+
return self._workflow_stack[-1] if self._workflow_stack else None
|
|
59
56
|
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
with
|
|
63
|
-
|
|
64
|
-
logger.debug(f"Workflow stack: {[w.id for w in
|
|
57
|
+
def push_active_workflow(self, workflow: fastworkflow.Workflow) -> None:
|
|
58
|
+
"""Push a workflow onto this session's stack"""
|
|
59
|
+
with self._workflow_stack_lock:
|
|
60
|
+
self._workflow_stack.append(workflow)
|
|
61
|
+
logger.debug(f"Workflow stack: {[w.id for w in self._workflow_stack]}")
|
|
65
62
|
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
with
|
|
69
|
-
if not
|
|
63
|
+
def pop_active_workflow(self) -> Optional[fastworkflow.Workflow]:
|
|
64
|
+
"""Pop a workflow from this session's stack"""
|
|
65
|
+
with self._workflow_stack_lock:
|
|
66
|
+
if not self._workflow_stack:
|
|
70
67
|
return None
|
|
71
|
-
workflow =
|
|
72
|
-
logger.debug(f"Workflow stack after pop: {[w.id for w in
|
|
68
|
+
workflow = self._workflow_stack.pop()
|
|
69
|
+
logger.debug(f"Workflow stack after pop: {[w.id for w in self._workflow_stack]}")
|
|
73
70
|
return workflow
|
|
74
71
|
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
cls._workflow_stack.clear()
|
|
72
|
+
def clear_workflow_stack(self) -> None:
|
|
73
|
+
"""Clear the entire workflow stack for this session"""
|
|
74
|
+
with self._workflow_stack_lock:
|
|
75
|
+
self._workflow_stack.clear()
|
|
80
76
|
logger.debug("Workflow stack cleared")
|
|
81
77
|
|
|
82
78
|
def stop_workflow(self) -> None:
|
|
@@ -95,7 +91,7 @@ class ChatSession:
|
|
|
95
91
|
logger.warning("Chat worker thread did not terminate within timeout")
|
|
96
92
|
|
|
97
93
|
# Clear the workflow stack
|
|
98
|
-
|
|
94
|
+
self.clear_workflow_stack()
|
|
99
95
|
|
|
100
96
|
# Reset status to stopped
|
|
101
97
|
self._status = SessionStatus.STOPPED
|
|
@@ -116,6 +112,10 @@ class ChatSession:
|
|
|
116
112
|
A chat session can run multiple workflows that share the same message queues.
|
|
117
113
|
Use start_workflow() to start a specific workflow within this session.
|
|
118
114
|
"""
|
|
115
|
+
# Create instance-level workflow stack (supports nested workflows within this session)
|
|
116
|
+
self._workflow_stack: deque[fastworkflow.Workflow] = deque()
|
|
117
|
+
self._workflow_stack_lock = Lock()
|
|
118
|
+
|
|
119
119
|
# Create queues for user messages and command outputs
|
|
120
120
|
self._user_message_queue = Queue()
|
|
121
121
|
self._command_output_queue = Queue()
|
|
@@ -123,7 +123,7 @@ class ChatSession:
|
|
|
123
123
|
self._status = SessionStatus.STOPPED
|
|
124
124
|
self._chat_worker = None
|
|
125
125
|
|
|
126
|
-
self._conversation_history = dspy.History(messages=[])
|
|
126
|
+
self._conversation_history: dspy.History = dspy.History(messages=[])
|
|
127
127
|
|
|
128
128
|
# Import here to avoid circular imports
|
|
129
129
|
from fastworkflow.command_executor import CommandExecutor
|
|
@@ -134,7 +134,8 @@ class ChatSession:
|
|
|
134
134
|
|
|
135
135
|
# Initialize agent-related attributes
|
|
136
136
|
self._run_as_agent = run_as_agent
|
|
137
|
-
self._workflow_tool_agent = None
|
|
137
|
+
self._workflow_tool_agent = None
|
|
138
|
+
self._intent_clarification_agent = None
|
|
138
139
|
|
|
139
140
|
# Create the command metadata extraction workflow with a unique ID
|
|
140
141
|
self._cme_workflow = fastworkflow.Workflow.create(
|
|
@@ -145,6 +146,10 @@ class ChatSession:
|
|
|
145
146
|
}
|
|
146
147
|
)
|
|
147
148
|
|
|
149
|
+
# this intializes the conversation traces file name also
|
|
150
|
+
# which is necessary when starting a brand new chat session
|
|
151
|
+
self.clear_conversation_history()
|
|
152
|
+
|
|
148
153
|
def start_workflow(self,
|
|
149
154
|
workflow_folderpath: str,
|
|
150
155
|
workflow_id_str: Optional[str] = None,
|
|
@@ -193,7 +198,7 @@ class ChatSession:
|
|
|
193
198
|
|
|
194
199
|
# Check if we need to stop the current workflow
|
|
195
200
|
# Stop if this is a new root workflow (no parent, keep_alive=True)
|
|
196
|
-
current_workflow =
|
|
201
|
+
current_workflow = self.get_active_workflow()
|
|
197
202
|
if (current_workflow and
|
|
198
203
|
parent_workflow_id is None and
|
|
199
204
|
self._keep_alive):
|
|
@@ -240,13 +245,13 @@ class ChatSession:
|
|
|
240
245
|
self._status = SessionStatus.STARTING
|
|
241
246
|
|
|
242
247
|
# Push this workflow as active
|
|
243
|
-
|
|
248
|
+
self.push_active_workflow(workflow)
|
|
244
249
|
|
|
245
250
|
# Initialize workflow tool agent if in agent mode
|
|
246
251
|
# This must happen after pushing the workflow to the stack
|
|
247
252
|
# so that get_active_workflow() returns the correct workflow
|
|
248
253
|
if self._run_as_agent:
|
|
249
|
-
self.
|
|
254
|
+
self._initialize_agent_functionality()
|
|
250
255
|
|
|
251
256
|
command_output = None
|
|
252
257
|
if self._keep_alive:
|
|
@@ -260,19 +265,32 @@ class ChatSession:
|
|
|
260
265
|
|
|
261
266
|
return command_output
|
|
262
267
|
|
|
263
|
-
def
|
|
268
|
+
def _initialize_agent_functionality(self):
|
|
264
269
|
"""
|
|
265
270
|
Initialize the workflow tool agent for agent mode.
|
|
266
271
|
This agent handles individual tool selection and execution.
|
|
267
272
|
"""
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
273
|
+
self._cme_workflow.context["run_as_agent"] = True
|
|
274
|
+
self._current_workflow.context["run_as_agent"] = True
|
|
275
|
+
|
|
276
|
+
# Initialize the workflow tool agent
|
|
277
|
+
from fastworkflow.workflow_agent import initialize_workflow_tool_agent
|
|
278
|
+
self._workflow_tool_agent = initialize_workflow_tool_agent(self)
|
|
279
|
+
|
|
280
|
+
# Initialize the intent clarification agent
|
|
281
|
+
from fastworkflow.intent_clarification_agent import initialize_intent_clarification_agent
|
|
282
|
+
self._intent_clarification_agent = initialize_intent_clarification_agent(self)
|
|
283
|
+
|
|
284
|
+
@property
|
|
285
|
+
def workflow_tool_agent(self):
|
|
286
|
+
"""Get the workflow tool agent for agent mode."""
|
|
287
|
+
return self._workflow_tool_agent
|
|
288
|
+
|
|
289
|
+
@property
|
|
290
|
+
def intent_clarification_agent(self):
|
|
291
|
+
"""Get the intent clarification agent for agent mode."""
|
|
292
|
+
return self._intent_clarification_agent
|
|
293
|
+
|
|
276
294
|
@property
|
|
277
295
|
def cme_workflow(self) -> fastworkflow.Workflow:
|
|
278
296
|
"""Get the command metadata extraction workflow."""
|
|
@@ -297,19 +315,35 @@ class ChatSession:
|
|
|
297
315
|
|
|
298
316
|
@property
|
|
299
317
|
def workflow_is_complete(self) -> bool:
|
|
300
|
-
workflow =
|
|
318
|
+
workflow = self.get_active_workflow()
|
|
301
319
|
return workflow.is_complete if workflow else True
|
|
302
320
|
|
|
303
321
|
@workflow_is_complete.setter
|
|
304
322
|
def workflow_is_complete(self, value: bool) -> None:
|
|
305
|
-
if workflow :=
|
|
323
|
+
if workflow := self.get_active_workflow():
|
|
306
324
|
workflow.is_complete = value
|
|
307
325
|
|
|
308
326
|
@property
|
|
309
327
|
def conversation_history(self) -> dspy.History:
|
|
310
328
|
"""Return the conversation history."""
|
|
311
329
|
return self._conversation_history
|
|
312
|
-
|
|
330
|
+
|
|
331
|
+
# def clear_conversation_history(self, trace_filename_suffix: Optional[str] = None) -> None:
|
|
332
|
+
def clear_conversation_history(self) -> None:
|
|
333
|
+
"""
|
|
334
|
+
Clear the conversation history.
|
|
335
|
+
This resets the conversation history to an empty state.
|
|
336
|
+
"""
|
|
337
|
+
self._conversation_history = dspy.History(messages=[])
|
|
338
|
+
# Filename for conversation traces
|
|
339
|
+
# if trace_filename_suffix:
|
|
340
|
+
# self._conversation_traces_file_name: str = (
|
|
341
|
+
# f"conversation_traces_{trace_filename_suffix}"
|
|
342
|
+
# )
|
|
343
|
+
# else:
|
|
344
|
+
# self._conversation_traces_file_name: str = (
|
|
345
|
+
# f"conversation_traces_{datetime.now().strftime('%m_%d_%Y:%H_%M_%S')}.jsonl"
|
|
346
|
+
# )
|
|
313
347
|
|
|
314
348
|
def _run_workflow_loop(self) -> Optional[fastworkflow.CommandOutput]:
|
|
315
349
|
"""
|
|
@@ -319,12 +353,12 @@ class ChatSession:
|
|
|
319
353
|
- All outputs (success or failure) are sent to queue during processing
|
|
320
354
|
"""
|
|
321
355
|
last_output = None
|
|
322
|
-
workflow =
|
|
356
|
+
workflow = self.get_active_workflow()
|
|
323
357
|
|
|
324
358
|
try:
|
|
325
359
|
# Handle startup command/action
|
|
326
360
|
if self._startup_command:
|
|
327
|
-
if self._run_as_agent:
|
|
361
|
+
if self._run_as_agent and not self._startup_command.startswith('/'):
|
|
328
362
|
# In agent mode, use workflow tool agent for processing
|
|
329
363
|
last_output = self._process_agent_message(self._startup_command)
|
|
330
364
|
else:
|
|
@@ -337,15 +371,24 @@ class ChatSession:
|
|
|
337
371
|
) and self._status != SessionStatus.STOPPING:
|
|
338
372
|
try:
|
|
339
373
|
message = self.user_message_queue.get()
|
|
340
|
-
|
|
341
|
-
#
|
|
342
|
-
if
|
|
343
|
-
|
|
344
|
-
last_output = self._process_agent_message(message)
|
|
345
|
-
elif self._is_mcp_tool_call(message):
|
|
346
|
-
last_output = self._process_mcp_tool_call(message)
|
|
374
|
+
|
|
375
|
+
# Handle Action objects directly
|
|
376
|
+
if isinstance(message, fastworkflow.Action):
|
|
377
|
+
last_output = self._process_action(message)
|
|
347
378
|
else:
|
|
348
|
-
|
|
379
|
+
if ((
|
|
380
|
+
"NLU_Pipeline_Stage" not in self._cme_workflow.context or
|
|
381
|
+
self._cme_workflow.context["NLU_Pipeline_Stage"] == fastworkflow.NLUPipelineStage.INTENT_DETECTION) and
|
|
382
|
+
message.startswith('/')
|
|
383
|
+
):
|
|
384
|
+
self._cme_workflow.context["is_assistant_mode_command"] = True
|
|
385
|
+
|
|
386
|
+
# Route based on mode and message type
|
|
387
|
+
if self._run_as_agent and "is_assistant_mode_command" not in self._cme_workflow.context:
|
|
388
|
+
# In agent mode, use workflow tool agent for processing
|
|
389
|
+
last_output = self._process_agent_message(message)
|
|
390
|
+
else:
|
|
391
|
+
last_output = self._process_message(message)
|
|
349
392
|
|
|
350
393
|
except Empty:
|
|
351
394
|
continue
|
|
@@ -356,91 +399,110 @@ class ChatSession:
|
|
|
356
399
|
|
|
357
400
|
finally:
|
|
358
401
|
self._status = SessionStatus.STOPPED
|
|
359
|
-
|
|
402
|
+
self.pop_active_workflow()
|
|
360
403
|
logger.debug(f"Workflow {workflow.id if workflow else 'unknown'} completed")
|
|
361
404
|
|
|
362
405
|
return None
|
|
363
406
|
|
|
364
|
-
def _is_mcp_tool_call(self, message: str) -> bool:
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
407
|
+
# def _is_mcp_tool_call(self, message: str) -> bool:
|
|
408
|
+
# """Detect if message is an MCP tool call JSON"""
|
|
409
|
+
# try:
|
|
410
|
+
# data = json.loads(message)
|
|
411
|
+
# return data.get("type") == "mcp_tool_call"
|
|
412
|
+
# except (json.JSONDecodeError, AttributeError):
|
|
413
|
+
# return False
|
|
371
414
|
|
|
372
|
-
def _process_mcp_tool_call(self, message: str) -> fastworkflow.CommandOutput:
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
415
|
+
# def _process_mcp_tool_call(self, message: str) -> fastworkflow.CommandOutput:
|
|
416
|
+
# # sourcery skip: class-extract-method, extract-method
|
|
417
|
+
# """Process an MCP tool call message"""
|
|
418
|
+
# workflow = self.get_active_workflow()
|
|
376
419
|
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
420
|
+
# try:
|
|
421
|
+
# # Parse JSON message
|
|
422
|
+
# data = json.loads(message)
|
|
423
|
+
# tool_call_data = data["tool_call"]
|
|
381
424
|
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
425
|
+
# # Create MCPToolCall object
|
|
426
|
+
# tool_call = fastworkflow.MCPToolCall(
|
|
427
|
+
# name=tool_call_data["name"],
|
|
428
|
+
# arguments=tool_call_data["arguments"]
|
|
429
|
+
# )
|
|
387
430
|
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
431
|
+
# # Execute via command executor
|
|
432
|
+
# mcp_result = self._CommandExecutor.perform_mcp_tool_call(
|
|
433
|
+
# workflow,
|
|
434
|
+
# tool_call,
|
|
435
|
+
# command_context=workflow.current_command_context_name
|
|
436
|
+
# )
|
|
394
437
|
|
|
395
|
-
|
|
396
|
-
|
|
438
|
+
# # Convert MCPToolResult back to CommandOutput for consistency
|
|
439
|
+
# command_output = self._convert_mcp_result_to_command_output(mcp_result)
|
|
397
440
|
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
441
|
+
# # Put in output queue if needed
|
|
442
|
+
# if (not command_output.success or self._keep_alive) and self.command_output_queue:
|
|
443
|
+
# self.command_output_queue.put(command_output)
|
|
401
444
|
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
445
|
+
# # Flush on successful or failed tool call – state may have changed.
|
|
446
|
+
# if workflow := self.get_active_workflow():
|
|
447
|
+
# workflow.flush()
|
|
405
448
|
|
|
406
|
-
|
|
449
|
+
# return command_output
|
|
407
450
|
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
451
|
+
# except Exception as e:
|
|
452
|
+
# logger.error(f"Error processing MCP tool call: {e}. Tool call content: {message}")
|
|
453
|
+
# return self._process_message(message) # process as a message
|
|
411
454
|
|
|
412
|
-
def _convert_mcp_result_to_command_output(self, mcp_result: fastworkflow.MCPToolResult) -> fastworkflow.CommandOutput:
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
455
|
+
# def _convert_mcp_result_to_command_output(self, mcp_result: fastworkflow.MCPToolResult) -> fastworkflow.CommandOutput:
|
|
456
|
+
# """Convert MCPToolResult to CommandOutput for compatibility"""
|
|
457
|
+
# command_response = fastworkflow.CommandResponse(
|
|
458
|
+
# response=mcp_result.content[0].text if mcp_result.content else "No response",
|
|
459
|
+
# success=not mcp_result.isError
|
|
460
|
+
# )
|
|
418
461
|
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
462
|
+
# command_output = fastworkflow.CommandOutput(command_responses=[command_response])
|
|
463
|
+
# command_output._mcp_source = mcp_result # Mark for special formatting
|
|
464
|
+
# return command_output
|
|
422
465
|
|
|
423
466
|
def _process_agent_message(self, message: str) -> fastworkflow.CommandOutput:
|
|
467
|
+
# sourcery skip: class-extract-method
|
|
424
468
|
"""Process a message in agent mode using workflow tool agent"""
|
|
425
469
|
# The agent processes the user's message and may make multiple tool calls
|
|
426
470
|
# to the workflow internally (directly via CommandExecutor)
|
|
427
471
|
|
|
428
472
|
# Ensure any prior action log is removed before a fresh agent run
|
|
429
|
-
if os.path.exists("action.
|
|
430
|
-
os.remove("action.
|
|
473
|
+
if os.path.exists("action.jsonl"):
|
|
474
|
+
os.remove("action.jsonl")
|
|
431
475
|
|
|
432
|
-
|
|
476
|
+
refined_user_query = self._refine_user_query(message, self.conversation_history)
|
|
477
|
+
|
|
478
|
+
from fastworkflow.workflow_agent import build_query_with_next_steps
|
|
479
|
+
command_info_and_refined_message_with_todolist = build_query_with_next_steps(
|
|
480
|
+
refined_user_query,
|
|
481
|
+
self
|
|
482
|
+
)
|
|
483
|
+
|
|
484
|
+
# Get available commands for current context and pass to agent.
|
|
485
|
+
# The CommandsSystemPreludeAdapter will inject these commands into the system
|
|
486
|
+
# message, keeping them out of the trajectory to avoid token bloat while still
|
|
487
|
+
# providing context-specific command info.
|
|
488
|
+
from fastworkflow.workflow_agent import _what_can_i_do
|
|
489
|
+
available_commands = _what_can_i_do(self)
|
|
433
490
|
|
|
434
491
|
lm = dspy_utils.get_lm("LLM_AGENT", "LITELLM_API_KEY_AGENT")
|
|
435
492
|
from dspy.utils.exceptions import AdapterParseError
|
|
493
|
+
from fastworkflow.utils.chat_adapter import CommandsSystemPreludeAdapter
|
|
494
|
+
|
|
495
|
+
# Use CommandsSystemPreludeAdapter specifically for workflow agent calls
|
|
496
|
+
agent_adapter = CommandsSystemPreludeAdapter()
|
|
497
|
+
|
|
436
498
|
# Retry logic for AdapterParseError
|
|
437
499
|
max_retries = 2
|
|
438
500
|
for attempt in range(max_retries):
|
|
439
501
|
try:
|
|
440
|
-
with dspy.context(lm=lm, adapter=
|
|
502
|
+
with dspy.context(lm=lm, adapter=agent_adapter):
|
|
441
503
|
agent_result = self._workflow_tool_agent(
|
|
442
|
-
user_query=
|
|
443
|
-
|
|
504
|
+
user_query=command_info_and_refined_message_with_todolist,
|
|
505
|
+
available_commands=available_commands
|
|
444
506
|
)
|
|
445
507
|
break # Success, exit retry loop
|
|
446
508
|
except AdapterParseError as _:
|
|
@@ -460,33 +522,232 @@ class ChatSession:
|
|
|
460
522
|
# Create CommandOutput with the agent's response
|
|
461
523
|
command_response = fastworkflow.CommandResponse(response=result_text)
|
|
462
524
|
|
|
463
|
-
|
|
525
|
+
conversation_traces = None
|
|
526
|
+
conversation_summary = message
|
|
464
527
|
# Attach actions captured during agent execution as artifacts if available
|
|
465
|
-
if os.path.exists("action.
|
|
466
|
-
with open("action.
|
|
528
|
+
if os.path.exists("action.jsonl"):
|
|
529
|
+
with open("action.jsonl", "r", encoding="utf-8") as f:
|
|
467
530
|
actions = [json.loads(line) for line in f if line.strip()]
|
|
468
|
-
|
|
469
|
-
command_response.artifacts["
|
|
531
|
+
conversation_summary, conversation_traces = self._extract_conversation_summary(message, actions, result_text)
|
|
532
|
+
command_response.artifacts["conversation_summary"] = conversation_summary
|
|
470
533
|
|
|
471
534
|
self.conversation_history.messages.append(
|
|
472
|
-
{
|
|
473
|
-
|
|
535
|
+
{
|
|
536
|
+
"conversation summary": conversation_summary,
|
|
537
|
+
"conversation_traces": conversation_traces,
|
|
538
|
+
"feedback": None # Initialize feedback slot for this turn
|
|
539
|
+
}
|
|
474
540
|
)
|
|
475
541
|
|
|
476
542
|
command_output = fastworkflow.CommandOutput(
|
|
477
543
|
command_responses=[command_response]
|
|
478
544
|
)
|
|
545
|
+
command_output.workflow_name = self._current_workflow.folderpath.split('/')[-1]
|
|
479
546
|
|
|
480
547
|
# Put output in queue (following same pattern as _process_message)
|
|
481
548
|
if (not command_output.success or self._keep_alive) and \
|
|
482
|
-
|
|
549
|
+
self.command_output_queue:
|
|
483
550
|
self.command_output_queue.put(command_output)
|
|
484
551
|
|
|
485
552
|
# Persist workflow state changes
|
|
486
|
-
if workflow :=
|
|
553
|
+
if workflow := self.get_active_workflow():
|
|
554
|
+
workflow.flush()
|
|
555
|
+
|
|
556
|
+
return command_output
|
|
557
|
+
|
|
558
|
+
def _process_message(self, message: str) -> fastworkflow.CommandOutput:
|
|
559
|
+
"""Process a single message"""
|
|
560
|
+
# Pre-execution trace
|
|
561
|
+
if self.command_trace_queue:
|
|
562
|
+
self.command_trace_queue.put(fastworkflow.CommandTraceEvent(
|
|
563
|
+
direction=fastworkflow.CommandTraceEventDirection.AGENT_TO_WORKFLOW,
|
|
564
|
+
raw_command=message,
|
|
565
|
+
command_name=None,
|
|
566
|
+
parameters=None,
|
|
567
|
+
response_text=None,
|
|
568
|
+
success=None,
|
|
569
|
+
timestamp_ms=int(time.time() * 1000),
|
|
570
|
+
))
|
|
571
|
+
|
|
572
|
+
# Execute command
|
|
573
|
+
command_output = self._CommandExecutor.invoke_command(self, message)
|
|
574
|
+
|
|
575
|
+
# Extract response text and parameters for traces
|
|
576
|
+
response_text = ""
|
|
577
|
+
if command_output.command_responses:
|
|
578
|
+
response_text = command_output.command_responses[0].response or ""
|
|
579
|
+
|
|
580
|
+
# Convert parameters to dict if it's a Pydantic model or other complex object
|
|
581
|
+
params = command_output.command_parameters or {}
|
|
582
|
+
if hasattr(params, 'model_dump'):
|
|
583
|
+
params_dict = params.model_dump()
|
|
584
|
+
elif hasattr(params, 'dict'):
|
|
585
|
+
params_dict = params.dict()
|
|
586
|
+
else:
|
|
587
|
+
params_dict = params
|
|
588
|
+
|
|
589
|
+
# Post-execution trace
|
|
590
|
+
if self.command_trace_queue:
|
|
591
|
+
self.command_trace_queue.put(fastworkflow.CommandTraceEvent(
|
|
592
|
+
direction=fastworkflow.CommandTraceEventDirection.WORKFLOW_TO_AGENT,
|
|
593
|
+
raw_command=None,
|
|
594
|
+
command_name=command_output.command_name or "",
|
|
595
|
+
parameters=params_dict,
|
|
596
|
+
response_text=response_text,
|
|
597
|
+
success=bool(command_output.success),
|
|
598
|
+
timestamp_ms=int(time.time() * 1000),
|
|
599
|
+
))
|
|
600
|
+
|
|
601
|
+
# Record assistant mode trace to action.jsonl (similar to agent mode in workflow_agent.py)
|
|
602
|
+
# This ensures assistant commands are captured even when interspersed with agent commands
|
|
603
|
+
record = {
|
|
604
|
+
"command": message,
|
|
605
|
+
"command_name": command_output.command_name or "",
|
|
606
|
+
"parameters": params_dict,
|
|
607
|
+
"response": response_text
|
|
608
|
+
}
|
|
609
|
+
|
|
610
|
+
self.conversation_history.messages.append(
|
|
611
|
+
{
|
|
612
|
+
"conversation summary": "assistant_mode_command",
|
|
613
|
+
"conversation_traces": json.dumps(record),
|
|
614
|
+
"feedback": None # Initialize feedback slot for this turn
|
|
615
|
+
}
|
|
616
|
+
)
|
|
617
|
+
|
|
618
|
+
if (not command_output.success or self._keep_alive) and \
|
|
619
|
+
self.command_output_queue:
|
|
620
|
+
self.command_output_queue.put(command_output)
|
|
621
|
+
|
|
622
|
+
# Persist workflow state changes lazily accumulated during message processing.
|
|
623
|
+
if workflow := self.get_active_workflow():
|
|
624
|
+
workflow.flush()
|
|
625
|
+
|
|
626
|
+
return command_output
|
|
627
|
+
|
|
628
|
+
def _process_action(self, action: fastworkflow.Action) -> fastworkflow.CommandOutput:
|
|
629
|
+
"""Process a startup action"""
|
|
630
|
+
workflow = self.get_active_workflow()
|
|
631
|
+
|
|
632
|
+
# Serialize action parameters for trace
|
|
633
|
+
params = action.parameters or {}
|
|
634
|
+
if hasattr(params, 'model_dump'):
|
|
635
|
+
params_dict = params.model_dump()
|
|
636
|
+
elif hasattr(params, 'dict'):
|
|
637
|
+
params_dict = params.dict()
|
|
638
|
+
else:
|
|
639
|
+
params_dict = params
|
|
640
|
+
|
|
641
|
+
# Pre-execution trace: serialize action as raw_command
|
|
642
|
+
raw_command = f"{action.command_name} {json.dumps(params_dict)}"
|
|
643
|
+
if self.command_trace_queue:
|
|
644
|
+
self.command_trace_queue.put(fastworkflow.CommandTraceEvent(
|
|
645
|
+
direction=fastworkflow.CommandTraceEventDirection.AGENT_TO_WORKFLOW,
|
|
646
|
+
raw_command=raw_command,
|
|
647
|
+
command_name=None,
|
|
648
|
+
parameters=None,
|
|
649
|
+
response_text=None,
|
|
650
|
+
success=None,
|
|
651
|
+
timestamp_ms=int(time.time() * 1000),
|
|
652
|
+
))
|
|
653
|
+
|
|
654
|
+
# Execute the action
|
|
655
|
+
command_output = self._CommandExecutor.perform_action(workflow, action)
|
|
656
|
+
|
|
657
|
+
# Extract response text for post-execution trace
|
|
658
|
+
response_text = ""
|
|
659
|
+
if command_output.command_responses:
|
|
660
|
+
response_text = command_output.command_responses[0].response or ""
|
|
661
|
+
|
|
662
|
+
# Post-execution trace
|
|
663
|
+
if self.command_trace_queue:
|
|
664
|
+
self.command_trace_queue.put(fastworkflow.CommandTraceEvent(
|
|
665
|
+
direction=fastworkflow.CommandTraceEventDirection.WORKFLOW_TO_AGENT,
|
|
666
|
+
raw_command=None,
|
|
667
|
+
command_name=command_output.command_name,
|
|
668
|
+
parameters=params_dict,
|
|
669
|
+
response_text=response_text,
|
|
670
|
+
success=bool(command_output.success),
|
|
671
|
+
timestamp_ms=int(time.time() * 1000),
|
|
672
|
+
))
|
|
673
|
+
|
|
674
|
+
# Record action trace to action.jsonl
|
|
675
|
+
record = {
|
|
676
|
+
"command": "process_action",
|
|
677
|
+
"command_name": action.command_name,
|
|
678
|
+
"parameters": params_dict,
|
|
679
|
+
"response": response_text
|
|
680
|
+
}
|
|
681
|
+
|
|
682
|
+
self.conversation_history.messages.append(
|
|
683
|
+
{
|
|
684
|
+
"conversation summary": "process_action command",
|
|
685
|
+
"conversation_traces": json.dumps(record),
|
|
686
|
+
"feedback": None # Initialize feedback slot for this turn
|
|
687
|
+
}
|
|
688
|
+
)
|
|
689
|
+
|
|
690
|
+
if (not command_output.success or self._keep_alive) and \
|
|
691
|
+
self.command_output_queue:
|
|
692
|
+
self.command_output_queue.put(command_output)
|
|
693
|
+
|
|
694
|
+
# Flush any pending workflow updates triggered by this startup action.
|
|
695
|
+
if workflow:
|
|
487
696
|
workflow.flush()
|
|
488
697
|
|
|
489
698
|
return command_output
|
|
699
|
+
|
|
700
|
+
def _refine_user_query(self, user_query: str, conversation_history: dspy.History) -> str:
|
|
701
|
+
"""
|
|
702
|
+
Refine user query using conversation history.
|
|
703
|
+
Return the refined user query
|
|
704
|
+
"""
|
|
705
|
+
if conversation_history.messages:
|
|
706
|
+
messages = []
|
|
707
|
+
for conv_dict in conversation_history.messages[-5:]:
|
|
708
|
+
messages.extend([
|
|
709
|
+
f'{k}: {v}' for k, v in conv_dict.items()
|
|
710
|
+
])
|
|
711
|
+
messages.append(f'new_user_query: {user_query}')
|
|
712
|
+
return '\n'.join(messages)
|
|
713
|
+
|
|
714
|
+
return user_query
|
|
715
|
+
|
|
716
|
+
def _extract_conversation_summary(self,
|
|
717
|
+
user_query: str, workflow_actions: list[dict[str, str]], final_agent_response: str) -> str:
|
|
718
|
+
"""
|
|
719
|
+
Summarizes conversation based on original user query, workflow actions and agent response.
|
|
720
|
+
Returns the conversation summary and the log entry
|
|
721
|
+
"""
|
|
722
|
+
# Lets log everything to a file called action_log.jsonl, if it exists
|
|
723
|
+
conversation_traces = {
|
|
724
|
+
"user_query": user_query,
|
|
725
|
+
"agent_workflow_interactions": workflow_actions,
|
|
726
|
+
"final_agent_response": final_agent_response
|
|
727
|
+
}
|
|
728
|
+
# with open(self._conversation_traces_file_name, "a", encoding="utf-8") as f:
|
|
729
|
+
# f.write(json.dumps(log_entry) + "\n")
|
|
730
|
+
|
|
731
|
+
class ConversationSummarySignature(dspy.Signature):
|
|
732
|
+
"""
|
|
733
|
+
A summary of conversation
|
|
734
|
+
Omit descriptions of action sequences
|
|
735
|
+
Capture relevant facts and parameter values from user query, workflow actions and agent response
|
|
736
|
+
"""
|
|
737
|
+
user_query: str = dspy.InputField()
|
|
738
|
+
workflow_actions: list[dict[str, str]] = dspy.InputField()
|
|
739
|
+
final_agent_response: str = dspy.InputField()
|
|
740
|
+
conversation_summary: str = dspy.OutputField(desc="A multiline paragraph summary")
|
|
741
|
+
|
|
742
|
+
planner_lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
|
|
743
|
+
with dspy.context(lm=planner_lm):
|
|
744
|
+
cs_func = dspy.ChainOfThought(ConversationSummarySignature)
|
|
745
|
+
prediction = cs_func(
|
|
746
|
+
user_query=user_query,
|
|
747
|
+
workflow_actions=workflow_actions,
|
|
748
|
+
final_agent_response=final_agent_response)
|
|
749
|
+
return prediction.conversation_summary, json.dumps(conversation_traces)
|
|
750
|
+
|
|
490
751
|
|
|
491
752
|
def profile_invoke_command(self, message: str):
|
|
492
753
|
"""
|
|
@@ -576,91 +837,3 @@ class ChatSession:
|
|
|
576
837
|
print(f"Detailed report saved to {os.path.abspath(report_file)}")
|
|
577
838
|
|
|
578
839
|
return result
|
|
579
|
-
|
|
580
|
-
def _process_message(self, message: str) -> fastworkflow.CommandOutput:
    """Run one user message through the command executor and return its output.

    The output is also pushed onto the output queue when this session is
    long-lived (keep-alive) or the command failed, and any lazily buffered
    workflow state is flushed to storage afterwards.
    """
    # A profiling variant exists: self.profile_invoke_command(message).
    output = self._CommandExecutor.invoke_command(self, message)

    should_publish = not output.success or self._keep_alive
    if should_publish and self.command_output_queue:
        self.command_output_queue.put(output)

    # Persist workflow state changes lazily accumulated during message processing.
    active_workflow = ChatSession.get_active_workflow()
    if active_workflow:
        active_workflow.flush()

    return output
|
|
595
|
-
|
|
596
|
-
def _process_action(self, action: fastworkflow.Action) -> fastworkflow.CommandOutput:
    """Execute a startup action against the active workflow and return its output.

    Mirrors the message path: failed or keep-alive outputs are published to
    the output queue, and pending workflow state is flushed afterwards.
    """
    active_workflow = ChatSession.get_active_workflow()
    output = self._CommandExecutor.perform_action(active_workflow, action)

    should_publish = not output.success or self._keep_alive
    if should_publish and self.command_output_queue:
        self.command_output_queue.put(output)

    # Flush any pending workflow updates triggered by this startup action.
    if active_workflow:
        active_workflow.flush()

    return output
|
|
609
|
-
|
|
610
|
-
def _think_and_plan(self, user_query: str, conversation_history: dspy.History) -> str:
    """Refine a user query into an ordered plan of simpler tasks.

    Asks the planner LLM to decompose the query given the commands available
    in the current context. If the query is already atomic (no plan, or a
    single todo identical to the query), the query is returned unchanged;
    otherwise it is returned with a numbered "Next steps" list appended.
    """
    # NOTE: the signature's docstring and field descriptions are sent to the
    # LLM verbatim as prompt instructions — do not reword them casually.
    class TaskPlannerSignature(dspy.Signature):
        """
        Break down a user_query into simpler tasks based only on available commands and conversation_history.
        If user_query is simple, return a single todo that is the user_query as-is
        """
        user_query: str = dspy.InputField()
        conversation_history: dspy.History = dspy.InputField()
        available_commands: list[str] = dspy.InputField()
        todo_list: list[str] = dspy.OutputField(desc="task descriptions as short sentences")

    workflow = ChatSession.get_active_workflow()
    commands = CommandMetadataAPI.get_command_display_text(
        subject_workflow_path=workflow.folderpath,
        cme_workflow_path=fastworkflow.get_internal_workflow_path("command_metadata_extraction"),
        active_context_name=workflow.current_command_context_name,
    )

    lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
    with dspy.context(lm=lm):
        planner = dspy.ChainOfThought(TaskPlannerSignature)
        prediction = planner(
            user_query=user_query,
            conversation_history=conversation_history,
            available_commands=commands,
        )

    todos = prediction.todo_list
    # No plan, or a one-item plan that merely echoes the query: nothing to refine.
    if not todos or (len(todos) == 1 and todos[0] == user_query):
        return user_query

    numbered_steps = '\n'.join(f'{idx + 1}. {todo}' for idx, todo in enumerate(todos))
    return f"{user_query}\nNext steps:\n{numbered_steps}"
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
def _extract_user_instructions(self,
    user_query: str, workflow_actions: list[dict[str, str]]) -> str:
    """Compile the user's instructions into a single-paragraph summary.

    Combines the original query with every subsequent command the user issued
    (taken from workflow_actions entries that carry a 'command' key) and asks
    the planner LLM to summarize them.
    """
    # NOTE: the signature's docstring and field descriptions are sent to the
    # LLM verbatim as prompt instructions — do not reword them casually.
    class UserInstructionCompilerSignature(dspy.Signature):
        """
        Concise summary of user instructions based on their commands to the workflow.
        Include parameter values passed in commands in the summary.
        """
        commands_list: list[str] = dspy.InputField()
        user_instructions_summary: str = dspy.OutputField(desc="A single paragraph summary")

    issued_commands: list[str] = [user_query]
    for wf_action in workflow_actions:
        if 'command' in wf_action:
            issued_commands.append(wf_action['command'])

    lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
    with dspy.context(lm=lm):
        compiler = dspy.ChainOfThought(UserInstructionCompilerSignature)
        prediction = compiler(commands_list=issued_commands)
    return prediction.user_instructions_summary
|