fastworkflow 2.15.6__py3-none-any.whl → 2.15.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py +1 -1
- fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py +85 -10
- fastworkflow/chat_session.py +158 -160
- fastworkflow/cli.py +4 -173
- fastworkflow/examples/fastworkflow.env +1 -1
- fastworkflow/examples/retail_workflow/_commands/exchange_delivered_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/modify_pending_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/return_delivered_order_items.py +13 -2
- fastworkflow/examples/retail_workflow/_commands/transfer_to_human_agents.py +1 -1
- fastworkflow/run/__main__.py +32 -33
- fastworkflow/utils/react.py +242 -0
- fastworkflow/utils/signatures.py +23 -19
- fastworkflow/workflow_agent.py +135 -122
- {fastworkflow-2.15.6.dist-info → fastworkflow-2.15.8.dist-info}/METADATA +5 -14
- {fastworkflow-2.15.6.dist-info → fastworkflow-2.15.8.dist-info}/RECORD +18 -17
- {fastworkflow-2.15.6.dist-info → fastworkflow-2.15.8.dist-info}/LICENSE +0 -0
- {fastworkflow-2.15.6.dist-info → fastworkflow-2.15.8.dist-info}/WHEEL +0 -0
- {fastworkflow-2.15.6.dist-info → fastworkflow-2.15.8.dist-info}/entry_points.txt +0 -0
fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py
CHANGED

@@ -4,6 +4,8 @@ import sys
 from typing import Dict, List, Optional, Type, Union
 import json
 import os
+from collections import Counter
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from pydantic import BaseModel
 from pydantic_core import PydanticUndefined
@@ -38,6 +40,67 @@ INVALID = fastworkflow.get_env_var("INVALID")
 PARAMETER_EXTRACTION_ERROR_MSG = None
 
 
+# TODO - generation is deterministic. They all return the same answer
+# TODO - Need 'temperature' for intent detection pipeline
+def majority_vote_predictions(command_router, command: str, n_predictions: int = 5) -> list[str]:
+    """
+    Generate N prediction sets in parallel and return the set that wins the majority vote.
+
+    This function improves prediction reliability by running multiple parallel predictions
+    and selecting the most common result through majority voting. This helps reduce
+    the impact of random variations in model predictions.
+
+    Args:
+        command_router: The CommandRouter instance to use for predictions
+        command: The input command string
+        n_predictions: Number of parallel predictions to generate (default: 5)
+            Can be configured via N_PARALLEL_PREDICTIONS environment variable
+
+    Returns:
+        The prediction set that received the majority vote. Falls back to a single
+        prediction if all parallel predictions fail.
+
+    Note:
+        Uses ThreadPoolExecutor with max_workers limited to min(n_predictions, 10)
+        to avoid overwhelming the system with too many concurrent threads.
+    """
+    def get_single_prediction():
+        """Helper function to get a single prediction"""
+        return command_router.predict(command)
+
+    # Generate N predictions in parallel
+    prediction_sets = []
+    with ThreadPoolExecutor(max_workers=min(n_predictions, 10)) as executor:
+        # Submit all prediction tasks
+        futures = [executor.submit(get_single_prediction) for _ in range(n_predictions)]
+
+        # Collect results as they complete
+        for future in as_completed(futures):
+            try:
+                prediction_set = future.result()
+                prediction_sets.append(prediction_set)
+            except Exception as e:
+                logger.warning(f"Prediction failed: {e}")
+                # Continue with other predictions even if one fails
+
+    if not prediction_sets:
+        # Fallback to single prediction if all parallel predictions failed
+        logger.warning("All parallel predictions failed, falling back to single prediction")
+        return command_router.predict(command)
+
+    # Convert lists to tuples so they can be hashed and counted
+    prediction_tuples = [tuple(sorted(pred_set)) for pred_set in prediction_sets]
+
+    # Count occurrences of each unique prediction set
+    vote_counts = Counter(prediction_tuples)
+
+    # Get the prediction set with the most votes
+    winning_tuple = vote_counts.most_common(1)[0][0]
+
+    # Convert back to list and return
+    return list(winning_tuple)
+
+
 class CommandNamePrediction:
     class Output(BaseModel):
         command_name: Optional[str] = None
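The helper's vote is order-insensitive because each prediction set is sorted before being turned into a hashable tuple. A standalone sketch of that core (the route strings are made up for illustration; as the TODO comments note, a deterministic router makes every worker return the same set, so the vote only becomes meaningful once sampling temperature is introduced):

from collections import Counter

# Three hypothetical prediction sets; the first two are the same set reordered.
prediction_sets = [
    ["retail/cancel_pending_order", "retail/get_order_status"],
    ["retail/get_order_status", "retail/cancel_pending_order"],
    ["retail/transfer_to_human_agents"],
]

# Sorting before tupling makes the vote order-insensitive, exactly as in
# majority_vote_predictions above.
vote_counts = Counter(tuple(sorted(p)) for p in prediction_sets)
winner = list(vote_counts.most_common(1)[0][0])
print(winner)  # ['retail/cancel_pending_order', 'retail/get_order_status']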
@@ -140,12 +203,15 @@
             command_name = cache_result
         else:
             predictions=command_router.predict(command)
+            # predictions = majority_vote_predictions(command_router, command)
 
             if len(predictions)==1:
                 command_name = predictions[0].split('/')[-1]
             else:
                 # If confidence is low, treat as ambiguous command (type 1)
-                error_msg = self._formulate_ambiguous_command_error_message(
+                error_msg = self._formulate_ambiguous_command_error_message(
+                    predictions, "run_as_agent" in self.app_workflow.context)
+
                 # Store suggested commands
                 self._store_suggested_commands(self.path, predictions, 1)
                 return CommandNamePrediction.Output(error_msg=error_msg)
@@ -296,7 +362,8 @@
         db.close()
 
     @staticmethod
-    def _formulate_ambiguous_command_error_message(
+    def _formulate_ambiguous_command_error_message(
+            route_choice_list: list[str], run_as_agent: bool) -> str:
         command_list = (
             "\n".join([
                 f"{route_choice.split('/')[-1].lower()}"
@@ -305,10 +372,14 @@
         )
 
         return (
-            "The command is ambiguous.
-
-
-
+            "The command is ambiguous. "
+            + (
+                "Choose the correct command name from these possible options and update your command:\n"
+                if run_as_agent
+                else "Please choose a command name from these possible options:\n"
+            )
+            + f"{command_list}\n\nor type 'what can i do' to see all commands\n"
+            + ("or type 'abort' to cancel" if run_as_agent else '')
         )
 
 class ParameterExtraction:
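For orientation, the rebuilt expression produces messages along these lines (illustrative only, assuming the predicted routes were retail/cancel_pending_order and retail/get_order_status). With run_as_agent=True:

The command is ambiguous. Choose the correct command name from these possible options and update your command:
cancel_pending_order
get_order_status

or type 'what can i do' to see all commands
or type 'abort' to cancel

With run_as_agent=False, the second sentence reads "Please choose a command name from these possible options:" and the trailing 'abort' line is omitted.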
@@ -376,8 +447,11 @@
         if params_str := self._format_parameters_for_display(merged_params):
             error_msg = f"Extracted parameters so far:\n{params_str}\n\n{error_msg}"
 
-
-
+        if "run_as_agent" not in self.app_workflow.context:
+            error_msg += "\nEnter 'abort' to get out of this error state and/or execute a different command."
+            error_msg += "\nEnter 'you misunderstood' if the wrong command was executed."
+        else:
+            error_msg += "\nCheck your command name if the wrong command was executed."
         return self.Output(
             parameters_are_valid=False,
             error_msg=error_msg,
@@ -626,6 +700,7 @@ class ResponseGenerator:
         workflow_context = workflow.context
         if cnp_output.command_name == 'ErrorCorrection/you_misunderstood':
             workflow_context["NLU_Pipeline_Stage"] = NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION
+            workflow_context["command"] = command
         else:
             workflow.end_command_processing()
         workflow.context = workflow_context
@@ -664,6 +739,7 @@
         workflow_context = workflow.context
         workflow_context["NLU_Pipeline_Stage"] = \
             NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION
+        workflow_context["command"] = command
         workflow.context = workflow_context
 
         startup_action = Action(
@@ -686,10 +762,9 @@
         # move to the parameter extraction stage
         workflow_context = workflow.context
         workflow_context["NLU_Pipeline_Stage"] = NLUPipelineStage.PARAMETER_EXTRACTION
-        workflow_context["command_name"] = cnp_output.command_name
         workflow.context = workflow_context
 
-        command_name =
+        command_name = cnp_output.command_name
         extractor = ParameterExtraction(workflow, app_workflow, command_name, command)
         pe_output = extractor.extract()
         if not pe_output.parameters_are_valid:

fastworkflow/chat_session.py
CHANGED
@@ -16,7 +16,6 @@ from fastworkflow.utils.logging import logger
 from fastworkflow.utils import dspy_utils
 from fastworkflow.model_pipeline_training import CommandRouter
 from fastworkflow.utils.startup_progress import StartupProgress
-from fastworkflow.command_metadata_api import CommandMetadataAPI
 
 
 class SessionStatus(Enum):
@@ -123,7 +122,7 @@ class ChatSession:
         self._status = SessionStatus.STOPPED
         self._chat_worker = None
 
-        self._conversation_history = dspy.History(messages=[])
+        self._conversation_history: dspy.History = dspy.History(messages=[])
 
         # Import here to avoid circular imports
         from fastworkflow.command_executor import CommandExecutor
@@ -246,7 +245,7 @@ class ChatSession:
         # This must happen after pushing the workflow to the stack
         # so that get_active_workflow() returns the correct workflow
         if self._run_as_agent:
-            self.
+            self._initialize_agent_functionality()
 
         command_output = None
         if self._keep_alive:
@@ -260,19 +259,22 @@ class ChatSession:
 
         return command_output
 
-    def
+    def _initialize_agent_functionality(self):
         """
         Initialize the workflow tool agent for agent mode.
         This agent handles individual tool selection and execution.
         """
-
-
-
-
-
-
-
-
+        self._cme_workflow.context["run_as_agent"] = True
+        self._current_workflow.context["run_as_agent"] = True
+
+        # Initialize the workflow tool agent
+        from fastworkflow.workflow_agent import initialize_workflow_tool_agent
+        self._workflow_tool_agent = initialize_workflow_tool_agent(self)
+
+    @property
+    def workflow_tool_agent(self):
+        """Get the workflow tool agent for agent mode."""
+        return self._workflow_tool_agent
 
     @property
     def cme_workflow(self) -> fastworkflow.Workflow:
         """Get the command metadata extraction workflow."""
@@ -309,7 +311,13 @@ class ChatSession:
     def conversation_history(self) -> dspy.History:
         """Return the conversation history."""
         return self._conversation_history
-
+
+    def clear_conversation_history(self) -> None:
+        """
+        Clear the conversation history.
+        This resets the conversation history to an empty state.
+        """
+        self._conversation_history = dspy.History(messages=[])
 
     def _run_workflow_loop(self) -> Optional[fastworkflow.CommandOutput]:
         """
@@ -324,7 +332,7 @@ class ChatSession:
         try:
             # Handle startup command/action
             if self._startup_command:
-                if self._run_as_agent:
+                if self._run_as_agent and not self._startup_command.startswith('/'):
                     # In agent mode, use workflow tool agent for processing
                     last_output = self._process_agent_message(self._startup_command)
                 else:
@@ -339,11 +347,11 @@ class ChatSession:
                 message = self.user_message_queue.get()
 
                 # Route based on mode and message type
-                if self._run_as_agent:
+                if self._run_as_agent and not message.startswith('/'):
                     # In agent mode, use workflow tool agent for processing
                     last_output = self._process_agent_message(message)
-                elif self._is_mcp_tool_call(message):
-
+                # elif self._is_mcp_tool_call(message):
+                #     last_output = self._process_mcp_tool_call(message)
                 else:
                     last_output = self._process_message(message)
 
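Both guards above follow the same pattern: in agent mode, free-form text is routed to the workflow tool agent, while a '/'-prefixed message bypasses the agent and is processed as a direct command. A minimal sketch of that routing decision (the flag and messages are hypothetical):

run_as_agent = True
for message in ["exchange the delivered item for a large", "/abort"]:
    route = "agent" if run_as_agent and not message.startswith('/') else "direct"
    print(f"{message!r} -> {route}")
# 'exchange the delivered item for a large' -> agent
# '/abort' -> direct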
@@ -361,66 +369,67 @@ class ChatSession:
 
         return None
 
-    def _is_mcp_tool_call(self, message: str) -> bool:
-        """Detect if message is an MCP tool call JSON"""
-        try:
-            data = json.loads(message)
-            return data.get("type") == "mcp_tool_call"
-        except (json.JSONDecodeError, AttributeError):
-            return False
+    # def _is_mcp_tool_call(self, message: str) -> bool:
+    #     """Detect if message is an MCP tool call JSON"""
+    #     try:
+    #         data = json.loads(message)
+    #         return data.get("type") == "mcp_tool_call"
+    #     except (json.JSONDecodeError, AttributeError):
+    #         return False
 
-    def _process_mcp_tool_call(self, message: str) -> fastworkflow.CommandOutput:
-        # sourcery skip: class-extract-method, extract-method
-        """Process an MCP tool call message"""
-        workflow = ChatSession.get_active_workflow()
+    # def _process_mcp_tool_call(self, message: str) -> fastworkflow.CommandOutput:
+    #     # sourcery skip: class-extract-method, extract-method
+    #     """Process an MCP tool call message"""
+    #     workflow = ChatSession.get_active_workflow()
 
-        try:
-            # Parse JSON message
-            data = json.loads(message)
-            tool_call_data = data["tool_call"]
+    #     try:
+    #         # Parse JSON message
+    #         data = json.loads(message)
+    #         tool_call_data = data["tool_call"]
 
-            # Create MCPToolCall object
-            tool_call = fastworkflow.MCPToolCall(
-                name=tool_call_data["name"],
-                arguments=tool_call_data["arguments"]
-            )
+    #         # Create MCPToolCall object
+    #         tool_call = fastworkflow.MCPToolCall(
+    #             name=tool_call_data["name"],
+    #             arguments=tool_call_data["arguments"]
+    #         )
 
-            # Execute via command executor
-            mcp_result = self._CommandExecutor.perform_mcp_tool_call(
-                workflow,
-                tool_call,
-                command_context=workflow.current_command_context_name
-            )
+    #         # Execute via command executor
+    #         mcp_result = self._CommandExecutor.perform_mcp_tool_call(
+    #             workflow,
+    #             tool_call,
+    #             command_context=workflow.current_command_context_name
+    #         )
 
-            # Convert MCPToolResult back to CommandOutput for consistency
-            command_output = self._convert_mcp_result_to_command_output(mcp_result)
+    #         # Convert MCPToolResult back to CommandOutput for consistency
+    #         command_output = self._convert_mcp_result_to_command_output(mcp_result)
 
-            # Put in output queue if needed
-            if (not command_output.success or self._keep_alive) and self.command_output_queue:
-                self.command_output_queue.put(command_output)
+    #         # Put in output queue if needed
+    #         if (not command_output.success or self._keep_alive) and self.command_output_queue:
+    #             self.command_output_queue.put(command_output)
 
-            # Flush on successful or failed tool call – state may have changed.
-            if workflow := ChatSession.get_active_workflow():
-                workflow.flush()
+    #         # Flush on successful or failed tool call – state may have changed.
+    #         if workflow := ChatSession.get_active_workflow():
+    #             workflow.flush()
 
-            return command_output
+    #         return command_output
 
-        except Exception as e:
-            logger.error(f"Error processing MCP tool call: {e}. Tool call content: {message}")
-            return self._process_message(message)  # process as a message
+    #     except Exception as e:
+    #         logger.error(f"Error processing MCP tool call: {e}. Tool call content: {message}")
+    #         return self._process_message(message)  # process as a message
 
-    def _convert_mcp_result_to_command_output(self, mcp_result: fastworkflow.MCPToolResult) -> fastworkflow.CommandOutput:
-        """Convert MCPToolResult to CommandOutput for compatibility"""
-        command_response = fastworkflow.CommandResponse(
-            response=mcp_result.content[0].text if mcp_result.content else "No response",
-            success=not mcp_result.isError
-        )
+    # def _convert_mcp_result_to_command_output(self, mcp_result: fastworkflow.MCPToolResult) -> fastworkflow.CommandOutput:
+    #     """Convert MCPToolResult to CommandOutput for compatibility"""
+    #     command_response = fastworkflow.CommandResponse(
+    #         response=mcp_result.content[0].text if mcp_result.content else "No response",
+    #         success=not mcp_result.isError
+    #     )
 
-        command_output = fastworkflow.CommandOutput(command_responses=[command_response])
-        command_output._mcp_source = mcp_result  # Mark for special formatting
-        return command_output
+    #     command_output = fastworkflow.CommandOutput(command_responses=[command_response])
+    #     command_output._mcp_source = mcp_result  # Mark for special formatting
+    #     return command_output
 
     def _process_agent_message(self, message: str) -> fastworkflow.CommandOutput:
+        # sourcery skip: class-extract-method
         """Process a message in agent mode using workflow tool agent"""
         # The agent processes the user's message and may make multiple tool calls
         # to the workflow internally (directly via CommandExecutor)
@@ -429,7 +438,13 @@ class ChatSession:
         if os.path.exists("action.json"):
             os.remove("action.json")
 
-
+        refined_user_query = self._refine_user_query(message, self.conversation_history)
+
+        from fastworkflow.workflow_agent import build_query_with_next_steps
+        command_info_and_refined_message_with_todolist = build_query_with_next_steps(
+            refined_user_query,
+            self
+        )
 
         lm = dspy_utils.get_lm("LLM_AGENT", "LITELLM_API_KEY_AGENT")
         from dspy.utils.exceptions import AdapterParseError
@@ -439,8 +454,7 @@ class ChatSession:
         try:
             with dspy.context(lm=lm, adapter=dspy.ChatAdapter()):
                 agent_result = self._workflow_tool_agent(
-                    user_query=
-                    conversation_history=self.conversation_history
+                    user_query=command_info_and_refined_message_with_todolist
                 )
             break  # Success, exit retry loop
         except AdapterParseError as _:
@@ -460,22 +474,22 @@ class ChatSession:
         # Create CommandOutput with the agent's response
         command_response = fastworkflow.CommandResponse(response=result_text)
 
-
+        conversation_summary = message
         # Attach actions captured during agent execution as artifacts if available
         if os.path.exists("action.json"):
             with open("action.json", "r", encoding="utf-8") as f:
                 actions = [json.loads(line) for line in f if line.strip()]
-
-            command_response.artifacts["
+            conversation_summary = self._extract_conversation_summary(message, actions, result_text)
+        command_response.artifacts["conversation_summary"] = conversation_summary
 
         self.conversation_history.messages.append(
-            {"
-            "agent_response": result_text}
+            {f"conversation {len(self.conversation_history.messages) + 1}": conversation_summary}
         )
 
         command_output = fastworkflow.CommandOutput(
             command_responses=[command_response]
         )
+        command_output.workflow_name = self._current_workflow.folderpath.split('/')[-1]
 
         # Put output in queue (following same pattern as _process_message)
         if (not command_output.success or self._keep_alive) and \
@@ -487,6 +501,78 @@ class ChatSession:
                 workflow.flush()
 
         return command_output
+
+    def _process_message(self, message: str) -> fastworkflow.CommandOutput:
+        """Process a single message"""
+        # Use our specialized profiling method
+        # command_output = self.profile_invoke_command(message)
+
+        command_output = self._CommandExecutor.invoke_command(self, message)
+        if (not command_output.success or self._keep_alive) and \
+                self.command_output_queue:
+            self.command_output_queue.put(command_output)
+
+        # Persist workflow state changes lazily accumulated during message processing.
+        if workflow := ChatSession.get_active_workflow():
+            workflow.flush()
+
+        return command_output
+
+    def _process_action(self, action: fastworkflow.Action) -> fastworkflow.CommandOutput:
+        """Process a startup action"""
+        workflow = ChatSession.get_active_workflow()
+        command_output = self._CommandExecutor.perform_action(workflow, action)
+        if (not command_output.success or self._keep_alive) and \
+                self.command_output_queue:
+            self.command_output_queue.put(command_output)
+
+        # Flush any pending workflow updates triggered by this startup action.
+        if workflow:
+            workflow.flush()
+
+        return command_output
+
+    def _refine_user_query(self, user_query: str, conversation_history: dspy.History) -> str:
+        """
+        Refine user query using conversation history.
+        Return the refined user query.
+        """
+        if conversation_history.messages:
+            messages = []
+            for conv_dict in conversation_history.messages[-5:]:
+                messages.extend([
+                    f'{k}: {v}' for k, v in conv_dict.items()
+                ])
+            messages.append(f'new_user_query: {user_query}')
+            return '\n'.join(messages)
+
+        return user_query
+
+    def _extract_conversation_summary(self,
+            user_query: str, workflow_actions: list[dict[str, str]], final_agent_response: str) -> str:
+        """
+        Summarizes conversation based on original user query, workflow actions and agent response.
+        """
+        class ConversationSummarySignature(dspy.Signature):
+            """
+            A summary of conversation
+            Omit descriptions of action sequences
+            Capture relevant facts and parameter values from user query, workflow actions and agent response
+            """
+            user_query: str = dspy.InputField()
+            workflow_actions: list[dict[str, str]] = dspy.InputField()
+            final_agent_response: str = dspy.InputField()
+            conversation_summary: str = dspy.OutputField(desc="A multiline paragraph summary")
+
+        planner_lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
+        with dspy.context(lm=planner_lm):
+            cs_func = dspy.ChainOfThought(ConversationSummarySignature)
+            prediction = cs_func(
+                user_query=user_query,
+                workflow_actions=workflow_actions,
+                final_agent_response=final_agent_response)
+        return prediction.conversation_summary
+
 
     def profile_invoke_command(self, message: str):
         """
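_refine_user_query is pure string assembly, so its effect can be sketched standalone. A minimal example (the history entries are invented; in practice they are the keyed conversation summaries that _process_agent_message appends above):

import dspy

history = dspy.History(messages=[
    {"conversation 1": "User asked about order #W123; it has shipped."},
    {"conversation 2": "User exchanged one item on order #W123 for a larger size."},
])

# Mirrors _refine_user_query: flatten the last 5 entries into 'key: value'
# lines, then append the new query.
lines = [f'{k}: {v}' for conv_dict in history.messages[-5:] for k, v in conv_dict.items()]
lines.append('new_user_query: what is the refund status for order #W123?')
print('\n'.join(lines))
# conversation 1: User asked about order #W123; it has shipped.
# conversation 2: User exchanged one item on order #W123 for a larger size.
# new_user_query: what is the refund status for order #W123?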
@@ -576,91 +662,3 @@
         print(f"Detailed report saved to {os.path.abspath(report_file)}")
 
         return result
-
-    def _process_message(self, message: str) -> fastworkflow.CommandOutput:
-        """Process a single message"""
-        # Use our specialized profiling method
-        # command_output = self.profile_invoke_command(message)
-
-        command_output = self._CommandExecutor.invoke_command(self, message)
-        if (not command_output.success or self._keep_alive) and \
-                self.command_output_queue:
-            self.command_output_queue.put(command_output)
-
-        # Persist workflow state changes lazily accumulated during message processing.
-        if workflow := ChatSession.get_active_workflow():
-            workflow.flush()
-
-        return command_output
-
-    def _process_action(self, action: fastworkflow.Action) -> fastworkflow.CommandOutput:
-        """Process a startup action"""
-        workflow = ChatSession.get_active_workflow()
-        command_output = self._CommandExecutor.perform_action(workflow, action)
-        if (not command_output.success or self._keep_alive) and \
-                self.command_output_queue:
-            self.command_output_queue.put(command_output)
-
-        # Flush any pending workflow updates triggered by this startup action.
-        if workflow:
-            workflow.flush()
-
-        return command_output
-
-    def _think_and_plan(self, user_query: str, conversation_history: dspy.History) -> str:
-        """
-        Returns a refined plan by breaking down a user_query into simpler tasks.
-        """
-        class TaskPlannerSignature(dspy.Signature):
-            """
-            Break down a user_query into simpler tasks based only on available commands and conversation_history.
-            If user_query is simple, return a single todo that is the user_query as-is
-            """
-            user_query: str = dspy.InputField()
-            conversation_history: dspy.History = dspy.InputField()
-            available_commands: list[str] = dspy.InputField()
-            todo_list: list[str] = dspy.OutputField(desc="task descriptions as short sentences")
-
-        current_workflow = ChatSession.get_active_workflow()
-        available_commands = CommandMetadataAPI.get_command_display_text(
-            subject_workflow_path=current_workflow.folderpath,
-            cme_workflow_path=fastworkflow.get_internal_workflow_path("command_metadata_extraction"),
-            active_context_name=current_workflow.current_command_context_name,
-        )
-
-        planner_lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
-        with dspy.context(lm=planner_lm):
-            task_planner_func = dspy.ChainOfThought(TaskPlannerSignature)
-            prediction = task_planner_func(
-                user_query=user_query,
-                conversation_history=conversation_history,
-                available_commands=available_commands)
-
-        if not prediction.todo_list or (len(prediction.todo_list) == 1 and prediction.todo_list[0] == user_query):
-            return user_query
-
-        steps_list = '\n'.join([f'{i + 1}. {task}' for i, task in enumerate(prediction.todo_list)])
-        return f"{user_query}\nNext steps:\n{steps_list}"
-
-
-    def _extract_user_instructions(self,
-            user_query: str, workflow_actions: list[dict[str, str]]) -> str:
-        """
-        Summarizes user instructions based on original user query and subsequent user feedback in workflow actions.
-        """
-        class UserInstructionCompilerSignature(dspy.Signature):
-            """
-            Concise summary of user instructions based on their commands to the workflow.
-            Include parameter values passed in commands in the summary.
-            """
-            commands_list: list[str] = dspy.InputField()
-            user_instructions_summary: str = dspy.OutputField(desc="A single paragraph summary")
-
-        commands_list: list[str] = [user_query]
-        commands_list.extend([wf_action['command'] for wf_action in workflow_actions if 'command' in wf_action])
-
-        planner_lm = dspy_utils.get_lm("LLM_PLANNER", "LITELLM_API_KEY_PLANNER")
-        with dspy.context(lm=planner_lm):
-            uic_func = dspy.ChainOfThought(UserInstructionCompilerSignature)
-            prediction = uic_func(commands_list=commands_list)
-            return prediction.user_instructions_summary