fastworkflow 2.15.5__py3-none-any.whl → 2.17.13__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in a supported public registry. It is provided for informational purposes only.
- fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py +1 -1
- fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py +16 -2
- fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py +27 -570
- fastworkflow/_workflows/command_metadata_extraction/intent_detection.py +360 -0
- fastworkflow/_workflows/command_metadata_extraction/parameter_extraction.py +411 -0
- fastworkflow/chat_session.py +379 -206
- fastworkflow/cli.py +80 -165
- fastworkflow/command_context_model.py +73 -7
- fastworkflow/command_executor.py +14 -5
- fastworkflow/command_metadata_api.py +106 -6
- fastworkflow/examples/fastworkflow.env +2 -1
- fastworkflow/examples/fastworkflow.passwords.env +2 -1
- fastworkflow/examples/retail_workflow/_commands/exchange_delivered_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/find_user_id_by_email.py +6 -5
- fastworkflow/examples/retail_workflow/_commands/modify_pending_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/return_delivered_order_items.py +13 -2
- fastworkflow/examples/retail_workflow/_commands/transfer_to_human_agents.py +1 -1
- fastworkflow/intent_clarification_agent.py +131 -0
- fastworkflow/mcp_server.py +3 -3
- fastworkflow/run/__main__.py +33 -40
- fastworkflow/run_fastapi_mcp/README.md +373 -0
- fastworkflow/run_fastapi_mcp/__main__.py +1300 -0
- fastworkflow/run_fastapi_mcp/conversation_store.py +391 -0
- fastworkflow/run_fastapi_mcp/jwt_manager.py +341 -0
- fastworkflow/run_fastapi_mcp/mcp_specific.py +103 -0
- fastworkflow/run_fastapi_mcp/redoc_2_standalone_html.py +40 -0
- fastworkflow/run_fastapi_mcp/utils.py +517 -0
- fastworkflow/train/__main__.py +1 -1
- fastworkflow/utils/chat_adapter.py +99 -0
- fastworkflow/utils/python_utils.py +4 -4
- fastworkflow/utils/react.py +258 -0
- fastworkflow/utils/signatures.py +338 -139
- fastworkflow/workflow.py +1 -5
- fastworkflow/workflow_agent.py +185 -133
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/METADATA +16 -18
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/RECORD +40 -30
- fastworkflow/run_agent/__main__.py +0 -294
- fastworkflow/run_agent/agent_module.py +0 -194
- /fastworkflow/{run_agent → run_fastapi_mcp}/__init__.py +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/LICENSE +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/WHEEL +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/entry_points.txt +0 -0
fastworkflow/_workflows/command_metadata_extraction/intent_detection.py (new file)

@@ -0,0 +1,360 @@

```python
from typing import Optional
import os
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed

from pydantic import BaseModel
from speedict import Rdict

import fastworkflow
from fastworkflow.utils.logging import logger
from fastworkflow import NLUPipelineStage
from fastworkflow.cache_matching import cache_match, store_utterance_cache
from fastworkflow.model_pipeline_training import (
    CommandRouter
)

from fastworkflow.utils.fuzzy_match import find_best_matches


class CommandNamePrediction:
    class Output(BaseModel):
        command_name: Optional[str] = None
        error_msg: Optional[str] = None
        is_cme_command: bool = False

    def __init__(self, cme_workflow: fastworkflow.Workflow):
        self.cme_workflow = cme_workflow
        self.app_workflow = cme_workflow.context["app_workflow"]
        self.app_workflow_folderpath = self.app_workflow.folderpath
        self.app_workflow_id = self.app_workflow.id

        self.convo_path = os.path.join(self.app_workflow_folderpath, "___convo_info")
        self.cache_path = self._get_cache_path(self.app_workflow_id, self.convo_path)
        self.path = self._get_cache_path_cache(self.convo_path)

    def predict(self, command_context_name: str, command: str, nlu_pipeline_stage: NLUPipelineStage) -> "CommandNamePrediction.Output":
        # sourcery skip: extract-duplicate-method

        model_artifact_path = f"{self.app_workflow_folderpath}/___command_info/{command_context_name}"
        command_router = CommandRouter(model_artifact_path)

        # Re-use the already-built ModelPipeline attached to the router
        # instead of instantiating a fresh one. This avoids reloading HF
        # checkpoints and transferring tensors each time we see a new
        # message for the same context.
        modelpipeline = command_router.modelpipeline

        crd = fastworkflow.RoutingRegistry.get_definition(
            self.cme_workflow.folderpath)
        cme_command_names = crd.get_command_names('IntentDetection')

        valid_command_names = set()
        if nlu_pipeline_stage == NLUPipelineStage.INTENT_AMBIGUITY_CLARIFICATION:
            valid_command_names = self._get_suggested_commands(self.path)
        elif nlu_pipeline_stage in (
                NLUPipelineStage.INTENT_DETECTION,
                NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION):
            app_crd = fastworkflow.RoutingRegistry.get_definition(
                self.app_workflow_folderpath)
            valid_command_names = (
                set(cme_command_names) |
                set(app_crd.get_command_names(command_context_name))
            )

        command_name_dict = {
            fully_qualified_command_name.split('/')[-1]: fully_qualified_command_name
            for fully_qualified_command_name in valid_command_names
        }

        if nlu_pipeline_stage == NLUPipelineStage.INTENT_AMBIGUITY_CLARIFICATION:
            # what_can_i_do is special in INTENT_AMBIGUITY_CLARIFICATION.
            # We will not predict, just match plain utterances with exact or fuzzy match
            command_name_dict |= {
                plain_utterance: 'IntentDetection/what_can_i_do'
                for plain_utterance in crd.command_directory.map_command_2_utterance_metadata[
                    'IntentDetection/what_can_i_do'
                ].plain_utterances
            }

        if nlu_pipeline_stage != NLUPipelineStage.INTENT_DETECTION:
            # abort is special.
            # We will not predict, just match plain utterances with exact or fuzzy match
            command_name_dict |= {
                plain_utterance: 'ErrorCorrection/abort'
                for plain_utterance in crd.command_directory.map_command_2_utterance_metadata[
                    'ErrorCorrection/abort'
                ].plain_utterances
            }

        if nlu_pipeline_stage != NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION:
            # you_misunderstood is special.
            # We will not predict, just match plain utterances with exact or fuzzy match
            command_name_dict |= {
                plain_utterance: 'ErrorCorrection/you_misunderstood'
                for plain_utterance in crd.command_directory.map_command_2_utterance_metadata[
                    'ErrorCorrection/you_misunderstood'
                ].plain_utterances
            }

        # See if the command starts with a command name followed by a space or a '('
        tentative_command_name = command.split(" ", 1)[0].split("(", 1)[0]
        normalized_command_name = tentative_command_name.lower()
        command_name = None
        if normalized_command_name in command_name_dict:
            command_name = normalized_command_name
            command = command.replace(tentative_command_name, "").strip().replace("  ", " ")
        else:
            # Use Levenshtein distance for fuzzy matching with the full command part after @
            best_matched_commands, _ = find_best_matches(
                command.replace(" ", "_"),
                command_name_dict.keys(),
                threshold=0.3  # Adjust threshold as needed
            )
            if best_matched_commands:
                command_name = best_matched_commands[0]

        if nlu_pipeline_stage == NLUPipelineStage.INTENT_DETECTION:
            if not command_name:
                if cache_result := cache_match(self.path, command, modelpipeline, 0.85):
                    command_name = cache_result
                else:
                    predictions = command_router.predict(command)
                    # predictions = majority_vote_predictions(command_router, command)

                    if len(predictions) == 1:
                        command_name = predictions[0].split('/')[-1]
                    else:
                        # If confidence is low, treat as ambiguous command (type 1)
                        error_msg = self._formulate_ambiguous_command_error_message(
                            predictions, "run_as_agent" in self.app_workflow.context)

                        # Store suggested commands
                        self._store_suggested_commands(self.path, predictions, 1)
                        return CommandNamePrediction.Output(error_msg=error_msg)

        elif nlu_pipeline_stage in (
            NLUPipelineStage.INTENT_AMBIGUITY_CLARIFICATION,
            NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION
        ) and not command_name:
            command_name = "what can i do?"

        if not command_name or command_name == "wildcard":
            fully_qualified_command_name = None
            is_cme_command = False
        else:
            fully_qualified_command_name = command_name_dict[command_name]
            is_cme_command = (
                fully_qualified_command_name in cme_command_names or
                fully_qualified_command_name in crd.get_command_names('ErrorCorrection')
            )

        if (
            nlu_pipeline_stage
            in (
                NLUPipelineStage.INTENT_AMBIGUITY_CLARIFICATION,
                NLUPipelineStage.INTENT_MISUNDERSTANDING_CLARIFICATION,
            )
            and not fully_qualified_command_name.endswith('abort')
            and not fully_qualified_command_name.endswith('what_can_i_do')
            and not fully_qualified_command_name.endswith('you_misunderstood')
        ):
            command = self.cme_workflow.context["command"]
            store_utterance_cache(self.path, command, command_name, modelpipeline)

        return CommandNamePrediction.Output(
            command_name=fully_qualified_command_name,
            is_cme_command=is_cme_command
        )
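    # Illustrative usage (not part of the diff): the surrounding
    # command-metadata-extraction workflow presumably drives this class
    # roughly as follows; names here are for orientation only.
    #
    #     predictor = CommandNamePrediction(cme_workflow)
    #     output = predictor.predict(context_name, user_command,
    #                                NLUPipelineStage.INTENT_DETECTION)
    #     if output.error_msg:          # ambiguous -> surface the suggestions
    #         ...
    #     elif output.command_name:     # resolved -> route the command
    #         ...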
    @staticmethod
    def _get_cache_path(workflow_id, convo_path):
        """
        Generate cache file path based on workflow ID
        """
        base_dir = convo_path
        # Create directory if it doesn't exist
        os.makedirs(base_dir, exist_ok=True)
        return os.path.join(base_dir, f"{workflow_id}.db")

    @staticmethod
    def _get_cache_path_cache(convo_path):
        """
        Generate the path of the shared utterance-cache database
        """
        base_dir = convo_path
        # Create directory if it doesn't exist
        os.makedirs(base_dir, exist_ok=True)
        return os.path.join(base_dir, "cache.db")

    # Store the suggested commands with the flag type
    @staticmethod
    def _store_suggested_commands(cache_path, command_list, flag_type):
        """
        Store the list of suggested commands for the constrained selection

        Args:
            cache_path: Path to the cache database
            command_list: List of suggested commands
            flag_type: Type of constraint (1=ambiguous, 2=misclassified)
        """
        db = Rdict(cache_path)
        try:
            db["suggested_commands"] = command_list
            db["flag_type"] = flag_type
        finally:
            db.close()

    # Get the suggested commands
    @staticmethod
    def _get_suggested_commands(cache_path):
        """
        Get the list of suggested commands for the constrained selection
        """
        db = Rdict(cache_path)
        try:
            return db.get("suggested_commands", [])
        finally:
            db.close()

    @staticmethod
    def _get_count(cache_path):
        db = Rdict(cache_path)
        try:
            return db.get("utterance_count", 0)  # Default to 0 if key doesn't exist
        finally:
            db.close()

    @staticmethod
    def _print_db_contents(cache_path):
        db = Rdict(cache_path)
        try:
            print("All keys in database:", list(db.keys()))
            for key in db.keys():
                print(f"Key: {key}, Value: {db[key]}")
        finally:
            db.close()

    @staticmethod
    def _store_utterance(cache_path, utterance, label):
        """
        Store utterance in an existing or new database

        Returns: The utterance count used
        """
        # Open the database (creates it if it doesn't exist)
        db = Rdict(cache_path)

        try:
            # Get existing counter or initialize to 0
            utterance_count = db.get("utterance_count", 0)

            # Create and store the utterance entry
            utterance_data = {
                "utterance": utterance,
                "label": label
            }

            db[utterance_count] = utterance_data

            # Increment and store the counter
            utterance_count += 1
            db["utterance_count"] = utterance_count

            return utterance_count - 1  # Return the count used for this utterance

        finally:
            # Always close the database
            db.close()

    # Function to read from the database
    @staticmethod
    def _read_utterance(cache_path, utterance_id):
        """
        Read a specific utterance from the database
        """
        db = Rdict(cache_path)
        try:
            return db.get(utterance_id)['utterance']
        finally:
            db.close()

    @staticmethod
    def _formulate_ambiguous_command_error_message(
            route_choice_list: list[str], run_as_agent: bool) -> str:
        command_list = "\n".join([
            route_choice.split('/')[-1].lower()
            for route_choice in route_choice_list if route_choice != 'wildcard'
        ])

        return (
            "The command is ambiguous. "
            + (
                "Choose the correct command name from these possible options and update your command:\n"
                if run_as_agent
                else "Please choose a command name from these possible options:\n"
            )
            + f"{command_list}\n\nor type 'what can i do' to see all commands\n"
            + ("or type 'abort' to cancel" if run_as_agent else '')
        )
```
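All of the persistence helpers above follow the same open/try/close pattern over speedict's `Rdict` (a RocksDB-backed dict). A minimal round-trip sketch using only the calls the helpers themselves rely on; the path is illustrative:

```python
from speedict import Rdict

db = Rdict("/tmp/example_cache.db")  # illustrative path; creates the DB if absent
try:
    # Same keys _store_suggested_commands writes
    db["suggested_commands"] = ["return_delivered_order_items", "exchange_delivered_order_items"]
    db["flag_type"] = 1  # 1=ambiguous, 2=misclassified
    print(db.get("suggested_commands", []))  # -> the stored list
finally:
    db.close()  # the helpers always close in a finally block
```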
The new module closes with a standalone majority-vote helper, currently referenced only by the commented-out call in `predict` above:

```python
# TODO - generation is deterministic. They all return the same answer
# TODO - Need 'temperature' for intent detection pipeline
def majority_vote_predictions(command_router, command: str, n_predictions: int = 5) -> list[str]:
    """
    Generate N prediction sets in parallel and return the set that wins the majority vote.

    This function improves prediction reliability by running multiple parallel predictions
    and selecting the most common result through majority voting. This helps reduce
    the impact of random variations in model predictions.

    Args:
        command_router: The CommandRouter instance to use for predictions
        command: The input command string
        n_predictions: Number of parallel predictions to generate (default: 5).
            Can be configured via the N_PARALLEL_PREDICTIONS environment variable

    Returns:
        The prediction set that received the majority vote. Falls back to a single
        prediction if all parallel predictions fail.

    Note:
        Uses ThreadPoolExecutor with max_workers limited to min(n_predictions, 10)
        to avoid overwhelming the system with too many concurrent threads.
    """
    def get_single_prediction():
        """Helper function to get a single prediction"""
        return command_router.predict(command)

    # Generate N predictions in parallel
    prediction_sets = []
    with ThreadPoolExecutor(max_workers=min(n_predictions, 10)) as executor:
        # Submit all prediction tasks
        futures = [executor.submit(get_single_prediction) for _ in range(n_predictions)]

        # Collect results as they complete
        for future in as_completed(futures):
            try:
                prediction_set = future.result()
                prediction_sets.append(prediction_set)
            except Exception as e:
                logger.warning(f"Prediction failed: {e}")
                # Continue with other predictions even if one fails

    if not prediction_sets:
        # Fall back to a single prediction if all parallel predictions failed
        logger.warning("All parallel predictions failed, falling back to single prediction")
        return command_router.predict(command)

    # Convert lists to tuples so they can be hashed and counted
    prediction_tuples = [tuple(sorted(pred_set)) for pred_set in prediction_sets]

    # Count occurrences of each unique prediction set
    vote_counts = Counter(prediction_tuples)

    # Get the prediction set with the most votes
    winning_tuple = vote_counts.most_common(1)[0][0]

    # Convert back to list and return
    return list(winning_tuple)
```
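The voting logic itself is easy to sanity-check in isolation. Below is a minimal, self-contained sketch with a stub standing in for `CommandRouter`; the stub, its canned outputs, and the example command are invented for illustration (per the TODOs above, the real router only benefits from voting once a sampling temperature makes it nondeterministic):

```python
import random
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed


class StubRouter:
    """Hypothetical stand-in for CommandRouter: returns a noisy prediction set."""
    def predict(self, command: str) -> list[str]:
        # Usually confident, occasionally ambiguous - mimics sampling noise
        return random.choice(
            [["retail/find_user_id_by_email"]] * 4
            + [["retail/find_user_id_by_email", "retail/transfer_to_human_agents"]]
        )


def majority_vote(router: StubRouter, command: str, n: int = 5) -> list[str]:
    # Same shape as majority_vote_predictions above, minus logging and fallback
    sets = []
    with ThreadPoolExecutor(max_workers=min(n, 10)) as executor:
        futures = [executor.submit(router.predict, command) for _ in range(n)]
        sets.extend(f.result() for f in as_completed(futures))
    # Sorted tuples make prediction sets hashable and order-insensitive
    winner = Counter(tuple(sorted(s)) for s in sets).most_common(1)[0][0]
    return list(winner)


print(majority_vote(StubRouter(), "find the user id for sam@example.com"))
# Most runs print ['retail/find_user_id_by_email'] even though roughly
# one in five individual predictions comes back ambiguous.
```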