fastworkflow 2.15.5-py3-none-any.whl → 2.17.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastworkflow/_workflows/command_metadata_extraction/_commands/ErrorCorrection/you_misunderstood.py +1 -1
- fastworkflow/_workflows/command_metadata_extraction/_commands/IntentDetection/what_can_i_do.py +16 -2
- fastworkflow/_workflows/command_metadata_extraction/_commands/wildcard.py +27 -570
- fastworkflow/_workflows/command_metadata_extraction/intent_detection.py +360 -0
- fastworkflow/_workflows/command_metadata_extraction/parameter_extraction.py +411 -0
- fastworkflow/chat_session.py +379 -206
- fastworkflow/cli.py +80 -165
- fastworkflow/command_context_model.py +73 -7
- fastworkflow/command_executor.py +14 -5
- fastworkflow/command_metadata_api.py +106 -6
- fastworkflow/examples/fastworkflow.env +2 -1
- fastworkflow/examples/fastworkflow.passwords.env +2 -1
- fastworkflow/examples/retail_workflow/_commands/exchange_delivered_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/find_user_id_by_email.py +6 -5
- fastworkflow/examples/retail_workflow/_commands/modify_pending_order_items.py +32 -3
- fastworkflow/examples/retail_workflow/_commands/return_delivered_order_items.py +13 -2
- fastworkflow/examples/retail_workflow/_commands/transfer_to_human_agents.py +1 -1
- fastworkflow/intent_clarification_agent.py +131 -0
- fastworkflow/mcp_server.py +3 -3
- fastworkflow/run/__main__.py +33 -40
- fastworkflow/run_fastapi_mcp/README.md +373 -0
- fastworkflow/run_fastapi_mcp/__main__.py +1300 -0
- fastworkflow/run_fastapi_mcp/conversation_store.py +391 -0
- fastworkflow/run_fastapi_mcp/jwt_manager.py +341 -0
- fastworkflow/run_fastapi_mcp/mcp_specific.py +103 -0
- fastworkflow/run_fastapi_mcp/redoc_2_standalone_html.py +40 -0
- fastworkflow/run_fastapi_mcp/utils.py +517 -0
- fastworkflow/train/__main__.py +1 -1
- fastworkflow/utils/chat_adapter.py +99 -0
- fastworkflow/utils/python_utils.py +4 -4
- fastworkflow/utils/react.py +258 -0
- fastworkflow/utils/signatures.py +338 -139
- fastworkflow/workflow.py +1 -5
- fastworkflow/workflow_agent.py +185 -133
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/METADATA +16 -18
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/RECORD +40 -30
- fastworkflow/run_agent/__main__.py +0 -294
- fastworkflow/run_agent/agent_module.py +0 -194
- /fastworkflow/{run_agent → run_fastapi_mcp}/__init__.py +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/LICENSE +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/WHEEL +0 -0
- {fastworkflow-2.15.5.dist-info → fastworkflow-2.17.13.dist-info}/entry_points.txt +0 -0
fastworkflow/utils/signatures.py
CHANGED
@@ -1,4 +1,5 @@
 import sys
+import ast
 import dspy
 import os
 from contextlib import suppress
@@ -156,7 +157,7 @@ Today's date is {today}.
 def create_signature_from_pydantic_model(
 pydantic_model: Type[BaseModel]
 ) -> Type[dspy.Signature]:
-
+"""
 Create a DSPy Signature class from a Pydantic model with type annotations.

 Args:
@@ -165,79 +166,76 @@ Today's date is {today}.
 Returns:
 A DSPy Signature class
 """
-
-
-
+signature_components = {
+"statement": (str, dspy.InputField(desc="Statement according to Dhar"))
+}

-
-
+steps = []
+field_num = 1

-
-
-
+for attribute_name, attribute_metadata in pydantic_model.model_fields.items():
+is_optional = False
+attribute_type = attribute_metadata.annotation

-
-
-
-
-
-
-NOT_FOUND = fastworkflow.get_env_var("NOT_FOUND")
-if attribute_type is str:
-default_value = NOT_FOUND
-elif attribute_type is int:
-default_value = INVALID_INT_VALUE
-elif attribute_type is float:
-default_value = -sys.float_info.max
-else:
-default_value = None
+if hasattr(attribute_type, "__origin__") and attribute_type.__origin__ is Union:
+union_elements = get_args(attribute_type)
+if type(None) in union_elements:
+is_optional = True
+attribute_type = next((elem for elem in union_elements if elem is not type(None)), str)

-
-
-
-
-
-
+NOT_FOUND = fastworkflow.get_env_var("NOT_FOUND")
+if attribute_type is str:
+default_value = NOT_FOUND
+elif attribute_type is int:
+default_value = INVALID_INT_VALUE
+elif attribute_type is float:
+default_value = -sys.float_info.max
+else:
+default_value = None
+
+if (
+attribute_metadata.default is not PydanticUndefined and
+attribute_metadata.default is not None and
+attribute_metadata.default != Ellipsis
+):
+default_value = attribute_metadata.default

-
+info_text = attribute_metadata.description or f"The {attribute_name}"

-
-
-
+if attribute_name != "query":
+steps.append(f"Step {field_num}: Identify the {attribute_name} ({info_text}).")
+field_num += 1

-
-
-
+if isinstance(attribute_type, type) and issubclass(attribute_type, Enum):
+possible_values = [f"'{option.value}'" for option in attribute_type]
+info_text += f". Valid values: {', '.join(possible_values)}"

-
-
-
+if attribute_metadata.examples:
+sample_values = ", ".join([f"'{sample}'" for sample in attribute_metadata.examples])
+info_text += f". Examples: {sample_values}"

-
-
+requirement_status = "Optional" if is_optional else "Required"
+info_text += f". This field is {requirement_status}."

-
-
-
-
+if is_optional:
+info_text += f" If not mentioned in the query, use: '{default_value or 'None'}'."
+elif default_value is not None:
+info_text += f" Default value: '{default_value}'."

-
-
+field_definition = dspy.OutputField(desc=info_text, default=default_value)
+signature_components[attribute_name] = (attribute_metadata.annotation, field_definition)

 steps.extend((
-f"Step {field_num}:
-"
-"For parameters specified as enums, return the default value if the parameter value is not explicitly specified in the query",
-"Return None for the parameter value which is missing in the query",
-"Always return the query in the output.",
+f"Step {field_num}: ",
+"For missing parameter values - return the default if it is specified otherwise return None",
 ))
-
-
-
-
-
+
+generated_docstring = (
+f"Extract parameter values from the statement according to Dhar. Today's date is {date.today()}.\n"
+f"{chr(10).join(steps)}"
+)

-
+return dspy.Signature(signature_components, generated_docstring)

 def extract_parameters(self, CommandParameters: Type[BaseModel] = None, subject_command_name: str = None, workflow_folderpath: str = None) -> BaseModel:
 """
@@ -269,7 +267,7 @@ Today's date is {today}.
 self.predictor = dspy.ChainOfThought(signature)

 def forward(self, command=None):
-return self.predictor(
+return self.predictor(statement=command)

 param_extractor = ParamExtractor(params_signature)

@@ -285,8 +283,24 @@ Today's date is {today}.
 trainset=trainset
 )

+def basic_checks(args, pred):
+for field_name in field_names:
+# return 0 if it extracts an example value instead of correct value | None
+extracted_param_value = getattr(pred, field_name)
+examples = model_class.model_fields[field_name].examples
+if examples and extracted_param_value in examples:
+return 0.0
+return 1.0
+
+# Create a refined module that tries up to 3 times
+best_of_3 = dspy.BestOfN(
+module=compiled_model,
+N=3,
+reward_fn=basic_checks,
+threshold=1.0)
+
 try:
-dspy_result =
+dspy_result = best_of_3(command=self.command)
 for field_name in field_names:
 default = model_class.model_fields[field_name].default
 param_dict[field_name] = getattr(dspy_result, field_name, default)
@@ -302,7 +316,7 @@ Today's date is {today}.
 def validate_parameters(self,
 app_workflow: fastworkflow.Workflow,
 subject_command_name: str,
-cmd_parameters: BaseModel) -> Tuple[bool, str, Dict[str, List[str]]]:
+cmd_parameters: BaseModel) -> Tuple[bool, str, Dict[str, List[str]], List[str]]:
 """
 Check if the parameters are valid in the current context, including database lookups.
 """
@@ -319,62 +333,259 @@ Today's date is {today}.
 invalid_fields = []
 all_suggestions = {}

-# Check required fields
 for field_name, field_info in type(cmd_parameters).model_fields.items():
-
+field_value = getattr(cmd_parameters, field_name, None)
+
+if field_value not in [NOT_FOUND, None, INVALID_INT_VALUE, INVALID_FLOAT_VALUE]:
+annotation = field_info.annotation
+
+# Build list of candidate concrete types (exclude NoneType from Union)
+candidate_types: List[Type[Any]] = []
+if hasattr(annotation, "__origin__") and annotation.__origin__ is Union:
+for t in get_args(annotation):
+if t is not type(None):  # noqa: E721
+candidate_types.append(t)  # type: ignore[arg-type]
+else:
+candidate_types = [annotation]  # type: ignore[list-item]
+
+def build_type_suggestion() -> List[str]:
+examples = getattr(field_info, "examples", []) or []
+example = examples[0] if examples else None
+# Enum suggestions list valid values
+enum_types = [t for t in candidate_types if isinstance(t, type) and issubclass(t, Enum)]
+if enum_types:
+opts = [f"'{opt.value}'" for t in enum_types for opt in t]
+return [f"Please provide a value matching the expected type/format. Valid values: {', '.join(opts)}"]
+# List suggestions
+def _is_list_type(tt):
+try:
+return hasattr(tt, "__origin__") and tt.__origin__ in (list, List)
+except Exception:
+return False
+list_types = [t for t in candidate_types if _is_list_type(t)]
+if list_types:
+inner_args = get_args(list_types[0])
+inner = inner_args[0] if inner_args else str
+inner_name = inner.__name__ if isinstance(inner, type) else str(inner)
+hint = (
+f"Please provide a list of {inner_name} values. Accepted formats: "
+f"JSON list (e.g., [\"a\", \"b\"]), Python list (e.g., ['a', 'b']), "
+f"or comma-separated (e.g., a,b)."
+)
+return [hint]
+# Fallback: show expected type names (handles unions)
+name_list: List[str] = []
+for t in candidate_types:
+if isinstance(t, type):
+name_list.append(t.__name__)
+else:
+name_list.append(str(t))
+base = f"Please provide a value matching the expected type/format: {' or '.join(name_list)}"
+if example is not None:
+base = f"{base} (e.g., {example})"
+return [base]
+
+valid_by_type = False
+corrected_value: Optional[Any] = None
+def _is_list_type(tt):
+try:
+return hasattr(tt, "__origin__") and tt.__origin__ in (list, List)
+except Exception:
+return False
+
+def _parse_list_like_string(s: str) -> Optional[list]:
+if not isinstance(s, str):
+return None
+text = s.strip()
+if text.startswith("[") and text.endswith("]"):
+with suppress(Exception):
+parsed = json.loads(text)
+if isinstance(parsed, list):
+return parsed
+# Try Python literal list
+with suppress(Exception):
+parsed = ast.literal_eval(text)
+if isinstance(parsed, list):
+return parsed
+# Comma-separated values
+if "," in text:
+parts = [p.strip() for p in text.split(",")]
+cleaned = [
+(p[1:-1] if len(p) >= 2 and ((p[0] == p[-1] == '"') or (p[0] == p[-1] == "'")) else p)
+for p in parts
+]
+return cleaned
+# Single value - treat as a list with one element
+if text:
+# Remove quotes if present
+if len(text) >= 2 and ((text[0] == text[-1] == '"') or (text[0] == text[-1] == "'")):
+return [text[1:-1]]
+return [text]
+return None
+
+def _coerce_scalar(expected_type: Type[Any], val: Any) -> Tuple[bool, Optional[Any]]:
+# str
+if expected_type is str:
+return True, str(val)
+# bool
+if expected_type is bool:
+if isinstance(val, bool):
+return True, val
+elif isinstance(val, str):
+lower_val = val.lower().strip()
+if lower_val in ('true', 'false'):
+return True, lower_val == 'true'
+# Also handle string representations of integers
+elif lower_val in ('0', '1'):
+return True, lower_val == '1'
+elif isinstance(val, int):
+return True, bool(val)
+return False, None
+# int
+if expected_type is int:
+if isinstance(val, bool):
+return False, None
+if isinstance(val, int):
+return True, val
+if isinstance(val, str):
+with suppress(Exception):
+return True, int(val.strip())
+return False, None
+# float
+if expected_type is float:
+if isinstance(val, (int, float)) and not isinstance(val, bool):
+return True, float(val)
+if isinstance(val, str):
+with suppress(Exception):
+return True, float(val.strip())
+return False, None
+# Enum
+if isinstance(expected_type, type) and issubclass(expected_type, Enum):
+ok, enum_val = _try_coerce_enum(expected_type, val)
+return (ok, enum_val if ok else None)
+# Unknown: accept if already instance
+return (True, val) if isinstance(val, expected_type) else (False, None)
+
+def _try_coerce_list(list_type: Any, value: Any) -> Tuple[bool, Optional[list]]:
+inner_args = get_args(list_type)
+inner_type = inner_args[0] if inner_args else str
+raw_list: Optional[list] = None
+if isinstance(value, list):
+raw_list = value
+elif isinstance(value, str):
+raw_list = _parse_list_like_string(value)
+if raw_list is None:
+return False, None
+coerced_list = []
+for item in raw_list:
+ok, coerced = _coerce_scalar(inner_type, item)
+if not ok:
+return False, None
+coerced_list.append(coerced)
+return True, coerced_list
+
+def _try_coerce_enum(enum_cls: Type[Enum], val: Any) -> Tuple[bool, Optional[Enum]]:
+if isinstance(val, enum_cls):
+return True, val
+if isinstance(val, str):
+for member in enum_cls:
+if val == member.value or val.lower() == str(member.name).lower():
+return True, member
+return False, None
+
+for t in candidate_types:
+# list[...] and typing.List[...] support
+if _is_list_type(t):
+ok, coerced_list = _try_coerce_list(t, field_value)
+if ok:
+corrected_value = coerced_list
+valid_by_type = True
+break
+# str, bool, int, float - use _coerce_scalar for consistency
+if t in (str, bool, int, float):
+ok, coerced = _coerce_scalar(t, field_value)
+if ok:
+corrected_value = coerced
+valid_by_type = True
+break  # Enum
+if isinstance(t, type) and issubclass(t, Enum):
+ok, enum_val = _try_coerce_enum(t, field_value)
+if ok:
+corrected_value = enum_val
+valid_by_type = True
+break
+
+if valid_by_type:
+if corrected_value is not None:
+pattern = next(
+(meta.pattern
+for meta in getattr(field_info, "metadata", [])
+if hasattr(meta, "pattern")),
+None,
+)
+if pattern and field_value is not None and field_value != NOT_FOUND:
+invalid_value = None
+if hasattr(field_info, "json_schema_extra") and field_info.json_schema_extra:
+invalid_value = field_info.json_schema_extra.get("invalid_value")
+
+# if invalid_value and field_value == invalid_value:
+#     invalid_fields.append(f"{field_name} '{field_value}'")
+#     pattern_str = str(pattern)
+#     examples = getattr(field_info, "examples", [])
+#     example = examples[0] if examples else ""
+#     all_suggestions[field_name] = [f"Please use the format matching pattern {pattern_str} (e.g., {example})"]
+#     is_valid = False
+
+# else:
+pattern_regex = re.compile(pattern)
+if not pattern_regex.fullmatch(str(field_value)):
+invalid_fields.append(f"{field_name} '{field_value}'")
+pattern_str = str(pattern)
+examples = getattr(field_info, "examples", [])
+example = examples[0] if examples else ""
+
+invalid_fields.append(f"{field_name} '{field_value}'")
+all_suggestions[field_name] = [f"Please use the format matching pattern {pattern_str} (e.g., {example})"]
+is_valid = False
+else:
+try:
+setattr(cmd_parameters, field_name, corrected_value)
+except Exception as e:
+logger.critical(f"Failed to set attribute {field_name} with value {corrected_value}")
+raise e
+else:
+try:
+setattr(cmd_parameters, field_name, corrected_value)
+except Exception as e:
+logger.critical(f"Failed to set attribute {field_name} with value {corrected_value}")
+raise e
+else:
+invalid_fields.append(f"{field_name} '{field_value}'")
+all_suggestions[field_name] = build_type_suggestion()
+is_valid = False

-
-
-
-
-
-
-
-is_required=True
-if is_optional:
-is_required=False
-
-# Only add to missing fields if it's required AND has no value
-if is_required and \
-field_value in [
-NOT_FOUND,
-None,
-INVALID_INT_VALUE,
-INVALID_FLOAT_VALUE
-]:
-missing_fields.append(field_name)
-is_valid = False
-
-pattern = next(
-(meta.pattern
-for meta in getattr(field_info, "metadata", [])
-if hasattr(meta, "pattern")),
-None,
-)
-if pattern and field_value is not None and field_value != NOT_FOUND:
-invalid_value = None
-if hasattr(field_info, "json_schema_extra") and field_info.json_schema_extra:
-invalid_value = field_info.json_schema_extra.get("invalid_value")
+is_optional = False
+attribute_type = field_info.annotation
+if hasattr(attribute_type, "__origin__") and attribute_type.__origin__ is Union:
+union_elements = get_args(attribute_type)
+if type(None) in union_elements:
+is_optional = True

-
-
-
-
-
-
+is_required=True
+if is_optional:
+is_required=False
+
+# Only add to missing fields if it's required AND has no value
+if is_required and \
+field_value in [
+NOT_FOUND,
+None,
+INVALID_INT_VALUE,
+INVALID_FLOAT_VALUE
+]:
+missing_fields.append(field_name)
 is_valid = False

-else:
-pattern_regex = re.compile(pattern)
-if not pattern_regex.fullmatch(str(field_value)):
-invalid_fields.append(f"{field_name} '{field_value}'")
-pattern_str = str(pattern)
-examples = getattr(field_info, "examples", [])
-example = examples[0] if examples else ""
-all_suggestions[field_name] = [f"Please use the format matching pattern {pattern_str} (e.g., {example})"]
-is_valid = False
-
-
 for field_name, field_info in type(cmd_parameters).model_fields.items():
 field_value = getattr(cmd_parameters, field_name, None)

@@ -404,20 +615,20 @@ Today's date is {today}.
 if is_valid:
 if not (
 self.input_for_param_extraction_class and \
-
+hasattr(self.input_for_param_extraction_class, 'validate_extracted_parameters')
 ):
-return (True, "All required parameters are valid.", {})
-
+return (True, "All required parameters are valid.", {}, [])
+
 try:
 is_valid, message = self.input_for_param_extraction_class.validate_extracted_parameters(app_workflow, subject_command_name, cmd_parameters)
 except Exception as e:
 message = f"Exception in {subject_command_name}'s validate_extracted_parameters function: {str(e)}"
 logger.critical(message)
-return (False, message, {})
-
+return (False, message, {}, [])
+
 if is_valid:
-return (True, "All required parameters are valid.", {})
-return (False, message, {})
+return (True, "All required parameters are valid.", {}, [])
+return (False, message, {}, [])

 message = ''
 if missing_fields:
@@ -428,27 +639,14 @@ Today's date is {today}.
 if hasattr(type(cmd_parameters).model_fields.get(missing_field), "json_schema_extra") and type(cmd_parameters).model_fields.get(missing_field).json_schema_extra:
 is_available_from = type(cmd_parameters).model_fields.get(missing_field).json_schema_extra.get("available_from")
 if is_available_from:
-
+msg_prefix = "abort and "
+if "run_as_agent" in app_workflow.context:
+msg_prefix = ""
+message += f"{msg_prefix}use the {' or '.join(is_available_from)} command(s) to get {missing_field} information. OR...\n"

 if invalid_fields:
 message += f"{INVALID_INFORMATION_ERRMSG}" + ", ".join(invalid_fields) + "\n"

-with suppress(Exception):
-graph_path = os.path.join(app_workflow.folderpath, "command_dependency_graph.json")
-suggestions_texts: list[str] = []
-for field in missing_fields:
-plans = get_dependency_suggestions(graph_path, subject_command_name, field, min_weight=0.7, max_depth=3)
-if plans:
-# Format a concise plan: main command and any immediate sub-steps
-top = plans[0]
-def format_plan(p):
-if not p.get('sub_plans'):
-return p['command']
-return p['command'] + " -> " + " -> ".join(sp['command'] for sp in p['sub_plans'])
-suggestions_texts.append(f"To get '{field}', try: {format_plan(top)}")
-if suggestions_texts:
-message += "\n" + "\n".join(suggestions_texts) + "\n"
-
 for field, suggestions in all_suggestions.items():
 if suggestions:
 is_format_instruction = any(("format" in str(s).lower() or "pattern" in str(s).lower()) for s in suggestions)
@@ -468,6 +666,7 @@ Today's date is {today}.
 if combined_fields:
 combined_fields_str = ", ".join(combined_fields)
 message += f"\nProvide corrected parameter values in the exact order specified below, separated by commas:\n{combined_fields_str}"
-
+if "run_as_agent" not in app_workflow.context:
+message += "\nFor parameter values that include a comma, provide separately from other values, and one at a time."

-return (False, message, all_suggestions)
+return (False, message, all_suggestions, combined_fields)
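The most notable addition to extract_parameters above is the retry wrapper: the compiled extractor is run through dspy.BestOfN with a reward function (basic_checks) that rejects predictions that merely echo a field's example values. A minimal, self-contained sketch of that pattern follows; it is illustrative rather than the package's code, the toy signature and reward rule are invented for the example, and it assumes an LM has already been configured with dspy.configure.

import dspy

class ExtractOrderId(dspy.Signature):
    """Extract the order id mentioned in the statement."""
    statement: str = dspy.InputField()
    order_id: str = dspy.OutputField(desc="e.g., '#W0000000'")

def reject_example_values(args, pred) -> float:
    # Score 0.0 when the model parrots the placeholder from the field
    # description instead of the value actually present in the statement.
    return 0.0 if pred.order_id == "#W0000000" else 1.0

extractor = dspy.ChainOfThought(ExtractOrderId)
best_of_3 = dspy.BestOfN(
    module=extractor,
    N=3,                               # re-sample at most three times
    reward_fn=reject_example_values,
    threshold=1.0,                     # accept the first prediction scoring 1.0
)

# prediction = best_of_3(statement="please cancel order #W2378156")
# print(prediction.order_id)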
fastworkflow/workflow.py
CHANGED
@@ -288,21 +288,17 @@ class Workflow:

 def end_command_processing(self) -> None:
 """Process the end of a command"""
-mark_dirty = False
 # important to clear the current command from the workflow context
 if "command" in self._context:
 del self._context["command"]
-mark_dirty = True

 # important to clear parameter extraction error state (if any)
 if "stored_parameters" in self._context:
 del self._context["stored_parameters"]
-mark_dirty = True

 self._context["NLU_Pipeline_Stage"] = fastworkflow.NLUPipelineStage.INTENT_DETECTION

-
-self._mark_dirty()
+self._mark_dirty()

 def close(self) -> bool:
 """close the session"""
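This hunk drops the mark_dirty flag, so the workflow context is persisted unconditionally once a command finishes. For readability, here is the resulting method reconstructed from the hunk above (indentation restored; the surrounding Workflow class, self._context, and _mark_dirty are fastworkflow internals and are not defined here).

def end_command_processing(self) -> None:
    """Process the end of a command"""
    # important to clear the current command from the workflow context
    if "command" in self._context:
        del self._context["command"]

    # important to clear parameter extraction error state (if any)
    if "stored_parameters" in self._context:
        del self._context["stored_parameters"]

    self._context["NLU_Pipeline_Stage"] = fastworkflow.NLUPipelineStage.INTENT_DETECTION

    self._mark_dirty()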