langwatch-scenario 0.7.11__tar.gz → 0.7.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/PKG-INFO +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/PKG-INFO +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/SOURCES.txt +3 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/pyproject.toml +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/config/model.py +25 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/config/scenario.py +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/judge_agent.py +23 -2
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/user_simulator_agent.py +23 -2
- langwatch_scenario-0.7.13/tests/test_judge_agent.py +151 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_model_config.py +26 -0
- langwatch_scenario-0.7.13/tests/test_user_simulator_agent.py +118 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/README.md +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/dependency_links.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/entry_points.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/requires.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/top_level.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_error_messages.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/event_alert_message_logger.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/event_bus.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/event_reporter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/messages.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_events/utils.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/README.md +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_scenario_events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_dataset_by_slug_or_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id_versions.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/patch_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_annotations_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_dataset_by_slug_entries.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts_by_id_versions.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_scenario_events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_share.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_unshare.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/put_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/post_api_trace_search.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/client.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/errors.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/annotation.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries_entries_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_annotations_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item_entry.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_422.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_error_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_input.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_error_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input_value_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output_value_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_params.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/input_.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/pagination.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_annotations_trace_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0_metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0_verdict.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_status.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_1.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item_function.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_3.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_4.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_201.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_share_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_unshare_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request_filters.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_response.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/trace.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/py.typed +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/lang_watch_api_client/types.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_generated/langwatch_api_client/pyproject.toml +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_utils/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_utils/ids.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_utils/message_conversion.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/_utils/utils.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/agent_adapter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/cache.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/config/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/config/langwatch.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/py.typed +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/pytest_plugin.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/scenario_executor.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/scenario_state.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/script.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/scenario/types.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/setup.cfg +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/setup.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_event_reporter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_scenario.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_scenario_agent.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_scenario_event_bus.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_scenario_executor.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/tests/test_scenario_executor_events.py +0 -0
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/SOURCES.txt
RENAMED
|
@@ -245,9 +245,11 @@ scenario/config/langwatch.py
|
|
|
245
245
|
scenario/config/model.py
|
|
246
246
|
scenario/config/scenario.py
|
|
247
247
|
tests/test_event_reporter.py
|
|
248
|
+
tests/test_judge_agent.py
|
|
248
249
|
tests/test_model_config.py
|
|
249
250
|
tests/test_scenario.py
|
|
250
251
|
tests/test_scenario_agent.py
|
|
251
252
|
tests/test_scenario_event_bus.py
|
|
252
253
|
tests/test_scenario_executor.py
|
|
253
|
-
tests/test_scenario_executor_events.py
|
|
254
|
+
tests/test_scenario_executor_events.py
|
|
255
|
+
tests/test_user_simulator_agent.py
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "langwatch-scenario"
|
|
7
|
-
version = "0.7.
|
|
7
|
+
version = "0.7.13"
|
|
8
8
|
description = "The end-to-end agent testing library"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
authors = [{ name = "LangWatch Team", email = "support@langwatch.ai" }]
|
|
@@ -6,7 +6,7 @@ user simulator and judge agents in the Scenario framework.
|
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
8
|
from typing import Optional
|
|
9
|
-
from pydantic import BaseModel
|
|
9
|
+
from pydantic import BaseModel, ConfigDict
|
|
10
10
|
|
|
11
11
|
|
|
12
12
|
class ModelConfig(BaseModel):
|
|
@@ -16,6 +16,9 @@ class ModelConfig(BaseModel):
|
|
|
16
16
|
This class encapsulates all the parameters needed to configure an LLM model
|
|
17
17
|
for use with user simulator and judge agents in the Scenario framework.
|
|
18
18
|
|
|
19
|
+
The ModelConfig accepts any additional parameters that litellm supports,
|
|
20
|
+
including headers, timeout, client, and other provider-specific options.
|
|
21
|
+
|
|
19
22
|
Attributes:
|
|
20
23
|
model: The model identifier (e.g., "openai/gpt-4.1", "anthropic/claude-3-sonnet")
|
|
21
24
|
api_base: Optional base URL where the model is hosted
|
|
@@ -25,6 +28,7 @@ class ModelConfig(BaseModel):
|
|
|
25
28
|
|
|
26
29
|
Example:
|
|
27
30
|
```
|
|
31
|
+
# Basic configuration
|
|
28
32
|
model_config = ModelConfig(
|
|
29
33
|
model="openai/gpt-4.1",
|
|
30
34
|
api_base="https://api.openai.com/v1",
|
|
@@ -32,9 +36,29 @@ class ModelConfig(BaseModel):
|
|
|
32
36
|
temperature=0.1,
|
|
33
37
|
max_tokens=1000
|
|
34
38
|
)
|
|
39
|
+
|
|
40
|
+
# With custom headers and timeout
|
|
41
|
+
model_config = ModelConfig(
|
|
42
|
+
model="openai/gpt-4",
|
|
43
|
+
headers={"X-Custom-Header": "value"},
|
|
44
|
+
timeout=60,
|
|
45
|
+
num_retries=3
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
# With custom OpenAI client
|
|
49
|
+
from openai import OpenAI
|
|
50
|
+
model_config = ModelConfig(
|
|
51
|
+
model="openai/gpt-4",
|
|
52
|
+
client=OpenAI(
|
|
53
|
+
base_url="https://custom.com",
|
|
54
|
+
default_headers={"X-Auth": "token"}
|
|
55
|
+
)
|
|
56
|
+
)
|
|
35
57
|
```
|
|
36
58
|
"""
|
|
37
59
|
|
|
60
|
+
model_config = ConfigDict(extra="allow")
|
|
61
|
+
|
|
38
62
|
model: str
|
|
39
63
|
api_base: Optional[str] = None
|
|
40
64
|
api_key: Optional[str] = None
|
|
@@ -65,7 +65,7 @@ class ScenarioConfig(BaseModel):
|
|
|
65
65
|
@classmethod
|
|
66
66
|
def configure(
|
|
67
67
|
cls,
|
|
68
|
-
default_model: Optional[str] = None,
|
|
68
|
+
default_model: Optional[Union[str, ModelConfig]] = None,
|
|
69
69
|
max_turns: Optional[int] = None,
|
|
70
70
|
verbose: Optional[Union[bool, int]] = None,
|
|
71
71
|
cache_key: Optional[str] = None,
|
|
@@ -105,6 +105,7 @@ class JudgeAgent(AgentAdapter):
|
|
|
105
105
|
max_tokens: Optional[int]
|
|
106
106
|
criteria: List[str]
|
|
107
107
|
system_prompt: Optional[str]
|
|
108
|
+
_extra_params: dict
|
|
108
109
|
|
|
109
110
|
def __init__(
|
|
110
111
|
self,
|
|
@@ -116,6 +117,7 @@ class JudgeAgent(AgentAdapter):
|
|
|
116
117
|
temperature: float = 0.0,
|
|
117
118
|
max_tokens: Optional[int] = None,
|
|
118
119
|
system_prompt: Optional[str] = None,
|
|
120
|
+
**extra_params,
|
|
119
121
|
):
|
|
120
122
|
"""
|
|
121
123
|
Initialize a judge agent with evaluation criteria.
|
|
@@ -159,8 +161,12 @@ class JudgeAgent(AgentAdapter):
|
|
|
159
161
|
system_prompt="You are a senior software engineer reviewing code for production use."
|
|
160
162
|
)
|
|
161
163
|
```
|
|
164
|
+
|
|
165
|
+
Note:
|
|
166
|
+
Advanced usage: Additional parameters can be passed as keyword arguments
|
|
167
|
+
(e.g., headers, timeout, client) for specialized configurations. These are
|
|
168
|
+
experimental and may not be supported in future versions.
|
|
162
169
|
"""
|
|
163
|
-
# Override the default system prompt for the judge agent
|
|
164
170
|
self.criteria = criteria or []
|
|
165
171
|
self.api_base = api_base
|
|
166
172
|
self.api_key = api_key
|
|
@@ -175,6 +181,7 @@ class JudgeAgent(AgentAdapter):
|
|
|
175
181
|
ScenarioConfig.default_config.default_model, str
|
|
176
182
|
):
|
|
177
183
|
self.model = model or ScenarioConfig.default_config.default_model
|
|
184
|
+
self._extra_params = extra_params
|
|
178
185
|
elif ScenarioConfig.default_config is not None and isinstance(
|
|
179
186
|
ScenarioConfig.default_config.default_model, ModelConfig
|
|
180
187
|
):
|
|
@@ -191,9 +198,22 @@ class JudgeAgent(AgentAdapter):
|
|
|
191
198
|
self.max_tokens = (
|
|
192
199
|
max_tokens or ScenarioConfig.default_config.default_model.max_tokens
|
|
193
200
|
)
|
|
201
|
+
# Extract extra params from ModelConfig
|
|
202
|
+
config_dict = ScenarioConfig.default_config.default_model.model_dump(
|
|
203
|
+
exclude_none=True
|
|
204
|
+
)
|
|
205
|
+
config_dict.pop("model", None)
|
|
206
|
+
config_dict.pop("api_base", None)
|
|
207
|
+
config_dict.pop("api_key", None)
|
|
208
|
+
config_dict.pop("temperature", None)
|
|
209
|
+
config_dict.pop("max_tokens", None)
|
|
210
|
+
# Merge: config extras < agent extra_params
|
|
211
|
+
self._extra_params = {**config_dict, **extra_params}
|
|
212
|
+
else:
|
|
213
|
+
self._extra_params = extra_params
|
|
194
214
|
|
|
195
215
|
if not hasattr(self, "model"):
|
|
196
|
-
raise Exception(agent_not_configured_error_message("
|
|
216
|
+
raise Exception(agent_not_configured_error_message("JudgeAgent"))
|
|
197
217
|
|
|
198
218
|
@scenario_cache()
|
|
199
219
|
async def call(
|
|
@@ -370,6 +390,7 @@ if you don't have enough information to make a verdict, say inconclusive with ma
|
|
|
370
390
|
if (is_last_message or enforce_judgment) and has_criteria
|
|
371
391
|
else "required"
|
|
372
392
|
),
|
|
393
|
+
**self._extra_params,
|
|
373
394
|
),
|
|
374
395
|
)
|
|
375
396
|
|
|
@@ -87,6 +87,7 @@ class UserSimulatorAgent(AgentAdapter):
|
|
|
87
87
|
temperature: float
|
|
88
88
|
max_tokens: Optional[int]
|
|
89
89
|
system_prompt: Optional[str]
|
|
90
|
+
_extra_params: dict
|
|
90
91
|
|
|
91
92
|
def __init__(
|
|
92
93
|
self,
|
|
@@ -97,6 +98,7 @@ class UserSimulatorAgent(AgentAdapter):
|
|
|
97
98
|
temperature: float = 0.0,
|
|
98
99
|
max_tokens: Optional[int] = None,
|
|
99
100
|
system_prompt: Optional[str] = None,
|
|
101
|
+
**extra_params,
|
|
100
102
|
):
|
|
101
103
|
"""
|
|
102
104
|
Initialize a user simulator agent.
|
|
@@ -133,8 +135,12 @@ class UserSimulatorAgent(AgentAdapter):
|
|
|
133
135
|
'''
|
|
134
136
|
)
|
|
135
137
|
```
|
|
138
|
+
|
|
139
|
+
Note:
|
|
140
|
+
Advanced usage: Additional parameters can be passed as keyword arguments
|
|
141
|
+
(e.g., headers, timeout, client) for specialized configurations. These are
|
|
142
|
+
experimental and may not be supported in future versions.
|
|
136
143
|
"""
|
|
137
|
-
# Override the default system prompt for the user simulator agent
|
|
138
144
|
self.api_base = api_base
|
|
139
145
|
self.api_key = api_key
|
|
140
146
|
self.temperature = temperature
|
|
@@ -148,6 +154,7 @@ class UserSimulatorAgent(AgentAdapter):
|
|
|
148
154
|
ScenarioConfig.default_config.default_model, str
|
|
149
155
|
):
|
|
150
156
|
self.model = model or ScenarioConfig.default_config.default_model
|
|
157
|
+
self._extra_params = extra_params
|
|
151
158
|
elif ScenarioConfig.default_config is not None and isinstance(
|
|
152
159
|
ScenarioConfig.default_config.default_model, ModelConfig
|
|
153
160
|
):
|
|
@@ -164,9 +171,22 @@ class UserSimulatorAgent(AgentAdapter):
|
|
|
164
171
|
self.max_tokens = (
|
|
165
172
|
max_tokens or ScenarioConfig.default_config.default_model.max_tokens
|
|
166
173
|
)
|
|
174
|
+
# Extract extra params from ModelConfig
|
|
175
|
+
config_dict = ScenarioConfig.default_config.default_model.model_dump(
|
|
176
|
+
exclude_none=True
|
|
177
|
+
)
|
|
178
|
+
config_dict.pop("model", None)
|
|
179
|
+
config_dict.pop("api_base", None)
|
|
180
|
+
config_dict.pop("api_key", None)
|
|
181
|
+
config_dict.pop("temperature", None)
|
|
182
|
+
config_dict.pop("max_tokens", None)
|
|
183
|
+
# Merge: config extras < agent extra_params
|
|
184
|
+
self._extra_params = {**config_dict, **extra_params}
|
|
185
|
+
else:
|
|
186
|
+
self._extra_params = extra_params
|
|
167
187
|
|
|
168
188
|
if not hasattr(self, "model"):
|
|
169
|
-
raise Exception(agent_not_configured_error_message("
|
|
189
|
+
raise Exception(agent_not_configured_error_message("UserSimulatorAgent"))
|
|
170
190
|
|
|
171
191
|
@scenario_cache()
|
|
172
192
|
async def call(
|
|
@@ -237,6 +257,7 @@ Your goal (assistant) is to interact with the Agent Under Test (user) as if you
|
|
|
237
257
|
api_base=self.api_base,
|
|
238
258
|
max_tokens=self.max_tokens,
|
|
239
259
|
tools=[],
|
|
260
|
+
**self._extra_params,
|
|
240
261
|
),
|
|
241
262
|
)
|
|
242
263
|
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from unittest.mock import patch, MagicMock
|
|
3
|
+
from openai import OpenAI
|
|
4
|
+
from scenario import JudgeAgent
|
|
5
|
+
from scenario.config import ModelConfig, ScenarioConfig
|
|
6
|
+
from scenario.types import AgentInput
|
|
7
|
+
from scenario.cache import context_scenario
|
|
8
|
+
from scenario.scenario_executor import ScenarioExecutor
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class FakeOpenAIClient:
|
|
12
|
+
"""Fake client for testing without requiring API keys."""
|
|
13
|
+
|
|
14
|
+
def __init__(self, base_url=None, default_headers=None):
|
|
15
|
+
self.base_url = base_url
|
|
16
|
+
self.default_headers = default_headers
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@pytest.mark.asyncio
|
|
20
|
+
async def test_judge_agent_merges_global_config_and_agent_params():
|
|
21
|
+
"""JudgeAgent merges ModelConfig defaults with agent-specific overrides, including custom client."""
|
|
22
|
+
# Setup custom client
|
|
23
|
+
custom_client = FakeOpenAIClient(
|
|
24
|
+
base_url="https://custom.com", default_headers={"X-Global": "global-value"}
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
# Setup global config with extra params
|
|
28
|
+
ScenarioConfig.default_config = ScenarioConfig(
|
|
29
|
+
default_model=ModelConfig(
|
|
30
|
+
model="openai/gpt-4",
|
|
31
|
+
api_base="https://custom.com",
|
|
32
|
+
headers={"X-Global": "global-value"}, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
33
|
+
timeout=30, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
34
|
+
client=custom_client, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
35
|
+
)
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
judge = JudgeAgent(
|
|
39
|
+
criteria=["Test criterion"],
|
|
40
|
+
temperature=0.5,
|
|
41
|
+
timeout=60,
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
# Create mock input
|
|
45
|
+
mock_scenario_state = MagicMock()
|
|
46
|
+
mock_scenario_state.description = "Test scenario"
|
|
47
|
+
mock_scenario_state.current_turn = 1
|
|
48
|
+
mock_scenario_state.config.max_turns = 10
|
|
49
|
+
|
|
50
|
+
agent_input = AgentInput(
|
|
51
|
+
thread_id="test",
|
|
52
|
+
messages=[{"role": "user", "content": "Hello"}],
|
|
53
|
+
new_messages=[],
|
|
54
|
+
judgment_request=True,
|
|
55
|
+
scenario_state=mock_scenario_state,
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
# Mock litellm.completion response
|
|
59
|
+
mock_response = MagicMock()
|
|
60
|
+
mock_response.choices = [MagicMock()]
|
|
61
|
+
mock_response.choices[0].message.tool_calls = [MagicMock()]
|
|
62
|
+
mock_response.choices[0].message.tool_calls[0].function.name = "finish_test"
|
|
63
|
+
mock_response.choices[0].message.tool_calls[
|
|
64
|
+
0
|
|
65
|
+
].function.arguments = '{"verdict": "success", "reasoning": "Test passed", "criteria": {"test_criterion": true}}'
|
|
66
|
+
|
|
67
|
+
# Mock scenario context for cache decorator
|
|
68
|
+
mock_executor = MagicMock()
|
|
69
|
+
mock_executor.config = MagicMock()
|
|
70
|
+
mock_executor.config.cache_key = None
|
|
71
|
+
token = context_scenario.set(mock_executor)
|
|
72
|
+
|
|
73
|
+
try:
|
|
74
|
+
with patch(
|
|
75
|
+
"scenario.judge_agent.litellm.completion", return_value=mock_response
|
|
76
|
+
) as mock_completion:
|
|
77
|
+
await judge.call(agent_input)
|
|
78
|
+
|
|
79
|
+
assert mock_completion.called
|
|
80
|
+
call_kwargs = mock_completion.call_args.kwargs
|
|
81
|
+
|
|
82
|
+
# Verify merged params: config defaults + agent overrides
|
|
83
|
+
assert call_kwargs["model"] == "openai/gpt-4"
|
|
84
|
+
assert call_kwargs["api_base"] == "https://custom.com"
|
|
85
|
+
assert call_kwargs["temperature"] == 0.5 # Agent override
|
|
86
|
+
assert call_kwargs["timeout"] == 60 # Agent override
|
|
87
|
+
assert call_kwargs["headers"] == {"X-Global": "global-value"} # From config
|
|
88
|
+
assert (
|
|
89
|
+
call_kwargs["client"] == custom_client
|
|
90
|
+
) # Custom client passed through
|
|
91
|
+
finally:
|
|
92
|
+
context_scenario.reset(token)
|
|
93
|
+
# Cleanup
|
|
94
|
+
ScenarioConfig.default_config = None
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
@pytest.mark.asyncio
|
|
98
|
+
async def test_judge_agent_with_string_default_model_config():
|
|
99
|
+
"""JudgeAgent should initialize _extra_params when default_model is a string."""
|
|
100
|
+
# Setup global config with string default_model
|
|
101
|
+
ScenarioConfig.default_config = ScenarioConfig(default_model="openai/gpt-4")
|
|
102
|
+
|
|
103
|
+
judge = JudgeAgent(
|
|
104
|
+
criteria=["Test criterion"],
|
|
105
|
+
temperature=0.5,
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Create mock input
|
|
109
|
+
mock_scenario_state = MagicMock()
|
|
110
|
+
mock_scenario_state.description = "Test scenario"
|
|
111
|
+
mock_scenario_state.current_turn = 1
|
|
112
|
+
mock_scenario_state.config.max_turns = 10
|
|
113
|
+
|
|
114
|
+
agent_input = AgentInput(
|
|
115
|
+
thread_id="test",
|
|
116
|
+
messages=[{"role": "user", "content": "Hello"}],
|
|
117
|
+
new_messages=[],
|
|
118
|
+
judgment_request=True,
|
|
119
|
+
scenario_state=mock_scenario_state,
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
# Mock litellm.completion response
|
|
123
|
+
mock_response = MagicMock()
|
|
124
|
+
mock_response.choices = [MagicMock()]
|
|
125
|
+
mock_response.choices[0].message.tool_calls = [MagicMock()]
|
|
126
|
+
mock_response.choices[0].message.tool_calls[0].function.name = "finish_test"
|
|
127
|
+
mock_response.choices[0].message.tool_calls[
|
|
128
|
+
0
|
|
129
|
+
].function.arguments = '{"verdict": "success", "reasoning": "Test passed", "criteria": {"test_criterion": true}}'
|
|
130
|
+
|
|
131
|
+
# Mock scenario context for cache decorator
|
|
132
|
+
mock_executor = MagicMock()
|
|
133
|
+
mock_executor.config = MagicMock()
|
|
134
|
+
mock_executor.config.cache_key = None
|
|
135
|
+
token = context_scenario.set(mock_executor)
|
|
136
|
+
|
|
137
|
+
try:
|
|
138
|
+
with patch(
|
|
139
|
+
"scenario.judge_agent.litellm.completion", return_value=mock_response
|
|
140
|
+
) as mock_completion:
|
|
141
|
+
# This should not raise AttributeError: 'JudgeAgent' object has no attribute '_extra_params'
|
|
142
|
+
await judge.call(agent_input)
|
|
143
|
+
|
|
144
|
+
assert mock_completion.called
|
|
145
|
+
call_kwargs = mock_completion.call_args.kwargs
|
|
146
|
+
assert call_kwargs["model"] == "openai/gpt-4"
|
|
147
|
+
assert call_kwargs["temperature"] == 0.5
|
|
148
|
+
finally:
|
|
149
|
+
context_scenario.reset(token)
|
|
150
|
+
# Cleanup
|
|
151
|
+
ScenarioConfig.default_config = None
|
|
@@ -35,3 +35,29 @@ async def test_user_simulator_agent_uses_modelconfig_api_base():
|
|
|
35
35
|
mock_completion.call_args.kwargs["api_base"]
|
|
36
36
|
== "https://custom-api-base.example.com"
|
|
37
37
|
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def test_modelconfig_accepts_extra_litellm_params():
|
|
41
|
+
"""ModelConfig accepts arbitrary litellm parameters via extra='allow'."""
|
|
42
|
+
from openai import OpenAI
|
|
43
|
+
|
|
44
|
+
custom_client = MagicMock(spec=OpenAI)
|
|
45
|
+
|
|
46
|
+
config = ModelConfig(
|
|
47
|
+
model="openai/gpt-4",
|
|
48
|
+
api_base="https://custom.com",
|
|
49
|
+
headers={"X-Custom-Header": "test-value"}, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
50
|
+
timeout=60, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
51
|
+
num_retries=3, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
52
|
+
client=custom_client, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
assert config.model == "openai/gpt-4"
|
|
56
|
+
assert config.api_base == "https://custom.com"
|
|
57
|
+
|
|
58
|
+
# Verify extra params are stored
|
|
59
|
+
dump = config.model_dump()
|
|
60
|
+
assert dump["headers"] == {"X-Custom-Header": "test-value"}
|
|
61
|
+
assert dump["timeout"] == 60
|
|
62
|
+
assert dump["num_retries"] == 3
|
|
63
|
+
assert dump["client"] == custom_client
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
from unittest.mock import patch, MagicMock
|
|
3
|
+
from scenario import UserSimulatorAgent
|
|
4
|
+
from scenario.config import ModelConfig, ScenarioConfig
|
|
5
|
+
from scenario.types import AgentInput
|
|
6
|
+
from scenario.cache import context_scenario
|
|
7
|
+
from scenario.scenario_executor import ScenarioExecutor
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@pytest.mark.asyncio
|
|
11
|
+
async def test_user_simulator_agent_merges_global_config_and_agent_params():
|
|
12
|
+
"""UserSimulatorAgent merges ModelConfig defaults with agent-specific overrides."""
|
|
13
|
+
# Setup global config with extra params
|
|
14
|
+
ScenarioConfig.default_config = ScenarioConfig(
|
|
15
|
+
default_model=ModelConfig(
|
|
16
|
+
model="openai/gpt-4",
|
|
17
|
+
headers={"X-Auth": "token-123"}, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
18
|
+
max_retries=5, # type: ignore # extra param via ConfigDict(extra="allow")
|
|
19
|
+
)
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
user_sim = UserSimulatorAgent(
|
|
23
|
+
temperature=0.7,
|
|
24
|
+
num_retries=2,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
# Create mock input
|
|
28
|
+
mock_scenario_state = MagicMock()
|
|
29
|
+
mock_scenario_state.description = "Test scenario"
|
|
30
|
+
|
|
31
|
+
agent_input = AgentInput(
|
|
32
|
+
thread_id="test",
|
|
33
|
+
messages=[],
|
|
34
|
+
new_messages=[],
|
|
35
|
+
scenario_state=mock_scenario_state,
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
# Mock litellm.completion response
|
|
39
|
+
mock_response = MagicMock()
|
|
40
|
+
mock_response.choices = [MagicMock()]
|
|
41
|
+
mock_response.choices[0].message.content = "test user message"
|
|
42
|
+
|
|
43
|
+
# Mock scenario context for cache decorator
|
|
44
|
+
mock_executor = MagicMock()
|
|
45
|
+
mock_executor.config = MagicMock()
|
|
46
|
+
mock_executor.config.cache_key = None
|
|
47
|
+
token = context_scenario.set(mock_executor)
|
|
48
|
+
|
|
49
|
+
try:
|
|
50
|
+
with patch(
|
|
51
|
+
"scenario.user_simulator_agent.litellm.completion",
|
|
52
|
+
return_value=mock_response,
|
|
53
|
+
) as mock_completion:
|
|
54
|
+
await user_sim.call(agent_input)
|
|
55
|
+
|
|
56
|
+
assert mock_completion.called
|
|
57
|
+
call_kwargs = mock_completion.call_args.kwargs
|
|
58
|
+
|
|
59
|
+
# Verify merged params
|
|
60
|
+
assert call_kwargs["model"] == "openai/gpt-4"
|
|
61
|
+
assert call_kwargs["temperature"] == 0.7 # Agent override
|
|
62
|
+
assert call_kwargs["headers"] == {"X-Auth": "token-123"} # From config
|
|
63
|
+
assert call_kwargs["max_retries"] == 5 # From config
|
|
64
|
+
assert call_kwargs["num_retries"] == 2 # Agent-specific
|
|
65
|
+
finally:
|
|
66
|
+
context_scenario.reset(token)
|
|
67
|
+
# Cleanup
|
|
68
|
+
ScenarioConfig.default_config = None
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@pytest.mark.asyncio
async def test_user_simulator_agent_with_string_default_model_config():
    """UserSimulatorAgent should initialize _extra_params when default_model is a string.

    Regression test: when the global default config carries a bare string model
    name (rather than a structured model config), agent construction must still
    set up its internal extra-params state, and `call()` must forward the model
    name and the agent-level temperature override to litellm.completion.
    """
    # Install a global default config whose default_model is a plain string.
    ScenarioConfig.default_config = ScenarioConfig(default_model="openai/gpt-4")

    # Restore the global default even if agent construction or any setup step
    # below raises, so the mutated class attribute never leaks into other tests.
    try:
        user_sim = UserSimulatorAgent(
            temperature=0.7,
        )

        # Minimal scenario state: only .description is consulted by the agent.
        mock_scenario_state = MagicMock()
        mock_scenario_state.description = "Test scenario"

        agent_input = AgentInput(
            thread_id="test",
            messages=[],
            new_messages=[],
            scenario_state=mock_scenario_state,
        )

        # Canned litellm.completion response exposing a single text choice.
        mock_response = MagicMock()
        mock_response.choices = [MagicMock()]
        mock_response.choices[0].message.content = "test user message"

        # The cache decorator reads the current scenario executor from a
        # ContextVar; provide a stub with caching disabled (cache_key=None).
        mock_executor = MagicMock()
        mock_executor.config = MagicMock()
        mock_executor.config.cache_key = None
        token = context_scenario.set(mock_executor)

        try:
            with patch(
                "scenario.user_simulator_agent.litellm.completion",
                return_value=mock_response,
            ) as mock_completion:
                # This should not raise AttributeError:
                # 'UserSimulatorAgent' object has no attribute '_extra_params'
                await user_sim.call(agent_input)

                assert mock_completion.called
                call_kwargs = mock_completion.call_args.kwargs
                assert call_kwargs["model"] == "openai/gpt-4"
                assert call_kwargs["temperature"] == 0.7
        finally:
            # Always detach the stubbed executor from the ContextVar.
            context_scenario.reset(token)
    finally:
        # Cleanup: restore global state for subsequent tests.
        ScenarioConfig.default_config = None
|
|
File without changes
|
|
File without changes
|
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/entry_points.txt
RENAMED
|
File without changes
|
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/requires.txt
RENAMED
|
File without changes
|
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.13}/langwatch_scenario.egg-info/top_level.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|