langwatch-scenario 0.7.11__tar.gz → 0.7.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/PKG-INFO +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/PKG-INFO +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/SOURCES.txt +3 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/pyproject.toml +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/model.py +25 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/scenario.py +1 -1
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/judge_agent.py +22 -2
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/user_simulator_agent.py +22 -2
- langwatch_scenario-0.7.12/tests/test_judge_agent.py +94 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_model_config.py +26 -0
- langwatch_scenario-0.7.12/tests/test_user_simulator_agent.py +68 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/README.md +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/dependency_links.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/entry_points.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/requires.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/top_level.txt +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_error_messages.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/event_alert_message_logger.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/event_bus.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/event_reporter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/messages.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_events/utils.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/README.md +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/delete_api_scenario_events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_annotations_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_dataset_by_slug_or_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_prompts_by_id_versions.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/get_api_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/patch_api_annotations_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_annotations_trace_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_dataset_by_slug_entries.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_prompts_by_id_versions.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_scenario_events.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_share.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/post_api_trace_id_unshare.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/default/put_api_prompts_by_id.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/api/traces/post_api_trace_search.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/client.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/errors.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/annotation.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/dataset_post_entries_entries_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_annotations_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/delete_api_scenario_events_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/evaluation_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_200_data_item_entry.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_422.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_dataset_by_slug_or_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_200_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_by_id_versions_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_200_item_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_prompts_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_error_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_evaluations_item_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_input.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_error_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_input_value_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_output_value_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_params.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_spans_item_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/get_api_trace_id_response_200_timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/input_.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/metrics.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/output.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/pagination.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/patch_api_annotations_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_annotations_trace_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_body_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_columns_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_demonstrations_rows_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_inputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_outputs_item_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_200_config_data_prompting_technique.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_by_id_versions_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_messages_item_role.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_json_schema_schema.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_200_response_format_type_0_type.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_prompts_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_0_metadata.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_results_type_0_verdict.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_1_status.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_0.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_1.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_2_tool_calls_item_function.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_3.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_body_type_2_messages_item_type_4.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_201.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_scenario_events_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_share_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/post_api_trace_id_unshare_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_body.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_200.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_400_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_401_error.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_404.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/put_api_prompts_by_id_response_500.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_request_filters.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/search_response.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/timestamps.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/models/trace.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/py.typed +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/lang_watch_api_client/types.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_generated/langwatch_api_client/pyproject.toml +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_utils/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_utils/ids.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_utils/message_conversion.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/_utils/utils.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/agent_adapter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/cache.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/__init__.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/langwatch.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/py.typed +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/pytest_plugin.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/scenario_executor.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/scenario_state.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/script.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/types.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/setup.cfg +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/setup.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_event_reporter.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_scenario.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_scenario_agent.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_scenario_event_bus.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_scenario_executor.py +0 -0
- {langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_scenario_executor_events.py +0 -0
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/langwatch_scenario.egg-info/SOURCES.txt
RENAMED

@@ -245,9 +245,11 @@ scenario/config/langwatch.py
 scenario/config/model.py
 scenario/config/scenario.py
 tests/test_event_reporter.py
+tests/test_judge_agent.py
 tests/test_model_config.py
 tests/test_scenario.py
 tests/test_scenario_agent.py
 tests/test_scenario_event_bus.py
 tests/test_scenario_executor.py
-tests/test_scenario_executor_events.py
+tests/test_scenario_executor_events.py
+tests/test_user_simulator_agent.py
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "langwatch-scenario"
-version = "0.7.11"
+version = "0.7.12"
 description = "The end-to-end agent testing library"
 readme = "README.md"
 authors = [{ name = "LangWatch Team", email = "support@langwatch.ai" }]
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/model.py

@@ -6,7 +6,7 @@ user simulator and judge agents in the Scenario framework.
 """
 
 from typing import Optional
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 
 class ModelConfig(BaseModel):
@@ -16,6 +16,9 @@ class ModelConfig(BaseModel):
     This class encapsulates all the parameters needed to configure an LLM model
     for use with user simulator and judge agents in the Scenario framework.
 
+    The ModelConfig accepts any additional parameters that litellm supports,
+    including headers, timeout, client, and other provider-specific options.
+
     Attributes:
         model: The model identifier (e.g., "openai/gpt-4.1", "anthropic/claude-3-sonnet")
         api_base: Optional base URL where the model is hosted
@@ -25,6 +28,7 @@ class ModelConfig(BaseModel):
 
     Example:
         ```
+        # Basic configuration
        model_config = ModelConfig(
            model="openai/gpt-4.1",
            api_base="https://api.openai.com/v1",
@@ -32,9 +36,29 @@ class ModelConfig(BaseModel):
            temperature=0.1,
            max_tokens=1000
        )
+
+       # With custom headers and timeout
+       model_config = ModelConfig(
+           model="openai/gpt-4",
+           headers={"X-Custom-Header": "value"},
+           timeout=60,
+           num_retries=3
+       )
+
+       # With custom OpenAI client
+       from openai import OpenAI
+       model_config = ModelConfig(
+           model="openai/gpt-4",
+           client=OpenAI(
+               base_url="https://custom.com",
+               default_headers={"X-Auth": "token"}
+           )
+       )
         ```
     """
 
+    model_config = ConfigDict(extra="allow")
+
     model: str
     api_base: Optional[str] = None
     api_key: Optional[str] = None
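The key enabler in this hunk is `model_config = ConfigDict(extra="allow")`: unknown keyword arguments are no longer rejected by Pydantic but are stored on the instance and show up in `model_dump()`. A minimal, self-contained sketch of that behavior follows; the class name and the `headers`/`timeout` extras are illustrative stand-ins, not names declared by the library.

from typing import Optional
from pydantic import BaseModel, ConfigDict

class ExtraAllowingConfig(BaseModel):
    # Mirrors the change above: unknown kwargs are kept instead of raising a ValidationError.
    model_config = ConfigDict(extra="allow")

    model: str
    api_base: Optional[str] = None

cfg = ExtraAllowingConfig(
    model="openai/gpt-4",
    headers={"X-Custom-Header": "value"},  # extra field, accepted via extra="allow"
    timeout=60,                            # extra field
)

dump = cfg.model_dump(exclude_none=True)
assert dump["headers"] == {"X-Custom-Header": "value"}
assert dump["timeout"] == 60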
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/config/scenario.py

@@ -65,7 +65,7 @@ class ScenarioConfig(BaseModel):
     @classmethod
     def configure(
         cls,
-        default_model: Optional[str] = None,
+        default_model: Optional[Union[str, ModelConfig]] = None,
         max_turns: Optional[int] = None,
         verbose: Optional[Union[bool, int]] = None,
         cache_key: Optional[str] = None,
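With the widened signature, the global default model can be a full `ModelConfig` rather than just a model string, so provider-specific extras ride along with it. A hedged usage sketch: it assumes `configure()` keeps installing the value as `ScenarioConfig.default_config` (which is how the new tests consume it), and the header and timeout values are illustrative litellm kwargs.

from scenario.config import ModelConfig, ScenarioConfig

ScenarioConfig.configure(
    default_model=ModelConfig(
        model="openai/gpt-4",
        api_base="https://api.openai.com/v1",
        headers={"X-Team": "qa"},  # illustrative extra litellm kwarg
        timeout=30,                # illustrative extra litellm kwarg
    ),
    max_turns=10,
)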
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/judge_agent.py

@@ -105,6 +105,7 @@ class JudgeAgent(AgentAdapter):
     max_tokens: Optional[int]
     criteria: List[str]
     system_prompt: Optional[str]
+    _extra_params: dict
 
     def __init__(
         self,
@@ -116,6 +117,7 @@ class JudgeAgent(AgentAdapter):
         temperature: float = 0.0,
         max_tokens: Optional[int] = None,
         system_prompt: Optional[str] = None,
+        **extra_params,
     ):
         """
         Initialize a judge agent with evaluation criteria.
@@ -159,8 +161,12 @@ class JudgeAgent(AgentAdapter):
                 system_prompt="You are a senior software engineer reviewing code for production use."
             )
             ```
+
+        Note:
+            Advanced usage: Additional parameters can be passed as keyword arguments
+            (e.g., headers, timeout, client) for specialized configurations. These are
+            experimental and may not be supported in future versions.
         """
-        # Override the default system prompt for the judge agent
         self.criteria = criteria or []
         self.api_base = api_base
         self.api_key = api_key
@@ -191,9 +197,22 @@ class JudgeAgent(AgentAdapter):
             self.max_tokens = (
                 max_tokens or ScenarioConfig.default_config.default_model.max_tokens
             )
+            # Extract extra params from ModelConfig
+            config_dict = ScenarioConfig.default_config.default_model.model_dump(
+                exclude_none=True
+            )
+            config_dict.pop("model", None)
+            config_dict.pop("api_base", None)
+            config_dict.pop("api_key", None)
+            config_dict.pop("temperature", None)
+            config_dict.pop("max_tokens", None)
+            # Merge: config extras < agent extra_params
+            self._extra_params = {**config_dict, **extra_params}
+        else:
+            self._extra_params = extra_params
 
         if not hasattr(self, "model"):
-            raise Exception(agent_not_configured_error_message("
+            raise Exception(agent_not_configured_error_message("JudgeAgent"))
 
     @scenario_cache()
     async def call(
@@ -370,6 +389,7 @@ if you don't have enough information to make a verdict, say inconclusive with ma
                     if (is_last_message or enforce_judgment) and has_criteria
                     else "required"
                 ),
+                **self._extra_params,
             ),
         )
 
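Taken together, the judge now forwards arbitrary keyword arguments to `litellm.completion`, merging them on top of whatever extras the global `ModelConfig` carries (`{**config_dict, **extra_params}`, so the per-agent value wins). A sketch of that precedence, modeled on the new test further down; the header names and timeout values are illustrative.

from scenario import JudgeAgent
from scenario.config import ModelConfig, ScenarioConfig

ScenarioConfig.default_config = ScenarioConfig(
    default_model=ModelConfig(
        model="openai/gpt-4",
        headers={"X-Global": "global-value"},  # type: ignore  # extra via extra="allow"
        timeout=30,                            # type: ignore
    )
)

# timeout=60 here overrides the timeout=30 coming from the global ModelConfig,
# while headers fall through from the config; both reach litellm.completion.
judge = JudgeAgent(
    criteria=["Agent answers the billing question correctly"],
    timeout=60,
)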
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/scenario/user_simulator_agent.py

@@ -87,6 +87,7 @@ class UserSimulatorAgent(AgentAdapter):
     temperature: float
     max_tokens: Optional[int]
     system_prompt: Optional[str]
+    _extra_params: dict
 
     def __init__(
         self,
@@ -97,6 +98,7 @@ class UserSimulatorAgent(AgentAdapter):
         temperature: float = 0.0,
         max_tokens: Optional[int] = None,
         system_prompt: Optional[str] = None,
+        **extra_params,
     ):
         """
         Initialize a user simulator agent.
@@ -133,8 +135,12 @@ class UserSimulatorAgent(AgentAdapter):
                 '''
             )
             ```
+
+        Note:
+            Advanced usage: Additional parameters can be passed as keyword arguments
+            (e.g., headers, timeout, client) for specialized configurations. These are
+            experimental and may not be supported in future versions.
         """
-        # Override the default system prompt for the user simulator agent
         self.api_base = api_base
         self.api_key = api_key
         self.temperature = temperature
@@ -164,9 +170,22 @@ class UserSimulatorAgent(AgentAdapter):
             self.max_tokens = (
                 max_tokens or ScenarioConfig.default_config.default_model.max_tokens
             )
+            # Extract extra params from ModelConfig
+            config_dict = ScenarioConfig.default_config.default_model.model_dump(
+                exclude_none=True
+            )
+            config_dict.pop("model", None)
+            config_dict.pop("api_base", None)
+            config_dict.pop("api_key", None)
+            config_dict.pop("temperature", None)
+            config_dict.pop("max_tokens", None)
+            # Merge: config extras < agent extra_params
+            self._extra_params = {**config_dict, **extra_params}
+        else:
+            self._extra_params = extra_params
 
         if not hasattr(self, "model"):
-            raise Exception(agent_not_configured_error_message("
+            raise Exception(agent_not_configured_error_message("UserSimulatorAgent"))
 
     @scenario_cache()
     async def call(
@@ -237,6 +256,7 @@ Your goal (assistant) is to interact with the Agent Under Test (user) as if you
                 api_base=self.api_base,
                 max_tokens=self.max_tokens,
                 tools=[],
+                **self._extra_params,
             ),
         )
 
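`UserSimulatorAgent` gets the identical treatment, so the same pass-through works for the simulated user. A brief sketch, again with an illustrative litellm kwarg:

from scenario import UserSimulatorAgent

user_sim = UserSimulatorAgent(
    temperature=0.7,
    num_retries=2,  # agent-specific extra, forwarded to litellm.completion
)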
langwatch_scenario-0.7.12/tests/test_judge_agent.py

@@ -0,0 +1,94 @@
+import pytest
+from unittest.mock import patch, MagicMock
+from openai import OpenAI
+from scenario import JudgeAgent
+from scenario.config import ModelConfig, ScenarioConfig
+from scenario.types import AgentInput
+from scenario.cache import context_scenario
+from scenario.scenario_executor import ScenarioExecutor
+
+
+class FakeOpenAIClient:
+    """Fake client for testing without requiring API keys."""
+
+    def __init__(self, base_url=None, default_headers=None):
+        self.base_url = base_url
+        self.default_headers = default_headers
+
+
+@pytest.mark.asyncio
+async def test_judge_agent_merges_global_config_and_agent_params():
+    """JudgeAgent merges ModelConfig defaults with agent-specific overrides, including custom client."""
+    # Setup custom client
+    custom_client = FakeOpenAIClient(
+        base_url="https://custom.com", default_headers={"X-Global": "global-value"}
+    )
+
+    # Setup global config with extra params
+    ScenarioConfig.default_config = ScenarioConfig(
+        default_model=ModelConfig(
+            model="openai/gpt-4",
+            api_base="https://custom.com",
+            headers={"X-Global": "global-value"},  # type: ignore # extra param via ConfigDict(extra="allow")
+            timeout=30,  # type: ignore # extra param via ConfigDict(extra="allow")
+            client=custom_client,  # type: ignore # extra param via ConfigDict(extra="allow")
+        )
+    )
+
+    judge = JudgeAgent(
+        criteria=["Test criterion"],
+        temperature=0.5,
+        timeout=60,
+    )
+
+    # Create mock input
+    mock_scenario_state = MagicMock()
+    mock_scenario_state.description = "Test scenario"
+    mock_scenario_state.current_turn = 1
+    mock_scenario_state.config.max_turns = 10
+
+    agent_input = AgentInput(
+        thread_id="test",
+        messages=[{"role": "user", "content": "Hello"}],
+        new_messages=[],
+        judgment_request=True,
+        scenario_state=mock_scenario_state,
+    )
+
+    # Mock litellm.completion response
+    mock_response = MagicMock()
+    mock_response.choices = [MagicMock()]
+    mock_response.choices[0].message.tool_calls = [MagicMock()]
+    mock_response.choices[0].message.tool_calls[0].function.name = "finish_test"
+    mock_response.choices[0].message.tool_calls[
+        0
+    ].function.arguments = '{"verdict": "success", "reasoning": "Test passed", "criteria": {"test_criterion": true}}'
+
+    # Mock scenario context for cache decorator
+    mock_executor = MagicMock()
+    mock_executor.config = MagicMock()
+    mock_executor.config.cache_key = None
+    token = context_scenario.set(mock_executor)
+
+    try:
+        with patch(
+            "scenario.judge_agent.litellm.completion", return_value=mock_response
+        ) as mock_completion:
+            await judge.call(agent_input)
+
+            assert mock_completion.called
+            call_kwargs = mock_completion.call_args.kwargs
+
+            # Verify merged params: config defaults + agent overrides
+            assert call_kwargs["model"] == "openai/gpt-4"
+            assert call_kwargs["api_base"] == "https://custom.com"
+            assert call_kwargs["temperature"] == 0.5  # Agent override
+            assert call_kwargs["timeout"] == 60  # Agent override
+            assert call_kwargs["headers"] == {"X-Global": "global-value"}  # From config
+            assert (
+                call_kwargs["client"] == custom_client
+            )  # Custom client passed through
+    finally:
+        context_scenario.reset(token)
+        # Cleanup
+        ScenarioConfig.default_config = None
{langwatch_scenario-0.7.11 → langwatch_scenario-0.7.12}/tests/test_model_config.py

@@ -35,3 +35,29 @@ async def test_user_simulator_agent_uses_modelconfig_api_base():
         mock_completion.call_args.kwargs["api_base"]
         == "https://custom-api-base.example.com"
     )
+
+
+def test_modelconfig_accepts_extra_litellm_params():
+    """ModelConfig accepts arbitrary litellm parameters via extra='allow'."""
+    from openai import OpenAI
+
+    custom_client = MagicMock(spec=OpenAI)
+
+    config = ModelConfig(
+        model="openai/gpt-4",
+        api_base="https://custom.com",
+        headers={"X-Custom-Header": "test-value"},  # type: ignore # extra param via ConfigDict(extra="allow")
+        timeout=60,  # type: ignore # extra param via ConfigDict(extra="allow")
+        num_retries=3,  # type: ignore # extra param via ConfigDict(extra="allow")
+        client=custom_client,  # type: ignore # extra param via ConfigDict(extra="allow")
+    )
+
+    assert config.model == "openai/gpt-4"
+    assert config.api_base == "https://custom.com"
+
+    # Verify extra params are stored
+    dump = config.model_dump()
+    assert dump["headers"] == {"X-Custom-Header": "test-value"}
+    assert dump["timeout"] == 60
+    assert dump["num_retries"] == 3
+    assert dump["client"] == custom_client
langwatch_scenario-0.7.12/tests/test_user_simulator_agent.py

@@ -0,0 +1,68 @@
+import pytest
+from unittest.mock import patch, MagicMock
+from scenario import UserSimulatorAgent
+from scenario.config import ModelConfig, ScenarioConfig
+from scenario.types import AgentInput
+from scenario.cache import context_scenario
+from scenario.scenario_executor import ScenarioExecutor
+
+
+@pytest.mark.asyncio
+async def test_user_simulator_agent_merges_global_config_and_agent_params():
+    """UserSimulatorAgent merges ModelConfig defaults with agent-specific overrides."""
+    # Setup global config with extra params
+    ScenarioConfig.default_config = ScenarioConfig(
+        default_model=ModelConfig(
+            model="openai/gpt-4",
+            headers={"X-Auth": "token-123"},  # type: ignore # extra param via ConfigDict(extra="allow")
+            max_retries=5,  # type: ignore # extra param via ConfigDict(extra="allow")
+        )
+    )
+
+    user_sim = UserSimulatorAgent(
+        temperature=0.7,
+        num_retries=2,
+    )
+
+    # Create mock input
+    mock_scenario_state = MagicMock()
+    mock_scenario_state.description = "Test scenario"
+
+    agent_input = AgentInput(
+        thread_id="test",
+        messages=[],
+        new_messages=[],
+        scenario_state=mock_scenario_state,
+    )
+
+    # Mock litellm.completion response
+    mock_response = MagicMock()
+    mock_response.choices = [MagicMock()]
+    mock_response.choices[0].message.content = "test user message"
+
+    # Mock scenario context for cache decorator
+    mock_executor = MagicMock()
+    mock_executor.config = MagicMock()
+    mock_executor.config.cache_key = None
+    token = context_scenario.set(mock_executor)
+
+    try:
+        with patch(
+            "scenario.user_simulator_agent.litellm.completion",
+            return_value=mock_response,
+        ) as mock_completion:
+            await user_sim.call(agent_input)
+
+            assert mock_completion.called
+            call_kwargs = mock_completion.call_args.kwargs
+
+            # Verify merged params
+            assert call_kwargs["model"] == "openai/gpt-4"
+            assert call_kwargs["temperature"] == 0.7  # Agent override
+            assert call_kwargs["headers"] == {"X-Auth": "token-123"}  # From config
+            assert call_kwargs["max_retries"] == 5  # From config
+            assert call_kwargs["num_retries"] == 2  # Agent-specific
+    finally:
+        context_scenario.reset(token)
+        # Cleanup
+        ScenarioConfig.default_config = None
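Assuming a development checkout with pytest and pytest-asyncio installed (both are exercised by the new async tests), the coverage added in 0.7.12 can be run in isolation; the paths are taken from the file list above.

import pytest

# Run only the tests added or extended in this release.
pytest.main([
    "tests/test_judge_agent.py",
    "tests/test_user_simulator_agent.py",
    "tests/test_model_config.py",
    "-q",
])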