azure-ai-evaluation 1.0.0b4__tar.gz → 1.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of azure-ai-evaluation might be problematic.
- azure_ai_evaluation-1.0.1/CHANGELOG.md +220 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/MANIFEST.in +2 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/NOTICE.txt +20 -0
- azure_ai_evaluation-1.0.1/PKG-INFO +600 -0
- azure_ai_evaluation-1.0.1/README.md +345 -0
- azure_ai_evaluation-1.0.1/TROUBLESHOOTING.md +61 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/__init__.py +22 -0
- {azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/simulator/_helpers → azure_ai_evaluation-1.0.1/azure/ai/evaluation/_common}/_experimental.py +4 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_common/constants.py +5 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_common/math.py +89 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_common/rai_service.py +250 -62
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_common/utils.py +196 -23
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_constants.py +7 -6
- {azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluate/_batch_run_client → azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluate/_batch_run}/__init__.py +3 -2
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluate/_batch_run_client/batch_run_context.py → azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +13 -4
- {azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluate/_batch_run_client → azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluate/_batch_run}/proxy_client.py +19 -6
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluate/_eval_run.py +55 -14
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluate/_evaluate.py +312 -228
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluate/_telemetry/__init__.py +7 -6
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluate/_utils.py +46 -11
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_bleu/_bleu.py +17 -18
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_coherence/_coherence.py +107 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +99 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_common/_base_eval.py +37 -24
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +21 -9
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +52 -16
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +129 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +125 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_content_safety/_violence.py +126 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_eci/_eci.py +31 -4
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_fluency/_fluency.py +104 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +86 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_gleu/_gleu.py +14 -16
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +144 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_meteor/_meteor.py +20 -27
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +113 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_qa/_qa.py +23 -31
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_relevance/_relevance.py +114 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +100 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +112 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_rouge/_rouge.py +26 -27
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_similarity/_similarity.py +37 -28
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluators/_xpia/xpia.py +125 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_exceptions.py +19 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/_model_configurations.py +123 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_version.py +1 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/__init__.py +2 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_adversarial_scenario.py +20 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_adversarial_simulator.py +29 -35
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_constants.py +11 -1
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure_ai_evaluation-1.0.1/azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_direct_attack_simulator.py +17 -9
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_indirect_attack_simulator.py +90 -35
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +4 -2
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_simulator.py +165 -105
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.0.1/azure_ai_evaluation.egg-info/PKG-INFO +600 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure_ai_evaluation.egg-info/SOURCES.txt +27 -7
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/pyproject.toml +1 -2
- azure_ai_evaluation-1.0.1/samples/README.md +57 -0
- azure_ai_evaluation-1.0.1/samples/data/evaluate_test_data.jsonl +3 -0
- azure_ai_evaluation-1.0.1/samples/evaluation_samples_common.py +60 -0
- azure_ai_evaluation-1.0.1/samples/evaluation_samples_evaluate.py +395 -0
- azure_ai_evaluation-1.0.1/samples/evaluation_samples_simulate.py +249 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/setup.py +2 -1
- azure_ai_evaluation-1.0.1/tests/__pf_service_isolation.py +28 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/conftest.py +37 -9
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/target_fn.py +18 -0
- azure_ai_evaluation-1.0.1/tests/e2etests/test_builtin_evaluators.py +997 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/test_evaluate.py +418 -23
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/test_sim_and_eval.py +5 -9
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_batch_run_context.py +8 -8
- azure_ai_evaluation-1.0.1/tests/unittests/test_built_in_evaluator.py +128 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_content_safety_rai_script.py +61 -15
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_eval_run.py +28 -2
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_evaluate.py +124 -44
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_non_adv_simulator.py +12 -9
- azure_ai_evaluation-1.0.1/tests/unittests/test_utils.py +258 -0
- azure_ai_evaluation-1.0.0b4/CHANGELOG.md +0 -115
- azure_ai_evaluation-1.0.0b4/PKG-INFO +0 -535
- azure_ai_evaluation-1.0.0b4/README.md +0 -385
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_common/math.py +0 -18
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_coherence/_coherence.py +0 -71
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +0 -57
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +0 -101
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +0 -55
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +0 -55
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +0 -55
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_content_safety/_violence.py +0 -55
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_fluency/_fluency.py +0 -73
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +0 -56
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +0 -72
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +0 -57
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_relevance/_relevance.py +0 -78
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +0 -64
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +0 -154
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +0 -43
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluators/_xpia/xpia.py +0 -64
- azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_model_configurations.py +0 -55
- azure_ai_evaluation-1.0.0b4/azure_ai_evaluation.egg-info/PKG-INFO +0 -535
- azure_ai_evaluation-1.0.0b4/tests/e2etests/test_builtin_evaluators.py +0 -474
- azure_ai_evaluation-1.0.0b4/tests/unittests/test_built_in_evaluator.py +0 -41
- azure_ai_evaluation-1.0.0b4/tests/unittests/test_utils.py +0 -20
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_common/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluate/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4/azure/ai/evaluation/_evaluate/_batch_run_client → azure_ai_evaluation-1.0.1/azure/ai/evaluation/_evaluate/_batch_run}/code_client.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_bleu/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_coherence/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_common/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_eci/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_f1_score/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_fluency/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_gleu/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_groundedness/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_meteor/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_protected_material/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_qa/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_relevance/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_retrieval/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_rouge/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_similarity/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_evaluators/_xpia/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_http_utils.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_user_agent.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/rouge_score/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/py.typed +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_conversation/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_conversation/_conversation.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_conversation/constants.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/_template_handler.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_model_tools/models.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_prompty/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure/ai/evaluation/simulator/_tracing.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure_ai_evaluation.egg-info/dependency_links.txt +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure_ai_evaluation.egg-info/not-zip-safe +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure_ai_evaluation.egg-info/requires.txt +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/azure_ai_evaluation.egg-info/top_level.txt +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/setup.cfg +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/__openai_patcher.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/__init__.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/custom_evaluators/answer_length_with_aggregation.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/test_adv_simulator.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/e2etests/test_metrics_upload.py +1 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_content_safety_defect_rate.py +1 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_evaluate_telemetry.py +1 -1
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_evaluators/apology_dag/apology.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_evaluators/test_inputs_evaluators.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_jailbreak_simulator.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_save_eval.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_simulator.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_synthetic_callback_conv_bot.py +0 -0
- {azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/tests/unittests/test_synthetic_conversation_bot.py +1 -1
azure_ai_evaluation-1.0.1/CHANGELOG.md
@@ -0,0 +1,220 @@
# Release History

## 1.0.1 (2024-11-15)

### Bugs Fixed
- Fixed the `[remote]` extra so that it is needed only when tracking results in Azure AI Studio.
- Removed `azure-ai-inference` as a dependency.

## 1.0.0 (2024-11-13)

### Breaking Changes
- The `parallel` parameter has been removed from composite evaluators: `QAEvaluator`, `ContentSafetyChatEvaluator`, and `ContentSafetyMultimodalEvaluator`. To control evaluator parallelism, you can now use the `_parallel` keyword argument, though note that this private parameter may change in the future.
- Parameters `query_response_generating_prompty_kwargs` and `user_simulator_prompty_kwargs` have been renamed to `query_response_generating_prompty_options` and `user_simulator_prompty_options` in the Simulator's `__call__` method.

### Bugs Fixed
- Fixed an issue where the `output_path` parameter in the `evaluate` API did not support relative paths.
- Output of adversarial simulators is now of type `JsonLineList`, and the helper function `to_eval_qr_json_lines` now outputs context from both user and assistant turns, along with `category` if it exists in the conversation.
- Fixed an issue where, during long-running simulations, the API token would expire and cause a "Forbidden" error. Users can now set the environment variable `AZURE_TOKEN_REFRESH_INTERVAL` to refresh the token more frequently, preventing expiration and ensuring continuous operation of the simulation.
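A minimal sketch of setting this variable; the interval value and its unit (seconds) are assumptions, so check the package documentation for the exact format:
```python
import os

# Hypothetical value: ask the simulator to refresh its cached API token
# every 600 seconds so long-running simulations don't hit "Forbidden".
os.environ["AZURE_TOKEN_REFRESH_INTERVAL"] = "600"
```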
- Fixed the `evaluate` function not producing aggregated metrics if ANY of the values to be aggregated were None, NaN, or otherwise difficult to process. Such values are now ignored entirely, so the aggregated metric of `[1, 2, 3, NaN]` is 2, not 1.5.
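To illustrate the aggregation semantics described above, a standard-library sketch (not the SDK's internal code):
```python
import math

def aggregate(values):
    # Drop None, NaN, and anything non-numeric before averaging,
    # mirroring the behavior described in the bullet above.
    usable = [v for v in values if isinstance(v, (int, float)) and not math.isnan(v)]
    return sum(usable) / len(usable) if usable else math.nan

print(aggregate([1, 2, 3, math.nan]))  # 2.0, not 1.5
```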

### Other Changes
- Refined error messages for service-based evaluators and simulators.
- Tracing has been disabled due to a Cosmos DB initialization issue.
- Introduced the environment variable `AI_EVALS_DISABLE_EXPERIMENTAL_WARNING` to disable the warning message for experimental features.
- Changed the randomization pattern for `AdversarialSimulator` so that the adversarial harm categories (e.g. Hate + Unfairness, Self-Harm, Violence, Sex) are represented in roughly equal numbers in the `AdversarialSimulator` outputs. Previously, for 200 `max_simulation_results` a user might see 140 results belonging to the 'Hate + Unfairness' category and 40 results belonging to the 'Self-Harm' category. Now, users will see 50 results for each of Hate + Unfairness, Self-Harm, Violence, and Sex.
- For the `DirectAttackSimulator`, the prompt templates used to generate simulated outputs for each adversarial harm category are no longer in a randomized order by default. To override this behavior, pass `randomize_order=True` when you call the `DirectAttackSimulator`, for example:
```python
adversarial_simulator = DirectAttackSimulator(
    azure_ai_project=azure_ai_project, credential=DefaultAzureCredential()
)
outputs = asyncio.run(
    adversarial_simulator(
        scenario=scenario,
        target=callback,
        randomize_order=True,
    )
)
```

## 1.0.0b5 (2024-10-28)

### Features Added
- Added `GroundednessProEvaluator`, a service-based evaluator for determining response groundedness (see the usage sketch after this list).
- Groundedness detection in the non-adversarial Simulator via query/context pairs:
```python
import asyncio
import importlib.resources as pkg_resources
import json

package = "azure.ai.evaluation.simulator._data_sources"
resource_name = "grounding.json"
custom_simulator = Simulator(model_config=model_config)
conversation_turns = []
with pkg_resources.path(package, resource_name) as grounding_file:
    with open(grounding_file, "r") as file:
        data = json.load(file)
        for item in data:
            conversation_turns.append([item])
outputs = asyncio.run(custom_simulator(
    target=callback,
    conversation_turns=conversation_turns,
    max_conversation_turns=1,
))
```
- Added evaluators for multimodal use cases.
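A minimal usage sketch for `GroundednessProEvaluator`, assuming it follows the same construction pattern as the other service-based evaluators in this package (the exact call shape and result keys may differ):
```python
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import GroundednessProEvaluator

azure_ai_project = {
    "subscription_id": "<subscription-id>",
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}
groundedness_pro = GroundednessProEvaluator(
    azure_ai_project=azure_ai_project,
    credential=DefaultAzureCredential(),
)
# Single-turn evaluation: is the response grounded in the given context?
result = groundedness_pro(
    query="What shape is the Earth?",
    context="The Earth is an oblate spheroid.",
    response="The Earth is roughly spherical.",
)
```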

### Breaking Changes
- Renamed environment variable `PF_EVALS_BATCH_USE_ASYNC` to `AI_EVALS_BATCH_USE_ASYNC`.
- `RetrievalEvaluator` now requires a `context` input in addition to `query` in single-turn evaluation (see the input sketch after this list).
- `RelevanceEvaluator` no longer takes `context` as an input. It now takes only `query` and `response` in single-turn evaluation.
- `FluencyEvaluator` no longer takes `query` as an input. It now takes only `response` in single-turn evaluation.
- The `AdversarialScenario` enum no longer includes `ADVERSARIAL_INDIRECT_JAILBREAK`; invoking indirect jailbreak (XPIA) should be done with `IndirectAttackSimulator`.
- Outputs of `Simulator` and `AdversarialSimulator` previously had `to_eval_qa_json_lines` and now have `to_eval_qr_json_lines`. Where `to_eval_qa_json_lines` had:
```json
{"question": <user_message>, "answer": <assistant_message>}
```
`to_eval_qr_json_lines` now has:
```json
{"query": <user_message>, "response": <assistant_message>}
```
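A sketch of the new single-turn input shapes for the evaluators above, assuming `model_config` is defined as in the package README:
```python
from azure.ai.evaluation import FluencyEvaluator, RelevanceEvaluator, RetrievalEvaluator

retrieval = RetrievalEvaluator(model_config=model_config)
relevance = RelevanceEvaluator(model_config=model_config)
fluency = FluencyEvaluator(model_config=model_config)

# RetrievalEvaluator now requires context in addition to query:
retrieval(query="What is the capital of France?", context="Paris is the capital of France.")
# RelevanceEvaluator no longer takes context:
relevance(query="What is the capital of France?", response="Paris.")
# FluencyEvaluator no longer takes query:
fluency(response="Paris is the capital of France.")
```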

### Bugs Fixed
- The non-adversarial simulator now works with `gpt-4o` models using the `json_schema` response format.
- Fixed an issue where the `evaluate` API would fail with "[WinError 32] The process cannot access the file because it is being used by another process" when the venv folder and the target function file are in the same directory.
- Fixed an `evaluate` API failure when `trace.destination` is set to `none`.
- The non-adversarial simulator now accepts context from the callback.

### Other Changes
- Improved error messages for the `evaluate` API by enhancing the validation of input parameters. This update provides more detailed and actionable error descriptions.
- `GroundednessEvaluator` now supports `query` as an optional input in single-turn evaluation. If `query` is provided, a different prompt template is used for the evaluation.
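A sketch of both call shapes, again assuming `model_config` as defined in the package README:
```python
from azure.ai.evaluation import GroundednessEvaluator

groundedness = GroundednessEvaluator(model_config=model_config)

# Without query: groundedness of the response against the context alone.
groundedness(context="Paris is the capital of France.",
             response="The capital of France is Paris.")
# With query: a different prompt template that also considers the question.
groundedness(query="What is the capital of France?",
             context="Paris is the capital of France.",
             response="The capital of France is Paris.")
```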
- To align with our support of a diverse set of models, the following evaluators now emit a new key in their result output without the `gpt_` prefix. To maintain backwards compatibility, the old key with the `gpt_` prefix is still present in the output; however, we recommend using the new key moving forward, as the old key will be deprecated in the future.
  - `CoherenceEvaluator`
  - `RelevanceEvaluator`
  - `FluencyEvaluator`
  - `GroundednessEvaluator`
  - `SimilarityEvaluator`
  - `RetrievalEvaluator`
- The following evaluators now emit a new key in their result output containing the LLM's reasoning behind the score, following the pattern `<metric_name>_reason` (see the output sketch after this list). The reasoning comes from a more detailed prompt template used to generate the LLM response; note that this requires a larger maximum token count when running these evaluators.

| Evaluator | New `max_token` for Generation |
| --- | --- |
| `CoherenceEvaluator` | 800 |
| `RelevanceEvaluator` | 800 |
| `FluencyEvaluator` | 800 |
| `GroundednessEvaluator` | 800 |
| `RetrievalEvaluator` | 1600 |

- Improved the error message for storage access permission issues to provide clearer guidance for users.
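An illustrative sketch of what a single result from one of these evaluators might now contain, with the duplicated legacy key and the new reasoning key (key names follow the patterns above; the values and reasoning text are made up):
```python
from azure.ai.evaluation import CoherenceEvaluator

coherence = CoherenceEvaluator(model_config=model_config)  # model_config as in the README
result = coherence(query="What is the capital of France?", response="Paris.")
# Illustrative output shape:
# {
#     "coherence": 4.0,       # new key, no gpt_ prefix
#     "gpt_coherence": 4.0,   # legacy key, kept for backwards compatibility
#     "coherence_reason": "The response directly and coherently answers the query.",
# }
```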

## 1.0.0b4 (2024-10-16)

### Breaking Changes

- Removed the `numpy` dependency. All NaN values returned by the SDK have been changed from `numpy.nan` to `math.nan`.
- `credential` is now required to be passed in for all content safety evaluators and `ProtectedMaterialsEvaluator`; `DefaultAzureCredential` is no longer chosen implicitly when a credential is not passed (see the sketch after this list).
- Changed the package extra name from "pf-azure" to "remote".
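A sketch of the now-required explicit credential, assuming `azure_ai_project` is defined as in the package README:
```python
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation import ViolenceEvaluator

# credential must now be passed explicitly; it is no longer defaulted.
violence = ViolenceEvaluator(
    credential=DefaultAzureCredential(),
    azure_ai_project=azure_ai_project,
)
```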

### Bugs Fixed
- Adversarial conversation simulations would fail with `Forbidden`. Added logic to re-fetch the token inside the exponential retry logic used to retrieve the RAI Service response.
- Fixed an issue where the `evaluate` API did not fail on missing inputs when the target did not return columns required by the evaluators.

### Other Changes
- Enhanced the error message to provide clearer instructions when required packages for the remote tracking feature are missing.
- The per-evaluator run summary is now printed at the end of the `evaluate` API call to make troubleshooting row-level failures easier.

## 1.0.0b3 (2024-10-01)

### Features Added

- Added a `type` field to `AzureOpenAIModelConfiguration` and `OpenAIModelConfiguration`.
- The following evaluators now support `conversation` as an alternative input to their usual single-turn inputs (see the sketch after this list):
  - `ViolenceEvaluator`
  - `SexualEvaluator`
  - `SelfHarmEvaluator`
  - `HateUnfairnessEvaluator`
  - `ProtectedMaterialEvaluator`
  - `IndirectAttackEvaluator`
  - `CoherenceEvaluator`
  - `RelevanceEvaluator`
  - `FluencyEvaluator`
  - `GroundednessEvaluator`
- Surfaced `RetrievalScoreEvaluator`, formerly an internal part of `ChatEvaluator`, as a standalone conversation-only evaluator.
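A sketch of the new `type` field and the `conversation` input shape. The message layout mirrors the query/response convention used elsewhere in this changelog, and the literal `"azure_openai"` is an assumption based on the Azure OpenAI model configuration described in this document:
```python
import os
from azure.ai.evaluation import CoherenceEvaluator

# Model configuration now carries an explicit "type" field.
model_config = {
    "type": "azure_openai",
    "azure_endpoint": os.environ["AZURE_OPENAI_ENDPOINT"],
    "azure_deployment": os.environ["AZURE_DEPLOYMENT"],
}
coherence = CoherenceEvaluator(model_config=model_config)

# `conversation` as an alternative to the single-turn query/response inputs.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "Paris."},
    ]
}
result = coherence(conversation=conversation)
```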

### Breaking Changes

- Removed `ContentSafetyChatEvaluator` and `ChatEvaluator`.
- The `evaluator_config` parameter of `evaluate` now maps an evaluator name to a dictionary `EvaluatorConfig`, which is a `TypedDict`. The `column_mapping` between `data` or `target` and evaluator field names should now be specified inside this new dictionary:

Before:
```python
evaluate(
    ...,
    evaluator_config={
        "hate_unfairness": {
            "query": "${data.question}",
            "response": "${data.answer}",
        }
    },
    ...
)
```

After:
```python
evaluate(
    ...,
    evaluator_config={
        "hate_unfairness": {
            "column_mapping": {
                "query": "${data.question}",
                "response": "${data.answer}",
            }
        }
    },
    ...
)
```

- The Simulator now requires a model configuration to call the prompty, instead of an Azure AI project scope. This enables using the simulator with Entra ID based auth.
Before:
```python
azure_ai_project = {
    "subscription_id": os.environ.get("AZURE_SUBSCRIPTION_ID"),
    "resource_group_name": os.environ.get("RESOURCE_GROUP"),
    "project_name": os.environ.get("PROJECT_NAME"),
}
sim = Simulator(azure_ai_project=azure_ai_project, credential=DefaultAzureCredential())
```
After:
```python
model_config = {
    "azure_endpoint": os.environ.get("AZURE_OPENAI_ENDPOINT"),
    "azure_deployment": os.environ.get("AZURE_DEPLOYMENT"),
}
sim = Simulator(model_config=model_config)
```
If `api_key` is not included in the `model_config`, the prompty runtime in `promptflow-core` will pick up `DefaultAzureCredential`.

### Bugs Fixed

- Fixed an issue where Entra ID authentication was not working with `AzureOpenAIModelConfiguration`.

## 1.0.0b2 (2024-09-24)

### Breaking Changes

- `data` and `evaluators` are now required keywords in `evaluate`.
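A minimal sketch of the required-keyword call shape; the data path and evaluator choice are illustrative, with `model_config` as in the package README:
```python
from azure.ai.evaluation import CoherenceEvaluator, evaluate

result = evaluate(
    data="evaluate_test_data.jsonl",  # must now be passed as a keyword
    evaluators={"coherence": CoherenceEvaluator(model_config=model_config)},
)
```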

## 1.0.0b1 (2024-09-20)

### Breaking Changes

- The `synthetic` namespace has been renamed to `simulator`, and sub-namespaces under this module have been removed.
- The `evaluate` and `evaluators` namespaces have been removed, and everything previously exposed in those modules has been added to the root namespace `azure.ai.evaluation`.
- The parameter name `project_scope` in content safety evaluators has been renamed to `azure_ai_project` for consistency with the evaluate API and simulators.
- Model configuration classes are now of type `TypedDict` and are exposed in the `azure.ai.evaluation` module instead of coming from `promptflow.core`.
- Updated the parameter names `question` and `answer` in built-in evaluators to the more generic terms `query` and `response`.

### Features Added

- First preview.
- This package is a port of `promptflow-evals`. New features will be added only to this package moving forward.
- Added a `TypedDict` for `AzureAIProject` that allows for better IntelliSense and type checking when passing in project information.
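A sketch of using that `TypedDict` for type-checked project information (placeholder values; the field names follow the `azure_ai_project` dictionary shown earlier in this changelog):
```python
from azure.ai.evaluation import AzureAIProject

azure_ai_project: AzureAIProject = {
    "subscription_id": "<subscription-id>",
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}
```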

{azure_ai_evaluation-1.0.0b4 → azure_ai_evaluation-1.0.1}/NOTICE.txt
@@ -48,3 +48,23 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


License notice for [Is GPT-4 a reliable rater? Evaluating consistency in GPT-4's text ratings](https://www.frontiersin.org/journals/education/articles/10.3389/feduc.2023.1272229/full)
------------------------------------------------------------------------------------------------------------------
Copyright © 2023 Hackl, Müller, Granitzer and Sailer. This work is openly licensed via [CC BY 4.0](http://creativecommons.org/licenses/by/4.0/).


License notice for [Is ChatGPT a Good NLG Evaluator? A Preliminary Study](https://aclanthology.org/2023.newsum-1.1) (Wang et al., NewSum 2023)
------------------------------------------------------------------------------------------------------------------
Copyright © 2023. This work is openly licensed via [CC BY 4.0](http://creativecommons.org/licenses/by/4.0/).


License notice for [SummEval: Re-evaluating Summarization Evaluation](https://doi.org/10.1162/tacl_a_00373) (Fabbri et al.)
------------------------------------------------------------------------------------------------------------------
© 2021 Association for Computational Linguistics. This work is openly licensed via [CC BY 4.0](http://creativecommons.org/licenses/by/4.0/).


License notice for [Evaluation Metrics in the Era of GPT-4: Reliably Evaluating Large Language Models on Sequence to Sequence Tasks](https://aclanthology.org/2023.emnlp-main.543) (Sottana et al., EMNLP 2023)
------------------------------------------------------------------------------------------------------------------
© 2023 Association for Computational Linguistics. This work is openly licensed via [CC BY 4.0](http://creativecommons.org/licenses/by/4.0/).