azure-ai-evaluation 1.0.0b2__py3-none-any.whl → 1.13.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of azure-ai-evaluation might be problematic.

Files changed (299)
  1. azure/ai/evaluation/__init__.py +100 -5
  2. azure/ai/evaluation/{_evaluators/_chat → _aoai}/__init__.py +3 -2
  3. azure/ai/evaluation/_aoai/aoai_grader.py +140 -0
  4. azure/ai/evaluation/_aoai/label_grader.py +68 -0
  5. azure/ai/evaluation/_aoai/python_grader.py +86 -0
  6. azure/ai/evaluation/_aoai/score_model_grader.py +94 -0
  7. azure/ai/evaluation/_aoai/string_check_grader.py +66 -0
  8. azure/ai/evaluation/_aoai/text_similarity_grader.py +80 -0
  9. azure/ai/evaluation/_azure/__init__.py +3 -0
  10. azure/ai/evaluation/_azure/_clients.py +204 -0
  11. azure/ai/evaluation/_azure/_envs.py +207 -0
  12. azure/ai/evaluation/_azure/_models.py +227 -0
  13. azure/ai/evaluation/_azure/_token_manager.py +129 -0
  14. azure/ai/evaluation/_common/__init__.py +9 -1
  15. azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +24 -9
  16. azure/ai/evaluation/_common/constants.py +131 -2
  17. azure/ai/evaluation/_common/evaluation_onedp_client.py +169 -0
  18. azure/ai/evaluation/_common/math.py +89 -0
  19. azure/ai/evaluation/_common/onedp/__init__.py +32 -0
  20. azure/ai/evaluation/_common/onedp/_client.py +166 -0
  21. azure/ai/evaluation/_common/onedp/_configuration.py +72 -0
  22. azure/ai/evaluation/_common/onedp/_model_base.py +1232 -0
  23. azure/ai/evaluation/_common/onedp/_patch.py +21 -0
  24. azure/ai/evaluation/_common/onedp/_serialization.py +2032 -0
  25. azure/ai/evaluation/_common/onedp/_types.py +21 -0
  26. azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
  27. azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
  28. azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
  29. azure/ai/evaluation/_common/onedp/_validation.py +66 -0
  30. azure/ai/evaluation/_common/onedp/_vendor.py +50 -0
  31. azure/ai/evaluation/_common/onedp/_version.py +9 -0
  32. azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -0
  33. azure/ai/evaluation/_common/onedp/aio/_client.py +168 -0
  34. azure/ai/evaluation/_common/onedp/aio/_configuration.py +72 -0
  35. azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -0
  36. azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +49 -0
  37. azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +7143 -0
  38. azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -0
  39. azure/ai/evaluation/_common/onedp/models/__init__.py +358 -0
  40. azure/ai/evaluation/_common/onedp/models/_enums.py +447 -0
  41. azure/ai/evaluation/_common/onedp/models/_models.py +5963 -0
  42. azure/ai/evaluation/_common/onedp/models/_patch.py +21 -0
  43. azure/ai/evaluation/_common/onedp/operations/__init__.py +49 -0
  44. azure/ai/evaluation/_common/onedp/operations/_operations.py +8951 -0
  45. azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -0
  46. azure/ai/evaluation/_common/onedp/py.typed +1 -0
  47. azure/ai/evaluation/_common/onedp/servicepatterns/__init__.py +1 -0
  48. azure/ai/evaluation/_common/onedp/servicepatterns/aio/__init__.py +1 -0
  49. azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/__init__.py +25 -0
  50. azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_operations.py +34 -0
  51. azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_patch.py +20 -0
  52. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/__init__.py +1 -0
  53. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/__init__.py +1 -0
  54. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/__init__.py +22 -0
  55. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_operations.py +29 -0
  56. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_patch.py +20 -0
  57. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/__init__.py +22 -0
  58. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_operations.py +29 -0
  59. azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_patch.py +20 -0
  60. azure/ai/evaluation/_common/onedp/servicepatterns/operations/__init__.py +25 -0
  61. azure/ai/evaluation/_common/onedp/servicepatterns/operations/_operations.py +34 -0
  62. azure/ai/evaluation/_common/onedp/servicepatterns/operations/_patch.py +20 -0
  63. azure/ai/evaluation/_common/rai_service.py +831 -142
  64. azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
  65. azure/ai/evaluation/_common/raiclient/_client.py +128 -0
  66. azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
  67. azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
  68. azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
  69. azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
  70. azure/ai/evaluation/_common/raiclient/_version.py +9 -0
  71. azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
  72. azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
  73. azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
  74. azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
  75. azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
  76. azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
  77. azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
  78. azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
  79. azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
  80. azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
  81. azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
  82. azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
  83. azure/ai/evaluation/_common/raiclient/operations/_operations.py +1238 -0
  84. azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
  85. azure/ai/evaluation/_common/raiclient/py.typed +1 -0
  86. azure/ai/evaluation/_common/utils.py +870 -34
  87. azure/ai/evaluation/_constants.py +167 -6
  88. azure/ai/evaluation/_converters/__init__.py +3 -0
  89. azure/ai/evaluation/_converters/_ai_services.py +899 -0
  90. azure/ai/evaluation/_converters/_models.py +467 -0
  91. azure/ai/evaluation/_converters/_sk_services.py +495 -0
  92. azure/ai/evaluation/_eval_mapping.py +83 -0
  93. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +17 -0
  94. azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +176 -0
  95. azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
  96. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +47 -25
  97. azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +42 -13
  98. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +124 -0
  99. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +62 -0
  100. azure/ai/evaluation/_evaluate/_eval_run.py +102 -59
  101. azure/ai/evaluation/_evaluate/_evaluate.py +2134 -311
  102. azure/ai/evaluation/_evaluate/_evaluate_aoai.py +992 -0
  103. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +14 -99
  104. azure/ai/evaluation/_evaluate/_utils.py +289 -40
  105. azure/ai/evaluation/_evaluator_definition.py +76 -0
  106. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +93 -42
  107. azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
  108. azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +119 -0
  109. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +117 -91
  110. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -39
  111. azure/ai/evaluation/_evaluators/_common/__init__.py +15 -0
  112. azure/ai/evaluation/_evaluators/_common/_base_eval.py +742 -0
  113. azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +63 -0
  114. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +345 -0
  115. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +198 -0
  116. azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
  117. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
  118. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -86
  119. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +138 -57
  120. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -55
  121. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +133 -54
  122. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +134 -54
  123. azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +7 -0
  124. azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +442 -0
  125. azure/ai/evaluation/_evaluators/_eci/_eci.py +49 -56
  126. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +102 -60
  127. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +115 -92
  128. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -41
  129. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +90 -37
  130. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +318 -82
  131. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +114 -0
  132. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +104 -0
  133. azure/ai/evaluation/{_evaluate/_batch_run_client → _evaluators/_intent_resolution}/__init__.py +3 -4
  134. azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +196 -0
  135. azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +275 -0
  136. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +107 -61
  137. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -77
  138. azure/ai/evaluation/_evaluators/_qa/_qa.py +115 -63
  139. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +182 -98
  140. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +178 -49
  141. azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
  142. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +202 -0
  143. azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +84 -0
  144. azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/__init__.py +2 -2
  145. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +148 -0
  146. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
  147. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +189 -50
  148. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  149. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +179 -0
  150. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +102 -91
  151. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
  152. azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
  153. azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +226 -0
  154. azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +101 -0
  155. azure/ai/evaluation/_evaluators/_task_completion/__init__.py +7 -0
  156. azure/ai/evaluation/_evaluators/_task_completion/_task_completion.py +177 -0
  157. azure/ai/evaluation/_evaluators/_task_completion/task_completion.prompty +220 -0
  158. azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
  159. azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py +384 -0
  160. azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
  161. azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +298 -0
  162. azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +166 -0
  163. azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
  164. azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
  165. azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
  166. azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
  167. azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
  168. azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
  169. azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
  170. azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
  171. azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
  172. azure/ai/evaluation/_evaluators/_tool_success/__init__.py +7 -0
  173. azure/ai/evaluation/_evaluators/_tool_success/_tool_success.py +301 -0
  174. azure/ai/evaluation/_evaluators/_tool_success/tool_success.prompty +321 -0
  175. azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
  176. azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +102 -0
  177. azure/ai/evaluation/_evaluators/_xpia/xpia.py +109 -107
  178. azure/ai/evaluation/_exceptions.py +51 -7
  179. azure/ai/evaluation/_http_utils.py +210 -137
  180. azure/ai/evaluation/_legacy/__init__.py +3 -0
  181. azure/ai/evaluation/_legacy/_adapters/__init__.py +7 -0
  182. azure/ai/evaluation/_legacy/_adapters/_check.py +17 -0
  183. azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
  184. azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
  185. azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
  186. azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
  187. azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
  188. azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
  189. azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
  190. azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
  191. azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
  192. azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
  193. azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
  194. azure/ai/evaluation/_legacy/_batch_engine/_config.py +48 -0
  195. azure/ai/evaluation/_legacy/_batch_engine/_engine.py +477 -0
  196. azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
  197. azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +132 -0
  198. azure/ai/evaluation/_legacy/_batch_engine/_result.py +107 -0
  199. azure/ai/evaluation/_legacy/_batch_engine/_run.py +127 -0
  200. azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
  201. azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +262 -0
  202. azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
  203. azure/ai/evaluation/_legacy/_batch_engine/_trace.py +97 -0
  204. azure/ai/evaluation/_legacy/_batch_engine/_utils.py +97 -0
  205. azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
  206. azure/ai/evaluation/_legacy/_common/__init__.py +3 -0
  207. azure/ai/evaluation/_legacy/_common/_async_token_provider.py +117 -0
  208. azure/ai/evaluation/_legacy/_common/_logging.py +292 -0
  209. azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +17 -0
  210. azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
  211. azure/ai/evaluation/_legacy/prompty/_connection.py +119 -0
  212. azure/ai/evaluation/_legacy/prompty/_exceptions.py +139 -0
  213. azure/ai/evaluation/_legacy/prompty/_prompty.py +430 -0
  214. azure/ai/evaluation/_legacy/prompty/_utils.py +663 -0
  215. azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
  216. azure/ai/evaluation/_model_configurations.py +130 -8
  217. azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
  218. azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
  219. azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +917 -0
  220. azure/ai/evaluation/_user_agent.py +32 -1
  221. azure/ai/evaluation/_vendor/__init__.py +3 -0
  222. azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
  223. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +324 -0
  224. azure/ai/evaluation/_vendor/rouge_score/scoring.py +59 -0
  225. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +59 -0
  226. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
  227. azure/ai/evaluation/_version.py +2 -1
  228. azure/ai/evaluation/red_team/__init__.py +22 -0
  229. azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
  230. azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
  231. azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
  232. azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
  233. azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
  234. azure/ai/evaluation/red_team/_attack_objective_generator.py +268 -0
  235. azure/ai/evaluation/red_team/_attack_strategy.py +49 -0
  236. azure/ai/evaluation/red_team/_callback_chat_target.py +115 -0
  237. azure/ai/evaluation/red_team/_default_converter.py +21 -0
  238. azure/ai/evaluation/red_team/_evaluation_processor.py +505 -0
  239. azure/ai/evaluation/red_team/_mlflow_integration.py +430 -0
  240. azure/ai/evaluation/red_team/_orchestrator_manager.py +803 -0
  241. azure/ai/evaluation/red_team/_red_team.py +1717 -0
  242. azure/ai/evaluation/red_team/_red_team_result.py +661 -0
  243. azure/ai/evaluation/red_team/_result_processor.py +1708 -0
  244. azure/ai/evaluation/red_team/_utils/__init__.py +37 -0
  245. azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +128 -0
  246. azure/ai/evaluation/red_team/_utils/_rai_service_target.py +601 -0
  247. azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +114 -0
  248. azure/ai/evaluation/red_team/_utils/constants.py +72 -0
  249. azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
  250. azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
  251. azure/ai/evaluation/red_team/_utils/formatting_utils.py +365 -0
  252. azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
  253. azure/ai/evaluation/red_team/_utils/metric_mapping.py +73 -0
  254. azure/ai/evaluation/red_team/_utils/objective_utils.py +46 -0
  255. azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
  256. azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
  257. azure/ai/evaluation/red_team/_utils/strategy_utils.py +218 -0
  258. azure/ai/evaluation/simulator/__init__.py +2 -1
  259. azure/ai/evaluation/simulator/_adversarial_scenario.py +26 -1
  260. azure/ai/evaluation/simulator/_adversarial_simulator.py +270 -144
  261. azure/ai/evaluation/simulator/_constants.py +12 -1
  262. azure/ai/evaluation/simulator/_conversation/__init__.py +151 -23
  263. azure/ai/evaluation/simulator/_conversation/_conversation.py +10 -6
  264. azure/ai/evaluation/simulator/_conversation/constants.py +1 -1
  265. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  266. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  267. azure/ai/evaluation/simulator/_direct_attack_simulator.py +54 -75
  268. azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
  269. azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +1 -0
  270. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
  271. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +145 -104
  272. azure/ai/evaluation/simulator/_model_tools/__init__.py +2 -1
  273. azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +225 -0
  274. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +80 -30
  275. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +117 -45
  276. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +109 -7
  277. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +97 -33
  278. azure/ai/evaluation/simulator/_model_tools/models.py +30 -27
  279. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +6 -10
  280. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
  281. azure/ai/evaluation/simulator/_simulator.py +302 -208
  282. azure/ai/evaluation/simulator/_utils.py +31 -13
  283. azure_ai_evaluation-1.13.3.dist-info/METADATA +939 -0
  284. azure_ai_evaluation-1.13.3.dist-info/RECORD +305 -0
  285. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/WHEEL +1 -1
  286. azure_ai_evaluation-1.13.3.dist-info/licenses/NOTICE.txt +70 -0
  287. azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +0 -71
  288. azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -357
  289. azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +0 -157
  290. azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +0 -48
  291. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -65
  292. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -301
  293. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -54
  294. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
  295. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
  296. azure/ai/evaluation/simulator/_tracing.py +0 -89
  297. azure_ai_evaluation-1.0.0b2.dist-info/METADATA +0 -449
  298. azure_ai_evaluation-1.0.0b2.dist-info/RECORD +0 -99
  299. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py

@@ -3,38 +3,71 @@
 # ---------------------------------------------------------
 
 from collections import Counter
-from typing import List
+from typing import List, Dict
+from typing_extensions import overload, override
 
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
+from azure.ai.evaluation._constants import EVALUATION_PASS_FAIL_MAPPING
 
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 
+class F1ScoreEvaluator(EvaluatorBase):
+    """
+    Calculates the F1 score for a given response and ground truth or a multi-turn conversation.
+
+    F1 Scores range from 0 to 1, with 1 being the best possible score.
+
+    The F1-score computes the ratio of the number of shared words between the model generation and
+    the ground truth. Ratio is computed over the individual words in the generated response against those in the ground
+    truth answer. The number of shared words between the generation and the truth is the basis of the F1 score:
+    precision is the ratio of the number of shared words to the total number of words in the generation, and recall
+    is the ratio of the number of shared words to the total number of words in the ground truth.
+
+    Use the F1 score when you want a single comprehensive metric that combines both recall and precision in your
+    model's responses. It provides a balanced evaluation of your model's performance in terms of capturing accurate
+    information in the response.
+
+    :param threshold: The threshold for the F1 score evaluator. Default is 0.5.
+    :type threshold: float
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START f1_score_evaluator]
+            :end-before: [END f1_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call an F1ScoreEvaluator.
+
+    .. admonition:: Example using Azure AI Project URL:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate_fdp.py
+            :start-after: [START f1_score_evaluator]
+            :end-before: [END f1_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call F1ScoreEvaluator using Azure AI Project URL in following format
+                https://{resource_name}.services.ai.azure.com/api/projects/{project_name}
+
+    .. admonition:: Example with Threshold:
+
+        .. literalinclude:: ../samples/evaluation_samples_threshold.py
+            :start-after: [START threshold_f1_score_evaluator]
+            :end-before: [END threshold_f1_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize with threshold and call an F1ScoreEvaluator.
+    """
 
-class _AsyncF1ScoreEvaluator:
-    def __init__(self):
-        pass
+    id = "azureai://built-in/evaluators/f1_score"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
-    async def __call__(self, *, response: str, ground_truth: str, **kwargs):
-        # Validate inputs
-        if not (response and response.strip() and response != "None") or not (
-            ground_truth and ground_truth.strip() and ground_truth != "None"
-        ):
-            msg = "Both 'response' and 'ground_truth' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
-
-        # Run f1 score computation.
-        f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
-
-        return {"f1_score": f1_result}
+    def __init__(self, *, threshold=0.5):
+        self._threshold = threshold
+        self._higher_is_better = True
+        super().__init__(threshold=threshold, _higher_is_better=self._higher_is_better)
 
     @classmethod
-    def _compute_f1_score(cls, response: str, ground_truth: str) -> str:
+    def _compute_f1_score(cls, response: str, ground_truth: str) -> float:
         import re
         import string
 
@@ -76,11 +109,9 @@ class _AsyncF1ScoreEvaluator:
 
             return white_space_fix(remove_articles(remove_punctuation(lower(text))))
 
-        prediction_tokens = normalize_text(response)
-        reference_tokens = normalize_text(ground_truth)
         tokenizer = QASplitTokenizer()
-        prediction_tokens = tokenizer(prediction_tokens)
-        reference_tokens = tokenizer(reference_tokens)
+        prediction_tokens = tokenizer(normalize_text(response))
+        reference_tokens = tokenizer(normalize_text(ground_truth))
 
         common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
         num_common_tokens = sum(common_tokens.values())
@@ -95,34 +126,34 @@
 
         return f1
 
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, float]:
+        """Produce an f1 score evaluation result.
 
-class F1ScoreEvaluator:
-    """
-    Initialize a f1 score evaluator for calculating F1 score.
-
-    **Usage**
-
-    .. code-block:: python
-
-        eval_fn = F1ScoreEvaluator()
-        result = eval_fn(
-            response="The capital of Japan is Tokyo.",
-            ground_truth="Tokyo is Japan's capital, known for its blend of traditional culture \
-                and technological advancements.")
-
-    **Output format**
-
-    .. code-block:: python
-
-        {
-            "f1_score": 0.42
+        :param eval_input: The input to the evaluation function.
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict
+        """
+        ground_truth = eval_input["ground_truth"]
+        response = eval_input["response"]
+        # Run f1 score computation.
+        f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
+        binary_result = False
+        if self._higher_is_better:
+            if f1_result >= self._threshold:
+                binary_result = True
+        else:
+            if f1_result <= self._threshold:
+                binary_result = True
+        return {
+            "f1_score": f1_result,
+            "f1_result": EVALUATION_PASS_FAIL_MAPPING[binary_result],
+            "f1_threshold": self._threshold,
         }
-    """
 
-    def __init__(self):
-        self._async_evaluator = _AsyncF1ScoreEvaluator()
-
-    def __call__(self, *, response: str, ground_truth: str, **kwargs):
+    @overload # type: ignore
+    def __call__(self, *, response: str, ground_truth: str) -> Dict[str, float]:
        """
        Evaluate F1 score.
 
@@ -131,12 +162,23 @@ class F1ScoreEvaluator:
        :keyword ground_truth: The ground truth to be evaluated.
        :paramtype ground_truth: str
        :return: The F1 score.
-       :rtype: dict
+       :rtype: Dict[str, float]
        """
 
-        return async_run_allowing_running_loop(
-            self._async_evaluator, response=response, ground_truth=ground_truth, **kwargs
-        )
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate F1 score.
 
-    def _to_async(self):
-        return self._async_evaluator
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :keyword ground_truth: The ground truth to be evaluated.
+        :paramtype ground_truth: str
+        :return: The F1 score.
+        :rtype: Dict[str, float]
+        """
+        return super().__call__(*args, **kwargs)
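
What changed in this file: the promptflow-based _AsyncF1ScoreEvaluator wrapper is gone, F1ScoreEvaluator now derives from the new EvaluatorBase, and results gain a pass/fail verdict against a configurable threshold alongside the raw score. The shared-token F1 computation itself is unchanged. Below is a minimal standalone sketch of that computation and the new threshold mapping; the _normalize helper is an illustrative stand-in for the package's normalize_text/QASplitTokenizer pipeline, and the "pass"/"fail" strings mirror what EVALUATION_PASS_FAIL_MAPPING appears to produce.

import re
import string
from collections import Counter

def _normalize(text: str):
    # Rough analogue of the evaluator's normalize_text: lowercase, drop
    # punctuation and English articles, then split on whitespace.
    text = text.lower().translate(str.maketrans("", "", string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return text.split()

def f1_score(response: str, ground_truth: str, threshold: float = 0.5):
    prediction_tokens = _normalize(response)
    reference_tokens = _normalize(ground_truth)

    # Multiset intersection: each shared word counts at most as often
    # as it appears in both texts.
    common = Counter(prediction_tokens) & Counter(reference_tokens)
    num_common = sum(common.values())
    if num_common == 0:
        return {"f1_score": 0.0, "f1_result": "fail", "f1_threshold": threshold}

    precision = num_common / len(prediction_tokens)  # shared / words in generation
    recall = num_common / len(reference_tokens)      # shared / words in ground truth
    f1 = (2 * precision * recall) / (precision + recall)
    return {
        "f1_score": f1,
        "f1_result": "pass" if f1 >= threshold else "fail",  # assumes higher-is-better
        "f1_threshold": threshold,
    }

print(f1_score("The capital of Japan is Tokyo.", "Tokyo is Japan's capital."))

On these inputs the sketch scores roughly 0.67, a "pass" at the default 0.5 threshold.
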
azure/ai/evaluation/_evaluators/_fluency/_fluency.py

@@ -3,115 +3,138 @@
 # ---------------------------------------------------------
 
 import os
-import re
+from typing import Dict, List, Union
 
-import numpy as np
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-from promptflow.core import AsyncPrompty
+from typing_extensions import overload, override
 
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+from azure.ai.evaluation._model_configurations import Conversation
 
-from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config
 
-try:
-    from ..._user_agent import USER_AGENT
-except ImportError:
-    USER_AGENT = None
-
-
-class _AsyncFluencyEvaluator:
-    # Constants must be defined within eval's directory to be save/loadable
-    PROMPTY_FILE = "fluency.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
-
-    def __init__(self, model_config: dict):
-        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
-
-        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
-
-        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
-        # https://github.com/encode/httpx/discussions/2959
-        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
-
-        ensure_user_agent_in_aoai_model_config(
-            model_config,
-            prompty_model_config,
-            USER_AGENT,
-        )
-
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
-        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        # Validate input parameters
-        query = str(query or "")
-        response = str(response or "")
-
-        if not (query.strip() and response.strip()):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
-
-        # Run the evaluation flow
-        llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
-
-        score = np.nan
-        if llm_output:
-            match = re.search(r"\d", llm_output)
-            if match:
-                score = float(match.group())
+class FluencyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
+    """
+    Evaluates the fluency of a given response or a multi-turn conversation, including reasoning.
 
-        return {"gpt_fluency": float(score)}
+    The fluency measure assesses the extent to which the generated text conforms to grammatical rules, syntactic
+    structures, and appropriate vocabulary usage, resulting in linguistically correct responses.
 
-
-class FluencyEvaluator:
-    """
-    Initialize a fluency evaluator configured for a specific Azure OpenAI model.
+    Fluency scores range from 1 to 5, with 1 being the least fluent and 5 being the most fluent.
 
     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]
+    :param threshold: The threshold for the fluency evaluator. Default is 3.
+    :type threshold: int
+    :param credential: The credential for authenticating to Azure AI service.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :keyword is_reasoning_model: If True, the evaluator will use reasoning model configuration (o1/o3 models).
+        This will adjust parameters like max_completion_tokens and remove unsupported parameters. Default is False.
+    :paramtype is_reasoning_model: bool
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START fluency_evaluator]
+            :end-before: [END fluency_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a FluencyEvaluator.
+
+    .. admonition:: Example with Threshold:
+
+        .. literalinclude:: ../samples/evaluation_samples_threshold.py
+            :start-after: [START threshold_fluency_evaluator]
+            :end-before: [END threshold_fluency_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize with threshold and call a FluencyEvaluator.
+
+    .. admonition:: Example using Azure AI Project URL:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate_fdp.py
+            :start-after: [START fluency_evaluator]
+            :end-before: [END fluency_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call FluencyEvaluator using Azure AI Project URL in the following format
+                https://{resource_name}.services.ai.azure.com/api/projects/{project_name}
+
+    .. note::
+
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
+    """
 
-    **Usage**
-
-    .. code-block:: python
-
-        eval_fn = FluencyEvaluator(model_config)
-        result = eval_fn(
-            query="What is the capital of Japan?",
-            response="The capital of Japan is Tokyo.")
+    _PROMPTY_FILE = "fluency.prompty"
+    _RESULT_KEY = "fluency"
 
-    **Output format**
+    id = "azureai://built-in/evaluators/fluency"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
-    .. code-block:: python
+    @override
+    def __init__(self, model_config, *, credential=None, threshold=3, **kwargs):
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+        self._threshold = threshold
+        self._higher_is_better = True
+        super().__init__(
+            model_config=model_config,
+            prompty_file=prompty_path,
+            result_key=self._RESULT_KEY,
+            threshold=threshold,
+            credential=credential,
+            _higher_is_better=self._higher_is_better,
+            **kwargs,
+        )
 
-        {
-            "gpt_fluency": 4.0
-        }
-    """
+    @overload
+    def __call__(
+        self,
+        *,
+        response: str,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluate fluency in given response
 
-    def __init__(self, model_config: dict):
-        self._async_evaluator = _AsyncFluencyEvaluator(model_config)
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :return: The fluency score
+        :rtype: Dict[str, float]
+        """
 
-    def __call__(self, *, query: str, response: str, **kwargs):
+    @overload
+    def __call__(
+        self,
+        *,
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluate fluency for a conversation
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The fluency score
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
        """
-        Evaluate fluency.
 
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate fluency. Accepts either a response for a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.
+
+        :keyword response: The response to be evaluated. Mutually exclusive with the "conversation" parameter.
+        :paramtype response: Optional[str]
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages". Conversation turns are expected to be dictionaries with keys "content" and "role".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
        :return: The fluency score.
-       :rtype: dict
+       :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+        return super().__call__(*args, **kwargs)
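
FluencyEvaluator follows the same refactor: the AsyncPrompty plumbing moves into PromptyEvaluatorBase, the old query input is dropped, and the overloads above add a conversation mode whose per-turn scores are aggregated. A sketch of the two call shapes, assuming placeholder endpoint and deployment values:

from azure.ai.evaluation import FluencyEvaluator

# Placeholder configuration values; substitute your own deployment details.
model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "azure_deployment": "<your-deployment>",
    "api_key": "<your-api-key>",
}

fluency = FluencyEvaluator(model_config=model_config, threshold=3)

# Single-response mode: no query argument, unlike 1.0.0b2.
result = fluency(response="The capital of Japan is Tokyo.")

# Conversation mode: turns live under the "messages" key.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the capital of Japan?"},
        {"role": "assistant", "content": "The capital of Japan is Tokyo."},
    ]
}
conv_result = fluency(conversation=conversation)

Per the note in the new docstring, the output carries the new fluency key while keeping the legacy gpt_fluency key for backwards compatibility.
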
azure/ai/evaluation/_evaluators/_fluency/fluency.prompty

@@ -3,14 +3,9 @@ name: Fluency
 description: Evaluates fluency score for QA scenario
 model:
   api: chat
-  configuration:
-    type: azure_openai
-    azure_deployment: ${env:AZURE_DEPLOYMENT}
-    api_key: ${env:AZURE_OPENAI_API_KEY}
-    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
-    max_tokens: 1
+    max_tokens: 800
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -18,44 +13,74 @@ model:
     type: text
 
 inputs:
-  query:
-    type: string
   response:
     type: string
 
 ---
 system:
-You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. You should return a single integer value between 1 to 5 representing the evaluation metric. You will include no other text or information.
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include a RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
 user:
-Fluency measures the quality of individual sentences in the answer, and whether they are well-written and grammatically correct. Consider the quality of individual sentences when evaluating fluency. Given the question and answer, score the fluency of the answer between one to five stars using the following rating scale:
-One star: the answer completely lacks fluency
-Two stars: the answer mostly lacks fluency
-Three stars: the answer is partially fluent
-Four stars: the answer is mostly fluent
-Five stars: the answer has perfect fluency
-
-This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
-
-question: What did you have for breakfast today?
-answer: Breakfast today, me eating cereal and orange juice very good.
-stars: 1
-
-question: How do you feel when you travel alone?
-answer: Alone travel, nervous, but excited also. I feel adventure and like its time.
-stars: 2
-
-question: When was the last time you went on a family vacation?
-answer: Last family vacation, it took place in last summer. We traveled to a beach destination, very fun.
-stars: 3
-
-question: What is your favorite thing about your job?
-answer: My favorite aspect of my job is the chance to interact with diverse people. I am constantly learning from their experiences and stories.
-stars: 4
-
-question: Can you describe your morning routine?
-answer: Every morning, I wake up at 6 am, drink a glass of water, and do some light stretching. After that, I take a shower and get dressed for work. Then, I have a healthy breakfast, usually consisting of oatmeal and fruits, before leaving the house around 7:30 am.
-stars: 5
-
-question: {{query}}
-answer: {{response}}
-stars:
+# Definition
+**Fluency** refers to the effectiveness and clarity of written communication, focusing on grammatical accuracy, vocabulary range, sentence complexity, coherence, and overall readability. It assesses how smoothly ideas are conveyed and how easily the text can be understood by the reader.
+
+# Ratings
+## [Fluency: 1] (Emergent Fluency)
+**Definition:** The response shows minimal command of the language. It contains pervasive grammatical errors, extremely limited vocabulary, and fragmented or incoherent sentences. The message is largely incomprehensible, making understanding very difficult.
+
+**Examples:**
+**Response:** Free time I. Go park. Not fun. Alone.
+
+**Response:** Like food pizza. Good cheese eat.
+
+## [Fluency: 2] (Basic Fluency)
+**Definition:** The response communicates simple ideas but has frequent grammatical errors and limited vocabulary. Sentences are short and may be improperly constructed, leading to partial understanding. Repetition and awkward phrasing are common.
+
+**Examples:**
+**Response:** I like play soccer. I watch movie. It fun.
+
+**Response:** My town small. Many people. We have market.
+
+## [Fluency: 3] (Competent Fluency)
+**Definition:** The response clearly conveys ideas with occasional grammatical errors. Vocabulary is adequate but not extensive. Sentences are generally correct but may lack complexity and variety. The text is coherent, and the message is easily understood with minimal effort.
+
+**Examples:**
+**Response:** I'm planning to visit friends and maybe see a movie together.
+
+**Response:** I try to eat healthy food and exercise regularly by jogging.
+
+## [Fluency: 4] (Proficient Fluency)
+**Definition:** The response is well-articulated with good control of grammar and a varied vocabulary. Sentences are complex and well-structured, demonstrating coherence and cohesion. Minor errors may occur but do not affect overall understanding. The text flows smoothly, and ideas are connected logically.
+
+**Examples:**
+**Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
+
+**Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
+
+## [Fluency: 5] (Exceptional Fluency)
+**Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
+
+**Examples:**
+**Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
+
+**Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
+
+
+# Data
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
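
The rewritten prompty no longer asks for a bare digit (the old max_tokens: 1); it now requests a chain of thought, an explanation, and a score wrapped in <S0>/<S1>/<S2> tags, which is why max_tokens rises to 800. A rough sketch of pulling such tagged output apart; the package does its own parsing inside its prompty utilities, so this is illustrative only:

import re

def parse_tagged_output(llm_output: str):
    # Extract the content between <S0>, <S1>, and <S2> tag pairs.
    def tag(name: str):
        match = re.search(rf"<{name}>(.*?)</{name}>", llm_output, re.DOTALL)
        return match.group(1).strip() if match else None

    score = tag("S2")
    return {
        "thought_chain": tag("S0"),
        "explanation": tag("S1"),
        "score": float(score) if score else None,
    }

sample = "<S0>Let's think step by step: ...</S0><S1>Grammatically clean.</S1><S2>4</S2>"
print(parse_tagged_output(sample))  # score comes back as 4.0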