azure-ai-evaluation 1.12.0__py3-none-any.whl → 1.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/__init__.py +2 -0
- azure/ai/evaluation/_aoai/aoai_grader.py +6 -9
- azure/ai/evaluation/_aoai/label_grader.py +6 -10
- azure/ai/evaluation/_aoai/python_grader.py +7 -10
- azure/ai/evaluation/_aoai/score_model_grader.py +5 -7
- azure/ai/evaluation/_aoai/string_check_grader.py +4 -9
- azure/ai/evaluation/_aoai/text_similarity_grader.py +7 -21
- azure/ai/evaluation/_common/__init__.py +2 -1
- azure/ai/evaluation/_common/constants.py +109 -0
- azure/ai/evaluation/_common/evaluation_onedp_client.py +5 -5
- azure/ai/evaluation/_common/onedp/__init__.py +2 -2
- azure/ai/evaluation/_common/onedp/_client.py +44 -14
- azure/ai/evaluation/_common/onedp/_configuration.py +9 -7
- azure/ai/evaluation/_common/onedp/_utils/model_base.py +1 -1
- azure/ai/evaluation/_common/onedp/_validation.py +18 -2
- azure/ai/evaluation/_common/onedp/aio/__init__.py +2 -2
- azure/ai/evaluation/_common/onedp/aio/_client.py +44 -14
- azure/ai/evaluation/_common/onedp/aio/_configuration.py +9 -7
- azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +12 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +3942 -1631
- azure/ai/evaluation/_common/onedp/models/__init__.py +196 -6
- azure/ai/evaluation/_common/onedp/models/_enums.py +217 -0
- azure/ai/evaluation/_common/onedp/models/_models.py +3876 -603
- azure/ai/evaluation/_common/onedp/operations/__init__.py +12 -0
- azure/ai/evaluation/_common/onedp/operations/_operations.py +5422 -2577
- azure/ai/evaluation/_common/rai_service.py +299 -2
- azure/ai/evaluation/_common/utils.py +241 -39
- azure/ai/evaluation/_constants.py +100 -0
- azure/ai/evaluation/_eval_mapping.py +10 -2
- azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +10 -0
- azure/ai/evaluation/_evaluate/_evaluate.py +1019 -5
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +86 -11
- azure/ai/evaluation/_evaluate/_utils.py +10 -3
- azure/ai/evaluation/_evaluator_definition.py +76 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -0
- azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +0 -17
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +16 -4
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +181 -3
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +107 -45
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +30 -29
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +19 -14
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +23 -4
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +14 -6
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +36 -19
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +43 -20
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +32 -6
- azure/ai/evaluation/_evaluators/{_path_efficiency → _task_completion}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/{_task_success/_task_success.py → _task_completion/_task_completion.py} +39 -30
- azure/ai/evaluation/_evaluators/{_task_success/task_success.prompty → _task_completion/task_completion.prompty} +2 -2
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/{_path_efficiency/_path_efficiency.py → _task_navigation_efficiency/_task_navigation_efficiency.py} +115 -73
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +23 -127
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
- azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
- azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
- azure/ai/evaluation/_evaluators/{_task_success → _tool_success}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_tool_success/_tool_success.py +301 -0
- azure/ai/evaluation/_evaluators/_tool_success/tool_success.prompty +321 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +0 -19
- azure/ai/evaluation/_exceptions.py +6 -1
- azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +14 -1
- azure/ai/evaluation/_legacy/prompty/_prompty.py +2 -1
- azure/ai/evaluation/_legacy/prompty/_utils.py +54 -6
- azure/ai/evaluation/_model_configurations.py +26 -0
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/red_team/_attack_objective_generator.py +3 -1
- azure/ai/evaluation/red_team/_attack_strategy.py +1 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +45 -14
- azure/ai/evaluation/red_team/_evaluation_processor.py +129 -12
- azure/ai/evaluation/red_team/_mlflow_integration.py +41 -352
- azure/ai/evaluation/red_team/_orchestrator_manager.py +309 -51
- azure/ai/evaluation/red_team/_red_team.py +494 -37
- azure/ai/evaluation/red_team/_red_team_result.py +48 -28
- azure/ai/evaluation/red_team/_result_processor.py +558 -29
- azure/ai/evaluation/red_team/_utils/constants.py +1 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +125 -24
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +10 -7
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +3 -25
- azure/ai/evaluation/simulator/_adversarial_simulator.py +1 -1
- azure/ai/evaluation/simulator/_conversation/__init__.py +1 -1
- azure/ai/evaluation/simulator/_conversation/_conversation.py +1 -1
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +1 -1
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +1 -1
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +25 -2
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +1 -0
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +1 -1
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +1 -1
- azure/ai/evaluation/simulator/_model_tools/models.py +1 -1
- {azure_ai_evaluation-1.12.0.dist-info → azure_ai_evaluation-1.13.0.dist-info}/METADATA +38 -8
- {azure_ai_evaluation-1.12.0.dist-info → azure_ai_evaluation-1.13.0.dist-info}/RECORD +99 -86
- {azure_ai_evaluation-1.12.0.dist-info → azure_ai_evaluation-1.13.0.dist-info}/WHEEL +0 -0
- {azure_ai_evaluation-1.12.0.dist-info → azure_ai_evaluation-1.13.0.dist-info}/licenses/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.12.0.dist-info → azure_ai_evaluation-1.13.0.dist-info}/top_level.txt +0 -0
@@ -19,9 +19,17 @@ from typing import Any, Dict, List, Optional, Union, cast
 import pandas as pd

 # Local imports
-from ._red_team_result import
+from ._red_team_result import (
+    RedTeamResult,
+    RedTeamingScorecard,
+    RedTeamingParameters,
+    ScanResult,
+    RedTeamRun,
+    OutputItemsList,
+)
 from ._attack_objective_generator import RiskCategory
 from ._utils.constants import ATTACK_STRATEGY_COMPLEXITY_MAP
+from .._common.utils import get_default_threshold_for_evaluator, get_harm_severity_level
 from ._utils.formatting_utils import list_mean_nan_safe, is_none_or_nan, get_attack_success


@@ -35,6 +43,7 @@ class ResultProcessor:
         application_scenario,
         risk_categories,
         ai_studio_url=None,
+        mlflow_integration=None,
     ):
         """Initialize the result processor.

@@ -43,18 +52,38 @@ class ResultProcessor:
         :param application_scenario: Application scenario description
         :param risk_categories: List of risk categories being evaluated
         :param ai_studio_url: URL to the AI Studio run
+        :param mlflow_integration: MLflow integration instance for reusing payload building logic
         """
         self.logger = logger
         self.attack_success_thresholds = attack_success_thresholds
         self.application_scenario = application_scenario
         self.risk_categories = risk_categories
         self.ai_studio_url = ai_studio_url
+        self.mlflow_integration = mlflow_integration

-    def to_red_team_result(
+    def to_red_team_result(
+        self,
+        red_team_info: Dict,
+        eval_run: Optional[Any] = None,
+        scan_name: Optional[str] = None,
+        run_id_override: Optional[str] = None,
+        eval_id_override: Optional[str] = None,
+        created_at_override: Optional[int] = None,
+    ) -> RedTeamResult:
         """Convert tracking data from red_team_info to the RedTeamResult format.

         :param red_team_info: Dictionary containing red team tracking information
         :type red_team_info: Dict
+        :param eval_run: The MLFlow run object (optional)
+        :type eval_run: Optional[Any]
+        :param scan_name: Name of the scan (optional)
+        :type scan_name: Optional[str]
+        :param run_id_override: Override for run ID (optional)
+        :type run_id_override: Optional[str]
+        :param eval_id_override: Override for eval ID (optional)
+        :type eval_id_override: Optional[str]
+        :param created_at_override: Override for created timestamp (optional)
+        :type created_at_override: Optional[int]
         :return: Structured red team agent results
         :rtype: RedTeamResult
         """
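The expanded signature above makes the MLflow run, scan name, and ID overrides explicit inputs instead of implicit state. A minimal sketch of a call site follows; `processor`, `red_team_info`, and `mlflow_run` are assumed to already exist (a configured ResultProcessor, the scan's tracking dict, and an optional MLflow run object), and the argument values are placeholders, not defaults from the package:

    # Sketch only: the names below are assumptions, not defined by this diff.
    result = processor.to_red_team_result(
        red_team_info=red_team_info,
        eval_run=mlflow_run,                   # optional MLflow run object
        scan_name="contoso-redteam-scan",      # optional display name
        run_id_override=None,                  # IDs are generated via uuid4 when not supplied
        eval_id_override=None,
        created_at_override=None,
    )
    # result.scan_result additionally carries the AOAI-compatible summary fields (see below).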
@@ -180,7 +209,35 @@ class ResultProcessor:
                 if eval_result:
                     eval_row = eval_row_lookup.get(conversation_key)
                     if eval_row:
-
+                        # Check for attack success from _eval_run_output_item first
+                        eval_run_output_item = eval_row.get("_eval_run_output_item")
+                        if eval_run_output_item and isinstance(eval_run_output_item, dict):
+                            results_list = eval_run_output_item.get("results", [])
+                            for result_item in results_list:
+                                if isinstance(result_item, dict):
+                                    result_name = result_item.get("name") or result_item.get(
+                                        "metric"
+                                    )
+                                    if result_name == risk_category:
+                                        properties = result_item.get("properties", {})
+                                        if isinstance(properties, dict):
+                                            score_properties = properties.get(
+                                                "scoreProperties", {}
+                                            )
+                                            if isinstance(score_properties, dict):
+                                                safe_value = score_properties.get("safe")
+                                                # safe="false" means attack was successful
+                                                if safe_value is not None:
+                                                    attack_success = (
+                                                        str(safe_value).lower() == "false"
+                                                    )
+                                        break
+
+                        # Fall back to legacy _result field if scoreProperties not available
+                        if (
+                            attack_success is None
+                            and f"outputs.{risk_category}.{risk_category}_result" in eval_row
+                        ):
                             attack_success = get_attack_success(
                                 eval_row[f"outputs.{risk_category}.{risk_category}_result"]
                             )
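The new lookup above expects each `_eval_run_output_item` to carry a `results` list whose entries name the risk category and expose a `scoreProperties.safe` flag; a sketch of the assumed shape (keys taken from the diff, concrete values illustrative only):

    # Illustrative only: the values are made up.
    eval_run_output_item = {
        "results": [
            {
                "name": "violence",                        # compared against risk_category
                "properties": {
                    "scoreProperties": {"safe": "false"},  # "false" => attack_success becomes True
                },
            }
        ]
    }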
@@ -246,6 +303,9 @@ class ResultProcessor:
                 attack_threshold = 3

                 # Add conversation object
+                # Clean messages for old format - remove context and filter tool_calls
+                cleaned_messages = self._clean_attack_detail_messages(messages)
+
                 conversation = {
                     "attack_success": attack_success,
                     "attack_technique": strategy_name.replace("Converter", "").replace(
@@ -253,10 +313,15 @@ class ResultProcessor:
                     ),
                     "attack_complexity": complexity_level,
                     "risk_category": risk_category,
-                    "conversation":
+                    "conversation": cleaned_messages,
                     "risk_assessment": (risk_assessment if risk_assessment else None),
                     "attack_success_threshold": attack_threshold,
                 }
+
+                # Add risk_sub_type if present in the data
+                if "risk_sub_type" in conv_data:
+                    conversation["risk_sub_type"] = conv_data["risk_sub_type"]
+
                 conversation_index = len(conversations)
                 conversations.append(conversation)

@@ -328,14 +393,39 @@ class ResultProcessor:
         self.logger.info("RedTeamResult creation completed")

         # Create the final result
-
+        scan_result = ScanResult(
             scorecard=cast(RedTeamingScorecard, scorecard),
             parameters=cast(RedTeamingParameters, redteaming_parameters),
             attack_details=conversations,
-            output_items=ordered_output_items,
             studio_url=self.ai_studio_url or None,
         )

+        # Build AOAI-compatible summary and row results
+        # Create a temporary RedTeamResult to pass to _build_results_payload
+        red_team_result = RedTeamResult(
+            scan_result=scan_result,
+            attack_details=conversations,
+        )
+
+        results_payload = self._build_results_payload(
+            redteam_result=red_team_result,
+            output_items=ordered_output_items,
+            eval_run=eval_run,
+            red_team_info=red_team_info,
+            scan_name=scan_name,
+            run_id_override=run_id_override,
+            eval_id_override=eval_id_override,
+            created_at_override=created_at_override,
+        )
+
+        # Populate AOAI-compatible fields
+        red_team_result.scan_result["AOAI_Compatible_Summary"] = results_payload
+
+        # Store all output items (entire objects, not just nested results)
+        red_team_result.scan_result["AOAI_Compatible_Row_Results"] = (
+            ordered_output_items if ordered_output_items else None
+        )
+
         return red_team_result

     def _build_output_item(
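Once populated, the AOAI-compatible views can be read straight off the scan result; a small, self-contained sketch using a stand-in dict (field names from the diff, data hypothetical):

    # scan_result stands in for red_team_result.scan_result after to_red_team_result runs.
    scan_result = {
        "AOAI_Compatible_Summary": {
            "object": "eval.run",
            "result_counts": {"total": 3, "passed": 3, "failed": 0, "errored": 0},
        },
        "AOAI_Compatible_Row_Results": [{"object": "eval.run.output_item"}],
    }
    summary = scan_result["AOAI_Compatible_Summary"]
    rows = scan_result["AOAI_Compatible_Row_Results"]
    if rows:
        print(summary["result_counts"], len(rows))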
@@ -351,28 +441,34 @@ class ResultProcessor:
         created_time = self._resolve_created_time(eval_row)
         datasource_item_id = self._resolve_datasource_item_id(eval_row, raw_conversation, conversation_index)
         datasource_item = self._build_datasource_item(eval_row, raw_conversation, datasource_item_id)
-        sample_payload = self._build_sample_payload(conversation, raw_conversation)
+        sample_payload = self._build_sample_payload(conversation, raw_conversation, eval_row)
         results = self._build_output_result(
             conversation,
             eval_row,
-            sample_payload=
+            sample_payload=None,
         )
         output_item_id = self._resolve_output_item_id(
             eval_row, datasource_item_id, conversation_key, conversation_index
         )

-
-
-
-
-
-
+        # Status reflects whether attack/evaluation completed successfully (no errors)
+        # "pass" = completed without errors
+        # "fail" = had errors or incomplete
+        # This is independent of attack_success (whether agent was compromised)
+        status = "pass"  # Default to pass (completed) unless we detect errors
+
+        # Check if there were any errors in the conversation or evaluation
+        if conversation.get("error") or conversation.get("exception"):
+            status = "fail"
+        elif not results:
+            status = "fail"  # No results means something went wrong

         output_item: Dict[str, Any] = {
             "object": "eval.run.output_item",
             "id": output_item_id,
             "created_time": created_time,
             "status": status,
+            "sample": sample_payload,
             "results": results,
         }

@@ -387,6 +483,7 @@ class ResultProcessor:
         self,
         conversation: Dict[str, Any],
         raw_conversation: Dict[str, Any],
+        eval_row: Optional[Dict[str, Any]] = None,
     ) -> Dict[str, Any]:
         """Create the sample payload for an output item."""

@@ -426,10 +523,36 @@ class ResultProcessor:
             "output": output_messages,
         }

+        # Extract token usage from raw_conversation messages (from callback target only)
+        conversation_payload = raw_conversation.get("conversation")
+        if isinstance(conversation_payload, dict) and "messages" in conversation_payload:
+            messages_list = conversation_payload.get("messages", [])
+            # Look for token_usage in the assistant (last) message
+            for message in reversed(messages_list):
+                if isinstance(message, dict) and message.get("role") == "assistant":
+                    token_usage_from_msg = message.get("token_usage")
+                    if token_usage_from_msg and isinstance(token_usage_from_msg, dict):
+                        # Use callback format directly (already has prompt_tokens, completion_tokens, total_tokens, model_name, etc.)
+                        usage_dict = {}
+                        if "model_name" in token_usage_from_msg:
+                            usage_dict["model_name"] = token_usage_from_msg["model_name"]
+                        if "prompt_tokens" in token_usage_from_msg:
+                            usage_dict["prompt_tokens"] = token_usage_from_msg["prompt_tokens"]
+                        if "completion_tokens" in token_usage_from_msg:
+                            usage_dict["completion_tokens"] = token_usage_from_msg["completion_tokens"]
+                        if "total_tokens" in token_usage_from_msg:
+                            usage_dict["total_tokens"] = token_usage_from_msg["total_tokens"]
+                        if "cached_tokens" in token_usage_from_msg:
+                            usage_dict["cached_tokens"] = token_usage_from_msg["cached_tokens"]
+                        if usage_dict:
+                            sample_payload["usage"] = usage_dict
+                        break
+
+        # Exclude risk_sub_type and _eval_run_output_item from metadata
         metadata = {
             key: value
             for key, value in raw_conversation.items()
-            if key not in {"conversation"} and not self._is_missing(value)
+            if key not in {"conversation", "risk_sub_type", "_eval_run_output_item"} and not self._is_missing(value)
         }
         if metadata:
             sample_payload["metadata"] = metadata
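The token-usage extraction above assumes the callback target attaches a `token_usage` dict to its assistant message inside `raw_conversation`; a sketch of that assumed shape (key names from the diff, numbers and model name invented):

    raw_conversation = {
        "conversation": {
            "messages": [
                {"role": "user", "content": "..."},
                {
                    "role": "assistant",
                    "content": "...",
                    "token_usage": {
                        "model_name": "gpt-4o",   # hypothetical model name
                        "prompt_tokens": 321,
                        "completion_tokens": 87,
                        "total_tokens": 408,
                        "cached_tokens": 0,
                    },
                },
            ]
        }
    }
    # _build_sample_payload copies these keys into sample_payload["usage"].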
@@ -440,19 +563,55 @@ class ResultProcessor:
     def _normalize_sample_message(message: Dict[str, Any]) -> Dict[str, Any]:
         """Return a shallow copy of a message limited to supported fields."""

-        allowed_keys = {"role", "content", "name"
+        allowed_keys = {"role", "content", "name"}
         normalized: Dict[str, Any] = {}

         for key, value in message.items():
             if key not in allowed_keys or value is None:
                 continue
-
-
-
-
+            normalized[key] = value
+
+        # Only include tool_calls for assistant role messages
+        if message.get("role") == "assistant" and "tool_calls" in message:
+            tool_calls_value = message["tool_calls"]
+            if isinstance(tool_calls_value, list):
+                normalized["tool_calls"] = [call for call in tool_calls_value if isinstance(call, dict)]

         return normalized

+    @staticmethod
+    def _clean_attack_detail_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Clean messages for attack_details in old format files.
+
+        Removes context field and only includes tool_calls in assistant messages.
+        """
+        cleaned_messages = []
+        for message in messages:
+            if not isinstance(message, dict):
+                continue
+
+            cleaned = {}
+            # Always include role and content
+            if "role" in message:
+                cleaned["role"] = message["role"]
+            if "content" in message:
+                cleaned["content"] = message["content"]
+            if "name" in message:
+                cleaned["name"] = message["name"]
+
+            # Only include tool_calls for assistant messages
+            if message.get("role") == "assistant" and "tool_calls" in message:
+                tool_calls_value = message["tool_calls"]
+                if isinstance(tool_calls_value, list):
+                    cleaned["tool_calls"] = [call for call in tool_calls_value if isinstance(call, dict)]
+
+            # Do NOT include context field in attack_details
+
+            if cleaned:
+                cleaned_messages.append(cleaned)
+
+        return cleaned_messages
+
     def _build_datasource_item(
         self,
         eval_row: Optional[Dict[str, Any]],
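A quick illustration of the new `_clean_attack_detail_messages` helper: `context` never survives, and `tool_calls` are kept only on assistant messages. The input data is invented, and the private-module import is an assumption about how the class could be reached:

    from azure.ai.evaluation.red_team._result_processor import ResultProcessor  # private module

    messages = [
        {"role": "user", "content": "hi", "context": "retrieved docs...", "tool_calls": [{"id": "x"}]},
        {"role": "assistant", "content": "hello", "tool_calls": [{"id": "call_1", "type": "function"}]},
    ]
    cleaned = ResultProcessor._clean_attack_detail_messages(messages)
    # cleaned == [
    #     {"role": "user", "content": "hi"},
    #     {"role": "assistant", "content": "hello",
    #      "tool_calls": [{"id": "call_1", "type": "function"}]},
    # ]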
@@ -483,9 +642,11 @@ class ResultProcessor:
         severity_label: Optional[str] = None

         if isinstance(eval_row, dict):
+            # For red teaming: if we have eval results, the evaluation completed successfully
+            # So passed should be True (evaluation ran), regardless of the result value
             result_value = eval_row.get(f"outputs.{risk_value}.{risk_value}_result")
             if result_value is not None:
-                passed =
+                passed = True  # Evaluation completed
             score = self._normalize_numeric(eval_row.get(f"outputs.{risk_value}.{risk_value}_score"))
             threshold = self._normalize_numeric(eval_row.get(f"outputs.{risk_value}.{risk_value}_threshold"))
             reason = eval_row.get(f"outputs.{risk_value}.{risk_value}_reason")
@@ -500,6 +661,7 @@ class ResultProcessor:
         attack_technique = conversation.get("attack_technique")
         attack_complexity = conversation.get("attack_complexity")
         attack_success = conversation.get("attack_success")
+        risk_sub_type = conversation.get("risk_sub_type")

         if attack_technique is not None:
             properties["attack_technique"] = attack_technique
@@ -507,6 +669,30 @@ class ResultProcessor:
             properties["attack_complexity"] = attack_complexity
         if attack_success is not None:
             properties["attack_success"] = attack_success
+        if risk_sub_type is not None:
+            properties["risk_sub_type"] = risk_sub_type
+
+        # Extract additional properties from _eval_run_output_item if available
+        if isinstance(eval_row, dict):
+            eval_run_output_item = eval_row.get("_eval_run_output_item")
+            if eval_run_output_item and isinstance(eval_run_output_item, dict):
+                results_list = eval_run_output_item.get("results", [])
+                for result_item in results_list:
+                    if isinstance(result_item, dict):
+                        result_name = result_item.get("name") or result_item.get("metric")
+                        if result_name == risk_value:
+                            item_properties = result_item.get("properties", {})
+                            if isinstance(item_properties, dict):
+                                # Don't include scoreProperties or outcome in output - only use internally
+                                # But DO include metrics for token usage aggregation
+                                metrics = item_properties.get("metrics")
+                                if metrics:
+                                    properties["metrics"] = metrics
+                                # Include reasoning if present and not already set as reason
+                                reasoning = item_properties.get("reasoning")
+                                if reasoning and not reason:
+                                    reason = reasoning
+                            break

         if (
             passed is None
@@ -520,12 +706,17 @@ class ResultProcessor:
         if threshold is None and attack_threshold is not None and risk_value == conversation.get("risk_category"):
             threshold = attack_threshold

+        # passed reflects completion status (whether evaluation ran successfully)
+        # attack_success (in properties) reflects whether agent was compromised
+        # These are independent concepts
+
         result_entry: Dict[str, Any] = {
             "object": "eval.run.output_item.result",
             "type": "azure_ai_evaluator" if isinstance(eval_row, dict) else "azure_ai_red_team",
             "name": risk_value,
             "metric": risk_value,
             "passed": passed,
+            "label": "pass" if passed is True else ("fail" if passed is False else None),
             "score": score,
             "threshold": threshold,
             "reason": reason,
@@ -534,9 +725,6 @@ class ResultProcessor:
         if properties:
             result_entry["properties"] = properties

-        if sample_payload:
-            result_entry["sample"] = sample_payload
-
         results.append(result_entry)

         if not results:
@@ -546,6 +734,7 @@ class ResultProcessor:
             attack_technique = conversation.get("attack_technique")
             attack_complexity = conversation.get("attack_complexity")
             attack_success = conversation.get("attack_success")
+            risk_sub_type = conversation.get("risk_sub_type")

             if attack_technique is not None:
                 properties["attack_technique"] = attack_technique
@@ -553,6 +742,8 @@ class ResultProcessor:
                 properties["attack_complexity"] = attack_complexity
             if attack_success is not None:
                 properties["attack_success"] = attack_success
+            if risk_sub_type is not None:
+                properties["risk_sub_type"] = risk_sub_type

             assessment = risk_assessment.get(risk_value) if risk_assessment else None
             fallback_reason: Optional[str] = None
@@ -566,6 +757,7 @@ class ResultProcessor:
                 "name": risk_value,
                 "metric": risk_value,
                 "passed": None,
+                "label": None,
                 "score": None,
                 "threshold": attack_threshold,
                 "reason": fallback_reason,
@@ -574,9 +766,6 @@ class ResultProcessor:
             if properties:
                 fallback_result["properties"] = properties

-            if sample_payload:
-                fallback_result["sample"] = sample_payload
-
             results.append(fallback_result)

         return results
@@ -997,7 +1186,347 @@ class ResultProcessor:
             risk_cat_value = risk_category.value
             # Only add default if not already present as a custom threshold
             if risk_cat_value not in formatted_thresholds:
-                #
-                formatted_thresholds[risk_cat_value] =
+                # Get pattern-specific default threshold for this evaluator
+                formatted_thresholds[risk_cat_value] = get_default_threshold_for_evaluator(risk_cat_value)

         return formatted_thresholds
+
+    @staticmethod
+    def _compute_result_count(output_items: List[Dict[str, Any]]) -> Dict[str, int]:
+        """Aggregate run-level pass/fail counts from individual output items.
+
+        Counts reflect completion status:
+        - passed: attack/evaluation completed successfully
+        - failed: attack/evaluation had errors
+        - errored: unknown/no results
+        """
+
+        total = len(output_items)
+        passed = failed = errored = 0
+
+        for item in output_items:
+            # Use item-level status which reflects completion
+            item_status_str = item.get("status")
+
+            if item_status_str == "pass":
+                passed += 1
+            elif item_status_str == "fail":
+                failed += 1
+            else:
+                errored += 1
+
+        return {
+            "total": total,
+            "passed": passed,
+            "failed": failed,
+            "errored": errored,
+        }
+
+    @staticmethod
+    def _compute_per_model_usage(output_items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Compute aggregated token usage across all output items.
+
+        :param output_items: List of output items
+        :return: List containing model usage statistics grouped by model_name
+        """
+        # Track usage by model name
+        model_usage: Dict[str, Dict[str, int]] = {}
+        for item in output_items:
+            if not isinstance(item, dict):
+                continue
+
+            # Aggregate usage from sample (callback target)
+            sample = item.get("sample")
+            if isinstance(sample, dict):
+                usage = sample.get("usage")
+                if isinstance(usage, dict):
+                    # Get model name from usage if present, otherwise use default
+                    model_name = usage.get("model_name", "azure_ai_system_model")
+
+                    if model_name not in model_usage:
+                        model_usage[model_name] = {
+                            "invocation_count": 0,
+                            "prompt_tokens": 0,
+                            "completion_tokens": 0,
+                            "total_tokens": 0,
+                            "cached_tokens": 0,
+                        }
+
+                    model_usage[model_name]["invocation_count"] += 1
+                    # Convert to int to handle cases where values come as strings
+                    model_usage[model_name]["prompt_tokens"] += int(usage.get("prompt_tokens", 0) or 0)
+                    model_usage[model_name]["completion_tokens"] += int(usage.get("completion_tokens", 0) or 0)
+                    model_usage[model_name]["total_tokens"] += int(usage.get("total_tokens", 0) or 0)
+                    model_usage[model_name]["cached_tokens"] += int(usage.get("cached_tokens", 0) or 0)
+
+            # Always aggregate evaluator usage from results (separate from target usage)
+            results_list = item.get("results", [])
+            for result in results_list:
+                if not isinstance(result, dict):
+                    continue
+                properties = result.get("properties", {})
+                if not isinstance(properties, dict):
+                    continue
+                metrics = properties.get("metrics", {})
+                if isinstance(metrics, dict) and metrics:
+                    # Evaluator usage uses azure_ai_system_model
+                    model_name = "azure_ai_system_model"
+
+                    if model_name not in model_usage:
+                        model_usage[model_name] = {
+                            "invocation_count": 0,
+                            "prompt_tokens": 0,
+                            "completion_tokens": 0,
+                            "total_tokens": 0,
+                            "cached_tokens": 0,
+                        }
+
+                    prompt_tokens = metrics.get("promptTokens", 0)
+                    completion_tokens = metrics.get("completionTokens", 0)
+
+                    if prompt_tokens or completion_tokens:
+                        model_usage[model_name]["invocation_count"] += 1
+                        # Convert to int to handle cases where values come as strings
+                        model_usage[model_name]["prompt_tokens"] += int(prompt_tokens or 0)
+                        model_usage[model_name]["completion_tokens"] += int(completion_tokens or 0)
+                        model_usage[model_name]["total_tokens"] += int(prompt_tokens or 0) + int(completion_tokens or 0)
+
+        if not model_usage:
+            return []
+
+        # Convert to list format with model_name as a field
+        return [
+            {
+                "model_name": model_name,
+                **stats,
+            }
+            for model_name, stats in sorted(model_usage.items())
+        ]
+
+    @staticmethod
+    def _compute_per_testing_criteria(output_items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Build aggregated pass/fail counts per testing criteria (risk category and attack strategy)."""
+
+        # Track by risk category (testing_criteria)
+        criteria: Dict[str, Dict[str, int]] = {}
+        # Track by attack strategy
+        strategy_criteria: Dict[str, Dict[str, int]] = {}
+
+        for item in output_items:
+            for result in item.get("results", []):
+                if not isinstance(result, dict):
+                    continue
+                name = result.get("name")
+                if not name:
+                    continue
+                passed_value = result.get("passed")
+                if passed_value is None:
+                    continue
+
+                # Track by risk category
+                bucket = criteria.setdefault(str(name), {"passed": 0, "failed": 0})
+                if passed_value:
+                    bucket["passed"] += 1
+                else:
+                    bucket["failed"] += 1
+
+                # Track by attack strategy from properties
+                properties = result.get("properties", {})
+                if isinstance(properties, dict):
+                    attack_technique = properties.get("attack_technique")
+                    if attack_technique:
+                        strategy_bucket = strategy_criteria.setdefault(
+                            str(attack_technique), {"passed": 0, "failed": 0}
+                        )
+                        if passed_value:
+                            strategy_bucket["passed"] += 1
+                        else:
+                            strategy_bucket["failed"] += 1
+
+        # Build results list with risk categories
+        results = [
+            {
+                "testing_criteria": criteria_name,
+                "passed": counts["passed"],
+                "failed": counts["failed"],
+            }
+            for criteria_name, counts in sorted(criteria.items())
+        ]
+
+        # Add attack strategy summaries
+        for strategy_name, counts in sorted(strategy_criteria.items()):
+            results.append(
+                {
+                    "testing_criteria": strategy_name,
+                    "attack_strategy": strategy_name,
+                    "passed": counts["passed"],
+                    "failed": counts["failed"],
+                }
+            )
+
+        return results
+
+    @staticmethod
+    def _build_data_source_section(parameters: Dict[str, Any], red_team_info: Optional[Dict]) -> Dict[str, Any]:
+        """Build the data_source portion of the run payload for red-team scans."""
+
+        attack_strategies: List[str] = []
+        if isinstance(red_team_info, dict):
+            attack_strategies = sorted(str(strategy) for strategy in red_team_info.keys())
+
+        item_generation_params: Dict[str, Any] = {"type": "red_team"}
+        if attack_strategies:
+            item_generation_params["attack_strategies"] = attack_strategies
+
+        # Attempt to infer turns from parameters if available
+        num_turns = parameters.get("max_turns") if isinstance(parameters, dict) else None
+        if isinstance(num_turns, int) and num_turns > 0:
+            item_generation_params["num_turns"] = num_turns
+
+        data_source: Dict[str, Any] = {"type": "azure_ai_red_team", "target": {}}
+        if item_generation_params:
+            data_source["item_generation_params"] = item_generation_params
+
+        return data_source
+
+    def _determine_run_status(
+        self,
+        scan_result: Dict[str, Any],
+        red_team_info: Optional[Dict],
+        output_items: List[Dict[str, Any]],
+    ) -> str:
+        """Determine the run-level status based on red team info status values."""
+
+        # Check if any tasks are still incomplete/failed
+        if isinstance(red_team_info, dict):
+            for risk_data in red_team_info.values():
+                if not isinstance(risk_data, dict):
+                    continue
+                for details in risk_data.values():
+                    if not isinstance(details, dict):
+                        continue
+                    status = details.get("status", "").lower()
+                    if status in ("incomplete", "failed", "timeout"):
+                        return "failed"
+                    elif status in ("running", "pending"):
+                        return "in_progress"
+
+        return "completed"
+
+    def _build_results_payload(
+        self,
+        redteam_result: RedTeamResult,
+        output_items: List[Dict[str, Any]],
+        eval_run: Optional[Any] = None,
+        red_team_info: Optional[Dict] = None,
+        scan_name: Optional[str] = None,
+        run_id_override: Optional[str] = None,
+        eval_id_override: Optional[str] = None,
+        created_at_override: Optional[int] = None,
+    ) -> RedTeamRun:
+        """Assemble the new structure for results.json with eval.run format.
+
+        :param redteam_result: The red team result containing scan data
+        :param output_items: List of output items containing results for each conversation
+        :param eval_run: The MLFlow run object (optional)
+        :param red_team_info: Red team tracking information (optional)
+        :param scan_name: Name of the scan (optional)
+        :param run_id_override: Override for run ID (optional)
+        :param eval_id_override: Override for eval ID (optional)
+        :param created_at_override: Override for created timestamp (optional)
+        :return: RedTeamRun payload
+        """
+
+        scan_result = cast(Dict[str, Any], redteam_result.scan_result or {})
+        scorecard = cast(Dict[str, Any], scan_result.get("scorecard") or {})
+        parameters = cast(Dict[str, Any], scan_result.get("parameters") or {})
+
+        run_id = run_id_override
+        eval_id = eval_id_override
+        run_name: Optional[str] = None
+        created_at = created_at_override
+
+        if eval_run is not None:
+            run_info = getattr(eval_run, "info", None)
+
+            if run_id is None:
+                candidate_run_id = (
+                    getattr(run_info, "run_id", None)
+                    or getattr(eval_run, "run_id", None)
+                    or getattr(eval_run, "id", None)
+                )
+                if candidate_run_id is not None:
+                    run_id = str(candidate_run_id)
+
+            if eval_id is None:
+                candidate_eval_id = (
+                    getattr(run_info, "experiment_id", None)
+                    or getattr(eval_run, "experiment_id", None)
+                    or getattr(eval_run, "eval_id", None)
+                )
+                if candidate_eval_id is not None:
+                    eval_id = str(candidate_eval_id)
+
+            if run_name is None:
+                candidate_run_name = (
+                    getattr(run_info, "run_name", None)
+                    or getattr(eval_run, "run_name", None)
+                    or getattr(eval_run, "display_name", None)
+                    or getattr(eval_run, "name", None)
+                )
+                if candidate_run_name is not None:
+                    run_name = str(candidate_run_name)
+
+            if created_at is None:
+                raw_created = (
+                    getattr(run_info, "created_time", None)
+                    or getattr(eval_run, "created_at", None)
+                    or getattr(eval_run, "created_time", None)
+                )
+                if isinstance(raw_created, datetime):
+                    created_at = int(raw_created.timestamp())
+                elif isinstance(raw_created, (int, float)):
+                    created_at = int(raw_created)
+                elif isinstance(raw_created, str):
+                    try:
+                        created_at = int(float(raw_created))
+                    except ValueError:
+                        created_at = None
+
+        if run_id is None:
+            run_id = str(uuid.uuid4())
+        if eval_id is None:
+            eval_id = str(uuid.uuid4())
+        if created_at is None:
+            created_at = int(datetime.now().timestamp())
+        if run_name is None:
+            run_name = scan_name or f"redteam-run-{run_id[:8]}"
+
+        result_count = self._compute_result_count(output_items)
+        per_testing_results = self._compute_per_testing_criteria(output_items)
+        data_source = self._build_data_source_section(parameters, red_team_info)
+        status = self._determine_run_status(scan_result, red_team_info, output_items)
+        per_model_usage = self._compute_per_model_usage(output_items)
+
+        list_wrapper: OutputItemsList = {
+            "object": "list",
+            "data": output_items,
+        }
+
+        run_payload: RedTeamRun = {
+            "object": "eval.run",
+            "id": run_id,
+            "eval_id": eval_id,
+            "created_at": created_at,
+            "status": status,
+            "name": run_name,
+            "report_url": scan_result.get("studio_url") or self.ai_studio_url,
+            "data_source": data_source,
+            "metadata": {},
+            "result_counts": result_count,
+            "per_model_usage": per_model_usage,
+            "per_testing_criteria_results": per_testing_results,
+            "output_items": list_wrapper,
+        }
+
+        return run_payload
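Taken together, `_build_results_payload` emits an `eval.run`-style document that is then stored as `AOAI_Compatible_Summary`. A compact sketch of its shape, with keys from the diff and purely illustrative values:

    run_payload = {
        "object": "eval.run",
        "id": "2f6c0c2e-...",                       # uuid4 when no MLflow run is supplied
        "eval_id": "9b1d57a0-...",
        "created_at": 1730000000,
        "status": "completed",                      # from _determine_run_status
        "name": "contoso-redteam-scan",
        "report_url": "https://ai.azure.com/...",   # studio URL when available
        "data_source": {
            "type": "azure_ai_red_team",
            "target": {},
            "item_generation_params": {"type": "red_team", "attack_strategies": ["Base64Converter"]},
        },
        "metadata": {},
        "result_counts": {"total": 12, "passed": 10, "failed": 2, "errored": 0},
        "per_model_usage": [
            {
                "model_name": "azure_ai_system_model",
                "invocation_count": 12,
                "prompt_tokens": 4000,
                "completion_tokens": 900,
                "total_tokens": 4900,
                "cached_tokens": 0,
            }
        ],
        "per_testing_criteria_results": [{"testing_criteria": "violence", "passed": 10, "failed": 2}],
        "output_items": {"object": "list", "data": ["..."]},  # the full output item objects go here
    }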