circuit-breaker-labs 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. circuit_breaker_labs/api/evaluations/multi_turn_evaluate_system_prompt_post.py +257 -0
  2. circuit_breaker_labs/api/evaluations/multiturn_evaluate_openai_fine_tune_post.py +300 -0
  3. circuit_breaker_labs/api/evaluations/{evaluate_openai_fine_tune_post.py → single_turn_evaluate_openai_fine_tune_post.py} +31 -31
  4. circuit_breaker_labs/api/evaluations/{evaluate_system_prompt_post.py → singleturn_evaluate_system_prompt_post.py} +31 -31
  5. circuit_breaker_labs/models/__init__.py +24 -8
  6. circuit_breaker_labs/models/message.py +71 -0
  7. circuit_breaker_labs/models/multi_turn_evaluate_open_ai_finetune_request.py +135 -0
  8. circuit_breaker_labs/models/multi_turn_evaluate_system_prompt_request.py +144 -0
  9. circuit_breaker_labs/models/multi_turn_failed_test_result.py +85 -0
  10. circuit_breaker_labs/models/multi_turn_run_tests_response.py +92 -0
  11. circuit_breaker_labs/models/multi_turn_test_type.py +9 -0
  12. circuit_breaker_labs/models/role.py +10 -0
  13. circuit_breaker_labs/models/single_turn_evaluate_open_ai_finetune_request.py +126 -0
  14. circuit_breaker_labs/models/{evaluate_system_prompt_request.py → single_turn_evaluate_system_prompt_request.py} +47 -6
  15. circuit_breaker_labs/models/{failed_test_result.py → single_turn_failed_test_result.py} +5 -5
  16. circuit_breaker_labs/models/{run_tests_response.py → single_turn_run_tests_response.py} +10 -10
  17. circuit_breaker_labs/models/test_case_group.py +8 -0
  18. {circuit_breaker_labs-1.0.5.dist-info → circuit_breaker_labs-1.0.7.dist-info}/METADATA +53 -21
  19. {circuit_breaker_labs-1.0.5.dist-info → circuit_breaker_labs-1.0.7.dist-info}/RECORD +20 -10
  20. {circuit_breaker_labs-1.0.5.dist-info → circuit_breaker_labs-1.0.7.dist-info}/WHEEL +2 -2
  21. circuit_breaker_labs/models/evaluate_open_ai_finetune_request.py +0 -85
circuit_breaker_labs/models/multi_turn_evaluate_open_ai_finetune_request.py
@@ -0,0 +1,135 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from typing import Any, TypeVar, cast
+
+ from attrs import define as _attrs_define
+ from attrs import field as _attrs_field
+
+ from ..models.multi_turn_test_type import MultiTurnTestType
+ from ..models.test_case_group import TestCaseGroup
+ from ..types import UNSET, Unset
+
+ T = TypeVar("T", bound="MultiTurnEvaluateOpenAiFinetuneRequest")
+
+
+ @_attrs_define
+ class MultiTurnEvaluateOpenAiFinetuneRequest:
+     """
+     Attributes:
+         threshold (float): Responses below this safety score threshold will fail
+         max_turns (int): Maximum number of turns in the conversation. Should be a multiple of two.
+         test_types (list[MultiTurnTestType]): One or more multi-turn-test types
+         model_name (str): Fully qualified name of the model to be tested.
+         test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+             ideation tests
+     """
+
+     threshold: float
+     max_turns: int
+     test_types: list[MultiTurnTestType]
+     model_name: str
+     test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+     additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+         threshold = self.threshold
+
+         max_turns = self.max_turns
+
+         test_types = []
+         for test_types_item_data in self.test_types:
+             test_types_item = test_types_item_data.value
+             test_types.append(test_types_item)
+
+         model_name = self.model_name
+
+         test_case_groups: list[str] | Unset = UNSET
+         if not isinstance(self.test_case_groups, Unset):
+             test_case_groups = []
+             for test_case_groups_item_data in self.test_case_groups:
+                 test_case_groups_item: str
+                 if isinstance(test_case_groups_item_data, TestCaseGroup):
+                     test_case_groups_item = test_case_groups_item_data.value
+                 else:
+                     test_case_groups_item = test_case_groups_item_data
+                 test_case_groups.append(test_case_groups_item)
+
+         field_dict: dict[str, Any] = {}
+         field_dict.update(self.additional_properties)
+         field_dict.update(
+             {
+                 "threshold": threshold,
+                 "max_turns": max_turns,
+                 "test_types": test_types,
+                 "model_name": model_name,
+             }
+         )
+         if test_case_groups is not UNSET:
+             field_dict["test_case_groups"] = test_case_groups
+
+         return field_dict
+
+     @classmethod
+     def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+         d = dict(src_dict)
+         threshold = d.pop("threshold")
+
+         max_turns = d.pop("max_turns")
+
+         test_types = []
+         _test_types = d.pop("test_types")
+         for test_types_item_data in _test_types:
+             test_types_item = MultiTurnTestType(test_types_item_data)
+
+             test_types.append(test_types_item)
+
+         model_name = d.pop("model_name")
+
+         _test_case_groups = d.pop("test_case_groups", UNSET)
+         test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+         if _test_case_groups is not UNSET:
+             test_case_groups = []
+             for test_case_groups_item_data in _test_case_groups:
+
+                 def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+                     try:
+                         if not isinstance(data, str):
+                             raise TypeError()
+                         test_case_groups_item_type_0 = TestCaseGroup(data)
+
+                         return test_case_groups_item_type_0
+                     except (TypeError, ValueError, AttributeError, KeyError):
+                         pass
+                     return cast(str | TestCaseGroup, data)
+
+                 test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+                 test_case_groups.append(test_case_groups_item)
+
+         multi_turn_evaluate_open_ai_finetune_request = cls(
+             threshold=threshold,
+             max_turns=max_turns,
+             test_types=test_types,
+             model_name=model_name,
+             test_case_groups=test_case_groups,
+         )
+
+         multi_turn_evaluate_open_ai_finetune_request.additional_properties = d
+         return multi_turn_evaluate_open_ai_finetune_request
+
+     @property
+     def additional_keys(self) -> list[str]:
+         return list(self.additional_properties.keys())
+
+     def __getitem__(self, key: str) -> Any:
+         return self.additional_properties[key]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         self.additional_properties[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self.additional_properties[key]
+
+     def __contains__(self, key: str) -> bool:
+         return key in self.additional_properties
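
For orientation, here is a minimal sketch of how the new multi-turn fine-tune request model might be constructed and serialized, based only on the class shown above. The fine-tune name is a placeholder, and the endpoint wrapper in multiturn_evaluate_openai_fine_tune_post.py is not part of this excerpt, so only the to_dict/from_dict round trip is illustrated:

from circuit_breaker_labs.models.multi_turn_evaluate_open_ai_finetune_request import (
    MultiTurnEvaluateOpenAiFinetuneRequest,
)
from circuit_breaker_labs.models.multi_turn_test_type import MultiTurnTestType

# "ft:gpt-4o-mini:acme::example" is a placeholder fine-tune identifier, not a real model.
request = MultiTurnEvaluateOpenAiFinetuneRequest(
    threshold=0.5,
    max_turns=6,  # the docstring asks for a multiple of two
    test_types=[MultiTurnTestType.USER_PERSONA],
    model_name="ft:gpt-4o-mini:acme::example",
)

payload = request.to_dict()
assert "test_case_groups" not in payload  # left UNSET, so it is omitted from the payload
restored = MultiTurnEvaluateOpenAiFinetuneRequest.from_dict(payload)
assert restored.test_types == [MultiTurnTestType.USER_PERSONA]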
circuit_breaker_labs/models/multi_turn_evaluate_system_prompt_request.py
@@ -0,0 +1,144 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from typing import Any, TypeVar, cast
+
+ from attrs import define as _attrs_define
+ from attrs import field as _attrs_field
+
+ from ..models.multi_turn_test_type import MultiTurnTestType
+ from ..models.test_case_group import TestCaseGroup
+ from ..types import UNSET, Unset
+
+ T = TypeVar("T", bound="MultiTurnEvaluateSystemPromptRequest")
+
+
+ @_attrs_define
+ class MultiTurnEvaluateSystemPromptRequest:
+     """
+     Attributes:
+         threshold (float): Responses below this safety score threshold will fail
+         max_turns (int): Maximum number of turns in the conversation. Should be a multiple of two.
+         test_types (list[MultiTurnTestType]): One or more multi-turn-test types
+         system_prompt (str): The system prompt to be evaluated
+         openrouter_model_name (str): Name of the model to be tested. Available models can be found at [Openrouter
+             Models](https://openrouter.ai/models)
+         test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+             ideation tests
+     """
+
+     threshold: float
+     max_turns: int
+     test_types: list[MultiTurnTestType]
+     system_prompt: str
+     openrouter_model_name: str
+     test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+     additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+         threshold = self.threshold
+
+         max_turns = self.max_turns
+
+         test_types = []
+         for test_types_item_data in self.test_types:
+             test_types_item = test_types_item_data.value
+             test_types.append(test_types_item)
+
+         system_prompt = self.system_prompt
+
+         openrouter_model_name = self.openrouter_model_name
+
+         test_case_groups: list[str] | Unset = UNSET
+         if not isinstance(self.test_case_groups, Unset):
+             test_case_groups = []
+             for test_case_groups_item_data in self.test_case_groups:
+                 test_case_groups_item: str
+                 if isinstance(test_case_groups_item_data, TestCaseGroup):
+                     test_case_groups_item = test_case_groups_item_data.value
+                 else:
+                     test_case_groups_item = test_case_groups_item_data
+                 test_case_groups.append(test_case_groups_item)
+
+         field_dict: dict[str, Any] = {}
+         field_dict.update(self.additional_properties)
+         field_dict.update(
+             {
+                 "threshold": threshold,
+                 "max_turns": max_turns,
+                 "test_types": test_types,
+                 "system_prompt": system_prompt,
+                 "openrouter_model_name": openrouter_model_name,
+             }
+         )
+         if test_case_groups is not UNSET:
+             field_dict["test_case_groups"] = test_case_groups
+
+         return field_dict
+
+     @classmethod
+     def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+         d = dict(src_dict)
+         threshold = d.pop("threshold")
+
+         max_turns = d.pop("max_turns")
+
+         test_types = []
+         _test_types = d.pop("test_types")
+         for test_types_item_data in _test_types:
+             test_types_item = MultiTurnTestType(test_types_item_data)
+
+             test_types.append(test_types_item)
+
+         system_prompt = d.pop("system_prompt")
+
+         openrouter_model_name = d.pop("openrouter_model_name")
+
+         _test_case_groups = d.pop("test_case_groups", UNSET)
+         test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+         if _test_case_groups is not UNSET:
+             test_case_groups = []
+             for test_case_groups_item_data in _test_case_groups:
+
+                 def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+                     try:
+                         if not isinstance(data, str):
+                             raise TypeError()
+                         test_case_groups_item_type_0 = TestCaseGroup(data)
+
+                         return test_case_groups_item_type_0
+                     except (TypeError, ValueError, AttributeError, KeyError):
+                         pass
+                     return cast(str | TestCaseGroup, data)
+
+                 test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+                 test_case_groups.append(test_case_groups_item)
+
+         multi_turn_evaluate_system_prompt_request = cls(
+             threshold=threshold,
+             max_turns=max_turns,
+             test_types=test_types,
+             system_prompt=system_prompt,
+             openrouter_model_name=openrouter_model_name,
+             test_case_groups=test_case_groups,
+         )
+
+         multi_turn_evaluate_system_prompt_request.additional_properties = d
+         return multi_turn_evaluate_system_prompt_request
+
+     @property
+     def additional_keys(self) -> list[str]:
+         return list(self.additional_properties.keys())
+
+     def __getitem__(self, key: str) -> Any:
+         return self.additional_properties[key]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         self.additional_properties[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self.additional_properties[key]
+
+     def __contains__(self, key: str) -> bool:
+         return key in self.additional_properties
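
The system-prompt variant differs from the fine-tune request above mainly in targeting an OpenRouter-hosted model. A hedged sketch, assuming the model slug below is available on https://openrouter.ai/models (any valid slug would do):

from circuit_breaker_labs.models.multi_turn_evaluate_system_prompt_request import (
    MultiTurnEvaluateSystemPromptRequest,
)
from circuit_breaker_labs.models.multi_turn_test_type import MultiTurnTestType

request = MultiTurnEvaluateSystemPromptRequest(
    threshold=0.7,
    max_turns=4,
    test_types=[MultiTurnTestType.SEMANTIC_CHUNKS, MultiTurnTestType.USER_PERSONA],
    system_prompt="You are a supportive wellness assistant. Decline to give medical advice.",
    openrouter_model_name="openai/gpt-4o-mini",  # placeholder OpenRouter slug
)

# Enum members serialize to their string values in the request body.
assert request.to_dict()["test_types"] == ["semantic_chunks", "user_persona"]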
circuit_breaker_labs/models/multi_turn_failed_test_result.py
@@ -0,0 +1,85 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from typing import TYPE_CHECKING, Any, TypeVar
+
+ from attrs import define as _attrs_define
+ from attrs import field as _attrs_field
+
+ if TYPE_CHECKING:
+     from ..models.message import Message
+
+
+ T = TypeVar("T", bound="MultiTurnFailedTestResult")
+
+
+ @_attrs_define
+ class MultiTurnFailedTestResult:
+     """
+     Attributes:
+         min_safe_response_score (float): Minimum safety score across all responses in the conversation (0.0 = unsafe,
+             1.0 = safe)
+         conversation (list[Message]):
+     """
+
+     min_safe_response_score: float
+     conversation: list[Message]
+     additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+
+         min_safe_response_score = self.min_safe_response_score
+
+         conversation = []
+         for conversation_item_data in self.conversation:
+             conversation_item = conversation_item_data.to_dict()
+             conversation.append(conversation_item)
+
+         field_dict: dict[str, Any] = {}
+         field_dict.update(self.additional_properties)
+         field_dict.update(
+             {
+                 "min_safe_response_score": min_safe_response_score,
+                 "conversation": conversation,
+             }
+         )
+
+         return field_dict
+
+     @classmethod
+     def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+         from ..models.message import Message
+
+         d = dict(src_dict)
+         min_safe_response_score = d.pop("min_safe_response_score")
+
+         conversation = []
+         _conversation = d.pop("conversation")
+         for conversation_item_data in _conversation:
+             conversation_item = Message.from_dict(conversation_item_data)
+
+             conversation.append(conversation_item)
+
+         multi_turn_failed_test_result = cls(
+             min_safe_response_score=min_safe_response_score,
+             conversation=conversation,
+         )
+
+         multi_turn_failed_test_result.additional_properties = d
+         return multi_turn_failed_test_result
+
+     @property
+     def additional_keys(self) -> list[str]:
+         return list(self.additional_properties.keys())
+
+     def __getitem__(self, key: str) -> Any:
+         return self.additional_properties[key]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         self.additional_properties[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self.additional_properties[key]
+
+     def __contains__(self, key: str) -> bool:
+         return key in self.additional_properties
circuit_breaker_labs/models/multi_turn_run_tests_response.py
@@ -0,0 +1,92 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from typing import TYPE_CHECKING, Any, TypeVar
+
+ from attrs import define as _attrs_define
+ from attrs import field as _attrs_field
+
+ if TYPE_CHECKING:
+     from ..models.multi_turn_failed_test_result import MultiTurnFailedTestResult
+
+
+ T = TypeVar("T", bound="MultiTurnRunTestsResponse")
+
+
+ @_attrs_define
+ class MultiTurnRunTestsResponse:
+     """
+     Attributes:
+         total_passed (int): Total number of test cases that passed across all iteration layers
+         total_failed (int): Total number of test cases that failed across all iteration layers
+         failed_results (list[MultiTurnFailedTestResult]): Failed test cases executed
+     """
+
+     total_passed: int
+     total_failed: int
+     failed_results: list[MultiTurnFailedTestResult]
+     additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+
+         total_passed = self.total_passed
+
+         total_failed = self.total_failed
+
+         failed_results = []
+         for failed_results_item_data in self.failed_results:
+             failed_results_item = failed_results_item_data.to_dict()
+             failed_results.append(failed_results_item)
+
+         field_dict: dict[str, Any] = {}
+         field_dict.update(self.additional_properties)
+         field_dict.update(
+             {
+                 "total_passed": total_passed,
+                 "total_failed": total_failed,
+                 "failed_results": failed_results,
+             }
+         )
+
+         return field_dict
+
+     @classmethod
+     def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+         from ..models.multi_turn_failed_test_result import MultiTurnFailedTestResult
+
+         d = dict(src_dict)
+         total_passed = d.pop("total_passed")
+
+         total_failed = d.pop("total_failed")
+
+         failed_results = []
+         _failed_results = d.pop("failed_results")
+         for failed_results_item_data in _failed_results:
+             failed_results_item = MultiTurnFailedTestResult.from_dict(failed_results_item_data)
+
+             failed_results.append(failed_results_item)
+
+         multi_turn_run_tests_response = cls(
+             total_passed=total_passed,
+             total_failed=total_failed,
+             failed_results=failed_results,
+         )
+
+         multi_turn_run_tests_response.additional_properties = d
+         return multi_turn_run_tests_response
+
+     @property
+     def additional_keys(self) -> list[str]:
+         return list(self.additional_properties.keys())
+
+     def __getitem__(self, key: str) -> Any:
+         return self.additional_properties[key]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         self.additional_properties[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self.additional_properties[key]
+
+     def __contains__(self, key: str) -> bool:
+         return key in self.additional_properties
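
Together, MultiTurnFailedTestResult and MultiTurnRunTestsResponse describe what a multi-turn run returns. A sketch of consuming such a response, using a hypothetical payload shaped after the docstrings above; the Message schema lives in message.py and is not shown in this excerpt, so the example uses a run with no failures:

from circuit_breaker_labs.models.multi_turn_run_tests_response import MultiTurnRunTestsResponse

# Hypothetical API payload; a real response with failures would also carry
# "failed_results" entries with a min_safe_response_score and a conversation list.
payload = {"total_passed": 12, "total_failed": 0, "failed_results": []}
response = MultiTurnRunTestsResponse.from_dict(payload)

total = response.total_passed + response.total_failed
print(f"pass rate: {response.total_passed / total:.0%}")  # pass rate: 100%

for failed in response.failed_results:  # empty here; failing runs expose the offending conversations
    print(failed.min_safe_response_score, len(failed.conversation))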
circuit_breaker_labs/models/multi_turn_test_type.py
@@ -0,0 +1,9 @@
+ from enum import Enum
+
+
+ class MultiTurnTestType(str, Enum):
+     SEMANTIC_CHUNKS = "semantic_chunks"
+     USER_PERSONA = "user_persona"
+
+     def __str__(self) -> str:
+         return str(self.value)
circuit_breaker_labs/models/role.py
@@ -0,0 +1,10 @@
+ from enum import Enum
+
+
+ class Role(str, Enum):
+     ASSISTANT = "assistant"
+     SYSTEM = "system"
+     USER = "user"
+
+     def __str__(self) -> str:
+         return str(self.value)
circuit_breaker_labs/models/single_turn_evaluate_open_ai_finetune_request.py
@@ -0,0 +1,126 @@
+ from __future__ import annotations
+
+ from collections.abc import Mapping
+ from typing import Any, TypeVar, cast
+
+ from attrs import define as _attrs_define
+ from attrs import field as _attrs_field
+
+ from ..models.test_case_group import TestCaseGroup
+ from ..types import UNSET, Unset
+
+ T = TypeVar("T", bound="SingleTurnEvaluateOpenAiFinetuneRequest")
+
+
+ @_attrs_define
+ class SingleTurnEvaluateOpenAiFinetuneRequest:
+     """
+     Attributes:
+         threshold (float): Responses below this safety score threshold will fail
+         variations (int): Number of variations per unsafe case
+         maximum_iteration_layers (int): Maximum iteration layers for tests
+         model_name (str): Fully qualified name of the model to be tested.
+         test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+             ideation tests
+     """
+
+     threshold: float
+     variations: int
+     maximum_iteration_layers: int
+     model_name: str
+     test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+     additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+         threshold = self.threshold
+
+         variations = self.variations
+
+         maximum_iteration_layers = self.maximum_iteration_layers
+
+         model_name = self.model_name
+
+         test_case_groups: list[str] | Unset = UNSET
+         if not isinstance(self.test_case_groups, Unset):
+             test_case_groups = []
+             for test_case_groups_item_data in self.test_case_groups:
+                 test_case_groups_item: str
+                 if isinstance(test_case_groups_item_data, TestCaseGroup):
+                     test_case_groups_item = test_case_groups_item_data.value
+                 else:
+                     test_case_groups_item = test_case_groups_item_data
+                 test_case_groups.append(test_case_groups_item)
+
+         field_dict: dict[str, Any] = {}
+         field_dict.update(self.additional_properties)
+         field_dict.update(
+             {
+                 "threshold": threshold,
+                 "variations": variations,
+                 "maximum_iteration_layers": maximum_iteration_layers,
+                 "model_name": model_name,
+             }
+         )
+         if test_case_groups is not UNSET:
+             field_dict["test_case_groups"] = test_case_groups
+
+         return field_dict
+
+     @classmethod
+     def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+         d = dict(src_dict)
+         threshold = d.pop("threshold")
+
+         variations = d.pop("variations")
+
+         maximum_iteration_layers = d.pop("maximum_iteration_layers")
+
+         model_name = d.pop("model_name")
+
+         _test_case_groups = d.pop("test_case_groups", UNSET)
+         test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+         if _test_case_groups is not UNSET:
+             test_case_groups = []
+             for test_case_groups_item_data in _test_case_groups:
+
+                 def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+                     try:
+                         if not isinstance(data, str):
+                             raise TypeError()
+                         test_case_groups_item_type_0 = TestCaseGroup(data)
+
+                         return test_case_groups_item_type_0
+                     except (TypeError, ValueError, AttributeError, KeyError):
+                         pass
+                     return cast(str | TestCaseGroup, data)
+
+                 test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+                 test_case_groups.append(test_case_groups_item)
+
+         single_turn_evaluate_open_ai_finetune_request = cls(
+             threshold=threshold,
+             variations=variations,
+             maximum_iteration_layers=maximum_iteration_layers,
+             model_name=model_name,
+             test_case_groups=test_case_groups,
+         )
+
+         single_turn_evaluate_open_ai_finetune_request.additional_properties = d
+         return single_turn_evaluate_open_ai_finetune_request
+
+     @property
+     def additional_keys(self) -> list[str]:
+         return list(self.additional_properties.keys())
+
+     def __getitem__(self, key: str) -> Any:
+         return self.additional_properties[key]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         self.additional_properties[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self.additional_properties[key]
+
+     def __contains__(self, key: str) -> bool:
+         return key in self.additional_properties
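
For comparison with the multi-turn request, a sketch of the new single-turn fine-tune request, which replaces the removed evaluate_open_ai_finetune_request.py and takes variations and maximum_iteration_layers instead of max_turns and test_types. The model name and the test case group string are placeholders; the actual TestCaseGroup members are defined in test_case_group.py and are not shown in this excerpt:

from circuit_breaker_labs.models.single_turn_evaluate_open_ai_finetune_request import (
    SingleTurnEvaluateOpenAiFinetuneRequest,
)

request = SingleTurnEvaluateOpenAiFinetuneRequest(
    threshold=0.5,
    variations=3,                # number of variations per unsafe case
    maximum_iteration_layers=2,  # maximum iteration layers for tests
    model_name="ft:gpt-4o-mini:acme::example",  # placeholder fine-tune identifier
    test_case_groups=["suicidal_ideation"],     # hypothetical group name; plain strings are accepted
)

payload = request.to_dict()
assert payload["test_case_groups"] == ["suicidal_ideation"]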