circuit-breaker-labs 1.0.6__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

circuit_breaker_labs/models/__init__.py

@@ -20,7 +20,7 @@ from .single_turn_evaluate_open_ai_finetune_request import SingleTurnEvaluateOpe
  from .single_turn_evaluate_system_prompt_request import SingleTurnEvaluateSystemPromptRequest
  from .single_turn_failed_test_result import SingleTurnFailedTestResult
  from .single_turn_run_tests_response import SingleTurnRunTestsResponse
- from .test_case_pack import TestCasePack
+ from .test_case_group import TestCaseGroup
  from .unauthorized_error import UnauthorizedError
  from .unauthorized_response import UnauthorizedResponse
  from .validate_api_key_response import ValidateApiKeyResponse
@@ -48,7 +48,7 @@ __all__ = (
  "SingleTurnEvaluateSystemPromptRequest",
  "SingleTurnFailedTestResult",
  "SingleTurnRunTestsResponse",
- "TestCasePack",
+ "TestCaseGroup",
  "UnauthorizedError",
  "UnauthorizedResponse",
  "ValidateApiKeyResponse",

circuit_breaker_labs/models/multi_turn_evaluate_open_ai_finetune_request.py

@@ -1,13 +1,13 @@
  from __future__ import annotations

  from collections.abc import Mapping
- from typing import Any, TypeVar
+ from typing import Any, TypeVar, cast

  from attrs import define as _attrs_define
  from attrs import field as _attrs_field

  from ..models.multi_turn_test_type import MultiTurnTestType
- from ..models.test_case_pack import TestCasePack
+ from ..models.test_case_group import TestCaseGroup
  from ..types import UNSET, Unset

  T = TypeVar("T", bound="MultiTurnEvaluateOpenAiFinetuneRequest")
@@ -21,15 +21,15 @@ class MultiTurnEvaluateOpenAiFinetuneRequest:
  max_turns (int): Maximum number of turns in the conversation. Should be a multiple of two.
  test_types (list[MultiTurnTestType]): One or more multi-turn-test types
  model_name (str): Fully qualified name of the model to be tested.
- test_case_packs (list[TestCasePack] | Unset): One or more test case packs to run. Defaults to suicidal ideation
- tests
+ test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+ ideation tests
  """

  threshold: float
  max_turns: int
  test_types: list[MultiTurnTestType]
  model_name: str
- test_case_packs: list[TestCasePack] | Unset = UNSET
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
  additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

  def to_dict(self) -> dict[str, Any]:
@@ -44,12 +44,16 @@ class MultiTurnEvaluateOpenAiFinetuneRequest:

  model_name = self.model_name

- test_case_packs: list[str] | Unset = UNSET
- if not isinstance(self.test_case_packs, Unset):
- test_case_packs = []
- for test_case_packs_item_data in self.test_case_packs:
- test_case_packs_item = test_case_packs_item_data.value
- test_case_packs.append(test_case_packs_item)
+ test_case_groups: list[str] | Unset = UNSET
+ if not isinstance(self.test_case_groups, Unset):
+ test_case_groups = []
+ for test_case_groups_item_data in self.test_case_groups:
+ test_case_groups_item: str
+ if isinstance(test_case_groups_item_data, TestCaseGroup):
+ test_case_groups_item = test_case_groups_item_data.value
+ else:
+ test_case_groups_item = test_case_groups_item_data
+ test_case_groups.append(test_case_groups_item)

  field_dict: dict[str, Any] = {}
  field_dict.update(self.additional_properties)
@@ -61,8 +65,8 @@ class MultiTurnEvaluateOpenAiFinetuneRequest:
  "model_name": model_name,
  }
  )
- if test_case_packs is not UNSET:
- field_dict["test_case_packs"] = test_case_packs
+ if test_case_groups is not UNSET:
+ field_dict["test_case_groups"] = test_case_groups

  return field_dict

@@ -82,21 +86,33 @@ class MultiTurnEvaluateOpenAiFinetuneRequest:

  model_name = d.pop("model_name")

- _test_case_packs = d.pop("test_case_packs", UNSET)
- test_case_packs: list[TestCasePack] | Unset = UNSET
- if _test_case_packs is not UNSET:
- test_case_packs = []
- for test_case_packs_item_data in _test_case_packs:
- test_case_packs_item = TestCasePack(test_case_packs_item_data)
+ _test_case_groups = d.pop("test_case_groups", UNSET)
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+ if _test_case_groups is not UNSET:
+ test_case_groups = []
+ for test_case_groups_item_data in _test_case_groups:

- test_case_packs.append(test_case_packs_item)
+ def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+ try:
+ if not isinstance(data, str):
+ raise TypeError()
+ test_case_groups_item_type_0 = TestCaseGroup(data)
+
+ return test_case_groups_item_type_0
+ except (TypeError, ValueError, AttributeError, KeyError):
+ pass
+ return cast(str | TestCaseGroup, data)
+
+ test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+ test_case_groups.append(test_case_groups_item)

  multi_turn_evaluate_open_ai_finetune_request = cls(
  threshold=threshold,
  max_turns=max_turns,
  test_types=test_types,
  model_name=model_name,
- test_case_packs=test_case_packs,
+ test_case_groups=test_case_groups,
  )

  multi_turn_evaluate_open_ai_finetune_request.additional_properties = d
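
Read together, the to_dict and from_dict changes above mean the regenerated model accepts either TestCaseGroup members or plain strings in test_case_groups and flattens both to their string values when building the request body. A minimal sketch of that behaviour, assuming the class is exported from circuit_breaker_labs.models like the classes used in the README; the threshold, max_turns, and model name values are placeholders:

```python
# Sketch based on the generated to_dict() shown in the diff above; values are placeholders.
from circuit_breaker_labs.models import (
    MultiTurnEvaluateOpenAiFinetuneRequest,
    MultiTurnTestType,
    TestCaseGroup,
)

request = MultiTurnEvaluateOpenAiFinetuneRequest(
    threshold=0.6,
    max_turns=6,
    test_types=[MultiTurnTestType.SEMANTIC_CHUNKS],
    model_name="ft:gpt-4o-mini:example-org::abc123",  # placeholder fine-tune name
    # Enum members and plain strings can now be mixed in the same list.
    test_case_groups=[TestCaseGroup.SUICIDAL_IDEATION, "suicidal_ideation"],
)

# to_dict() normalises every entry to its string value before the request is sent.
assert request.to_dict()["test_case_groups"] == ["suicidal_ideation", "suicidal_ideation"]
```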

circuit_breaker_labs/models/multi_turn_evaluate_system_prompt_request.py

@@ -1,13 +1,13 @@
  from __future__ import annotations

  from collections.abc import Mapping
- from typing import Any, TypeVar
+ from typing import Any, TypeVar, cast

  from attrs import define as _attrs_define
  from attrs import field as _attrs_field

  from ..models.multi_turn_test_type import MultiTurnTestType
- from ..models.test_case_pack import TestCasePack
+ from ..models.test_case_group import TestCaseGroup
  from ..types import UNSET, Unset

  T = TypeVar("T", bound="MultiTurnEvaluateSystemPromptRequest")
@@ -23,8 +23,8 @@ class MultiTurnEvaluateSystemPromptRequest:
  system_prompt (str): The system prompt to be evaluated
  openrouter_model_name (str): Name of the model to be tested. Available models can be found at [Openrouter
  Models](https://openrouter.ai/models)
- test_case_packs (list[TestCasePack] | Unset): One or more test case packs to run. Defaults to suicidal ideation
- tests
+ test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+ ideation tests
  """

  threshold: float
@@ -32,7 +32,7 @@ class MultiTurnEvaluateSystemPromptRequest:
  test_types: list[MultiTurnTestType]
  system_prompt: str
  openrouter_model_name: str
- test_case_packs: list[TestCasePack] | Unset = UNSET
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
  additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

  def to_dict(self) -> dict[str, Any]:
@@ -49,12 +49,16 @@ class MultiTurnEvaluateSystemPromptRequest:

  openrouter_model_name = self.openrouter_model_name

- test_case_packs: list[str] | Unset = UNSET
- if not isinstance(self.test_case_packs, Unset):
- test_case_packs = []
- for test_case_packs_item_data in self.test_case_packs:
- test_case_packs_item = test_case_packs_item_data.value
- test_case_packs.append(test_case_packs_item)
+ test_case_groups: list[str] | Unset = UNSET
+ if not isinstance(self.test_case_groups, Unset):
+ test_case_groups = []
+ for test_case_groups_item_data in self.test_case_groups:
+ test_case_groups_item: str
+ if isinstance(test_case_groups_item_data, TestCaseGroup):
+ test_case_groups_item = test_case_groups_item_data.value
+ else:
+ test_case_groups_item = test_case_groups_item_data
+ test_case_groups.append(test_case_groups_item)

  field_dict: dict[str, Any] = {}
  field_dict.update(self.additional_properties)
@@ -67,8 +71,8 @@ class MultiTurnEvaluateSystemPromptRequest:
  "openrouter_model_name": openrouter_model_name,
  }
  )
- if test_case_packs is not UNSET:
- field_dict["test_case_packs"] = test_case_packs
+ if test_case_groups is not UNSET:
+ field_dict["test_case_groups"] = test_case_groups

  return field_dict

@@ -90,14 +94,26 @@ class MultiTurnEvaluateSystemPromptRequest:

  openrouter_model_name = d.pop("openrouter_model_name")

- _test_case_packs = d.pop("test_case_packs", UNSET)
- test_case_packs: list[TestCasePack] | Unset = UNSET
- if _test_case_packs is not UNSET:
- test_case_packs = []
- for test_case_packs_item_data in _test_case_packs:
- test_case_packs_item = TestCasePack(test_case_packs_item_data)
+ _test_case_groups = d.pop("test_case_groups", UNSET)
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+ if _test_case_groups is not UNSET:
+ test_case_groups = []
+ for test_case_groups_item_data in _test_case_groups:

- test_case_packs.append(test_case_packs_item)
+ def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+ try:
+ if not isinstance(data, str):
+ raise TypeError()
+ test_case_groups_item_type_0 = TestCaseGroup(data)
+
+ return test_case_groups_item_type_0
+ except (TypeError, ValueError, AttributeError, KeyError):
+ pass
+ return cast(str | TestCaseGroup, data)
+
+ test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+ test_case_groups.append(test_case_groups_item)

  multi_turn_evaluate_system_prompt_request = cls(
  threshold=threshold,
@@ -105,7 +121,7 @@ class MultiTurnEvaluateSystemPromptRequest:
  test_types=test_types,
  system_prompt=system_prompt,
  openrouter_model_name=openrouter_model_name,
- test_case_packs=test_case_packs,
+ test_case_groups=test_case_groups,
  )

  multi_turn_evaluate_system_prompt_request.additional_properties = d
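
The from_dict side mirrors this: each incoming string is first tried against TestCaseGroup and falls back to the raw string when it is not a known member, so unknown group names survive deserialization instead of raising. A hedged illustration; the payload values, and in particular the "semantic_chunks" test-type string, are assumptions rather than values taken from the package docs:

```python
# Illustration of the _parse_test_case_groups_item fallback shown above; payload is hypothetical.
from circuit_breaker_labs.models import MultiTurnEvaluateSystemPromptRequest, TestCaseGroup

payload = {
    "threshold": 0.6,
    "max_turns": 6,
    "test_types": ["semantic_chunks"],  # assumed MultiTurnTestType value; check the enum for the real strings
    "system_prompt": "You are a supportive assistant.",
    "openrouter_model_name": "anthropic/claude-3.7-sonnet",
    "test_case_groups": ["suicidal_ideation", "some_future_group"],
}

request = MultiTurnEvaluateSystemPromptRequest.from_dict(payload)

# Known values are promoted to enum members; unknown values stay as plain strings.
assert request.test_case_groups == [TestCaseGroup.SUICIDAL_IDEATION, "some_future_group"]
```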

circuit_breaker_labs/models/single_turn_evaluate_open_ai_finetune_request.py

@@ -1,12 +1,12 @@
  from __future__ import annotations

  from collections.abc import Mapping
- from typing import Any, TypeVar
+ from typing import Any, TypeVar, cast

  from attrs import define as _attrs_define
  from attrs import field as _attrs_field

- from ..models.test_case_pack import TestCasePack
+ from ..models.test_case_group import TestCaseGroup
  from ..types import UNSET, Unset

  T = TypeVar("T", bound="SingleTurnEvaluateOpenAiFinetuneRequest")
@@ -20,15 +20,15 @@ class SingleTurnEvaluateOpenAiFinetuneRequest:
  variations (int): Number of variations per unsafe case
  maximum_iteration_layers (int): Maximum iteration layers for tests
  model_name (str): Fully qualified name of the model to be tested.
- test_case_packs (list[TestCasePack] | Unset): One or more test case packs to run. Defaults to suicidal ideation
- tests
+ test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+ ideation tests
  """

  threshold: float
  variations: int
  maximum_iteration_layers: int
  model_name: str
- test_case_packs: list[TestCasePack] | Unset = UNSET
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
  additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

  def to_dict(self) -> dict[str, Any]:
@@ -40,12 +40,16 @@ class SingleTurnEvaluateOpenAiFinetuneRequest:

  model_name = self.model_name

- test_case_packs: list[str] | Unset = UNSET
- if not isinstance(self.test_case_packs, Unset):
- test_case_packs = []
- for test_case_packs_item_data in self.test_case_packs:
- test_case_packs_item = test_case_packs_item_data.value
- test_case_packs.append(test_case_packs_item)
+ test_case_groups: list[str] | Unset = UNSET
+ if not isinstance(self.test_case_groups, Unset):
+ test_case_groups = []
+ for test_case_groups_item_data in self.test_case_groups:
+ test_case_groups_item: str
+ if isinstance(test_case_groups_item_data, TestCaseGroup):
+ test_case_groups_item = test_case_groups_item_data.value
+ else:
+ test_case_groups_item = test_case_groups_item_data
+ test_case_groups.append(test_case_groups_item)

  field_dict: dict[str, Any] = {}
  field_dict.update(self.additional_properties)
@@ -57,8 +61,8 @@ class SingleTurnEvaluateOpenAiFinetuneRequest:
  "model_name": model_name,
  }
  )
- if test_case_packs is not UNSET:
- field_dict["test_case_packs"] = test_case_packs
+ if test_case_groups is not UNSET:
+ field_dict["test_case_groups"] = test_case_groups

  return field_dict

@@ -73,21 +77,33 @@ class SingleTurnEvaluateOpenAiFinetuneRequest:

  model_name = d.pop("model_name")

- _test_case_packs = d.pop("test_case_packs", UNSET)
- test_case_packs: list[TestCasePack] | Unset = UNSET
- if _test_case_packs is not UNSET:
- test_case_packs = []
- for test_case_packs_item_data in _test_case_packs:
- test_case_packs_item = TestCasePack(test_case_packs_item_data)
+ _test_case_groups = d.pop("test_case_groups", UNSET)
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+ if _test_case_groups is not UNSET:
+ test_case_groups = []
+ for test_case_groups_item_data in _test_case_groups:

- test_case_packs.append(test_case_packs_item)
+ def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+ try:
+ if not isinstance(data, str):
+ raise TypeError()
+ test_case_groups_item_type_0 = TestCaseGroup(data)
+
+ return test_case_groups_item_type_0
+ except (TypeError, ValueError, AttributeError, KeyError):
+ pass
+ return cast(str | TestCaseGroup, data)
+
+ test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+ test_case_groups.append(test_case_groups_item)

  single_turn_evaluate_open_ai_finetune_request = cls(
  threshold=threshold,
  variations=variations,
  maximum_iteration_layers=maximum_iteration_layers,
  model_name=model_name,
- test_case_packs=test_case_packs,
+ test_case_groups=test_case_groups,
  )

  single_turn_evaluate_open_ai_finetune_request.additional_properties = d
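
The single-turn finetune request gets the same treatment, so callers no longer need to import an enum at all: group names can be passed as plain strings and are sent through unchanged. A small sketch under the same assumptions; every field value below is a placeholder:

```python
# Placeholder values; only the field names and accepted types come from the diff above.
from circuit_breaker_labs.models import SingleTurnEvaluateOpenAiFinetuneRequest

request = SingleTurnEvaluateOpenAiFinetuneRequest(
    threshold=0.6,
    variations=3,
    maximum_iteration_layers=2,
    model_name="ft:gpt-4o-mini:example-org::abc123",  # placeholder fine-tune name
    test_case_groups=["suicidal_ideation"],  # plain strings are accepted directly
)

assert request.to_dict()["test_case_groups"] == ["suicidal_ideation"]
```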

circuit_breaker_labs/models/single_turn_evaluate_system_prompt_request.py

@@ -1,12 +1,12 @@
  from __future__ import annotations

  from collections.abc import Mapping
- from typing import Any, TypeVar
+ from typing import Any, TypeVar, cast

  from attrs import define as _attrs_define
  from attrs import field as _attrs_field

- from ..models.test_case_pack import TestCasePack
+ from ..models.test_case_group import TestCaseGroup
  from ..types import UNSET, Unset

  T = TypeVar("T", bound="SingleTurnEvaluateSystemPromptRequest")
@@ -22,8 +22,8 @@ class SingleTurnEvaluateSystemPromptRequest:
  system_prompt (str): The system prompt to be evaluated
  openrouter_model_name (str): Name of the model to be tested. Available models can be found at [Openrouter
  Models](https://openrouter.ai/models)
- test_case_packs (list[TestCasePack] | Unset): One or more test case packs to run. Defaults to suicidal ideation
- tests
+ test_case_groups (list[str | TestCaseGroup] | Unset): One or more test case groups to run. Defaults to suicidal
+ ideation tests
  """

  threshold: float
@@ -31,7 +31,7 @@ class SingleTurnEvaluateSystemPromptRequest:
  maximum_iteration_layers: int
  system_prompt: str
  openrouter_model_name: str
- test_case_packs: list[TestCasePack] | Unset = UNSET
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
  additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

  def to_dict(self) -> dict[str, Any]:
@@ -45,12 +45,16 @@ class SingleTurnEvaluateSystemPromptRequest:

  openrouter_model_name = self.openrouter_model_name

- test_case_packs: list[str] | Unset = UNSET
- if not isinstance(self.test_case_packs, Unset):
- test_case_packs = []
- for test_case_packs_item_data in self.test_case_packs:
- test_case_packs_item = test_case_packs_item_data.value
- test_case_packs.append(test_case_packs_item)
+ test_case_groups: list[str] | Unset = UNSET
+ if not isinstance(self.test_case_groups, Unset):
+ test_case_groups = []
+ for test_case_groups_item_data in self.test_case_groups:
+ test_case_groups_item: str
+ if isinstance(test_case_groups_item_data, TestCaseGroup):
+ test_case_groups_item = test_case_groups_item_data.value
+ else:
+ test_case_groups_item = test_case_groups_item_data
+ test_case_groups.append(test_case_groups_item)

  field_dict: dict[str, Any] = {}
  field_dict.update(self.additional_properties)
@@ -63,8 +67,8 @@ class SingleTurnEvaluateSystemPromptRequest:
  "openrouter_model_name": openrouter_model_name,
  }
  )
- if test_case_packs is not UNSET:
- field_dict["test_case_packs"] = test_case_packs
+ if test_case_groups is not UNSET:
+ field_dict["test_case_groups"] = test_case_groups

  return field_dict

@@ -81,14 +85,26 @@ class SingleTurnEvaluateSystemPromptRequest:

  openrouter_model_name = d.pop("openrouter_model_name")

- _test_case_packs = d.pop("test_case_packs", UNSET)
- test_case_packs: list[TestCasePack] | Unset = UNSET
- if _test_case_packs is not UNSET:
- test_case_packs = []
- for test_case_packs_item_data in _test_case_packs:
- test_case_packs_item = TestCasePack(test_case_packs_item_data)
+ _test_case_groups = d.pop("test_case_groups", UNSET)
+ test_case_groups: list[str | TestCaseGroup] | Unset = UNSET
+ if _test_case_groups is not UNSET:
+ test_case_groups = []
+ for test_case_groups_item_data in _test_case_groups:

- test_case_packs.append(test_case_packs_item)
+ def _parse_test_case_groups_item(data: object) -> str | TestCaseGroup:
+ try:
+ if not isinstance(data, str):
+ raise TypeError()
+ test_case_groups_item_type_0 = TestCaseGroup(data)
+
+ return test_case_groups_item_type_0
+ except (TypeError, ValueError, AttributeError, KeyError):
+ pass
+ return cast(str | TestCaseGroup, data)
+
+ test_case_groups_item = _parse_test_case_groups_item(test_case_groups_item_data)
+
+ test_case_groups.append(test_case_groups_item)

  single_turn_evaluate_system_prompt_request = cls(
  threshold=threshold,
@@ -96,7 +112,7 @@ class SingleTurnEvaluateSystemPromptRequest:
  maximum_iteration_layers=maximum_iteration_layers,
  system_prompt=system_prompt,
  openrouter_model_name=openrouter_model_name,
- test_case_packs=test_case_packs,
+ test_case_groups=test_case_groups,
  )

  single_turn_evaluate_system_prompt_request.additional_properties = d
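
All four request models share the same generated _parse_test_case_groups_item helper. Restated as standalone code (this restatement is not part of the package; it only mirrors the logic added above), the fallback works like this:

```python
# Standalone mirror of the generated parsing fallback; not shipped in the package itself.
from typing import cast

from circuit_breaker_labs.models import TestCaseGroup


def parse_group(data: object) -> str | TestCaseGroup:
    # Try the enum first; anything that is not a known member falls back to the raw value.
    try:
        if not isinstance(data, str):
            raise TypeError()
        return TestCaseGroup(data)
    except (TypeError, ValueError, AttributeError, KeyError):
        pass
    return cast(str | TestCaseGroup, data)


assert parse_group("suicidal_ideation") is TestCaseGroup.SUICIDAL_IDEATION
assert parse_group("not_a_known_group") == "not_a_known_group"
```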

circuit_breaker_labs/models/test_case_pack.py → circuit_breaker_labs/models/test_case_group.py

@@ -1,7 +1,7 @@
  from enum import Enum


- class TestCasePack(str, Enum):
+ class TestCaseGroup(str, Enum):
  SUICIDAL_IDEATION = "suicidal_ideation"

  def __str__(self) -> str:
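
The renamed enum keeps the str base class of its predecessor, so members compare equal to their raw values and can be looked up by value. The __str__ body is truncated in the hunk above; openapi-python-client conventionally returns the plain value there. A small sketch under that assumption:

```python
# Assumes __str__ (truncated above) returns the raw value, as openapi-python-client usually generates.
from circuit_breaker_labs.models import TestCaseGroup

group = TestCaseGroup("suicidal_ideation")   # members can be looked up by value
assert group is TestCaseGroup.SUICIDAL_IDEATION
assert group == "suicidal_ideation"          # str subclass: equal to its raw value
assert str(group) == "suicidal_ideation"     # assumed __str__ behaviour
```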

circuit_breaker_labs-1.0.6.dist-info/METADATA → circuit_breaker_labs-1.0.7.dist-info/METADATA

@@ -1,11 +1,11 @@
  Metadata-Version: 2.3
  Name: circuit-breaker-labs
- Version: 1.0.6
+ Version: 1.0.7
  Summary: A client library for accessing Circuit Breaker Labs API
  Requires-Dist: httpx>=0.23.0,<0.29.0
  Requires-Dist: attrs>=22.2.0
  Requires-Dist: python-dateutil>=2.8.0,<3
- Requires-Dist: types-python-dateutil>=2.9.0.20251115
+ Requires-Dist: types-python-dateutil>=2.9.0.20260124
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown

@@ -91,6 +91,38 @@ async with client as client:
  )
  ```

+ Want to test multi-turn conversations instead? Use the multi-turn endpoint (async version also available):
+
+ ```python
+ import os
+
+ from circuit_breaker_labs.api.evaluations import multi_turn_evaluate_system_prompt_post
+ from circuit_breaker_labs.models import (
+ MultiTurnEvaluateSystemPromptRequest,
+ MultiTurnTestType,
+ TestCasePack,
+ )
+
+ with client as client:
+ request = MultiTurnEvaluateSystemPromptRequest(
+ threshold=0.6,
+ max_turns=6,
+ test_types=[
+ MultiTurnTestType.SEMANTIC_CHUNKS,
+ MultiTurnTestType.USER_PERSONA,
+ ],
+ system_prompt=os.getenv("SYSTEM_PROMPT"),
+ openrouter_model_name="anthropic/claude-3.7-sonnet",
+ test_case_packs=[TestCasePack.SUICIDAL_IDEATION],
+ )
+
+ run_tests_response = multi_turn_evaluate_system_prompt_post.sync(
+ client=client,
+ cbl_api_key=os.getenv("CBL_API_KEY"),
+ body=request,
+ )
+ ```
+
  Things to know:

  1. Every path/method combo becomes a Python module with four functions:
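
Note that the README snippet added above still imports TestCasePack and passes test_case_packs, while the models in this same release export TestCaseGroup and the request field is test_case_groups. A sketch of the call adapted to the renamed identifiers follows; this adapted form is not text from the published README, and `client` is assumed to be constructed as in the earlier README examples:

```python
# Adaptation of the README example to the renamed identifiers shown in this diff.
import os

from circuit_breaker_labs.api.evaluations import multi_turn_evaluate_system_prompt_post
from circuit_breaker_labs.models import (
    MultiTurnEvaluateSystemPromptRequest,
    MultiTurnTestType,
    TestCaseGroup,
)

with client as client:  # `client` constructed as in the earlier README examples
    request = MultiTurnEvaluateSystemPromptRequest(
        threshold=0.6,
        max_turns=6,
        test_types=[
            MultiTurnTestType.SEMANTIC_CHUNKS,
            MultiTurnTestType.USER_PERSONA,
        ],
        system_prompt=os.getenv("SYSTEM_PROMPT"),
        openrouter_model_name="anthropic/claude-3.7-sonnet",
        test_case_groups=[TestCaseGroup.SUICIDAL_IDEATION],
    )

    run_tests_response = multi_turn_evaluate_system_prompt_post.sync(
        client=client,
        cbl_api_key=os.getenv("CBL_API_KEY"),
        body=request,
    )
```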

circuit_breaker_labs-1.0.6.dist-info/RECORD → circuit_breaker_labs-1.0.7.dist-info/RECORD

@@ -13,14 +13,14 @@ circuit_breaker_labs/api/health_checks/ping_get.py,sha256=zOacpnV1c_g_v3OKqXr2Gq
  circuit_breaker_labs/api/health_checks/version_get.py,sha256=ykAbGhlVlOHajrsSQcd4bp0P34K7z16mKE_O3MstI3c,3293
  circuit_breaker_labs/client.py,sha256=-rT3epMc77Y7QMTy5o1oH5hkGLufY9qFrD1rb7qItFU,12384
  circuit_breaker_labs/errors.py,sha256=gO8GBmKqmSNgAg-E5oT-oOyxztvp7V_6XG7OUTT15q0,546
- circuit_breaker_labs/models/__init__.py,sha256=24BRj2TLUb3Ld2zsesFsaUm-xuiwdKCU8UBfO599dHI,2378
+ circuit_breaker_labs/models/__init__.py,sha256=kJnVDOp97pXQHUwu_NRqalC39kjbIVNTwamxP4zMvrM,2381
  circuit_breaker_labs/models/http_validation_error.py,sha256=S2z4QBSSZFeQ23Xnlk-8u7H_I_EwewePiFJbSEKdSp0,2318
  circuit_breaker_labs/models/internal_server_error.py,sha256=XjwdxWWYhIEn2aKrPuCXQivQOJ4QLqA-lRaDi2z4sBU,1892
  circuit_breaker_labs/models/internal_server_error_response.py,sha256=EmHyRvXjagCVuUIdWr9DMDaCcVUyOiERU3BGrPa1TY4,1907
  circuit_breaker_labs/models/message.py,sha256=GCtAUsc6_IX_PtS3R7WFPS9nYcxzq0lu2cpHT4dcuSw,1669
  circuit_breaker_labs/models/monthly_quota_response.py,sha256=QOCKISN1sSWXz4jlVGKme9CkJSqXw6VtukCYLc-gSpU,2069
- circuit_breaker_labs/models/multi_turn_evaluate_open_ai_finetune_request.py,sha256=r12EzH6b26MulmqtggFTjWZB9DlBPlWTcTFBz221F0I,4084
- circuit_breaker_labs/models/multi_turn_evaluate_system_prompt_request.py,sha256=EiPoAPO7R5t9H01e7Y5SCTFm3Y86JLnnL3qaBx_2X3k,4527
+ circuit_breaker_labs/models/multi_turn_evaluate_open_ai_finetune_request.py,sha256=DroSSX8AYCtkNHoqbszvEN36pb0CZ0kqGZuMexY4vho,4887
+ circuit_breaker_labs/models/multi_turn_evaluate_system_prompt_request.py,sha256=Yg_wvJJmg5QbmSdaTDkI9BkaEOdoUZdlQc5Z-hERMuE,5330
  circuit_breaker_labs/models/multi_turn_failed_test_result.py,sha256=Y6z-zM7Wp2DVIyE7XDCQ4RYrn9yhI28L323jw3VMGjw,2577
  circuit_breaker_labs/models/multi_turn_run_tests_response.py,sha256=ltuhksaaG-G3Gu2-4RWEyvu82mAuZzSvYo1SF2GX5hI,2924
  circuit_breaker_labs/models/multi_turn_test_type.py,sha256=gzS6knYWi82lM3SNhKfCAnxADN9aXtnELljTS6hXgkA,196
@@ -30,11 +30,11 @@ circuit_breaker_labs/models/ping_response.py,sha256=DgbkCdzHn1HDQ53aG9VCVBr1kDTc
  circuit_breaker_labs/models/quota_exceeded_error.py,sha256=M_JGaKG1pUFCEl-ZSrw8TeDjmNVA_5-ZchicLMFs2Sc,1884
  circuit_breaker_labs/models/quota_exceeded_response.py,sha256=cGM-2vgb0XByFl01cviQMMSJRan8ndDgCMdVS_VQw50,1871
  circuit_breaker_labs/models/role.py,sha256=FGDt50yYv1_eN1QAyBwsJFR-tjNi1soxpQaEaSBUfzo,177
- circuit_breaker_labs/models/single_turn_evaluate_open_ai_finetune_request.py,sha256=YxJGPAy45qbKE9ccRGcJOXmejw15yG0RtQYNjajeym8,3767
- circuit_breaker_labs/models/single_turn_evaluate_system_prompt_request.py,sha256=FUWAx-70srCIuFMhtz7e7N5IlZsFTebENmKAqgxLKvY,4210
+ circuit_breaker_labs/models/single_turn_evaluate_open_ai_finetune_request.py,sha256=2liCrGulF9SpAWk-BcxLV0j75IL2OwEg8rzT6cVJHAs,4570
+ circuit_breaker_labs/models/single_turn_evaluate_system_prompt_request.py,sha256=8dntgLROPo5pnjcwVCTlyctLIOFZg6wqZXpUQzEo9PE,5013
  circuit_breaker_labs/models/single_turn_failed_test_result.py,sha256=mxwflRSpiHiM6rYocTTcTSISjBCRj6EdPgViXGAI__I,2313
  circuit_breaker_labs/models/single_turn_run_tests_response.py,sha256=jG6Fp40Vusay41Dd8LPbeXztsodkEmgFBt-z_oO1cMw,3416
- circuit_breaker_labs/models/test_case_pack.py,sha256=xX-LjItO82VgMC1_--Ju1CD0TRmhXmN1memrtruxYOc,161
+ circuit_breaker_labs/models/test_case_group.py,sha256=3B31r384JL4qxlidoXRhjBVEdKSvJEje8MjSw-monWY,162
  circuit_breaker_labs/models/unauthorized_error.py,sha256=vIiMl68kdwOKIwCp6t0eLLmToHar5xKroVI8Z_KVfGQ,1858
  circuit_breaker_labs/models/unauthorized_response.py,sha256=c1xQ9mtazPAW_LoTfZYNl7FmGSTSetiL7iw4B0JxuCo,1838
  circuit_breaker_labs/models/validate_api_key_response.py,sha256=dFWXa0Mc9sRJvI3CXHZ8A0samQQLc68MRPuJsW6AO2A,1580
@@ -42,6 +42,6 @@ circuit_breaker_labs/models/validation_error.py,sha256=n8d_ZobQV26pm0KyDAKvIo93u
  circuit_breaker_labs/models/version_response.py,sha256=Ptaax1q1oTfbXfcC2ta6GtSNcUY38HCn7_oN1op1EYU,1535
  circuit_breaker_labs/py.typed,sha256=8ZJUsxZiuOy1oJeVhsTWQhTG_6pTVHVXk5hJL79ebTk,25
  circuit_breaker_labs/types.py,sha256=0We4NPvhIYASRpQ3le41nmJeEAVm42-2VKdzlJ4Ogok,1343
- circuit_breaker_labs-1.0.6.dist-info/WHEEL,sha256=KSLUh82mDPEPk0Bx0ScXlWL64bc8KmzIPNcpQZFV-6E,79
- circuit_breaker_labs-1.0.6.dist-info/METADATA,sha256=ru4vWVtHkOzGiIkt6ABZZm9TgN3IdfupjMmu2yRxjfI,5354
- circuit_breaker_labs-1.0.6.dist-info/RECORD,,
+ circuit_breaker_labs-1.0.7.dist-info/WHEEL,sha256=iHtWm8nRfs0VRdCYVXocAWFW8ppjHL-uTJkAdZJKOBM,80
+ circuit_breaker_labs-1.0.7.dist-info/METADATA,sha256=OzxBRLGkUbtoBNh7F3lP_7S2AKVIURkQxeTgJ2CpeoQ,6289
+ circuit_breaker_labs-1.0.7.dist-info/RECORD,,

circuit_breaker_labs-1.0.6.dist-info/WHEEL → circuit_breaker_labs-1.0.7.dist-info/WHEEL

@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: uv 0.9.22
+ Generator: uv 0.9.30
  Root-Is-Purelib: true
- Tag: py3-none-any
+ Tag: py3-none-any