kiln-ai 0.16.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. kiln_ai/adapters/__init__.py +2 -0
  2. kiln_ai/adapters/adapter_registry.py +22 -44
  3. kiln_ai/adapters/chat/__init__.py +8 -0
  4. kiln_ai/adapters/chat/chat_formatter.py +233 -0
  5. kiln_ai/adapters/chat/test_chat_formatter.py +131 -0
  6. kiln_ai/adapters/data_gen/data_gen_prompts.py +121 -36
  7. kiln_ai/adapters/data_gen/data_gen_task.py +49 -36
  8. kiln_ai/adapters/data_gen/test_data_gen_task.py +330 -40
  9. kiln_ai/adapters/eval/base_eval.py +7 -6
  10. kiln_ai/adapters/eval/eval_runner.py +9 -2
  11. kiln_ai/adapters/eval/g_eval.py +40 -17
  12. kiln_ai/adapters/eval/test_base_eval.py +174 -17
  13. kiln_ai/adapters/eval/test_eval_runner.py +3 -0
  14. kiln_ai/adapters/eval/test_g_eval.py +116 -5
  15. kiln_ai/adapters/fine_tune/base_finetune.py +3 -8
  16. kiln_ai/adapters/fine_tune/dataset_formatter.py +135 -273
  17. kiln_ai/adapters/fine_tune/test_base_finetune.py +10 -10
  18. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +287 -353
  19. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +3 -3
  20. kiln_ai/adapters/fine_tune/test_openai_finetune.py +6 -6
  21. kiln_ai/adapters/fine_tune/test_together_finetune.py +1 -0
  22. kiln_ai/adapters/fine_tune/test_vertex_finetune.py +6 -11
  23. kiln_ai/adapters/fine_tune/together_finetune.py +13 -2
  24. kiln_ai/adapters/ml_model_list.py +370 -84
  25. kiln_ai/adapters/model_adapters/base_adapter.py +73 -26
  26. kiln_ai/adapters/model_adapters/litellm_adapter.py +88 -97
  27. kiln_ai/adapters/model_adapters/litellm_config.py +3 -2
  28. kiln_ai/adapters/model_adapters/test_base_adapter.py +235 -61
  29. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +104 -21
  30. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +41 -0
  31. kiln_ai/adapters/model_adapters/test_structured_output.py +44 -12
  32. kiln_ai/adapters/parsers/parser_registry.py +0 -2
  33. kiln_ai/adapters/parsers/r1_parser.py +0 -1
  34. kiln_ai/adapters/prompt_builders.py +0 -16
  35. kiln_ai/adapters/provider_tools.py +27 -9
  36. kiln_ai/adapters/remote_config.py +66 -0
  37. kiln_ai/adapters/repair/repair_task.py +1 -6
  38. kiln_ai/adapters/repair/test_repair_task.py +24 -3
  39. kiln_ai/adapters/test_adapter_registry.py +88 -28
  40. kiln_ai/adapters/test_ml_model_list.py +176 -0
  41. kiln_ai/adapters/test_prompt_adaptors.py +17 -7
  42. kiln_ai/adapters/test_prompt_builders.py +3 -16
  43. kiln_ai/adapters/test_provider_tools.py +69 -20
  44. kiln_ai/adapters/test_remote_config.py +100 -0
  45. kiln_ai/datamodel/__init__.py +0 -2
  46. kiln_ai/datamodel/datamodel_enums.py +38 -13
  47. kiln_ai/datamodel/eval.py +32 -0
  48. kiln_ai/datamodel/finetune.py +12 -8
  49. kiln_ai/datamodel/task.py +68 -7
  50. kiln_ai/datamodel/task_output.py +0 -2
  51. kiln_ai/datamodel/task_run.py +0 -2
  52. kiln_ai/datamodel/test_basemodel.py +2 -1
  53. kiln_ai/datamodel/test_dataset_split.py +0 -8
  54. kiln_ai/datamodel/test_eval_model.py +146 -4
  55. kiln_ai/datamodel/test_models.py +33 -10
  56. kiln_ai/datamodel/test_task.py +168 -2
  57. kiln_ai/utils/config.py +3 -2
  58. kiln_ai/utils/dataset_import.py +1 -1
  59. kiln_ai/utils/logging.py +166 -0
  60. kiln_ai/utils/test_config.py +23 -0
  61. kiln_ai/utils/test_dataset_import.py +30 -0
  62. {kiln_ai-0.16.0.dist-info → kiln_ai-0.18.0.dist-info}/METADATA +2 -2
  63. kiln_ai-0.18.0.dist-info/RECORD +115 -0
  64. kiln_ai-0.16.0.dist-info/RECORD +0 -108
  65. {kiln_ai-0.16.0.dist-info → kiln_ai-0.18.0.dist-info}/WHEEL +0 -0
  66. {kiln_ai-0.16.0.dist-info → kiln_ai-0.18.0.dist-info}/licenses/LICENSE.txt +0 -0
kiln_ai/datamodel/datamodel_enums.py CHANGED
@@ -24,13 +24,14 @@ class StructuredOutputMode(str, Enum):
     """
     Enumeration of supported structured output modes.
 
-    - default: let the adapter decide
     - json_schema: request json using API capabilities for json_schema
     - function_calling: request json using API capabilities for function calling
     - json_mode: request json using API's JSON mode, which should return valid JSON, but isn't checking/passing the schema
     - json_instructions: append instructions to the prompt to request json matching the schema. No API capabilities are used. You should have a custom parser on these models as they will be returning strings.
     - json_instruction_and_object: append instructions to the prompt to request json matching the schema. Also request the response as json_mode via API capabilities (returning dictionaries).
     - json_custom_instructions: The model should output JSON, but custom instructions are already included in the system prompt. Don't append additional JSON instructions.
+    - default: let the adapter decide (legacy, do not use for new use cases)
+    - unknown: used for cases where the structured output mode is not known (on old models where it wasn't saved). Should lookup best option at runtime.
     """
 
     default = "default"
@@ -41,6 +42,7 @@ class StructuredOutputMode(str, Enum):
     json_instructions = "json_instructions"
     json_instruction_and_object = "json_instruction_and_object"
     json_custom_instructions = "json_custom_instructions"
+    unknown = "unknown"
 
 
 class FineTuneStatusType(str, Enum):
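For context: StructuredOutputMode is a str-valued Enum, so the modes above persist as plain strings in saved Kiln files. A minimal sketch of the round trip, assuming kiln-ai 0.18.0 is installed (illustrative, not code from this diff):

    from kiln_ai.datamodel.datamodel_enums import StructuredOutputMode

    # str-Enum round trip: values serialize as plain strings and parse back.
    mode = StructuredOutputMode("json_schema")
    assert mode is StructuredOutputMode.json_schema
    assert mode == "json_schema"

    # New in 0.18.0: records that never stored a mode can be marked unknown,
    # leaving the adapter to pick the best option at runtime.
    assert StructuredOutputMode.unknown.value == "unknown"
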
@@ -55,20 +57,43 @@ class FineTuneStatusType(str, Enum):
     failed = "failed"
 
 
-class FinetuneDataStrategy(str, Enum):
-    """Strategy for what data to include when fine-tuning a model."""
+class ChatStrategy(str, Enum):
+    """Strategy for how a chat is structured."""
 
-    # Only train on the final response, ignoring any intermediate steps or chain of thought
-    final_only = "final_only"
+    # Single turn, immediately return the answer
+    single_turn = "final_only"
+    # Two turn, first turn is the thinking, second turn is the answer. Legacy format - used for old fine tunes but not new trains.
+    two_message_cot_legacy = "final_and_intermediate"
+    # Two turn, first turn is the thinking, second turn is the answer. New format - used for new trains.
+    two_message_cot = "two_message_cot"
+    # Single turn, with both the thinking and the answer in the same message, using R1-style thinking format in <think> tags
+    single_turn_r1_thinking = "final_and_intermediate_r1_compatible"
 
-    # Train on both the final response and any intermediate steps/chain of thought
-    final_and_intermediate = "final_and_intermediate"
 
-    # Train using R1-style thinking format, which includes the reasoning in <think> tags in the message
-    final_and_intermediate_r1_compatible = "final_and_intermediate_r1_compatible"
+THINKING_DATA_STRATEGIES: list[ChatStrategy] = [
+    ChatStrategy.two_message_cot_legacy,
+    ChatStrategy.single_turn_r1_thinking,
+    ChatStrategy.two_message_cot,
+]
 
 
-THINKING_DATA_STRATEGIES: list[FinetuneDataStrategy] = [
-    FinetuneDataStrategy.final_and_intermediate,
-    FinetuneDataStrategy.final_and_intermediate_r1_compatible,
-]
+class ModelProviderName(str, Enum):
+    """
+    Enumeration of supported AI model providers.
+    """
+
+    openai = "openai"
+    groq = "groq"
+    amazon_bedrock = "amazon_bedrock"
+    ollama = "ollama"
+    openrouter = "openrouter"
+    fireworks_ai = "fireworks_ai"
+    kiln_fine_tune = "kiln_fine_tune"
+    kiln_custom_registry = "kiln_custom_registry"
+    openai_compatible = "openai_compatible"
+    anthropic = "anthropic"
+    gemini_api = "gemini_api"
+    azure_openai = "azure_openai"
+    huggingface = "huggingface"
+    vertex = "vertex"
+    together_ai = "together_ai"
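The ChatStrategy comments above describe how a training chat is laid out; the actual formatting lives in the new kiln_ai/adapters/chat/chat_formatter.py module listed in the files above. Below is a rough, illustrative sketch of the message shapes each strategy implies, using plain dicts with placeholder content; the follow-up user turn and exact wording are assumptions, not chat_formatter.py's output.

    from kiln_ai.datamodel.datamodel_enums import ChatStrategy

    def example_messages(strategy: ChatStrategy) -> list[dict[str, str]]:
        """Illustrative only: approximate chat layout for each strategy."""
        system = {"role": "system", "content": "You are a helpful assistant."}
        user = {"role": "user", "content": "What is 2 + 2?"}
        if strategy == ChatStrategy.single_turn:
            # One assistant turn containing only the final answer.
            return [system, user, {"role": "assistant", "content": "4"}]
        if strategy in (ChatStrategy.two_message_cot, ChatStrategy.two_message_cot_legacy):
            # Two assistant turns: thinking first, then the final answer
            # (the intermediate user prompt here is a placeholder assumption).
            return [
                system,
                user,
                {"role": "assistant", "content": "I need to add 2 and 2, which is 4."},
                {"role": "user", "content": "Considering the above, give the final answer."},
                {"role": "assistant", "content": "4"},
            ]
        if strategy == ChatStrategy.single_turn_r1_thinking:
            # One assistant turn with R1-style <think> tags followed by the answer.
            return [system, user, {"role": "assistant", "content": "<think>2 + 2 = 4</think>4"}]
        raise ValueError(f"Unhandled strategy: {strategy}")
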
kiln_ai/datamodel/eval.py CHANGED
@@ -14,6 +14,7 @@ from kiln_ai.datamodel.basemodel import (
 from kiln_ai.datamodel.datamodel_enums import TaskOutputRatingType
 from kiln_ai.datamodel.dataset_filters import DatasetFilterId
 from kiln_ai.datamodel.json_schema import string_to_json_key
+from kiln_ai.datamodel.task_run import Usage
 from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error
 
 if TYPE_CHECKING:
@@ -28,6 +29,7 @@ class EvalTemplateId(str, Enum):
     """
 
     kiln_requirements = "kiln_requirements"
+    issue = "kiln_issue"
     toxicity = "toxicity"
     bias = "bias"
     maliciousness = "maliciousness"
@@ -110,6 +112,10 @@ class EvalRun(KilnParentedModel):
     scores: EvalScores = Field(
         description="The output scores of the evaluator (aligning to those required by the grand-parent Eval this object is a child of)."
     )
+    task_run_usage: Usage | None = Field(
+        default=None,
+        description="The usage of the task run that produced this eval run output (not the usage by the evaluation model).",
+    )
 
     def parent_eval_config(self) -> Union["EvalConfig", None]:
         if self.parent is not None and self.parent.__class__.__name__ != "EvalConfig":
@@ -280,6 +286,10 @@ class Eval(KilnParentedModel, KilnParentModel, parent_of={"configs": EvalConfig}
         default=False,
         description="Whether this eval is a favourite of the user. Rendered as a star icon in the UI.",
     )
+    template_properties: dict[str, str | int | bool | float] = Field(
+        default={},
+        description="Properties to be used to execute the eval. This is template_type specific and should serialize to a json dict.",
+    )
 
     # Workaround to return typed parent without importing Task
     def parent_task(self) -> Union["Task", None]:
@@ -304,3 +314,25 @@ class Eval(KilnParentedModel, KilnParentModel, parent_of={"configs": EvalConfig}
                 f"output_scores must have unique names (once transformed to JSON keys). Got: [{', '.join(output_score_keys)}]"
             )
         return self
+
+    @model_validator(mode="after")
+    def validate_template_properties(self) -> Self:
+        # Check for properties that are required for the issue template
+        if self.template == EvalTemplateId.issue:
+            if "issue_prompt" not in self.template_properties or not isinstance(
+                self.template_properties["issue_prompt"], str
+            ):
+                raise ValueError("issue_prompt is required for issue template")
+            if "failure_example" in self.template_properties and not isinstance(
+                self.template_properties["failure_example"], str
+            ):
+                raise ValueError(
+                    "failure_example is optional for issue template, but if provided must be a string"
+                )
+            if "pass_example" in self.template_properties and not isinstance(
+                self.template_properties["pass_example"], str
+            ):
+                raise ValueError(
+                    "pass_example is optional for issue template, but if provided must be a string"
+                )
+        return self
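Taken together, the new template_properties field, the kiln_issue template id, and validate_template_properties mean an issue-template eval must carry an issue_prompt string, while failure_example and pass_example are optional strings. A short sketch, mirroring the constructor arguments used in the test_eval_model.py changes later in this diff (the filter ids and property values are placeholders):

    from kiln_ai.datamodel.eval import Eval, EvalOutputScore, EvalTemplateId
    from kiln_ai.datamodel.task_output import TaskOutputRatingType

    # Passes validation: issue_prompt is present and a string.
    issue_eval = Eval(
        name="Refund promise issue",
        template=EvalTemplateId.issue,
        eval_set_filter_id="tag::tag1",
        eval_configs_filter_id="tag::tag2",
        output_scores=[
            EvalOutputScore(name="score", type=TaskOutputRatingType.pass_fail),
        ],
        template_properties={
            "issue_prompt": "The model must never promise a refund.",
            "failure_example": "Sure, I'll refund you right away.",
            "pass_example": "I can't issue refunds, but I can connect you to support.",
        },
    )

    # Omitting issue_prompt (or passing a non-string) raises:
    # "issue_prompt is required for issue template"
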
kiln_ai/datamodel/finetune.py CHANGED
@@ -5,8 +5,7 @@ from typing_extensions import Self
 
 from kiln_ai.datamodel.basemodel import NAME_FIELD, KilnParentedModel
 from kiln_ai.datamodel.datamodel_enums import (
-    THINKING_DATA_STRATEGIES,
-    FinetuneDataStrategy,
+    ChatStrategy,
     FineTuneStatusType,
     StructuredOutputMode,
 )
@@ -14,6 +13,11 @@ from kiln_ai.datamodel.datamodel_enums import (
 if TYPE_CHECKING:
     from kiln_ai.datamodel.task import Task
 
+DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS = [
+    ChatStrategy.two_message_cot_legacy,
+    ChatStrategy.two_message_cot,
+]
+
 
 class Finetune(KilnParentedModel):
     """
@@ -76,8 +80,8 @@ class Finetune(KilnParentedModel):
         default={},
         description="Properties of the fine-tune. Different providers may use different properties.",
     )
-    data_strategy: FinetuneDataStrategy = Field(
-        default=FinetuneDataStrategy.final_only,
+    data_strategy: ChatStrategy = Field(
+        default=ChatStrategy.single_turn,
         description="The strategy to use for training the model. 'final_only' will only train on the final response. 'final_and_intermediate' will train on the final response and intermediate outputs (chain of thought or reasoning).",
     )
 
@@ -91,16 +95,16 @@ class Finetune(KilnParentedModel):
     def validate_thinking_instructions(self) -> Self:
         if (
             self.thinking_instructions is not None
-            and self.data_strategy != FinetuneDataStrategy.final_and_intermediate
+            and self.data_strategy not in DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS
         ):
             raise ValueError(
-                "Thinking instructions can only be used when data_strategy is final_and_intermediate"
+                f"Thinking instructions can only be used when data_strategy is one of the following: {DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS}"
             )
         if (
             self.thinking_instructions is None
-            and self.data_strategy == FinetuneDataStrategy.final_and_intermediate
+            and self.data_strategy in DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS
         ):
             raise ValueError(
-                "Thinking instructions are required when data_strategy is final_and_intermediate"
+                f"Thinking instructions are required when data_strategy is one of the following: {DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS}"
            )
        return self
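To summarize which strategies the validator above accepts, here is a small sketch that mirrors the rule (and the test matrix in test_models.py further down); it checks the combinations directly rather than constructing a full Finetune, whose other required fields aren't shown in this diff. Assumes kiln-ai 0.18.0 is installed.

    from kiln_ai.datamodel.datamodel_enums import ChatStrategy
    from kiln_ai.datamodel.finetune import (
        DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS,
    )

    # (strategy, thinking_instructions, expected to pass validation)
    cases = [
        (ChatStrategy.single_turn, None, True),                      # no thinking data
        (ChatStrategy.single_turn_r1_thinking, None, True),          # thinking inline, no instructions needed
        (ChatStrategy.two_message_cot, "Think step by step", True),
        (ChatStrategy.two_message_cot_legacy, "Think step by step", True),
        (ChatStrategy.two_message_cot, None, False),                 # instructions required
        (ChatStrategy.single_turn, "Think step by step", False),     # instructions not allowed
    ]

    for strategy, thinking_instructions, expected_ok in cases:
        needs = strategy in DATA_STRATIGIES_REQUIRED_THINKING_INSTRUCTIONS
        ok = (thinking_instructions is not None) == needs
        assert ok == expected_ok, (strategy, thinking_instructions)
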
kiln_ai/datamodel/task.py CHANGED
@@ -1,6 +1,7 @@
 from typing import TYPE_CHECKING, Dict, List, Union
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ValidationInfo, model_validator
+from typing_extensions import Self
 
 from kiln_ai.datamodel import Finetune
 from kiln_ai.datamodel.basemodel import (
@@ -11,7 +12,12 @@ from kiln_ai.datamodel.basemodel import (
     KilnParentedModel,
     KilnParentModel,
 )
-from kiln_ai.datamodel.datamodel_enums import Priority, TaskOutputRatingType
+from kiln_ai.datamodel.datamodel_enums import (
+    ModelProviderName,
+    Priority,
+    StructuredOutputMode,
+    TaskOutputRatingType,
+)
 from kiln_ai.datamodel.dataset_split import DatasetSplit
 from kiln_ai.datamodel.eval import Eval
 from kiln_ai.datamodel.json_schema import JsonObjectSchema, schema_from_json_str
@@ -47,12 +53,33 @@ class RunConfigProperties(BaseModel):
     """
 
     model_name: str = Field(description="The model to use for this run config.")
-    model_provider_name: str = Field(
+    model_provider_name: ModelProviderName = Field(
         description="The provider to use for this run config."
     )
     prompt_id: PromptId = Field(
         description="The prompt to use for this run config. Defaults to building a simple prompt from the task if not provided.",
     )
+    top_p: float = Field(
+        default=1.0,
+        description="The top-p value to use for this run config. Defaults to 1.0.",
+    )
+    temperature: float = Field(
+        default=1.0,
+        description="The temperature to use for this run config. Defaults to 1.0.",
+    )
+    structured_output_mode: StructuredOutputMode = Field(
+        description="The structured output mode to use for this run config.",
+    )
+
+    @model_validator(mode="after")
+    def validate_required_fields(self) -> Self:
+        if not (0 <= self.top_p <= 1):
+            raise ValueError("top_p must be between 0 and 1")
+
+        elif self.temperature < 0 or self.temperature > 2:
+            raise ValueError("temperature must be between 0 and 2")
+
+        return self
 
 
 class RunConfig(RunConfigProperties):
@@ -101,12 +128,46 @@ class TaskRunConfig(KilnParentedModel):
         parent_task = self.parent_task()
         if parent_task is None:
             raise ValueError("Run config must be parented to a task")
-        return RunConfig(
+        return run_config_from_run_config_properties(
             task=parent_task,
-            model_name=self.run_config_properties.model_name,
-            model_provider_name=self.run_config_properties.model_provider_name,
-            prompt_id=self.run_config_properties.prompt_id,
+            run_config_properties=self.run_config_properties,
+        )
+
+    # Previously we didn't store structured_output_mode in the run_config_properties. Updgrade old models when loading from file.
+    @model_validator(mode="before")
+    def upgrade_old_entries(cls, data: dict, info: ValidationInfo) -> dict:
+        if not info.context or not info.context.get("loading_from_file", False):
+            # Not loading from file, so no need to upgrade
+            return data
+
+        if not isinstance(data, dict):
+            return data
+
+        structured_output_mode = data.get("run_config_properties", {}).get(
+            "structured_output_mode", None
         )
+        if structured_output_mode is None and "run_config_properties" in data:
+            # Default to unknown. Adapter will have to guess at runtime.
+            data["run_config_properties"]["structured_output_mode"] = (
+                StructuredOutputMode.unknown
+            )
+
+        return data
+
+
+def run_config_from_run_config_properties(
+    task: "Task",
+    run_config_properties: RunConfigProperties,
+) -> RunConfig:
+    return RunConfig(
+        task=task,
+        model_name=run_config_properties.model_name,
+        model_provider_name=run_config_properties.model_provider_name,
+        prompt_id=run_config_properties.prompt_id,
+        top_p=run_config_properties.top_p,
+        temperature=run_config_properties.temperature,
+        structured_output_mode=run_config_properties.structured_output_mode,
+    )
 
 
 class Task(
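For context on the expanded RunConfigProperties: the provider and structured output mode are now typed enums, top_p and temperature default to 1.0, and the new validator rejects out-of-range values. A brief sketch, assuming kiln-ai 0.18.0 (the model name is a placeholder; the other string values coerce into the enums shown earlier in this diff):

    import pydantic
    from kiln_ai.datamodel.task import RunConfigProperties

    props = RunConfigProperties(
        model_name="some_model_id",            # placeholder
        model_provider_name="openai",          # coerced to ModelProviderName.openai
        prompt_id="simple_prompt_builder",
        structured_output_mode="json_schema",  # coerced to StructuredOutputMode.json_schema
        temperature=0.7,
        top_p=0.9,
    )

    # Out-of-range sampling parameters are rejected by validate_required_fields:
    try:
        RunConfigProperties(
            model_name="some_model_id",
            model_provider_name="openai",
            prompt_id="simple_prompt_builder",
            structured_output_mode="json_schema",
            top_p=1.5,  # > 1, invalid
        )
    except pydantic.ValidationError as err:
        print(err)  # "top_p must be between 0 and 1"

When an older TaskRunConfig is loaded from file without a stored structured_output_mode, the upgrade_old_entries validator above fills in StructuredOutputMode.unknown so the adapter can pick a mode at runtime.
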
kiln_ai/datamodel/task_output.py CHANGED
@@ -2,8 +2,6 @@ import json
 from enum import Enum
 from typing import TYPE_CHECKING, Dict, List, Type, Union
 
-import jsonschema
-import jsonschema.exceptions
 from pydantic import BaseModel, Field, ValidationInfo, model_validator
 from typing_extensions import Self
 
kiln_ai/datamodel/task_run.py CHANGED
@@ -1,8 +1,6 @@
 import json
 from typing import TYPE_CHECKING, Dict, List, Union
 
-import jsonschema
-import jsonschema.exceptions
 from pydantic import BaseModel, Field, ValidationInfo, model_validator
 from typing_extensions import Self
 
@@ -500,8 +500,9 @@ def adapter(base_task):
         run_config=RunConfig(
             task=base_task,
             model_name="test_model",
-            model_provider_name="test_provider",
+            model_provider_name="openai",
             prompt_id="simple_prompt_builder",
+            structured_output_mode="json_schema",
         ),
     )
 
kiln_ai/datamodel/test_dataset_split.py CHANGED
@@ -72,14 +72,6 @@ def sample_task_runs(sample_task):
     return task_runs
 
 
-@pytest.fixture
-def standard_splitstandard_splitss():
-    return [
-        DatasetSplitDefinition(name="train", percentage=0.8),
-        DatasetSplitDefinition(name="test", percentage=0.2),
-    ]
-
-
 @pytest.fixture
 def task_run():
     return TaskRun(
kiln_ai/datamodel/test_eval_model.py CHANGED
@@ -1,7 +1,6 @@
 import pytest
 from pydantic import ValidationError
 
-from kiln_ai.datamodel import BasePrompt
 from kiln_ai.datamodel.basemodel import KilnParentModel
 from kiln_ai.datamodel.eval import (
     Eval,
@@ -9,11 +8,10 @@ from kiln_ai.datamodel.eval import (
     EvalConfigType,
     EvalOutputScore,
     EvalRun,
+    EvalTemplateId,
 )
 from kiln_ai.datamodel.task import Task
-from kiln_ai.datamodel.task_output import (
-    TaskOutputRatingType,
-)
+from kiln_ai.datamodel.task_output import TaskOutputRatingType
 
 
 @pytest.fixture
@@ -633,3 +631,147 @@ def test_eval_run_eval_config_eval_validation():
             output="test output",
             scores={"score": 1.0},
         )
+
+
+@pytest.mark.parametrize(
+    "template_properties,should_raise,expected_error",
+    [
+        # Valid cases
+        (
+            {"issue_prompt": "Test issue prompt"},
+            False,
+            None,
+        ),
+        (
+            {
+                "issue_prompt": "Test issue prompt",
+                "failure_example": "Test failure example",
+            },
+            False,
+            None,
+        ),
+        (
+            {
+                "issue_prompt": "Test issue prompt",
+                "failure_example": "Test failure example",
+                "pass_example": "Test pass example",
+            },
+            False,
+            None,
+        ),
+        (
+            {
+                "issue_prompt": "",
+                "failure_example": "",
+                "pass_example": "",
+            },
+            False,
+            None,
+        ),
+        # Invalid cases
+        (
+            {},
+            True,
+            "issue_prompt is required for issue template",
+        ),
+        (
+            {"failure_example": "Test failure example"},
+            True,
+            "issue_prompt is required for issue template",
+        ),
+        (
+            {"issue_prompt": 123},
+            True,
+            "issue_prompt is required for issue template",
+        ),
+        (
+            {
+                "issue_prompt": "Test issue prompt",
+                "failure_example": 456,
+            },
+            True,
+            "failure_example is optional for issue template, but if provided must be a string",
+        ),
+        (
+            {
+                "issue_prompt": "Test issue prompt",
+                "failure_example": "Test failure example",
+                "pass_example": 789,
+            },
+            True,
+            "pass_example is optional for issue template, but if provided must be a string",
+        ),
+    ],
+)
+def test_eval_template_properties_issue_template_validation(
+    template_properties, should_raise, expected_error
+):
+    """Test issue template validation with various property combinations"""
+    if should_raise:
+        with pytest.raises(ValueError, match=expected_error):
+            Eval(
+                name="Test Eval",
+                template=EvalTemplateId.issue,
+                eval_set_filter_id="tag::tag1",
+                eval_configs_filter_id="tag::tag2",
+                output_scores=[
+                    EvalOutputScore(
+                        name="score",
+                        type=TaskOutputRatingType.pass_fail,
+                    )
+                ],
+                template_properties=template_properties,
+            )
+    else:
+        eval = Eval(
+            name="Test Eval",
+            template=EvalTemplateId.issue,
+            eval_set_filter_id="tag::tag1",
+            eval_configs_filter_id="tag::tag2",
+            output_scores=[
+                EvalOutputScore(
+                    name="score",
+                    type=TaskOutputRatingType.pass_fail,
+                )
+            ],
+            template_properties=template_properties,
+        )
+        assert eval.template == EvalTemplateId.issue
+        for key, value in template_properties.items():
+            assert eval.template_properties[key] == value
+
+
+@pytest.mark.parametrize(
+    "template,template_properties",
+    [
+        (EvalTemplateId.kiln_requirements, {"random_property": "random_value"}),
+        (EvalTemplateId.toxicity, {}),
+        (EvalTemplateId.bias, {"some_property": 123}),
+        (EvalTemplateId.maliciousness, {"test": True}),
+        (EvalTemplateId.factual_correctness, {"score": 4.5}),
+        (EvalTemplateId.jailbreak, {"prompt": "test"}),
+        (
+            None,
+            {"issue_prompt": "This should not be validated", "failure_example": 123},
+        ),
+    ],
+)
+def test_eval_template_properties_non_issue_templates(template, template_properties):
+    """Test that non-issue templates pass validation regardless of template_properties"""
+    eval = Eval(
+        name="Test Eval",
+        template=template,
+        eval_set_filter_id="tag::tag1",
+        eval_configs_filter_id="tag::tag2",
+        output_scores=[
+            EvalOutputScore(
+                name="score",
+                type=TaskOutputRatingType.pass_fail,
+            )
+        ],
+        template_properties=template_properties,
+    )
+
+    assert eval.template == template
+    for key, value in template_properties.items():
+        assert eval.template_properties[key] == value
kiln_ai/datamodel/test_models.py CHANGED
@@ -9,13 +9,13 @@ from kiln_ai.datamodel import (
     DataSource,
     DataSourceType,
     Finetune,
-    FinetuneDataStrategy,
     Project,
     Prompt,
     Task,
     TaskOutput,
     TaskRun,
 )
+from kiln_ai.datamodel.datamodel_enums import ChatStrategy
 from kiln_ai.datamodel.test_json_schema import json_joke_schema
 
 
@@ -536,44 +536,58 @@ def test_prompt_parent_task():
         # Test 1: Valid case - no thinking instructions with final_only
         (
             None,
-            FinetuneDataStrategy.final_only,
+            ChatStrategy.single_turn,
             False,
             None,
         ),
         # Test 2: Valid case - thinking instructions with final_and_intermediate
         (
             "Think step by step",
-            FinetuneDataStrategy.final_and_intermediate,
+            ChatStrategy.two_message_cot_legacy,
             False,
             None,
         ),
         # Test 3: Valid case - no thinking instructions with final_and_intermediate_r1_compatible
         (
             None,
-            FinetuneDataStrategy.final_and_intermediate_r1_compatible,
+            ChatStrategy.single_turn_r1_thinking,
             False,
             None,
         ),
         # Test 4: Invalid case - thinking instructions with final_only
         (
             "Think step by step",
-            FinetuneDataStrategy.final_only,
+            ChatStrategy.single_turn,
             True,
-            "Thinking instructions can only be used when data_strategy is final_and_intermediate",
+            "Thinking instructions can only be used when data_strategy is",
         ),
         # Test 5: Invalid case - no thinking instructions with final_and_intermediate
         (
             None,
-            FinetuneDataStrategy.final_and_intermediate,
+            ChatStrategy.two_message_cot_legacy,
             True,
-            "Thinking instructions are required when data_strategy is final_and_intermediate",
+            "Thinking instructions are required when data_strategy is",
         ),
         # Test 6: Invalid case - thinking instructions with final_and_intermediate_r1_compatible
         (
             "Think step by step",
-            FinetuneDataStrategy.final_and_intermediate_r1_compatible,
+            ChatStrategy.single_turn_r1_thinking,
             True,
-            "Thinking instructions can only be used when data_strategy is final_and_intermediate",
+            "Thinking instructions can only be used when data_strategy is",
+        ),
+        # Test 7: new COT format
+        (
+            "Think step by step",
+            ChatStrategy.two_message_cot,
+            False,
+            None,
+        ),
+        # Test 8: new COT format
+        (
+            None,
+            ChatStrategy.two_message_cot,
+            True,
+            "Thinking instructions are required when data_strategy is",
         ),
     ],
 )
@@ -665,3 +679,12 @@ def test_task_run_thinking_training_data(intermediate_outputs, expected):
         intermediate_outputs=intermediate_outputs,
     )
     assert task_run.thinking_training_data() == expected
+
+
+def test_chat_strategy_enum():
+    # This has to align to the old FinetuneDataStrategy enum
+    assert ChatStrategy.single_turn == "final_only"
+    assert ChatStrategy.two_message_cot_legacy == "final_and_intermediate"
+    assert (
+        ChatStrategy.single_turn_r1_thinking == "final_and_intermediate_r1_compatible"
+    )
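
As test_chat_strategy_enum above confirms, ChatStrategy keeps the old FinetuneDataStrategy string values, so fine-tunes saved by earlier Kiln versions keep deserializing into the renamed enum; only the new two_message_cot strategy introduces a new value. A quick sketch, assuming kiln-ai 0.18.0:

    from kiln_ai.datamodel.datamodel_enums import ChatStrategy

    # Values stored by pre-0.18 fine-tunes parse into the renamed members.
    assert ChatStrategy("final_only") is ChatStrategy.single_turn
    assert ChatStrategy("final_and_intermediate") is ChatStrategy.two_message_cot_legacy
    assert (
        ChatStrategy("final_and_intermediate_r1_compatible")
        is ChatStrategy.single_turn_r1_thinking
    )

    # Only the new strategy uses a value that never appears in older data.
    assert ChatStrategy.two_message_cot.value == "two_message_cot"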