kiln-ai 0.5.1__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of kiln-ai might be problematic.

@@ -2,12 +2,14 @@ from __future__ import annotations
 
 import json
 from enum import Enum, IntEnum
-from typing import TYPE_CHECKING, Dict, List, Self, Type, Union
+from typing import TYPE_CHECKING, Dict, List, Type, Union
 
 import jsonschema
 import jsonschema.exceptions
-from kiln_ai.datamodel.json_schema import JsonObjectSchema, schema_from_json_str
 from pydantic import BaseModel, Field, model_validator
+from typing_extensions import Self
+
+from kiln_ai.datamodel.json_schema import JsonObjectSchema, schema_from_json_str
 
 from .basemodel import (
     ID_FIELD,
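
One note on the import shuffle above: `Self` now comes from `typing_extensions` instead of `typing`. As a point of reference (this sketch is not code from the package), `typing.Self` only exists on Python 3.11+, so the backported import keeps older interpreters working:

    # Illustrative only: why importing Self from typing_extensions is the portable choice.
    try:
        from typing import Self  # ImportError on Python <= 3.10
    except ImportError:
        from typing_extensions import Self  # backport available on all supported versions
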
@@ -21,6 +23,25 @@ from .json_schema import validate_schema
 if TYPE_CHECKING:
     from . import Task
 
+
+__all__ = [
+    "basemodel",
+    "json_schema",
+    "Task",
+    "Project",
+    "TaskRun",
+    "TaskOutput",
+    "TaskOutputRating",
+    "Priority",
+    "DataSource",
+    "DataSourceType",
+    "DataSourceProperty",
+    "TaskOutputRatingType",
+    "TaskRequirement",
+    "TaskDeterminism",
+]
+
+
 # Conventions:
 # 1) Names are filename safe as they may be used as file names. They are informational and not to be used in prompts/training/validation.
 # 2) Descrptions are for Kiln users to describe/understanding the purpose of this object. They must never be used in prompts/training/validation. Use "instruction/requirements" instead.
@@ -32,6 +53,8 @@ SHORT_NAME_FIELD = Field(min_length=1, max_length=20, pattern=NAME_REGEX)
 
 
 class Priority(IntEnum):
+    """Defines priority levels for tasks and requirements, where P0 is highest priority."""
+
     p0 = 0
     p1 = 1
     p2 = 2
@@ -40,6 +63,8 @@ class Priority(IntEnum):
 
 # Only one rating type for now, but this allows for extensibility if we want to add more in the future
 class TaskOutputRatingType(str, Enum):
+    """Defines the types of rating systems available for task outputs."""
+
     five_star = "five_star"
     custom = "custom"
 
@@ -90,22 +115,13 @@ class TaskOutputRating(KilnBaseModel):
                     f"{rating_name.capitalize()} of type five_star must be between 1 and 5 stars"
                 )
 
-    def validate_requirement_rating_keys(self, task: Task) -> Self:
-        if len(self.requirement_ratings) == 0:
-            return self
-
-        valid_requirement_ids = {req.id for req in task.requirements}
-        for key in self.requirement_ratings.keys():
-            if key not in valid_requirement_ids:
-                raise ValueError(
-                    f"Requirement ID '{key}' is not a valid requirement ID for this task"
-                )
-        return self
-
 
 class TaskOutput(KilnBaseModel):
     """
     An output for a specific task run.
+
+    Contains the actual output content, its source (human or synthetic),
+    and optional rating information.
     """
 
     output: str = Field(
@@ -132,7 +148,10 @@
 
 class DataSourceType(str, Enum):
     """
-    The source of a piece of data.
+    The source type of a piece of data.
+
+    Human: a human created the data
+    Synthetic: a model created the data
     """
 
     human = "human"
@@ -140,6 +159,13 @@
 
 
 class DataSourceProperty(BaseModel):
+    """
+    Defines a property that can be associated with a data source.
+
+    Includes validation rules for when properties are required or not allowed
+    based on the data source type.
+    """
+
     name: str
     type: Type[Union[str, int, float]]
     required_for: List[DataSourceType] = []
@@ -147,6 +173,13 @@
 
 
 class DataSource(BaseModel):
+    """
+    Represents the origin of data, either human or synthetic, with associated properties.
+
+    Properties vary based on the source type - for synthetic sources this includes
+    model information, for human sources this includes creator information.
+    """
+
     type: DataSourceType
     properties: Dict[str, str | int | float] = Field(
         default={},
@@ -225,7 +258,10 @@
 
 class TaskRun(KilnParentedModel):
     """
-    An run of a specific Task, including the input and output.
+    Represents a single execution of a Task.
+
+    Contains the input used, its source, the output produced, and optional
+    repair information if the output needed correction.
     """
 
     input: str = Field(
@@ -276,19 +312,6 @@
         self.output.validate_output_format(task)
         return self
 
-    @model_validator(mode="after")
-    def validate_requirement_ratings(self) -> Self:
-        task = self.parent_task()
-        if task is None:
-            return self
-
-        if self.output.rating is not None:
-            self.output.rating.validate_requirement_rating_keys(task)
-        if self.repaired_output is not None and self.repaired_output.rating is not None:
-            self.repaired_output.rating.validate_requirement_rating_keys(task)
-
-        return self
-
     @model_validator(mode="after")
     def validate_repaired_output(self) -> Self:
         if self.repaired_output is not None:
@@ -308,6 +331,13 @@
 
 
 class TaskRequirement(BaseModel):
+    """
+    Defines a specific requirement that should be met by task outputs.
+
+    Includes an identifier, name, description, instruction for meeting the requirement,
+    and priority level.
+    """
+
     id: ID_TYPE = ID_FIELD
     name: str = SHORT_NAME_FIELD
     description: str | None = Field(default=None)
@@ -316,6 +346,14 @@
 
 
 class TaskDeterminism(str, Enum):
+    """
+    Defines how strictly task outputs should match expected results.
+
+    - deterministic: Requires exact matches
+    - semantic_match: Allows different wording with same meaning
+    - flexible: Allows variation in both wording and meaning within requirements
+    """
+
     deterministic = "deterministic"  # Expect exact match
     semantic_match = "semantic_match"  # Expect same meaning, but flexible on expression of the meaning
     flexible = "flexible"  # Flexible on semantic output. Eval should be custom based on parsing requirements.
@@ -326,6 +364,13 @@ class Task(
     KilnParentModel,
     parent_of={"runs": TaskRun},
 ):
+    """
+    Represents a specific task to be performed, with associated requirements and validation rules.
+
+    Contains the task definition, requirements, input/output schemas, and maintains
+    a collection of task runs.
+    """
+
     name: str = NAME_FIELD
     description: str = Field(default="")
     priority: Priority = Field(default=Priority.p2)
@@ -352,6 +397,13 @@
 
 
 class Project(KilnParentModel, parent_of={"tasks": Task}):
+    """
+    A collection of related tasks.
+
+    Projects organize tasks into logical groups and provide high-level descriptions
+    of the overall goals.
+    """
+
     name: str = NAME_FIELD
     description: str | None = Field(
         default=None,
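
For reference, the new `__all__` above pins down the public surface of `kiln_ai.datamodel`. A minimal usage sketch assembled only from pieces visible elsewhere in this diff (the `created_by` requirement comes from the DataSource validation messages exercised in the tests below); treat it as illustrative rather than documentation:

    from kiln_ai.datamodel import DataSource, DataSourceType, TaskOutputRating

    # Human-sourced data must carry a 'created_by' property, per the DataSource validators.
    source = DataSource(
        type=DataSourceType.human,
        properties={"created_by": "jane_doe"},
    )

    # requirement_ratings maps requirement IDs to scores, as in the rating tests below.
    rating = TaskOutputRating(value=4.0, requirement_ratings={"req1": 5.0, "req2": 3.0})
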
@@ -1,4 +1,5 @@
 import json
+import shutil
 import uuid
 from abc import ABCMeta
 from builtins import classmethod
@@ -10,13 +11,10 @@ from typing import (
     Dict,
     List,
     Optional,
-    Self,
     Type,
     TypeVar,
 )
 
-from kiln_ai.utils.config import Config
-from kiln_ai.utils.formatting import snake_case
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -26,6 +24,10 @@ from pydantic import (
     model_validator,
 )
 from pydantic_core import ErrorDetails
+from typing_extensions import Self
+
+from kiln_ai.utils.config import Config
+from kiln_ai.utils.formatting import snake_case
 
 # ID is a 12 digit random integer string.
 # Should be unique per item, at least inside the context of a parent/child relationship.
@@ -38,6 +40,16 @@ PT = TypeVar("PT", bound="KilnParentedModel")
 
 
 class KilnBaseModel(BaseModel):
+    """Base model for all Kiln data models with common functionality for persistence and versioning.
+
+    Attributes:
+        v (int): Schema version number for migration support
+        id (str): Unique identifier for the model instance
+        path (Path): File system path where the model is stored
+        created_at (datetime): Timestamp when the model was created
+        created_by (str): User ID of the creator
+    """
+
     model_config = ConfigDict(validate_assignment=True)
 
     v: int = Field(default=1)  # schema_version
@@ -62,11 +74,30 @@
 
     @classmethod
     def load_from_folder(cls: Type[T], folderPath: Path) -> T:
+        """Load a model instance from a folder using the default filename.
+
+        Args:
+            folderPath (Path): Directory path containing the model file
+
+        Returns:
+            T: Instance of the model
+        """
        path = folderPath / cls.base_filename()
        return cls.load_from_file(path)
 
     @classmethod
     def load_from_file(cls: Type[T], path: Path) -> T:
+        """Load a model instance from a specific file path.
+
+        Args:
+            path (Path): Path to the model file
+
+        Returns:
+            T: Instance of the model
+
+        Raises:
+            ValueError: If the loaded model is not of the expected type or version
+        """
        with open(path, "r") as file:
            file_data = file.read()
            # TODO P2 perf: parsing the JSON twice here.
@@ -92,6 +123,11 @@
         return m
 
     def save_to_file(self) -> None:
+        """Save the model instance to a file.
+
+        Raises:
+            ValueError: If the path is not set
+        """
        path = self.build_path()
        if path is None:
            raise ValueError(
@@ -105,6 +141,15 @@
         # save the path so even if something like name changes, the file doesn't move
         self.path = path
 
+    def delete(self) -> None:
+        if self.path is None:
+            raise ValueError("Cannot delete model because path is not set")
+        dir_path = self.path.parent if self.path.is_file() else self.path
+        if dir_path is None:
+            raise ValueError("Cannot delete model because path is not set")
+        shutil.rmtree(dir_path)
+        self.path = None
+
     def build_path(self) -> Path | None:
         if self.path is not None:
             return self.path
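
The new `delete()` helper above removes the model's backing directory (the parent directory when `path` points at a file) and then clears `path`. A small sketch mirroring the tests added later in this diff; the concrete path is hypothetical:

    from pathlib import Path
    from kiln_ai.datamodel.basemodel import KilnBaseModel

    model = KilnBaseModel(path=Path("/tmp/kiln_example/test.kiln"))  # hypothetical location
    model.save_to_file()
    model.delete()             # shutil.rmtree on the containing directory
    assert model.path is None  # path is cleared after deletion
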
@@ -116,6 +161,15 @@
 
 
 class KilnParentedModel(KilnBaseModel, metaclass=ABCMeta):
+    """Base model for Kiln models that have a parent-child relationship. This base class is for child models.
+
+    This class provides functionality for managing hierarchical relationships between models,
+    including parent reference handling and file system organization.
+
+    Attributes:
+        _parent (KilnBaseModel): Reference to the parent model instance
+    """
+
     _parent: KilnBaseModel | None = None
 
     # workaround to tell typechecker that we support the parent property, even though it's not a stock property
@@ -129,6 +183,11 @@ class KilnParentedModel(KilnBaseModel, metaclass=ABCMeta):
 
     @property
     def parent(self) -> Optional[KilnBaseModel]:
+        """Get the parent model instance, loading it from disk if necessary.
+
+        Returns:
+            Optional[KilnBaseModel]: The parent model instance or None if not set
+        """
        if self._parent is not None:
            return self._parent
        # lazy load parent from path
@@ -244,6 +303,15 @@
 # Parent create methods for all child relationships
 # You must pass in parent_of in the subclass definition, defining the child relationships
 class KilnParentModel(KilnBaseModel, metaclass=ABCMeta):
+    """Base model for Kiln models that can have child models.
+
+    This class provides functionality for managing collections of child models and their persistence.
+    Child relationships must be defined using the parent_of parameter in the class definition.
+
+    Args:
+        parent_of (Dict[str, Type[KilnParentedModel]]): Mapping of relationship names to child model types
+    """
+
     @classmethod
     def _create_child_method(
         cls, relationship_name: str, child_class: Type[KilnParentedModel]
@@ -288,6 +356,19 @@ class KilnParentModel(KilnBaseModel, metaclass=ABCMeta):
         path: Path | None = None,
         parent: KilnBaseModel | None = None,
     ):
+        """Validate and save a model instance along with all its nested child relationships.
+
+        Args:
+            data (Dict[str, Any]): Model data including child relationships
+            path (Path, optional): Path where the model should be saved
+            parent (KilnBaseModel, optional): Parent model instance for parented models
+
+        Returns:
+            KilnParentModel: The validated and saved model instance
+
+        Raises:
+            ValidationError: If validation fails for the model or any of its children
+        """
        # Validate first, then save. Don't want error half way through, and partly persisted
        # TODO P2: save to tmp dir, then move atomically. But need to merge directories so later.
        cls._validate_nested(data, save=False, path=path, parent=parent)
@@ -10,21 +10,55 @@ JsonObjectSchema = Annotated[
     str,
     AfterValidator(lambda v: _check_json_schema(v)),
 ]
+"""A pydantic type that validates strings containing JSON schema definitions.
+Must be a valid JSON schema object with 'type': 'object' and 'properties' defined.
+"""
 
 
 def _check_json_schema(v: str) -> str:
-    # parsing returns needed errors
+    """Internal validation function for JSON schema strings.
+
+    Args:
+        v: String containing a JSON schema definition
+
+    Returns:
+        The input string if valid
+
+    Raises:
+        ValueError: If the schema is invalid
+    """
     schema_from_json_str(v)
     return v
 
 
 def validate_schema(instance: Dict, schema_str: str) -> None:
+    """Validate a dictionary against a JSON schema.
+
+    Args:
+        instance: Dictionary to validate
+        schema_str: JSON schema string to validate against
+
+    Raises:
+        jsonschema.exceptions.ValidationError: If validation fails
+        ValueError: If the schema is invalid
+    """
     schema = schema_from_json_str(schema_str)
     v = jsonschema.Draft202012Validator(schema)
     return v.validate(instance)
 
 
 def schema_from_json_str(v: str) -> Dict:
+    """Parse and validate a JSON schema string.
+
+    Args:
+        v: String containing a JSON schema definition
+
+    Returns:
+        Dict containing the parsed JSON schema
+
+    Raises:
+        ValueError: If the input is not a valid JSON schema object with required properties
+    """
     try:
         parsed = json.loads(v)
         jsonschema.Draft202012Validator.check_schema(parsed)
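
A short sketch of calling the `validate_schema` helper documented above; the schema and instances below are made up for illustration:

    import jsonschema
    from kiln_ai.datamodel.json_schema import validate_schema

    schema_str = '{"type": "object", "properties": {"joke": {"type": "string"}}, "required": ["joke"]}'

    validate_schema({"joke": "A pun about kilns"}, schema_str)  # passes silently

    try:
        validate_schema({"punchline": 42}, schema_str)  # missing the required "joke" key
    except jsonschema.exceptions.ValidationError as err:
        print(err.message)
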
@@ -4,6 +4,7 @@ from pathlib import Path
 from typing import Optional
 
 import pytest
+
 from kiln_ai.datamodel.basemodel import KilnBaseModel, KilnParentedModel
 
 
@@ -275,3 +276,33 @@ def test_lazy_load_parent(tmp_path):
     # Verify that the _parent attribute is now set
     assert hasattr(loaded_child, "_parent")
     assert loaded_child._parent is loaded_parent
+
+
+def test_delete(tmp_path):
+    # Test deleting a file
+    file_path = tmp_path / "test.kiln"
+    model = KilnBaseModel(path=file_path)
+    model.save_to_file()
+    assert file_path.exists()
+    model.delete()
+    assert not file_path.exists()
+    assert not file_path.parent.exists()
+    assert model.path is None
+
+
+def test_delete_dir(tmp_path):
+    # Test deleting a directory
+    dir_path = tmp_path / "test_dir"
+    dir_path.mkdir(parents=True)
+    model = KilnBaseModel(path=dir_path)
+    assert dir_path.exists()
+    model.delete()
+    assert not dir_path.exists()
+    assert model.path is None
+
+
+def test_delete_no_path():
+    # Test deleting with no path
+    model = KilnBaseModel()
+    with pytest.raises(ValueError, match="Cannot delete model because path is not set"):
+        model.delete()
@@ -1,7 +1,8 @@
 import pytest
-from kiln_ai.datamodel import DataSource, DataSourceType
 from pydantic import ValidationError
 
+from kiln_ai.datamodel import DataSource, DataSourceType
+
 
 def test_valid_human_data_source():
     data_source = DataSource(
@@ -29,16 +30,14 @@ def test_valid_synthetic_data_source():
 
 
 def test_missing_required_property():
-    with pytest.raises(
-        ValidationError, match="'created_by' is required for DataSourceType.human data"
-    ):
+    with pytest.raises(ValidationError, match="'created_by' is required for"):
         DataSource(type=DataSourceType.human)
 
 
 def test_wrong_property_type():
     with pytest.raises(
         ValidationError,
-        match="'model_name' must be of type str for DataSourceType.synthetic data",
+        match="'model_name' must be of type str for",
     ):
         DataSource(
             type=DataSourceType.synthetic,
@@ -49,7 +48,7 @@ def test_wrong_property_type():
 def test_not_allowed_property():
     with pytest.raises(
         ValidationError,
-        match="'created_by' is not allowed for DataSourceType.synthetic data",
+        match="'created_by' is not allowed for",
     ):
         DataSource(
             type=DataSourceType.synthetic,
@@ -1,6 +1,9 @@
 import json
+import sys
 
 import pytest
+from pydantic import ValidationError
+
 from kiln_ai.datamodel import (
     DataSource,
     DataSourceType,
@@ -13,7 +16,6 @@ from kiln_ai.datamodel import (
     TaskRequirement,
     TaskRun,
 )
-from pydantic import ValidationError
 
 
 @pytest.fixture
@@ -55,10 +57,11 @@ def test_task_model_validation(valid_task_run):
     with pytest.raises(ValidationError, match="Input should be"):
         DataSource(type="invalid")
 
-    with pytest.raises(ValidationError, match="Invalid data source type"):
-        task_run = valid_task_run.model_copy(deep=True)
-        task_run.input_source.type = "invalid"
-        DataSource.model_validate(task_run.input_source, strict=True)
+    if sys.version_info >= (3, 12):
+        with pytest.raises(ValidationError, match="Invalid data source type"):
+            task_run = valid_task_run.model_copy(deep=True)
+            task_run.input_source.type = "invalid"
+            DataSource.model_validate(task_run.input_source, strict=True)
 
     # Missing required field
     with pytest.raises(ValidationError, match="Input should be a valid string"):
@@ -72,9 +75,7 @@ def test_task_model_validation(valid_task_run):
         DataSource.model_validate(task_run.input_source, strict=True)
 
     # Test we catch nested validation errors
-    with pytest.raises(
-        ValidationError, match="'created_by' is required for DataSourceType.human"
-    ):
+    with pytest.raises(ValidationError, match="'created_by' is required for"):
         task_run = TaskRun(
             input="Test input",
             input_source=DataSource(
@@ -270,34 +271,6 @@ def test_task_output_requirement_rating_keys(tmp_path):
     task_run.save_to_file()
     assert task_run.output.rating.requirement_ratings is not None
 
-    # Invalid case: unknown requirement ID
-    with pytest.raises(
-        ValueError,
-        match="Requirement ID .* is not a valid requirement ID for this task",
-    ):
-        task_run = TaskRun(
-            input="Test input",
-            input_source=DataSource(
-                type=DataSourceType.human,
-                properties={"created_by": "john_doe"},
-            ),
-            parent=task,
-            output=TaskOutput(
-                output="Test output",
-                source=DataSource(
-                    type=DataSourceType.human,
-                    properties={"created_by": "john_doe"},
-                ),
-                rating=TaskOutputRating(
-                    value=4,
-                    requirement_ratings={
-                        "unknown_id": 5,
-                    },
-                ),
-            ),
-        )
-        task_run.save_to_file()
-
 
 def test_task_output_schema_validation(tmp_path):
     # Create a project, task, and example hierarchy
@@ -434,9 +407,7 @@
 
 
 def test_invalid_human_task_output_missing_created_by():
-    with pytest.raises(
-        ValidationError, match="'created_by' is required for DataSourceType.human"
-    ):
+    with pytest.raises(ValidationError, match="'created_by' is required for"):
         TaskOutput(
             output="Test output",
             source=DataSource(
@@ -482,7 +453,7 @@ def test_valid_synthetic_task_output():
 def test_invalid_synthetic_task_output_missing_keys():
     with pytest.raises(
         ValidationError,
-        match="'model_provider' is required for DataSourceType.synthetic",
+        match="'model_provider' is required for",
     ):
         TaskOutput(
             output="Test output",
@@ -1,10 +1,11 @@
 import pytest
+from pydantic import BaseModel
+
 from kiln_ai.datamodel.json_schema import (
     JsonObjectSchema,
     schema_from_json_str,
     validate_schema,
 )
-from pydantic import BaseModel
 
 
 class ExampleModel(BaseModel):
@@ -1,9 +1,10 @@
 import json
 
 import pytest
+from pydantic import ValidationError
+
 from kiln_ai.datamodel import Priority, Project, Task, TaskDeterminism
 from kiln_ai.datamodel.test_json_schema import json_joke_schema
-from pydantic import ValidationError
 
 
 @pytest.fixture
@@ -1,7 +1,8 @@
 import pytest
-from kiln_ai.datamodel.basemodel import KilnParentedModel, KilnParentModel
 from pydantic import Field, ValidationError
 
+from kiln_ai.datamodel.basemodel import KilnParentedModel, KilnParentModel
+
 
 class ModelC(KilnParentedModel):
     code: str = Field(..., pattern=r"^[A-Z]{3}$")
@@ -1,7 +1,8 @@
 import pytest
-from kiln_ai.datamodel import TaskOutputRating, TaskOutputRatingType
 from pydantic import ValidationError
 
+from kiln_ai.datamodel import TaskOutputRating, TaskOutputRatingType
+
 
 def test_valid_task_output_rating():
     rating = TaskOutputRating(value=4.0, requirement_ratings={"req1": 5.0, "req2": 3.0})
@@ -0,0 +1,12 @@
+"""
+# Utils
+
+Misc utilities used in the kiln_ai library.
+"""
+
+from . import config, formatting
+
+__all__ = [
+    "config",
+    "formatting",
+]
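
The new file above appears to be the package init for `kiln_ai.utils`, re-exporting the existing `config` and `formatting` modules. A trivial import sketch; `Config` and `snake_case` are referenced elsewhere in this diff, but their signatures are not shown here, so the call below is an assumption:

    from kiln_ai.utils import config, formatting
    from kiln_ai.utils.config import Config
    from kiln_ai.utils.formatting import snake_case

    slug = snake_case("My Task Name")  # assumed str -> str helper
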
@@ -4,6 +4,7 @@ from unittest.mock import patch
 
 import pytest
 import yaml
+
 from kiln_ai.utils.config import Config, ConfigProperty, _get_user_id
 