kiln-ai 0.20.1__py3-none-any.whl → 0.21.0__py3-none-any.whl
This diff shows the content of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of kiln-ai has been flagged as possibly problematic.
- kiln_ai/adapters/__init__.py +6 -0
- kiln_ai/adapters/adapter_registry.py +43 -226
- kiln_ai/adapters/chunkers/__init__.py +13 -0
- kiln_ai/adapters/chunkers/base_chunker.py +42 -0
- kiln_ai/adapters/chunkers/chunker_registry.py +16 -0
- kiln_ai/adapters/chunkers/fixed_window_chunker.py +39 -0
- kiln_ai/adapters/chunkers/helpers.py +23 -0
- kiln_ai/adapters/chunkers/test_base_chunker.py +63 -0
- kiln_ai/adapters/chunkers/test_chunker_registry.py +28 -0
- kiln_ai/adapters/chunkers/test_fixed_window_chunker.py +346 -0
- kiln_ai/adapters/chunkers/test_helpers.py +75 -0
- kiln_ai/adapters/data_gen/test_data_gen_task.py +9 -3
- kiln_ai/adapters/embedding/__init__.py +0 -0
- kiln_ai/adapters/embedding/base_embedding_adapter.py +44 -0
- kiln_ai/adapters/embedding/embedding_registry.py +32 -0
- kiln_ai/adapters/embedding/litellm_embedding_adapter.py +199 -0
- kiln_ai/adapters/embedding/test_base_embedding_adapter.py +283 -0
- kiln_ai/adapters/embedding/test_embedding_registry.py +166 -0
- kiln_ai/adapters/embedding/test_litellm_embedding_adapter.py +1149 -0
- kiln_ai/adapters/eval/eval_runner.py +6 -2
- kiln_ai/adapters/eval/test_base_eval.py +1 -3
- kiln_ai/adapters/eval/test_g_eval.py +1 -1
- kiln_ai/adapters/extractors/__init__.py +18 -0
- kiln_ai/adapters/extractors/base_extractor.py +72 -0
- kiln_ai/adapters/extractors/encoding.py +20 -0
- kiln_ai/adapters/extractors/extractor_registry.py +44 -0
- kiln_ai/adapters/extractors/extractor_runner.py +112 -0
- kiln_ai/adapters/extractors/litellm_extractor.py +386 -0
- kiln_ai/adapters/extractors/test_base_extractor.py +244 -0
- kiln_ai/adapters/extractors/test_encoding.py +54 -0
- kiln_ai/adapters/extractors/test_extractor_registry.py +181 -0
- kiln_ai/adapters/extractors/test_extractor_runner.py +181 -0
- kiln_ai/adapters/extractors/test_litellm_extractor.py +1192 -0
- kiln_ai/adapters/fine_tune/test_dataset_formatter.py +2 -2
- kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +2 -6
- kiln_ai/adapters/fine_tune/test_together_finetune.py +2 -6
- kiln_ai/adapters/ml_embedding_model_list.py +192 -0
- kiln_ai/adapters/ml_model_list.py +382 -4
- kiln_ai/adapters/model_adapters/litellm_adapter.py +7 -69
- kiln_ai/adapters/model_adapters/test_litellm_adapter.py +1 -1
- kiln_ai/adapters/model_adapters/test_structured_output.py +3 -1
- kiln_ai/adapters/ollama_tools.py +69 -12
- kiln_ai/adapters/provider_tools.py +190 -46
- kiln_ai/adapters/rag/deduplication.py +49 -0
- kiln_ai/adapters/rag/progress.py +252 -0
- kiln_ai/adapters/rag/rag_runners.py +844 -0
- kiln_ai/adapters/rag/test_deduplication.py +195 -0
- kiln_ai/adapters/rag/test_progress.py +785 -0
- kiln_ai/adapters/rag/test_rag_runners.py +2376 -0
- kiln_ai/adapters/remote_config.py +80 -8
- kiln_ai/adapters/test_adapter_registry.py +579 -86
- kiln_ai/adapters/test_ml_embedding_model_list.py +429 -0
- kiln_ai/adapters/test_ml_model_list.py +212 -0
- kiln_ai/adapters/test_ollama_tools.py +340 -1
- kiln_ai/adapters/test_prompt_builders.py +1 -1
- kiln_ai/adapters/test_provider_tools.py +199 -8
- kiln_ai/adapters/test_remote_config.py +551 -56
- kiln_ai/adapters/vector_store/__init__.py +1 -0
- kiln_ai/adapters/vector_store/base_vector_store_adapter.py +83 -0
- kiln_ai/adapters/vector_store/lancedb_adapter.py +389 -0
- kiln_ai/adapters/vector_store/test_base_vector_store.py +160 -0
- kiln_ai/adapters/vector_store/test_lancedb_adapter.py +1841 -0
- kiln_ai/adapters/vector_store/test_vector_store_registry.py +199 -0
- kiln_ai/adapters/vector_store/vector_store_registry.py +33 -0
- kiln_ai/datamodel/__init__.py +16 -13
- kiln_ai/datamodel/basemodel.py +170 -1
- kiln_ai/datamodel/chunk.py +158 -0
- kiln_ai/datamodel/datamodel_enums.py +27 -0
- kiln_ai/datamodel/embedding.py +64 -0
- kiln_ai/datamodel/extraction.py +303 -0
- kiln_ai/datamodel/project.py +33 -1
- kiln_ai/datamodel/rag.py +79 -0
- kiln_ai/datamodel/test_attachment.py +649 -0
- kiln_ai/datamodel/test_basemodel.py +1 -1
- kiln_ai/datamodel/test_chunk_models.py +317 -0
- kiln_ai/datamodel/test_dataset_split.py +1 -1
- kiln_ai/datamodel/test_embedding_models.py +448 -0
- kiln_ai/datamodel/test_eval_model.py +6 -6
- kiln_ai/datamodel/test_extraction_chunk.py +206 -0
- kiln_ai/datamodel/test_extraction_model.py +470 -0
- kiln_ai/datamodel/test_rag.py +641 -0
- kiln_ai/datamodel/test_tool_id.py +81 -0
- kiln_ai/datamodel/test_vector_store.py +320 -0
- kiln_ai/datamodel/tool_id.py +22 -0
- kiln_ai/datamodel/vector_store.py +141 -0
- kiln_ai/tools/mcp_session_manager.py +4 -1
- kiln_ai/tools/rag_tools.py +157 -0
- kiln_ai/tools/test_mcp_session_manager.py +1 -1
- kiln_ai/tools/test_rag_tools.py +848 -0
- kiln_ai/tools/test_tool_registry.py +91 -2
- kiln_ai/tools/tool_registry.py +21 -0
- kiln_ai/utils/__init__.py +3 -0
- kiln_ai/utils/async_job_runner.py +62 -17
- kiln_ai/utils/config.py +2 -2
- kiln_ai/utils/env.py +15 -0
- kiln_ai/utils/filesystem.py +14 -0
- kiln_ai/utils/filesystem_cache.py +60 -0
- kiln_ai/utils/litellm.py +94 -0
- kiln_ai/utils/lock.py +100 -0
- kiln_ai/utils/mime_type.py +38 -0
- kiln_ai/utils/pdf_utils.py +38 -0
- kiln_ai/utils/test_async_job_runner.py +151 -35
- kiln_ai/utils/test_env.py +142 -0
- kiln_ai/utils/test_filesystem_cache.py +316 -0
- kiln_ai/utils/test_litellm.py +206 -0
- kiln_ai/utils/test_lock.py +185 -0
- kiln_ai/utils/test_mime_type.py +66 -0
- kiln_ai/utils/test_pdf_utils.py +73 -0
- kiln_ai/utils/test_uuid.py +111 -0
- kiln_ai/utils/test_validation.py +524 -0
- kiln_ai/utils/uuid.py +9 -0
- kiln_ai/utils/validation.py +90 -0
- {kiln_ai-0.20.1.dist-info → kiln_ai-0.21.0.dist-info}/METADATA +7 -1
- kiln_ai-0.21.0.dist-info/RECORD +211 -0
- kiln_ai-0.20.1.dist-info/RECORD +0 -138
- {kiln_ai-0.20.1.dist-info → kiln_ai-0.21.0.dist-info}/WHEEL +0 -0
- {kiln_ai-0.20.1.dist-info → kiln_ai-0.21.0.dist-info}/licenses/LICENSE.txt +0 -0
kiln_ai/datamodel/embedding.py
ADDED
@@ -0,0 +1,64 @@
+from typing import TYPE_CHECKING, List, Union
+
+from pydantic import BaseModel, Field, model_validator
+
+from kiln_ai.datamodel.basemodel import ID_TYPE, FilenameString, KilnParentedModel
+from kiln_ai.datamodel.datamodel_enums import ModelProviderName
+
+if TYPE_CHECKING:
+    from kiln_ai.datamodel.chunk import ChunkedDocument
+    from kiln_ai.datamodel.project import Project
+
+
+class EmbeddingConfig(KilnParentedModel):
+    name: FilenameString = Field(
+        description="A name to identify the embedding config.",
+    )
+    description: str | None = Field(
+        default=None,
+        description="A description for your reference, not shared with embedding models.",
+    )
+    model_provider_name: ModelProviderName = Field(
+        description="The provider to use to generate embeddings.",
+    )
+    model_name: str = Field(
+        description="The model to use to generate embeddings.",
+    )
+    properties: dict[str, str | int | float | bool] = Field(
+        description="Properties to be used to execute the embedding config.",
+    )
+
+    # Workaround to return typed parent without importing Project
+    def parent_project(self) -> Union["Project", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "Project":
+            return None
+        return self.parent  # type: ignore
+
+    @model_validator(mode="after")
+    def validate_properties(self):
+        if "dimensions" in self.properties:
+            if (
+                not isinstance(self.properties["dimensions"], int)
+                or self.properties["dimensions"] <= 0
+            ):
+                raise ValueError("Dimensions must be a positive integer")
+
+        return self
+
+
+class Embedding(BaseModel):
+    vector: List[float] = Field(description="The vector of the embedding.")
+
+
+class ChunkEmbeddings(KilnParentedModel):
+    embedding_config_id: ID_TYPE = Field(
+        description="The ID of the embedding config used to generate the embeddings.",
+    )
+    embeddings: List[Embedding] = Field(
+        description="The embeddings of the chunks. The embedding at index i corresponds to the chunk at index i in the parent chunked document."
+    )
+
+    def parent_chunked_document(self) -> Union["ChunkedDocument", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "ChunkedDocument":
+            return None
+        return self.parent  # type: ignore
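The sketch below is a hypothetical usage example of the new embedding models, based only on the fields and validator visible in the diff above; it is not taken from the package documentation, and the provider enum member and model name are placeholder assumptions.

# Hypothetical sketch, not from the kiln-ai docs. Field names come from the diff
# above; the provider enum member and model name are placeholder assumptions.
from kiln_ai.datamodel.datamodel_enums import ModelProviderName
from kiln_ai.datamodel.embedding import ChunkEmbeddings, Embedding, EmbeddingConfig

embedding_config = EmbeddingConfig(
    name="my-embedding-config",
    model_provider_name=ModelProviderName.openai,  # assumed enum member
    model_name="text-embedding-3-small",  # placeholder model name
    # The model_validator requires "dimensions", if present, to be a positive int.
    properties={"dimensions": 1536},
)

# Embeddings are stored index-aligned with the chunks of the parent ChunkedDocument.
chunk_embeddings = ChunkEmbeddings(
    embedding_config_id=embedding_config.id,
    embeddings=[Embedding(vector=[0.1, 0.2, 0.3])],
)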
kiln_ai/datamodel/extraction.py
ADDED
@@ -0,0 +1,303 @@
+import logging
+from enum import Enum
+from typing import TYPE_CHECKING, Any, List, Union
+
+import anyio
+from pydantic import (
+    BaseModel,
+    Field,
+    SerializationInfo,
+    ValidationInfo,
+    field_serializer,
+    field_validator,
+    model_validator,
+)
+from typing_extensions import Self
+
+from kiln_ai.datamodel.basemodel import (
+    ID_TYPE,
+    FilenameString,
+    KilnAttachmentModel,
+    KilnParentedModel,
+    KilnParentModel,
+)
+from kiln_ai.datamodel.chunk import ChunkedDocument
+
+logger = logging.getLogger(__name__)
+
+if TYPE_CHECKING:
+    from kiln_ai.datamodel.project import Project
+
+logger = logging.getLogger(__name__)
+
+
+class Kind(str, Enum):
+    DOCUMENT = "document"
+    IMAGE = "image"
+    VIDEO = "video"
+    AUDIO = "audio"
+
+
+class OutputFormat(str, Enum):
+    TEXT = "text/plain"
+    MARKDOWN = "text/markdown"
+
+
+class ExtractorType(str, Enum):
+    LITELLM = "litellm"
+
+
+SUPPORTED_MIME_TYPES = {
+    Kind.DOCUMENT: {
+        "application/pdf",
+        "text/plain",
+        "text/markdown",
+        "text/html",
+        "text/md",
+    },
+    Kind.IMAGE: {
+        "image/png",
+        "image/jpeg",
+    },
+    Kind.VIDEO: {
+        "video/mp4",
+        "video/quicktime",
+    },
+    Kind.AUDIO: {
+        "audio/wav",
+        "audio/mpeg",
+        "audio/ogg",
+    },
+}
+
+
+class ExtractionModel(BaseModel):
+    name: str
+    label: str
+
+
+def validate_prompt(prompt: Any, name: str):
+    if not isinstance(prompt, str):
+        raise ValueError(f"{name} must be a string.")
+    if prompt == "":
+        raise ValueError(f"{name} cannot be empty.")
+
+
+class ExtractionSource(str, Enum):
+    PROCESSED = "processed"
+    PASSTHROUGH = "passthrough"
+
+
+class Extraction(
+    KilnParentedModel, KilnParentModel, parent_of={"chunked_documents": ChunkedDocument}
+):
+    source: ExtractionSource = Field(
+        description="The source of the extraction.",
+    )
+    extractor_config_id: ID_TYPE = Field(
+        description="The ID of the extractor config used to extract the data.",
+    )
+    output: KilnAttachmentModel = Field(
+        description="The extraction output.",
+    )
+
+    def parent_document(self) -> Union["Document", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "Document":
+            return None
+        return self.parent  # type: ignore
+
+    async def output_content(self) -> str | None:
+        if not self.path:
+            raise ValueError(
+                "Failed to resolve the path of extraction output attachment because the extraction does not have a path."
+            )
+
+        full_path = self.output.resolve_path(self.path.parent)
+
+        try:
+            return await anyio.Path(full_path).read_text(encoding="utf-8")
+        except Exception as e:
+            logger.error(
+                f"Failed to read extraction output for {full_path}: {e}", exc_info=True
+            )
+            raise ValueError(f"Failed to read extraction output: {e}")
+
+    def chunked_documents(self, readonly: bool = False) -> list[ChunkedDocument]:
+        return super().chunked_documents(readonly=readonly)  # type: ignore
+
+
+class ExtractorConfig(KilnParentedModel):
+    name: FilenameString = Field(
+        description="A name to identify the extractor config.",
+    )
+    is_archived: bool = Field(
+        default=False,
+        description="Whether the extractor config is archived. Archived extractor configs are not shown in the UI and are not available for use.",
+    )
+    description: str | None = Field(
+        default=None, description="The description of the extractor config"
+    )
+    model_provider_name: str = Field(
+        description="The name of the model provider to use for the extractor config.",
+    )
+    model_name: str = Field(
+        description="The name of the model to use for the extractor config.",
+    )
+    output_format: OutputFormat = Field(
+        default=OutputFormat.MARKDOWN,
+        description="The format to use for the output.",
+    )
+    passthrough_mimetypes: list[OutputFormat] = Field(
+        default_factory=list,
+        description="If the mimetype is in this list, the extractor will not be used and the text content of the file will be returned as is.",
+    )
+    extractor_type: ExtractorType = Field(
+        description="This is used to determine the type of extractor to use.",
+    )
+    properties: dict[str, str | int | float | bool | dict[str, str] | None] = Field(
+        default_factory=dict,
+        description="Properties to be used to execute the extractor config. This is extractor_type specific and should serialize to a json dict.",
+    )
+
+    @field_validator("properties")
+    @classmethod
+    def validate_properties(
+        cls, properties: dict[str, Any], info: ValidationInfo
+    ) -> dict[str, Any]:
+        def get_property(key: str) -> str:
+            value = properties.get(key)
+            if value is None or value == "" or not isinstance(value, str):
+                raise ValueError(f"Prompt for {key} must be a string")
+            return value
+
+        return {
+            "prompt_document": get_property(
+                "prompt_document",
+            ),
+            "prompt_image": get_property(
+                "prompt_image",
+            ),
+            "prompt_video": get_property(
+                "prompt_video",
+            ),
+            "prompt_audio": get_property(
+                "prompt_audio",
+            ),
+        }
+
+    def prompt_document(self) -> str | None:
+        prompt = self.properties.get("prompt_document")
+        if prompt is None:
+            return None
+        if not isinstance(prompt, str):
+            raise ValueError(
+                "Invalid prompt_document. prompt_document must be a string."
+            )
+        return prompt
+
+    def prompt_video(self) -> str | None:
+        prompt = self.properties.get("prompt_video")
+        if prompt is None:
+            return None
+        if not isinstance(prompt, str):
+            raise ValueError("Invalid prompt_video. prompt_video must be a string.")
+        return prompt
+
+    def prompt_audio(self) -> str | None:
+        prompt = self.properties.get("prompt_audio")
+        if prompt is None:
+            return None
+        if not isinstance(prompt, str):
+            raise ValueError("Invalid prompt_audio. prompt_audio must be a string.")
+        return prompt
+
+    def prompt_image(self) -> str | None:
+        prompt = self.properties.get("prompt_image")
+        if prompt is None:
+            return None
+        if not isinstance(prompt, str):
+            raise ValueError("Invalid prompt_image. prompt_image must be a string.")
+        return prompt
+
+    # Workaround to return typed parent without importing Project
+    def parent_project(self) -> Union["Project", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "Project":
+            return None
+        return self.parent  # type: ignore
+
+
+class FileInfo(BaseModel):
+    filename: str = Field(description="The filename of the file")
+
+    size: int = Field(description="The size of the file in bytes")
+
+    mime_type: str = Field(description="The MIME type of the file")
+
+    attachment: KilnAttachmentModel = Field(
+        description="The attachment to the file",
+    )
+
+    @field_serializer("attachment")
+    def serialize_attachment(
+        self, attachment: KilnAttachmentModel, info: SerializationInfo
+    ) -> dict:
+        context = info.context or {}
+        context["filename_prefix"] = "attachment"
+        return attachment.model_dump(mode="json", context=context)
+
+    @field_validator("mime_type")
+    @classmethod
+    def validate_mime_type(cls, mime_type: str, info: ValidationInfo) -> str:
+        filename = info.data.get("filename") or ""
+
+        for mime_types in SUPPORTED_MIME_TYPES.values():
+            if mime_type in mime_types:
+                return mime_type
+        raise ValueError(f"MIME type is not supported: {mime_type} (for {filename})")
+
+
+class Document(
+    KilnParentedModel, KilnParentModel, parent_of={"extractions": Extraction}
+):
+    name: FilenameString = Field(
+        description="A name to identify the document.",
+    )
+
+    description: str = Field(description="A description for the file")
+
+    original_file: FileInfo = Field(description="The original file")
+
+    kind: Kind = Field(
+        description="The kind of document. The kind is a broad family of filetypes that can be handled in a similar way"
+    )
+
+    tags: List[str] = Field(
+        default_factory=list,
+        description="Tags for the document. Tags are used to categorize documents for filtering and reporting.",
+    )
+
+    @model_validator(mode="after")
+    def validate_tags(self) -> Self:
+        for tag in self.tags:
+            if not tag:
+                raise ValueError("Tags cannot be empty strings")
+            if " " in tag:
+                raise ValueError("Tags cannot contain spaces. Try underscores.")
+
+        return self
+
+    # Workaround to return typed parent without importing Project
+    def parent_project(self) -> Union["Project", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "Project":
+            return None
+        return self.parent  # type: ignore
+
+    def extractions(self, readonly: bool = False) -> list[Extraction]:
+        return super().extractions(readonly=readonly)  # type: ignore
+
+
+def get_kind_from_mime_type(mime_type: str) -> Kind | None:
+    for kind, mime_types in SUPPORTED_MIME_TYPES.items():
+        if mime_type in mime_types:
+            return kind
+    return None
kiln_ai/datamodel/project.py
CHANGED
@@ -1,13 +1,27 @@
 from pydantic import Field
 
 from kiln_ai.datamodel.basemodel import FilenameString, KilnParentModel
+from kiln_ai.datamodel.chunk import ChunkerConfig
+from kiln_ai.datamodel.embedding import EmbeddingConfig
 from kiln_ai.datamodel.external_tool_server import ExternalToolServer
+from kiln_ai.datamodel.extraction import Document, ExtractorConfig
+from kiln_ai.datamodel.rag import RagConfig
 from kiln_ai.datamodel.task import Task
+from kiln_ai.datamodel.vector_store import VectorStoreConfig
 
 
 class Project(
     KilnParentModel,
-    parent_of={"tasks": Task, "external_tool_servers": ExternalToolServer},
+    parent_of={
+        "tasks": Task,
+        "documents": Document,
+        "extractor_configs": ExtractorConfig,
+        "chunker_configs": ChunkerConfig,
+        "embedding_configs": EmbeddingConfig,
+        "rag_configs": RagConfig,
+        "vector_store_configs": VectorStoreConfig,
+        "external_tool_servers": ExternalToolServer,
+    },
 ):
     """
     A collection of related tasks.
@@ -26,5 +40,23 @@ class Project(
     def tasks(self) -> list[Task]:
         return super().tasks()  # type: ignore
 
+    def documents(self, readonly: bool = False) -> list[Document]:
+        return super().documents(readonly=readonly)  # type: ignore
+
+    def extractor_configs(self, readonly: bool = False) -> list[ExtractorConfig]:
+        return super().extractor_configs(readonly=readonly)  # type: ignore
+
+    def chunker_configs(self, readonly: bool = False) -> list[ChunkerConfig]:
+        return super().chunker_configs(readonly=readonly)  # type: ignore
+
+    def embedding_configs(self, readonly: bool = False) -> list[EmbeddingConfig]:
+        return super().embedding_configs(readonly=readonly)  # type: ignore
+
+    def vector_store_configs(self, readonly: bool = False) -> list[VectorStoreConfig]:
+        return super().vector_store_configs(readonly=readonly)  # type: ignore
+
+    def rag_configs(self, readonly: bool = False) -> list[RagConfig]:
+        return super().rag_configs(readonly=readonly)  # type: ignore
+
     def external_tool_servers(self, readonly: bool = False) -> list[ExternalToolServer]:
         return super().external_tool_servers(readonly=readonly)  # type: ignore
kiln_ai/datamodel/rag.py
ADDED
@@ -0,0 +1,79 @@
+from typing import TYPE_CHECKING, Union
+
+from pydantic import Field, model_validator
+
+from kiln_ai.datamodel.basemodel import ID_TYPE, FilenameString, KilnParentedModel
+from kiln_ai.utils.validation import ToolNameString
+
+if TYPE_CHECKING:
+    from kiln_ai.datamodel.project import Project
+
+
+class RagConfig(KilnParentedModel):
+    name: FilenameString = Field(
+        description="A name to identify this RAG configuration for your own reference.",
+    )
+
+    is_archived: bool = Field(
+        default=False,
+        description="Whether the RAG configuration is archived. Archived RAG configurations are not shown in the UI and are not available for use.",
+    )
+
+    description: str | None = Field(
+        default=None,
+        description="A description of the RAG configuration for you and your team. Will not be used in prompts/training/validation.",
+    )
+
+    tool_name: ToolNameString = Field(
+        description="A name for the model to identify the Search Tool in conversations.",
+    )
+
+    tool_description: str = Field(
+        description="A description of the purpose of the tool. The model will use this description to understand the tool's capabilities.",
+        max_length=128,
+    )
+
+    extractor_config_id: ID_TYPE = Field(
+        description="The ID of the extractor config used to extract the documents.",
+    )
+
+    chunker_config_id: ID_TYPE = Field(
+        description="The ID of the chunker config used to chunk the documents.",
+    )
+
+    embedding_config_id: ID_TYPE = Field(
+        description="The ID of the embedding config used to embed the documents.",
+    )
+
+    vector_store_config_id: ID_TYPE = Field(
+        description="The ID of the vector store config used to store the documents.",
+    )
+
+    tags: list[str] | None = Field(
+        default=None,
+        description="List of document tags to filter by. If None, all documents in the project are used.",
+    )
+
+    # Workaround to return typed parent without importing Project
+    def parent_project(self) -> Union["Project", None]:
+        if self.parent is None or self.parent.__class__.__name__ != "Project":
+            return None
+        return self.parent  # type: ignore
+
+    @model_validator(mode="after")
+    def validate_tags(self):
+        if self.tags is not None:
+            if len(self.tags) == 0:
+                raise ValueError("Tags cannot be an empty list.")
+            for tag in self.tags:
+                if not tag:
+                    raise ValueError("Tags cannot be empty.")
+                if " " in tag:
+                    raise ValueError("Tags cannot contain spaces. Try underscores.")
+
+        if self.tool_name.strip() == "":
+            raise ValueError("Tool name cannot be empty.")
+        if self.tool_description.strip() == "":
+            raise ValueError("Tool description cannot be empty.")
+
+        return self
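A RagConfig ties the extractor, chunker, embedding, and vector store configs together behind a single search tool. The sketch below is hypothetical and uses only fields from the diff above; the IDs, names, and tag are placeholders that would normally come from configs saved in the same project.

# Hypothetical sketch; IDs, names, and tags are placeholders, not real config values.
from kiln_ai.datamodel.rag import RagConfig

rag_config = RagConfig(
    name="docs-search",
    tool_name="search_docs",
    tool_description="Search the project's documents and return relevant chunks.",
    extractor_config_id="extractor-config-id",  # placeholder: ID of a saved ExtractorConfig
    chunker_config_id="chunker-config-id",  # placeholder: ID of a saved ChunkerConfig
    embedding_config_id="embedding-config-id",  # placeholder: ID of a saved EmbeddingConfig
    vector_store_config_id="vector-store-config-id",  # placeholder: ID of a saved VectorStoreConfig
    # Optional tag filter; None (the default) means all documents in the project.
    tags=["handbook"],
)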