kiln-ai 0.12.0__py3-none-any.whl → 0.13.2__py3-none-any.whl
This diff represents the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Potentially problematic release.
This version of kiln-ai might be problematic.
- kiln_ai/adapters/__init__.py +4 -0
- kiln_ai/adapters/adapter_registry.py +157 -28
- kiln_ai/adapters/eval/__init__.py +28 -0
- kiln_ai/adapters/eval/eval_runner.py +4 -1
- kiln_ai/adapters/eval/g_eval.py +19 -3
- kiln_ai/adapters/eval/test_base_eval.py +1 -0
- kiln_ai/adapters/eval/test_eval_runner.py +1 -0
- kiln_ai/adapters/eval/test_g_eval.py +13 -7
- kiln_ai/adapters/fine_tune/base_finetune.py +16 -2
- kiln_ai/adapters/fine_tune/finetune_registry.py +2 -0
- kiln_ai/adapters/fine_tune/fireworks_finetune.py +8 -1
- kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +19 -0
- kiln_ai/adapters/fine_tune/test_together_finetune.py +533 -0
- kiln_ai/adapters/fine_tune/together_finetune.py +327 -0
- kiln_ai/adapters/ml_model_list.py +638 -155
- kiln_ai/adapters/model_adapters/__init__.py +2 -4
- kiln_ai/adapters/model_adapters/base_adapter.py +14 -11
- kiln_ai/adapters/model_adapters/litellm_adapter.py +391 -0
- kiln_ai/adapters/model_adapters/litellm_config.py +13 -0
- kiln_ai/adapters/model_adapters/test_litellm_adapter.py +407 -0
- kiln_ai/adapters/model_adapters/test_structured_output.py +23 -5
- kiln_ai/adapters/ollama_tools.py +3 -2
- kiln_ai/adapters/parsers/r1_parser.py +19 -14
- kiln_ai/adapters/parsers/test_r1_parser.py +17 -5
- kiln_ai/adapters/provider_tools.py +52 -60
- kiln_ai/adapters/repair/test_repair_task.py +3 -3
- kiln_ai/adapters/run_output.py +1 -1
- kiln_ai/adapters/test_adapter_registry.py +17 -20
- kiln_ai/adapters/test_generate_docs.py +2 -2
- kiln_ai/adapters/test_prompt_adaptors.py +30 -19
- kiln_ai/adapters/test_provider_tools.py +27 -82
- kiln_ai/datamodel/basemodel.py +2 -0
- kiln_ai/datamodel/datamodel_enums.py +2 -0
- kiln_ai/datamodel/json_schema.py +1 -1
- kiln_ai/datamodel/task_output.py +13 -6
- kiln_ai/datamodel/test_basemodel.py +9 -0
- kiln_ai/datamodel/test_datasource.py +19 -0
- kiln_ai/utils/config.py +46 -0
- kiln_ai/utils/dataset_import.py +232 -0
- kiln_ai/utils/test_dataset_import.py +596 -0
- {kiln_ai-0.12.0.dist-info → kiln_ai-0.13.2.dist-info}/METADATA +51 -7
- {kiln_ai-0.12.0.dist-info → kiln_ai-0.13.2.dist-info}/RECORD +44 -41
- kiln_ai/adapters/model_adapters/langchain_adapters.py +0 -309
- kiln_ai/adapters/model_adapters/openai_compatible_config.py +0 -10
- kiln_ai/adapters/model_adapters/openai_model_adapter.py +0 -289
- kiln_ai/adapters/model_adapters/test_langchain_adapter.py +0 -343
- kiln_ai/adapters/model_adapters/test_openai_model_adapter.py +0 -216
- {kiln_ai-0.12.0.dist-info → kiln_ai-0.13.2.dist-info}/WHEEL +0 -0
- {kiln_ai-0.12.0.dist-info → kiln_ai-0.13.2.dist-info}/licenses/LICENSE.txt +0 -0
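Taken together, the file list shows the main shape of this release: the LangChain and OpenAI-specific model adapters (langchain_adapters.py, openai_model_adapter.py, and their tests) are removed in favor of a single LiteLLM-based adapter (litellm_adapter.py plus litellm_config.py), and new Together fine-tuning (together_finetune.py) and dataset import (dataset_import.py) modules are added. For rough orientation only (this is not Kiln's adapter code, and the model string is a placeholder), the litellm API such an adapter would wrap looks like this:

import litellm

# A single completion call routed by a provider-prefixed model string;
# litellm normalizes many providers behind the OpenAI-style schema.
response = litellm.completion(
    model="openrouter/openai/gpt-4o-mini",  # placeholder model id
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)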
--- a/kiln_ai/adapters/model_adapters/test_openai_model_adapter.py
+++ /dev/null
@@ -1,216 +0,0 @@
-import json
-from unittest.mock import Mock, patch
-
-import pytest
-from openai import AsyncOpenAI
-
-from kiln_ai.adapters.ml_model_list import StructuredOutputMode
-from kiln_ai.adapters.model_adapters.base_adapter import AdapterConfig
-from kiln_ai.adapters.model_adapters.openai_compatible_config import (
-    OpenAICompatibleConfig,
-)
-from kiln_ai.adapters.model_adapters.openai_model_adapter import OpenAICompatibleAdapter
-from kiln_ai.datamodel import Project, Task
-
-
-@pytest.fixture
-def mock_task(tmp_path):
-    # Create a project first since Task requires a parent
-    project_path = tmp_path / "test_project" / "project.kiln"
-    project_path.parent.mkdir()
-
-    project = Project(name="Test Project", path=str(project_path))
-    project.save_to_file()
-
-    schema = {
-        "type": "object",
-        "properties": {"test": {"type": "string"}},
-    }
-
-    task = Task(
-        name="Test Task",
-        instruction="Test instruction",
-        parent=project,
-        output_json_schema=json.dumps(schema),
-    )
-    task.save_to_file()
-    return task
-
-
-@pytest.fixture
-def config():
-    return OpenAICompatibleConfig(
-        api_key="test_key",
-        base_url="https://api.test.com",
-        model_name="test-model",
-        provider_name="openrouter",
-        default_headers={"X-Test": "test"},
-    )
-
-
-def test_initialization(config, mock_task):
-    adapter = OpenAICompatibleAdapter(
-        config=config,
-        kiln_task=mock_task,
-        prompt_id="simple_prompt_builder",
-        base_adapter_config=AdapterConfig(default_tags=["test-tag"]),
-    )
-
-    assert isinstance(adapter.client, AsyncOpenAI)
-    assert adapter.config == config
-    assert adapter.run_config.task == mock_task
-    assert adapter.run_config.prompt_id == "simple_prompt_builder"
-    assert adapter.base_adapter_config.default_tags == ["test-tag"]
-    assert adapter.run_config.model_name == config.model_name
-    assert adapter.run_config.model_provider_name == config.provider_name
-
-
-def test_adapter_info(config, mock_task):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    assert adapter.adapter_name() == "kiln_openai_compatible_adapter"
-
-    assert adapter.run_config.model_name == config.model_name
-    assert adapter.run_config.model_provider_name == config.provider_name
-    assert adapter.run_config.prompt_id == "simple_prompt_builder"
-
-
-@pytest.mark.asyncio
-async def test_response_format_options_unstructured(config, mock_task):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    # Mock has_structured_output to return False
-    with patch.object(adapter, "has_structured_output", return_value=False):
-        options = await adapter.response_format_options()
-        assert options == {}
-
-
-@pytest.mark.parametrize(
-    "mode",
-    [
-        StructuredOutputMode.json_mode,
-        StructuredOutputMode.json_instruction_and_object,
-    ],
-)
-@pytest.mark.asyncio
-async def test_response_format_options_json_mode(config, mock_task, mode):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    with (
-        patch.object(adapter, "has_structured_output", return_value=True),
-        patch.object(adapter, "model_provider") as mock_provider,
-    ):
-        mock_provider.return_value.structured_output_mode = mode
-
-        options = await adapter.response_format_options()
-        assert options == {"response_format": {"type": "json_object"}}
-
-
-@pytest.mark.parametrize(
-    "mode",
-    [
-        StructuredOutputMode.default,
-        StructuredOutputMode.function_calling,
-    ],
-)
-@pytest.mark.asyncio
-async def test_response_format_options_function_calling(config, mock_task, mode):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    with (
-        patch.object(adapter, "has_structured_output", return_value=True),
-        patch.object(adapter, "model_provider") as mock_provider,
-    ):
-        mock_provider.return_value.structured_output_mode = mode
-
-        options = await adapter.response_format_options()
-        assert "tools" in options
-        # full tool structure validated below
-
-
-@pytest.mark.asyncio
-async def test_response_format_options_json_instructions(config, mock_task):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    with (
-        patch.object(adapter, "has_structured_output", return_value=True),
-        patch.object(adapter, "model_provider") as mock_provider,
-    ):
-        mock_provider.return_value.structured_output_mode = (
-            StructuredOutputMode.json_instructions
-        )
-        options = await adapter.response_format_options()
-        assert options == {}
-
-
-@pytest.mark.asyncio
-async def test_response_format_options_json_schema(config, mock_task):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    with (
-        patch.object(adapter, "has_structured_output", return_value=True),
-        patch.object(adapter, "model_provider") as mock_provider,
-    ):
-        mock_provider.return_value.structured_output_mode = (
-            StructuredOutputMode.json_schema
-        )
-        options = await adapter.response_format_options()
-        assert options == {
-            "response_format": {
-                "type": "json_schema",
-                "json_schema": {
-                    "name": "task_response",
-                    "schema": mock_task.output_schema(),
-                },
-            }
-        }
-
-
-def test_tool_call_params_weak(config, mock_task):
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    params = adapter.tool_call_params(strict=False)
-    expected_schema = mock_task.output_schema()
-    expected_schema["additionalProperties"] = False
-
-    assert params == {
-        "tools": [
-            {
-                "type": "function",
-                "function": {
-                    "name": "task_response",
-                    "parameters": expected_schema,
-                },
-            }
-        ],
-        "tool_choice": {
-            "type": "function",
-            "function": {"name": "task_response"},
-        },
-    }
-
-
-def test_tool_call_params_strict(config, mock_task):
-    config.provider_name = "openai"
-    adapter = OpenAICompatibleAdapter(config=config, kiln_task=mock_task)
-
-    params = adapter.tool_call_params(strict=True)
-    expected_schema = mock_task.output_schema()
-    expected_schema["additionalProperties"] = False
-
-    assert params == {
-        "tools": [
-            {
-                "type": "function",
-                "function": {
-                    "name": "task_response",
-                    "parameters": expected_schema,
-                    "strict": True,
-                },
-            }
-        ],
-        "tool_choice": {
-            "type": "function",
-            "function": {"name": "task_response"},
-        },
-    }
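For reference, the removed tests above pin down the exact OpenAI-style request options the old adapter emitted. A minimal sketch of how the asserted json_schema options map onto the OpenAI SDK call (the key, base URL, model, and schema below are the placeholder test values, not working settings):

import asyncio

from openai import AsyncOpenAI


async def main():
    client = AsyncOpenAI(api_key="test_key", base_url="https://api.test.com")
    # Same response_format shape the deleted
    # test_response_format_options_json_schema asserts
    response = await client.chat.completions.create(
        model="test-model",  # placeholder
        messages=[{"role": "user", "content": "Reply with JSON."}],
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "task_response",
                "schema": {
                    "type": "object",
                    "properties": {"test": {"type": "string"}},
                },
            },
        },
    )
    print(response.choices[0].message.content)


asyncio.run(main())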