kiln-ai 0.19.0__py3-none-any.whl → 0.20.1__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of kiln-ai might be problematic.
- kiln_ai/adapters/__init__.py +2 -2
- kiln_ai/adapters/adapter_registry.py +19 -1
- kiln_ai/adapters/chat/chat_formatter.py +8 -12
- kiln_ai/adapters/chat/test_chat_formatter.py +6 -2
- kiln_ai/adapters/docker_model_runner_tools.py +119 -0
- kiln_ai/adapters/eval/base_eval.py +2 -2
- kiln_ai/adapters/eval/eval_runner.py +3 -1
- kiln_ai/adapters/eval/g_eval.py +2 -2
- kiln_ai/adapters/eval/test_base_eval.py +1 -1
- kiln_ai/adapters/eval/test_g_eval.py +3 -4
- kiln_ai/adapters/fine_tune/__init__.py +1 -1
- kiln_ai/adapters/fine_tune/openai_finetune.py +14 -4
- kiln_ai/adapters/fine_tune/test_openai_finetune.py +108 -111
- kiln_ai/adapters/ml_model_list.py +380 -34
- kiln_ai/adapters/model_adapters/base_adapter.py +51 -21
- kiln_ai/adapters/model_adapters/litellm_adapter.py +383 -79
- kiln_ai/adapters/model_adapters/test_base_adapter.py +193 -17
- kiln_ai/adapters/model_adapters/test_litellm_adapter.py +406 -1
- kiln_ai/adapters/model_adapters/test_litellm_adapter_tools.py +1103 -0
- kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +5 -5
- kiln_ai/adapters/model_adapters/test_structured_output.py +110 -4
- kiln_ai/adapters/parsers/__init__.py +1 -1
- kiln_ai/adapters/provider_tools.py +15 -1
- kiln_ai/adapters/repair/test_repair_task.py +12 -9
- kiln_ai/adapters/run_output.py +3 -0
- kiln_ai/adapters/test_adapter_registry.py +80 -1
- kiln_ai/adapters/test_docker_model_runner_tools.py +305 -0
- kiln_ai/adapters/test_ml_model_list.py +39 -1
- kiln_ai/adapters/test_prompt_adaptors.py +13 -6
- kiln_ai/adapters/test_provider_tools.py +55 -0
- kiln_ai/adapters/test_remote_config.py +98 -0
- kiln_ai/datamodel/__init__.py +23 -21
- kiln_ai/datamodel/datamodel_enums.py +1 -0
- kiln_ai/datamodel/eval.py +1 -1
- kiln_ai/datamodel/external_tool_server.py +298 -0
- kiln_ai/datamodel/json_schema.py +25 -10
- kiln_ai/datamodel/project.py +8 -1
- kiln_ai/datamodel/registry.py +0 -15
- kiln_ai/datamodel/run_config.py +62 -0
- kiln_ai/datamodel/task.py +2 -77
- kiln_ai/datamodel/task_output.py +6 -1
- kiln_ai/datamodel/task_run.py +41 -0
- kiln_ai/datamodel/test_basemodel.py +3 -3
- kiln_ai/datamodel/test_example_models.py +175 -0
- kiln_ai/datamodel/test_external_tool_server.py +691 -0
- kiln_ai/datamodel/test_registry.py +8 -3
- kiln_ai/datamodel/test_task.py +15 -47
- kiln_ai/datamodel/test_tool_id.py +239 -0
- kiln_ai/datamodel/tool_id.py +83 -0
- kiln_ai/tools/__init__.py +8 -0
- kiln_ai/tools/base_tool.py +82 -0
- kiln_ai/tools/built_in_tools/__init__.py +13 -0
- kiln_ai/tools/built_in_tools/math_tools.py +124 -0
- kiln_ai/tools/built_in_tools/test_math_tools.py +204 -0
- kiln_ai/tools/mcp_server_tool.py +95 -0
- kiln_ai/tools/mcp_session_manager.py +243 -0
- kiln_ai/tools/test_base_tools.py +199 -0
- kiln_ai/tools/test_mcp_server_tool.py +457 -0
- kiln_ai/tools/test_mcp_session_manager.py +1585 -0
- kiln_ai/tools/test_tool_registry.py +473 -0
- kiln_ai/tools/tool_registry.py +64 -0
- kiln_ai/utils/config.py +22 -0
- kiln_ai/utils/open_ai_types.py +94 -0
- kiln_ai/utils/project_utils.py +17 -0
- kiln_ai/utils/test_config.py +138 -1
- kiln_ai/utils/test_open_ai_types.py +131 -0
- {kiln_ai-0.19.0.dist-info → kiln_ai-0.20.1.dist-info}/METADATA +6 -5
- {kiln_ai-0.19.0.dist-info → kiln_ai-0.20.1.dist-info}/RECORD +70 -47
- {kiln_ai-0.19.0.dist-info → kiln_ai-0.20.1.dist-info}/WHEEL +0 -0
- {kiln_ai-0.19.0.dist-info → kiln_ai-0.20.1.dist-info}/licenses/LICENSE.txt +0 -0
kiln_ai/adapters/adapter_registry.py
CHANGED
@@ -52,7 +52,7 @@ def adapter_for_task(
                     base_url=getenv("SILICONFLOW_BASE_URL")
                     or "https://api.siliconflow.cn/v1",
                     default_headers={
-                        "HTTP-Referer": "https://
+                        "HTTP-Referer": "https://kiln.tech/siliconflow",
                         "X-Title": "KilnAI",
                     },
                     additional_body_options={
@@ -123,6 +123,24 @@ def adapter_for_task(
                     },
                 ),
             )
+        case ModelProviderName.docker_model_runner:
+            docker_base_url = (
+                Config.shared().docker_model_runner_base_url
+                or "http://localhost:12434/engines/llama.cpp"
+            )
+            return LiteLlmAdapter(
+                kiln_task=kiln_task,
+                base_adapter_config=base_adapter_config,
+                config=LiteLlmConfig(
+                    run_config_properties=run_config_properties,
+                    # Docker Model Runner uses OpenAI-compatible API at /v1 endpoint
+                    base_url=docker_base_url + "/v1",
+                    additional_body_options={
+                        # LiteLLM errors without an api_key, even though Docker Model Runner doesn't require one.
+                        "api_key": "DMR",
+                    },
+                ),
+            )
         case ModelProviderName.fireworks_ai:
             return LiteLlmAdapter(
                 kiln_task=kiln_task,
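The new docker_model_runner case resolves its endpoint from the shared config, falls back to a localhost default, and appends the OpenAI-compatible /v1 path. A minimal standalone sketch of that resolution logic (the helper name and constant here are illustrative, not part of the package):

# Illustrative sketch of the endpoint resolution performed by the new
# docker_model_runner case above; resolve_dmr_endpoint is a hypothetical helper.
DEFAULT_DMR_BASE_URL = "http://localhost:12434/engines/llama.cpp"


def resolve_dmr_endpoint(configured_base_url: str | None) -> str:
    # Prefer the configured base URL, fall back to the local default,
    # then append the OpenAI-compatible /v1 path that LiteLLM talks to.
    base = configured_base_url or DEFAULT_DMR_BASE_URL
    return base + "/v1"


assert resolve_dmr_endpoint(None) == "http://localhost:12434/engines/llama.cpp/v1"
assert resolve_dmr_endpoint("http://dmr.local:12434/engines/llama.cpp").endswith("/v1")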
kiln_ai/adapters/chat/chat_formatter.py
CHANGED
@@ -106,14 +106,12 @@ class TwoMessageCotLegacyFormatter(ChatFormatter):
         if self._state == "awaiting_thinking":
             if previous_output is None:
                 raise ValueError("previous_output required for thinking step")
-            msgs = [
-                ChatMessage("assistant", previous_output),
-                ChatMessage("user", COT_FINAL_ANSWER_PROMPT),
-            ]
             self._intermediate_outputs["chain_of_thought"] = previous_output
             self._state = "awaiting_final"
-
-
+            cot_message = ChatMessage("user", COT_FINAL_ANSWER_PROMPT)
+            self._messages.append(ChatMessage("assistant", previous_output))
+            self._messages.append(cot_message)
+            return ChatTurn(messages=[cot_message], final_call=True)
 
         if self._state == "awaiting_final":
             if previous_output is None:
@@ -155,14 +153,12 @@ class TwoMessageCotFormatter(ChatFormatter):
         if self._state == "awaiting_thinking":
             if previous_output is None:
                 raise ValueError("previous_output required for thinking step")
-            msgs = [
-                ChatMessage("assistant", previous_output),
-                ChatMessage("user", COT_FINAL_ANSWER_PROMPT),
-            ]
             self._intermediate_outputs["chain_of_thought"] = previous_output
             self._state = "awaiting_final"
-            self._messages.
-
+            self._messages.append(ChatMessage("assistant", previous_output))
+            cot_message = ChatMessage("user", COT_FINAL_ANSWER_PROMPT)
+            self._messages.append(cot_message)
+            return ChatTurn(messages=[cot_message], final_call=True)
 
         if self._state == "awaiting_final":
             if previous_output is None:
kiln_ai/adapters/chat/test_chat_formatter.py
CHANGED
@@ -46,12 +46,14 @@ def test_chat_formatter_final_and_intermediate():
     )
 
     first = formatter.next_turn()
+    assert first is not None
     assert [m.__dict__ for m in first.messages] == expected[:3]
     assert not first.final_call
     assert formatter.intermediate_outputs() == {}
 
     second = formatter.next_turn("thinking output")
-    assert
+    assert second is not None
+    assert [m.__dict__ for m in second.messages] == expected[4:5]
     assert second.final_call
     assert formatter.intermediate_outputs() == {"chain_of_thought": "thinking output"}
 
@@ -78,12 +80,14 @@ def test_chat_formatter_two_message_cot():
     )
 
     first = formatter.next_turn()
+    assert first is not None
     assert [m.__dict__ for m in first.messages] == expected[:2]
     assert not first.final_call
     assert formatter.intermediate_outputs() == {}
 
     second = formatter.next_turn("thinking output")
-    assert
+    assert second is not None
+    assert [m.__dict__ for m in second.messages] == expected[3:4]
     assert second.final_call
     assert formatter.intermediate_outputs() == {"chain_of_thought": "thinking output"}
 
kiln_ai/adapters/docker_model_runner_tools.py
ADDED
@@ -0,0 +1,119 @@
+from typing import List
+
+import httpx
+import openai
+from pydantic import BaseModel, Field
+
+from kiln_ai.adapters.ml_model_list import ModelProviderName, built_in_models
+from kiln_ai.utils.config import Config
+
+
+def docker_model_runner_base_url() -> str:
+    """
+    Gets the base URL for Docker Model Runner API connections.
+
+    Returns:
+        The base URL to use for Docker Model Runner API calls, using environment variable if set
+        or falling back to localhost default
+    """
+    config_base_url = Config.shared().docker_model_runner_base_url
+    if config_base_url:
+        return config_base_url
+    return "http://localhost:12434/engines/llama.cpp"
+
+
+async def docker_model_runner_online() -> bool:
+    """
+    Checks if the Docker Model Runner service is available and responding.
+
+    Returns:
+        True if Docker Model Runner is available and responding, False otherwise
+    """
+    try:
+        base_url = docker_model_runner_base_url()
+        # Docker Model Runner uses OpenAI-compatible endpoints
+        async with httpx.AsyncClient() as client:
+            response = await client.get(f"{base_url}/v1/models", timeout=5.0)
+            response.raise_for_status()
+    except httpx.RequestError:
+        return False
+    return True
+
+
+class DockerModelRunnerConnection(BaseModel):
+    message: str
+    version: str | None = None
+    supported_models: List[str]
+    untested_models: List[str] = Field(default_factory=list)
+
+    def all_models(self) -> List[str]:
+        return self.supported_models + self.untested_models
+
+
+# Parse the Docker Model Runner /v1/models response
+def parse_docker_model_runner_models(
+    models: List[openai.types.Model],
+) -> DockerModelRunnerConnection | None:
+    # Build a list of models we support for Docker Model Runner from the built-in model list
+    supported_docker_models = [
+        provider.model_id
+        for model in built_in_models
+        for provider in model.providers
+        if provider.name == ModelProviderName.docker_model_runner
+    ]
+    # Note: Docker Model Runner aliases will be added when we configure models
+
+    model_names = [model.id for model in models]
+    available_supported_models = []
+    untested_models = []
+
+    for model_name in model_names:
+        if model_name in supported_docker_models:
+            available_supported_models.append(model_name)
+        else:
+            untested_models.append(model_name)
+
+    if available_supported_models or untested_models:
+        return DockerModelRunnerConnection(
+            message="Docker Model Runner connected",
+            supported_models=available_supported_models,
+            untested_models=untested_models,
+        )
+
+    return DockerModelRunnerConnection(
+        message="Docker Model Runner is running, but no supported models are available. Ensure models like 'ai/llama3.2:3B-Q4_K_M', 'ai/qwen3:8B-Q4_K_M', or 'ai/gemma3n:4B-Q4_K_M' are loaded.",
+        supported_models=[],
+        untested_models=[],
+    )
+
+
+async def get_docker_model_runner_connection(
+    custom_url: str | None = None,
+) -> DockerModelRunnerConnection | None:
+    """
+    Gets the connection status for Docker Model Runner.
+
+    Args:
+        custom_url: Optional custom URL to use instead of the configured one
+    """
+    try:
+        base_url = custom_url or docker_model_runner_base_url()
+        # Use OpenAI client to get models list
+        client = openai.OpenAI(
+            api_key="dummy",  # Docker Model Runner doesn't require API key
+            base_url=f"{base_url}/v1",
+            max_retries=0,
+        )
+        models_response = client.models.list()
+
+    except (openai.APIConnectionError, openai.APIError, httpx.RequestError):
+        return None
+
+    return parse_docker_model_runner_models(list(models_response))
+
+
+def docker_model_runner_model_installed(
+    conn: DockerModelRunnerConnection, model_name: str
+) -> bool:
+    all_models = conn.all_models()
+    return model_name in all_models
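For orientation, a hedged usage sketch of the helpers added above; the driver script itself is illustrative and only the imported names come from this diff.

# Illustrative usage of the new Docker Model Runner helpers; the script is not
# part of the package, only the imported functions are.
import asyncio

from kiln_ai.adapters.docker_model_runner_tools import (
    docker_model_runner_model_installed,
    docker_model_runner_online,
    get_docker_model_runner_connection,
)


async def main() -> None:
    if not await docker_model_runner_online():
        print("Docker Model Runner is not reachable")
        return
    conn = await get_docker_model_runner_connection()
    if conn is None:
        print("Could not list models from Docker Model Runner")
        return
    print(conn.message)
    print("Supported models:", conn.supported_models)
    print("Untested models:", conn.untested_models)
    print(
        "llama3.2 loaded:",
        docker_model_runner_model_installed(conn, "ai/llama3.2:3B-Q4_K_M"),
    )


asyncio.run(main())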
kiln_ai/adapters/eval/base_eval.py
CHANGED
@@ -7,7 +7,7 @@ from kiln_ai.adapters.ml_model_list import ModelProviderName
 from kiln_ai.adapters.model_adapters.base_adapter import AdapterConfig
 from kiln_ai.datamodel.eval import Eval, EvalConfig, EvalScores
 from kiln_ai.datamodel.json_schema import validate_schema_with_value_error
-from kiln_ai.datamodel.task import
+from kiln_ai.datamodel.task import RunConfigProperties, TaskOutputRatingType, TaskRun
 from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error
 
 
@@ -18,7 +18,7 @@ class BaseEval:
     Should be subclassed, and the run_eval method implemented.
     """
 
-    def __init__(self, eval_config: EvalConfig, run_config:
+    def __init__(self, eval_config: EvalConfig, run_config: RunConfigProperties | None):
         self.eval_config = eval_config
         eval = eval_config.parent_eval()
         if not eval:
kiln_ai/adapters/eval/eval_runner.py
CHANGED
@@ -169,7 +169,9 @@ class EvalRunner:
         # Create the evaluator for this eval config/run config pair
         evaluator = eval_adapter_from_type(job.eval_config.config_type)(
             job.eval_config,
-            job.task_run_config.
+            job.task_run_config.run_config_properties
+            if job.task_run_config
+            else None,
         )
         if not isinstance(evaluator, BaseEval):
             raise ValueError("Not able to create evaluator from eval config")
kiln_ai/adapters/eval/g_eval.py
CHANGED
@@ -12,7 +12,7 @@ from kiln_ai.adapters.model_adapters.base_adapter import AdapterConfig, RunOutpu
 from kiln_ai.adapters.prompt_builders import PromptGenerators
 from kiln_ai.datamodel import Project, Task, TaskRun
 from kiln_ai.datamodel.eval import EvalConfig, EvalConfigType, EvalScores
-from kiln_ai.datamodel.task import
+from kiln_ai.datamodel.task import RunConfigProperties, StructuredOutputMode
 
 # all the tokens we score for, and their float scores.
 TOKEN_TO_SCORE_MAP: Dict[str, float] = {
@@ -89,7 +89,7 @@ class GEval(BaseEval):
     }
     """
 
-    def __init__(self, eval_config: EvalConfig, run_config:
+    def __init__(self, eval_config: EvalConfig, run_config: RunConfigProperties | None):
         if (
             eval_config.config_type != EvalConfigType.g_eval
             and eval_config.config_type != EvalConfigType.llm_as_judge
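A hedged construction sketch for the updated signature, assuming the import paths implied by the file layout in this diff; the property values mirror the test fixture shown further below.

# Sketch only: GEval (and other BaseEval subclasses) now take RunConfigProperties,
# or None, instead of a task-bound run config.
from kiln_ai.adapters.eval.g_eval import GEval
from kiln_ai.datamodel.eval import EvalConfig
from kiln_ai.datamodel.task import RunConfigProperties


def build_g_eval(eval_config: EvalConfig) -> GEval:
    # eval_config must be a g_eval or llm_as_judge config, per GEval's own check.
    props = RunConfigProperties(
        model_name="llama_3_1_8b",
        model_provider_name="groq",
        prompt_id="simple_prompt_builder",
        structured_output_mode="json_schema",
    )
    return GEval(eval_config, props)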
kiln_ai/adapters/eval/test_base_eval.py
CHANGED
@@ -380,7 +380,7 @@ async def test_run_task_and_eval():
     async def run_eval(self, task_run):
         return {"overall_rating": 5, "quality": 4}, {"thinking": "test thinking"}
 
-    evaluator = MockEval(eval_config, run_config.
+    evaluator = MockEval(eval_config, run_config.run_config_properties)
 
     # Mock dependencies
     mock_adapter = AsyncMock()
kiln_ai/adapters/eval/test_g_eval.py
CHANGED
@@ -19,7 +19,7 @@ from kiln_ai.datamodel import (
     TaskRun,
 )
 from kiln_ai.datamodel.eval import Eval, EvalConfig, EvalConfigType, EvalOutputScore
-from kiln_ai.datamodel.task import
+from kiln_ai.datamodel.task import RunConfigProperties
 
 
 @pytest.fixture
@@ -93,11 +93,10 @@ def test_eval_config(test_task):
 
 
 @pytest.fixture
-def test_run_config(
-    return
+def test_run_config():
+    return RunConfigProperties(
         model_name="llama_3_1_8b",
         model_provider_name="groq",
-        task=test_task,
         prompt_id="simple_prompt_builder",
         structured_output_mode="json_schema",
     )
kiln_ai/adapters/fine_tune/openai_finetune.py
CHANGED
@@ -13,9 +13,16 @@ from kiln_ai.adapters.fine_tune.dataset_formatter import DatasetFormat, DatasetF
 from kiln_ai.datamodel import DatasetSplit, StructuredOutputMode, Task
 from kiln_ai.utils.config import Config
 
-
-
-)
+
+def _get_openai_client():
+    key = Config.shared().open_ai_api_key
+    if not key:
+        raise RuntimeError(
+            "OpenAI API key not set. You must connect OpenAI in settings."
+        )
+    return openai.AsyncOpenAI(
+        api_key=key,
+    )
 
 
 class OpenAIFinetune(BaseFinetuneAdapter):
@@ -45,6 +52,7 @@ class OpenAIFinetune(BaseFinetuneAdapter):
 
         try:
             # Will raise an error if the job is not found, or for other issues
+            oai_client = _get_openai_client()
             response = await oai_client.fine_tuning.jobs.retrieve(
                 self.datamodel.provider_id
             )
@@ -79,7 +87,7 @@ class OpenAIFinetune(BaseFinetuneAdapter):
             )
             return FineTuneStatus(
                 status=FineTuneStatusType.unknown,
-                message=f"Unknown error: [{
+                message=f"Unknown error: [{e!s}]",
             )
 
         if not response or not isinstance(response, FineTuningJob):
@@ -145,6 +153,7 @@ class OpenAIFinetune(BaseFinetuneAdapter):
             if k in ["n_epochs", "learning_rate_multiplier", "batch_size"]
         }
 
+        oai_client = _get_openai_client()
        ft = await oai_client.fine_tuning.jobs.create(
             training_file=train_file_id,
             model=self.datamodel.base_model_id,
@@ -168,6 +177,7 @@ class OpenAIFinetune(BaseFinetuneAdapter):
         )
         path = formatter.dump_to_file(split_name, format, self.datamodel.data_strategy)
 
+        oai_client = _get_openai_client()
         response = await oai_client.files.create(
             file=open(path, "rb"),
             purpose="fine-tune",