kiln-ai 0.18.0__py3-none-any.whl → 0.20.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kiln-ai might be problematic.

Files changed (89)
  1. kiln_ai/adapters/__init__.py +2 -2
  2. kiln_ai/adapters/adapter_registry.py +46 -0
  3. kiln_ai/adapters/chat/chat_formatter.py +8 -12
  4. kiln_ai/adapters/chat/test_chat_formatter.py +6 -2
  5. kiln_ai/adapters/data_gen/data_gen_task.py +2 -2
  6. kiln_ai/adapters/data_gen/test_data_gen_task.py +7 -3
  7. kiln_ai/adapters/docker_model_runner_tools.py +119 -0
  8. kiln_ai/adapters/eval/base_eval.py +2 -2
  9. kiln_ai/adapters/eval/eval_runner.py +3 -1
  10. kiln_ai/adapters/eval/g_eval.py +2 -2
  11. kiln_ai/adapters/eval/test_base_eval.py +1 -1
  12. kiln_ai/adapters/eval/test_eval_runner.py +6 -12
  13. kiln_ai/adapters/eval/test_g_eval.py +3 -4
  14. kiln_ai/adapters/eval/test_g_eval_data.py +1 -1
  15. kiln_ai/adapters/fine_tune/__init__.py +1 -1
  16. kiln_ai/adapters/fine_tune/base_finetune.py +1 -0
  17. kiln_ai/adapters/fine_tune/fireworks_finetune.py +32 -20
  18. kiln_ai/adapters/fine_tune/openai_finetune.py +14 -4
  19. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +30 -21
  20. kiln_ai/adapters/fine_tune/test_openai_finetune.py +108 -111
  21. kiln_ai/adapters/ml_model_list.py +1009 -111
  22. kiln_ai/adapters/model_adapters/base_adapter.py +62 -28
  23. kiln_ai/adapters/model_adapters/litellm_adapter.py +397 -80
  24. kiln_ai/adapters/model_adapters/test_base_adapter.py +194 -18
  25. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +428 -4
  26. kiln_ai/adapters/model_adapters/test_litellm_adapter_tools.py +1103 -0
  27. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +5 -5
  28. kiln_ai/adapters/model_adapters/test_structured_output.py +120 -14
  29. kiln_ai/adapters/parsers/__init__.py +1 -1
  30. kiln_ai/adapters/parsers/test_r1_parser.py +1 -1
  31. kiln_ai/adapters/provider_tools.py +35 -20
  32. kiln_ai/adapters/remote_config.py +57 -10
  33. kiln_ai/adapters/repair/repair_task.py +1 -1
  34. kiln_ai/adapters/repair/test_repair_task.py +12 -9
  35. kiln_ai/adapters/run_output.py +3 -0
  36. kiln_ai/adapters/test_adapter_registry.py +109 -2
  37. kiln_ai/adapters/test_docker_model_runner_tools.py +305 -0
  38. kiln_ai/adapters/test_ml_model_list.py +51 -1
  39. kiln_ai/adapters/test_prompt_adaptors.py +13 -6
  40. kiln_ai/adapters/test_provider_tools.py +73 -12
  41. kiln_ai/adapters/test_remote_config.py +470 -16
  42. kiln_ai/datamodel/__init__.py +23 -21
  43. kiln_ai/datamodel/basemodel.py +54 -28
  44. kiln_ai/datamodel/datamodel_enums.py +3 -0
  45. kiln_ai/datamodel/dataset_split.py +5 -3
  46. kiln_ai/datamodel/eval.py +4 -4
  47. kiln_ai/datamodel/external_tool_server.py +298 -0
  48. kiln_ai/datamodel/finetune.py +2 -2
  49. kiln_ai/datamodel/json_schema.py +25 -10
  50. kiln_ai/datamodel/project.py +11 -4
  51. kiln_ai/datamodel/prompt.py +2 -2
  52. kiln_ai/datamodel/prompt_id.py +4 -4
  53. kiln_ai/datamodel/registry.py +0 -15
  54. kiln_ai/datamodel/run_config.py +62 -0
  55. kiln_ai/datamodel/task.py +8 -83
  56. kiln_ai/datamodel/task_output.py +7 -2
  57. kiln_ai/datamodel/task_run.py +41 -0
  58. kiln_ai/datamodel/test_basemodel.py +213 -21
  59. kiln_ai/datamodel/test_eval_model.py +6 -6
  60. kiln_ai/datamodel/test_example_models.py +175 -0
  61. kiln_ai/datamodel/test_external_tool_server.py +691 -0
  62. kiln_ai/datamodel/test_model_perf.py +1 -1
  63. kiln_ai/datamodel/test_prompt_id.py +5 -1
  64. kiln_ai/datamodel/test_registry.py +8 -3
  65. kiln_ai/datamodel/test_task.py +20 -47
  66. kiln_ai/datamodel/test_tool_id.py +239 -0
  67. kiln_ai/datamodel/tool_id.py +83 -0
  68. kiln_ai/tools/__init__.py +8 -0
  69. kiln_ai/tools/base_tool.py +82 -0
  70. kiln_ai/tools/built_in_tools/__init__.py +13 -0
  71. kiln_ai/tools/built_in_tools/math_tools.py +124 -0
  72. kiln_ai/tools/built_in_tools/test_math_tools.py +204 -0
  73. kiln_ai/tools/mcp_server_tool.py +95 -0
  74. kiln_ai/tools/mcp_session_manager.py +243 -0
  75. kiln_ai/tools/test_base_tools.py +199 -0
  76. kiln_ai/tools/test_mcp_server_tool.py +457 -0
  77. kiln_ai/tools/test_mcp_session_manager.py +1585 -0
  78. kiln_ai/tools/test_tool_registry.py +473 -0
  79. kiln_ai/tools/tool_registry.py +64 -0
  80. kiln_ai/utils/config.py +32 -0
  81. kiln_ai/utils/open_ai_types.py +94 -0
  82. kiln_ai/utils/project_utils.py +17 -0
  83. kiln_ai/utils/test_config.py +138 -1
  84. kiln_ai/utils/test_open_ai_types.py +131 -0
  85. {kiln_ai-0.18.0.dist-info → kiln_ai-0.20.1.dist-info}/METADATA +37 -6
  86. kiln_ai-0.20.1.dist-info/RECORD +138 -0
  87. kiln_ai-0.18.0.dist-info/RECORD +0 -115
  88. {kiln_ai-0.18.0.dist-info → kiln_ai-0.20.1.dist-info}/WHEEL +0 -0
  89. {kiln_ai-0.18.0.dist-info → kiln_ai-0.20.1.dist-info}/licenses/LICENSE.txt +0 -0
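
Nearly all of the churn in kiln_ai/adapters/fine_tune/test_openai_finetune.py (the diff below) comes from one refactor: patching a module-level oai_client is replaced by patching a _get_openai_client() accessor through a shared mock_openai_client fixture. As a reading aid, here is a minimal sketch of that seam; only the name _get_openai_client and its patch path appear in the diff, so the function body below is an assumption, not the actual kiln-ai source.

    from openai import AsyncOpenAI


    def _get_openai_client() -> AsyncOpenAI:
        # Hypothetical body: constructing the client on demand gives tests a
        # single patch point, instead of stubbing attributes on a shared
        # module-level client (the old oai_client.files.create and
        # oai_client.fine_tuning.jobs.* patch targets).
        return AsyncOpenAI()

With the client behind a function, each test patches one seam and hands back a MagicMock whose async endpoints (files.create, fine_tuning.jobs.retrieve, fine_tuning.jobs.create) are AsyncMocks, which is exactly what the new mock_openai_client fixture does.
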
--- a/kiln_ai/adapters/fine_tune/test_openai_finetune.py
+++ b/kiln_ai/adapters/fine_tune/test_openai_finetune.py
@@ -21,6 +21,25 @@ from kiln_ai.datamodel.dataset_split import Train80Test20SplitDefinition
 from kiln_ai.utils.config import Config
 
 
+@pytest.fixture
+def mock_openai_client():
+    """Mock the OpenAI client returned by _get_openai_client()"""
+    from unittest.mock import AsyncMock
+
+    with patch(
+        "kiln_ai.adapters.fine_tune.openai_finetune._get_openai_client"
+    ) as mock_get_client:
+        mock_client = MagicMock()
+
+        # Use AsyncMock for async methods
+        mock_client.fine_tuning.jobs.retrieve = AsyncMock()
+        mock_client.fine_tuning.jobs.create = AsyncMock()
+        mock_client.files.create = AsyncMock()
+
+        mock_get_client.return_value = mock_client
+        yield mock_client
+
+
 @pytest.fixture
 def openai_finetune(tmp_path):
     tmp_file = tmp_path / "test-finetune.kiln"
@@ -122,15 +141,12 @@ async def test_setup(openai_finetune):
     ],
 )
 async def test_status_api_errors(
-    openai_finetune, exception, expected_status, expected_message
+    openai_finetune, mock_openai_client, exception, expected_status, expected_message
 ):
-    with patch(
-        "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-        side_effect=exception,
-    ):
-        status = await openai_finetune.status()
-        assert status.status == expected_status
-        assert expected_message in status.message
+    mock_openai_client.fine_tuning.jobs.retrieve.side_effect = exception
+    status = await openai_finetune.status()
+    assert status.status == expected_status
+    assert expected_message in status.message
 
 
 @pytest.mark.parametrize(
@@ -151,63 +167,57 @@ async def test_status_api_errors(
 )
 async def test_status_job_states(
     openai_finetune,
+    mock_openai_client,
     mock_response,
     job_status,
     expected_status,
     message_contains,
 ):
     mock_response.status = job_status
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = mock_response
 
-    with patch(
-        "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-        return_value=mock_response,
-    ):
-        status = await openai_finetune.status()
-        assert status.status == expected_status
-        assert message_contains in status.message
+    status = await openai_finetune.status()
+    assert status.status == expected_status
+    assert message_contains in status.message
 
 
-async def test_status_with_error_response(openai_finetune, mock_response):
+async def test_status_with_error_response(
+    openai_finetune, mock_openai_client, mock_response
+):
     mock_response.error = MagicMock()
     mock_response.error.message = "Something went wrong"
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = mock_response
 
-    with patch(
-        "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-        return_value=mock_response,
-    ):
-        status = await openai_finetune.status()
-        assert status.status == FineTuneStatusType.failed
-        assert status.message.startswith("Something went wrong [Code:")
+    status = await openai_finetune.status()
+    assert status.status == FineTuneStatusType.failed
+    assert status.message.startswith("Something went wrong [Code:")
 
 
-async def test_status_with_estimated_finish_time(openai_finetune, mock_response):
+async def test_status_with_estimated_finish_time(
+    openai_finetune, mock_openai_client, mock_response
+):
     current_time = time.time()
     mock_response.status = "running"
     mock_response.estimated_finish = current_time + 300  # 5 minutes from now
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = mock_response
 
-    with patch(
-        "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-        return_value=mock_response,
-    ):
-        status = await openai_finetune.status()
-        assert status.status == FineTuneStatusType.running
-        assert (
-            "Estimated finish time: 299 seconds" in status.message
-        )  # non zero time passes
+    status = await openai_finetune.status()
+    assert status.status == FineTuneStatusType.running
+    assert (
+        "Estimated finish time: 299 seconds" in status.message
+    )  # non zero time passes
 
 
-async def test_status_empty_response(openai_finetune):
-    with patch(
-        "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-        return_value=mock_response,
-    ):
-        status = await openai_finetune.status()
-        assert status.status == FineTuneStatusType.unknown
-        assert "Invalid response from OpenAI" in status.message
+async def test_status_empty_response(openai_finetune, mock_openai_client):
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = None
+
+    status = await openai_finetune.status()
+    assert status.status == FineTuneStatusType.unknown
+    assert "Invalid response from OpenAI" in status.message
 
 
 async def test_generate_and_upload_jsonl_success(
-    openai_finetune, mock_dataset, mock_task
+    openai_finetune, mock_openai_client, mock_dataset, mock_task
 ):
     mock_path = Path("mock_path.jsonl")
     mock_file_id = "file-123"
@@ -219,16 +229,13 @@ async def test_generate_and_upload_jsonl_success(
     # Mock the file response
     mock_file_response = MagicMock()
     mock_file_response.id = mock_file_id
+    mock_openai_client.files.create.return_value = mock_file_response
 
     with (
         patch(
             "kiln_ai.adapters.fine_tune.openai_finetune.DatasetFormatter",
             return_value=mock_formatter,
         ) as mock_formatter_class,
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.files.create",
-            return_value=mock_file_response,
-        ) as mock_create,
         patch("builtins.open") as mock_open,
     ):
         result = await openai_finetune.generate_and_upload_jsonl(
@@ -252,13 +259,13 @@ async def test_generate_and_upload_jsonl_success(
 
     # Verify file was opened and uploaded
     mock_open.assert_called_once_with(mock_path, "rb")
-    mock_create.assert_called_once()
+    mock_openai_client.files.create.assert_called_once()
 
     assert result == mock_file_id
 
 
 async def test_generate_and_upload_jsonl_schema_success(
-    openai_finetune, mock_dataset, mock_task
+    openai_finetune, mock_openai_client, mock_dataset, mock_task
 ):
     mock_path = Path("mock_path.jsonl")
     mock_file_id = "file-123"
@@ -271,16 +278,13 @@ async def test_generate_and_upload_jsonl_schema_success(
     # Mock the file response
     mock_file_response = MagicMock()
     mock_file_response.id = mock_file_id
+    mock_openai_client.files.create.return_value = mock_file_response
 
     with (
         patch(
             "kiln_ai.adapters.fine_tune.openai_finetune.DatasetFormatter",
             return_value=mock_formatter,
         ) as mock_formatter_class,
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.files.create",
-            return_value=mock_file_response,
-        ) as mock_create,
         patch("builtins.open") as mock_open,
     ):
         result = await openai_finetune.generate_and_upload_jsonl(
@@ -304,13 +308,13 @@ async def test_generate_and_upload_jsonl_schema_success(
 
     # Verify file was opened and uploaded
     mock_open.assert_called_once_with(mock_path, "rb")
-    mock_create.assert_called_once()
+    mock_openai_client.files.create.assert_called_once()
 
     assert result == mock_file_id
 
 
 async def test_generate_and_upload_jsonl_upload_failure(
-    openai_finetune, mock_dataset, mock_task
+    openai_finetune, mock_openai_client, mock_dataset, mock_task
 ):
     mock_path = Path("mock_path.jsonl")
 
@@ -320,16 +324,13 @@ async def test_generate_and_upload_jsonl_upload_failure(
     # Mock response with no ID
     mock_file_response = MagicMock()
     mock_file_response.id = None
+    mock_openai_client.files.create.return_value = mock_file_response
 
     with (
         patch(
             "kiln_ai.adapters.fine_tune.openai_finetune.DatasetFormatter",
             return_value=mock_formatter,
         ),
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.files.create",
-            return_value=mock_file_response,
-        ),
         patch("builtins.open"),
     ):
         with pytest.raises(ValueError, match="Failed to upload file to OpenAI"):
@@ -339,24 +340,21 @@
 
 
 async def test_generate_and_upload_jsonl_api_error(
-    openai_finetune, mock_dataset, mock_task
+    openai_finetune, mock_openai_client, mock_dataset, mock_task
 ):
     mock_path = Path("mock_path.jsonl")
 
     mock_formatter = MagicMock(spec=DatasetFormatter)
     mock_formatter.dump_to_file.return_value = mock_path
+    mock_openai_client.files.create.side_effect = openai.APIError(
+        message="API error", request=MagicMock(), body={}
+    )
 
     with (
         patch(
             "kiln_ai.adapters.fine_tune.openai_finetune.DatasetFormatter",
             return_value=mock_formatter,
         ),
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.files.create",
-            side_effect=openai.APIError(
-                message="API error", request=MagicMock(), body={}
-            ),
-        ),
         patch("builtins.open"),
     ):
         with pytest.raises(openai.APIError):
@@ -378,6 +376,7 @@ async def test_generate_and_upload_jsonl_api_error(
 )
 async def test_start_success(
     openai_finetune,
+    mock_openai_client,
     mock_dataset,
     mock_task,
     output_schema,
@@ -401,6 +400,7 @@
     mock_ft_response.id = "ft-123"
     mock_ft_response.fine_tuned_model = None
     mock_ft_response.model = "gpt-4o-mini-2024-07-18"
+    mock_openai_client.fine_tuning.jobs.create.return_value = mock_ft_response
 
     with (
         patch.object(
@@ -408,10 +408,6 @@
             "generate_and_upload_jsonl",
             side_effect=["train-file-123", "val-file-123"],
         ) as mock_upload,
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.create",
-            return_value=mock_ft_response,
-        ) as mock_create,
     ):
         await openai_finetune._start(mock_dataset)
 
@@ -425,7 +421,7 @@
     )
 
     # Verify fine-tune creation
-    mock_create.assert_called_once_with(
+    mock_openai_client.fine_tuning.jobs.create.assert_called_once_with(
         training_file="train-file-123",
         model="gpt-4o",
         validation_file=None,
@@ -444,7 +440,9 @@
     assert openai_finetune.datamodel.structured_output_mode == expected_mode
 
 
-async def test_start_with_validation(openai_finetune, mock_dataset, mock_task):
+async def test_start_with_validation(
+    openai_finetune, mock_openai_client, mock_dataset, mock_task
+):
     openai_finetune.datamodel.parent = mock_task
     openai_finetune.datamodel.validation_split_name = "validation"
 
@@ -452,6 +450,7 @@ async def test_start_with_validation(openai_finetune, mock_dataset, mock_task):
     mock_ft_response.id = "ft-123"
     mock_ft_response.fine_tuned_model = None
     mock_ft_response.model = "gpt-4o-mini-2024-07-18"
+    mock_openai_client.fine_tuning.jobs.create.return_value = mock_ft_response
 
     with (
         patch.object(
@@ -459,10 +458,6 @@ async def test_start_with_validation(openai_finetune, mock_dataset, mock_task):
             "generate_and_upload_jsonl",
             side_effect=["train-file-123", "val-file-123"],
        ) as mock_upload,
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.create",
-            return_value=mock_ft_response,
-        ) as mock_create,
     ):
         await openai_finetune._start(mock_dataset)
 
@@ -486,8 +481,11 @@ async def test_start_with_validation(openai_finetune, mock_dataset, mock_task):
     )
 
     # Verify validation file was included
-    mock_create.assert_called_once()
-    assert mock_create.call_args[1]["validation_file"] == "val-file-123"
+    mock_openai_client.fine_tuning.jobs.create.assert_called_once()
+    assert (
+        mock_openai_client.fine_tuning.jobs.create.call_args[1]["validation_file"]
+        == "val-file-123"
+    )
 
 
 async def test_start_no_task(openai_finetune, mock_dataset):
@@ -498,7 +496,9 @@
         await openai_finetune._start(mock_dataset)
 
 
-async def test_status_updates_model_ids(openai_finetune, mock_response):
+async def test_status_updates_model_ids(
+    openai_finetune, mock_openai_client, mock_response
+):
     # Set up initial model IDs
     openai_finetune.datamodel.fine_tune_model_id = "old-ft-model"
     openai_finetune.datamodel.base_model_id = "old-base-model"
@@ -507,49 +507,41 @@
     mock_response.fine_tuned_model = "new-ft-model"
     mock_response.model = "new-base-model"
     mock_response.status = "succeeded"
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = mock_response
 
-    with (
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-            return_value=mock_response,
-        ),
-    ):
-        status = await openai_finetune.status()
+    status = await openai_finetune.status()
 
-        # Verify model IDs were updated
-        assert openai_finetune.datamodel.fine_tune_model_id == "new-ft-model"
-        assert openai_finetune.datamodel.base_model_id == "new-base-model"
+    # Verify model IDs were updated
+    assert openai_finetune.datamodel.fine_tune_model_id == "new-ft-model"
+    assert openai_finetune.datamodel.base_model_id == "new-base-model"
 
-        # Verify save was called
-        # This isn't properly mocked, so not checking
-        # assert openai_finetune.datamodel.save.called
+    # Verify save was called
+    # This isn't properly mocked, so not checking
+    # assert openai_finetune.datamodel.save.called
 
-        # Verify status is still returned correctly
-        assert status.status == FineTuneStatusType.completed
-        assert status.message == "Training job completed"
+    # Verify status is still returned correctly
+    assert status.status == FineTuneStatusType.completed
+    assert status.message == "Training job completed"
 
 
-async def test_status_updates_latest_status(openai_finetune, mock_response):
+async def test_status_updates_latest_status(
+    openai_finetune, mock_openai_client, mock_response
+):
     # Set initial status
     openai_finetune.datamodel.latest_status = FineTuneStatusType.running
     assert openai_finetune.datamodel.latest_status == FineTuneStatusType.running
     mock_response.status = "succeeded"
+    mock_openai_client.fine_tuning.jobs.retrieve.return_value = mock_response
 
-    with (
-        patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.fine_tuning.jobs.retrieve",
-            return_value=mock_response,
-        ),
-    ):
-        status = await openai_finetune.status()
+    status = await openai_finetune.status()
 
-        # Verify status was updated in datamodel
-        assert openai_finetune.datamodel.latest_status == FineTuneStatusType.completed
-        assert status.status == FineTuneStatusType.completed
-        assert status.message == "Training job completed"
+    # Verify status was updated in datamodel
+    assert openai_finetune.datamodel.latest_status == FineTuneStatusType.completed
+    assert status.status == FineTuneStatusType.completed
+    assert status.message == "Training job completed"
 
-        # Verify file was saved
-        assert openai_finetune.datamodel.path.exists()
+    # Verify file was saved
+    assert openai_finetune.datamodel.path.exists()
 
 
 @pytest.mark.parametrize(
@@ -595,11 +587,16 @@ async def test_generate_and_upload_jsonl_with_data_strategy(
             return_value=mock_formatter,
         ),
         patch(
-            "kiln_ai.adapters.fine_tune.openai_finetune.oai_client.files.create",
-            return_value=mock_file_response,
-        ),
+            "kiln_ai.adapters.fine_tune.openai_finetune._get_openai_client"
+        ) as mock_get_client,
         patch("builtins.open"),
     ):
+        from unittest.mock import AsyncMock
+
+        mock_client = MagicMock()
+        mock_client.files.create = AsyncMock(return_value=mock_file_response)
+        mock_get_client.return_value = mock_client
+
         result = await openai_finetune.generate_and_upload_jsonl(
             mock_dataset, "train", mock_task, DatasetFormat.OPENAI_CHAT_JSONL
         )