kiln-ai 0.13.0__py3-none-any.whl → 0.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kiln_ai/adapters/adapter_registry.py +4 -0
- kiln_ai/adapters/eval/g_eval.py +17 -2
- kiln_ai/adapters/eval/test_g_eval.py +12 -7
- kiln_ai/adapters/fine_tune/base_finetune.py +0 -20
- kiln_ai/adapters/fine_tune/fireworks_finetune.py +169 -15
- kiln_ai/adapters/fine_tune/test_base_finetune.py +0 -9
- kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py +513 -8
- kiln_ai/adapters/fine_tune/test_together_finetune.py +2 -0
- kiln_ai/adapters/fine_tune/together_finetune.py +2 -0
- kiln_ai/adapters/ml_model_list.py +1 -6
- kiln_ai/adapters/provider_tools.py +2 -2
- kiln_ai/adapters/test_provider_tools.py +2 -2
- kiln_ai/utils/config.py +9 -0
- {kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/METADATA +2 -2
- {kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/RECORD +17 -17
- {kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/WHEEL +0 -0
- {kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/licenses/LICENSE.txt +0 -0

kiln_ai/adapters/adapter_registry.py
CHANGED
@@ -108,6 +108,10 @@ def adapter_for_task(
                     # 1. To use the correct base URL
                     # 2. We use Ollama's OpenAI compatible API (/v1), and don't just let litellm use the Ollama API. We use more advanced features like json_schema.
                     base_url=ollama_base_url + "/v1",
+                    additional_body_options={
+                        # LiteLLM errors without an api_key, even though Ollama doesn't support one.
+                        "api_key": "NA",
+                    },
                 ),
             )
         case ModelProviderName.fireworks_ai:
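Note: the "api_key": "NA" entry above is a workaround, not a real credential — LiteLLM refuses to issue a request without an api_key even though Ollama ignores it. The snippet below is a minimal, hypothetical illustration of the same idea outside Kiln's adapter; the model name, prompt and URL are made up, and it assumes litellm.completion() accepts api_key and api_base overrides as LiteLLM's OpenAI-compatible usage describes. It is not the package's actual call path.

# Illustrative sketch only - not Kiln's adapter code.
import litellm

response = litellm.completion(
    model="openai/llama3.1",               # hypothetical model id served via Ollama's OpenAI-compatible /v1 API
    messages=[{"role": "user", "content": "Say hello"}],
    api_base="http://localhost:11434/v1",  # default local Ollama endpoint
    api_key="NA",                          # placeholder: LiteLLM requires a key, Ollama ignores it
)
print(response.choices[0].message.content)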
kiln_ai/adapters/eval/g_eval.py
CHANGED
@@ -297,9 +297,12 @@ The model produced the following output for the task:
 
         total_score = 0.0
         total_probability = 0.0
+        top_logprobs_contains_primary_token = False
 
-        # Process all valid scoring tokens
+        # Process all valid scoring tokens from alternatives
         for top_logprob in token_logprob.top_logprobs:
+            if top_logprob.token == token_logprob.token:
+                top_logprobs_contains_primary_token = True
             token_score = self.score_from_token_string(top_logprob.token)
             if token_score is not None:
                 # Convert logprob to probability
@@ -307,9 +310,21 @@ The model produced the following output for the task:
                 total_score += token_score * probability
                 total_probability += probability
 
+        # Weird OpenAI 4o bug - sometimes the primary token is included in the top logprobs, sometimes not.
+        # Add the primary token back in if excluded
+        if not top_logprobs_contains_primary_token:
+            if token_logprob.logprob == -9999.0:
+                # Another "bug" - sometimes the logprob is -9999.0. This seems to happen when the rest of the logprobs are tiny probability.
+                total_score += primary_token_score * 1.0
+                total_probability += 1.0
+            else:
+                probability = math.exp(token_logprob.logprob)
+                total_score += primary_token_score * probability
+                total_probability += probability
+
         if total_probability <= 0.0:
             raise RuntimeError(
-                f"No valid scoring tokens found for {token_logprob.token}. This should never happen. Please file a bug if you see this."
+                f"No valid scoring tokens found for {token_logprob.token}. This should never happen as the token has a valid score (so it must be excluded from top logprobs). Please file a bug if you see this."
             )
 
         # Normalize by total probability of valid tokens (LLM may have wanted to generate other non-rating tokens, these shouldn't lower score of rating tokens)
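Note: the two g_eval.py hunks above compute a probability-weighted average over the judge model's rating tokens, with a fallback when the primary token is missing from top_logprobs and a second fallback for the -9999.0 sentinel logprob. The function below is a self-contained sketch of that arithmetic under illustrative names (weighted_rating, score_of); it is not the package's API, and unlike the real adapter it returns None where the adapter raises a RuntimeError.

import math
from typing import Callable, List, Optional, Tuple


def weighted_rating(
    primary_token: str,
    primary_logprob: float,
    top_logprobs: List[Tuple[str, float]],
    score_of: Callable[[str], Optional[float]],
) -> Optional[float]:
    # Weight each candidate rating token by its probability (exp of its logprob).
    total_score = 0.0
    total_probability = 0.0
    saw_primary = False

    for token, logprob in top_logprobs:
        if token == primary_token:
            saw_primary = True
        score = score_of(token)
        if score is not None:
            p = math.exp(logprob)
            total_score += score * p
            total_probability += p

    # If the provider omitted the primary token from top_logprobs, add it back in.
    if not saw_primary:
        primary_score = score_of(primary_token)
        if primary_score is not None:
            # -9999.0 is the sentinel logprob noted in the hunk; treat it as probability 1.0.
            p = 1.0 if primary_logprob == -9999.0 else math.exp(primary_logprob)
            total_score += primary_score * p
            total_probability += p

    if total_probability <= 0.0:
        return None
    # Normalize so non-rating tokens the model considered don't drag the score down.
    return total_score / total_probability


# "4" at 60% and "5" at 40% average to 4.4, matching the test case in test_g_eval.py.
print(weighted_rating("4", math.log(0.6), [("4", math.log(0.6)), ("5", math.log(0.4))],
                      lambda t: float(t) if t.isdigit() else None))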

kiln_ai/adapters/eval/test_g_eval.py
CHANGED
@@ -393,12 +393,13 @@ def test_rating_token_to_score(test_eval_config, test_run_config):
             self.logprob = logprob
 
     class MockTokenLogprob:
-        def __init__(self, token, top_logprobs):
+        def __init__(self, token, top_logprobs, logprob):
             self.token = token
             self.top_logprobs = [MockTopLogprob(t, lp) for t, lp in top_logprobs]
+            self.logprob = logprob
 
     # Test single token case
-    token_logprob = MockTokenLogprob("5", [("5", 0.0)])  # log(1) = 0
+    token_logprob = MockTokenLogprob("5", [("5", 0.0)], logprob=1e-8)  # log(1) = 0
     score = g_eval.rating_token_to_score(token_logprob)
     assert score == 5.0
 
@@ -409,18 +410,22 @@ def test_rating_token_to_score(test_eval_config, test_run_config):
             ("4", math.log(0.6)),  # 60% probability
             ("5", math.log(0.4)),  # 40% probability
         ],
+        logprob=math.log(0.6),
     )
     score = g_eval.rating_token_to_score(token_logprob)
     assert pytest.approx(score) == 4.4  # (4 * 0.6 + 5 * 0.4)
 
     # Test invalid token
-    token_logprob = MockTokenLogprob(":", [(":", 0.0)])
+    token_logprob = MockTokenLogprob(":", [(":", 0.0)], logprob=1e-8)
     assert g_eval.rating_token_to_score(token_logprob) is None
 
-    # Test
-    token_logprob = MockTokenLogprob("5", [])
-
-
+    # Test missing from top logprobs
+    token_logprob = MockTokenLogprob("5", [], logprob=1e-8)
+    assert pytest.approx(g_eval.rating_token_to_score(token_logprob)) == 5.0
+
+    # Test missing from top logprobs, with special case logprob
+    token_logprob = MockTokenLogprob("5", [], logprob=-9999)
+    assert pytest.approx(g_eval.rating_token_to_score(token_logprob)) == 5.0
 
 
 def test_g_eval_system_instruction():

kiln_ai/adapters/fine_tune/base_finetune.py
CHANGED
@@ -72,8 +72,6 @@ class BaseFinetuneAdapter(ABC):
         Create and start a fine-tune.
         """
 
-        cls.check_valid_provider_model(provider_id, provider_base_model_id)
-
         if not dataset.id:
             raise ValueError("Dataset must have an id")
 
@@ -184,21 +182,3 @@ class BaseFinetuneAdapter(ABC):
         for parameter_key in parameters:
             if parameter_key not in allowed_parameters:
                 raise ValueError(f"Parameter {parameter_key} is not available")
-
-    @classmethod
-    def check_valid_provider_model(
-        cls, provider_id: str, provider_base_model_id: str
-    ) -> None:
-        """
-        Check if the provider and base model are valid.
-        """
-        for model in built_in_models:
-            for provider in model.providers:
-                if (
-                    provider.name == provider_id
-                    and provider.provider_finetune_id == provider_base_model_id
-                ):
-                    return
-        raise ValueError(
-            f"Provider {provider_id} with base model {provider_base_model_id} is not available"
-        )

kiln_ai/adapters/fine_tune/fireworks_finetune.py
CHANGED
@@ -1,4 +1,5 @@
-
+import logging
+from typing import List, Tuple
 from uuid import uuid4
 
 import httpx
@@ -13,6 +14,14 @@ from kiln_ai.adapters.fine_tune.dataset_formatter import DatasetFormat, DatasetF
 from kiln_ai.datamodel import DatasetSplit, StructuredOutputMode, Task
 from kiln_ai.utils.config import Config
 
+logger = logging.getLogger(__name__)
+
+# https://docs.fireworks.ai/fine-tuning/fine-tuning-models#supported-base-models-loras-on-serverless
+serverless_models = [
+    "accounts/fireworks/models/llama-v3p1-8b-instruct",
+    "accounts/fireworks/models/llama-v3p1-70b-instruct",
+]
+
 
 class FireworksFinetune(BaseFinetuneAdapter):
     """
@@ -132,11 +141,18 @@ class FireworksFinetune(BaseFinetuneAdapter):
                 :60
             ]
         )
-        payload = {
+        payload: dict[str, str | dict[str, str | bool]] = {
             "dataset": f"accounts/{account_id}/datasets/{train_file_id}",
             "displayName": display_name,
             "baseModel": self.datamodel.base_model_id,
         }
+        # Add W&B config if API key is set
+        if Config.shared().wandb_api_key:
+            payload["wandbConfig"] = {
+                "enabled": True,
+                "project": "Kiln_AI",
+                "apiKey": Config.shared().wandb_api_key,
+            }
         hyperparameters = self.create_payload_parameters(self.datamodel.parameters)
         payload.update(hyperparameters)
         headers = {
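Note: with the hunk above, the job-creation payload sent to Fireworks optionally carries a wandbConfig block. The dict below only illustrates the resulting payload shape; the account, dataset, model and key values are placeholders, not real identifiers.

# Illustrative payload shape only - all values are placeholders.
payload = {
    "dataset": "accounts/<account-id>/datasets/<train-file-id>",
    "displayName": "Kiln AI fine-tuning [ID:<finetune-id>][name:<finetune-name>]",
    "baseModel": "accounts/fireworks/models/llama-v3p1-8b-instruct",
    # Included only when Config.shared().wandb_api_key is set:
    "wandbConfig": {
        "enabled": True,
        "project": "Kiln_AI",
        "apiKey": "<wandb-api-key>",
    },
}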
@@ -276,32 +292,54 @@ class FireworksFinetune(BaseFinetuneAdapter):
         return {k: v for k, v in payload.items() if v is not None}
 
     async def _deploy(self) -> bool:
-
-
-
-
-        # https://docs.fireworks.ai/models/deploying#deploying-to-serverless
-        # This endpoint will return 400 if already deployed with code 9, so we consider that a success.
+        if self.datamodel.base_model_id in serverless_models:
+            return await self._deploy_serverless()
+        else:
+            return await self._check_or_deploy_server()
 
+    def api_key_and_account_id(self) -> Tuple[str, str]:
         api_key = Config.shared().fireworks_api_key
         account_id = Config.shared().fireworks_account_id
         if not api_key or not account_id:
             raise ValueError("Fireworks API key or account ID not set")
+        return api_key, account_id
+
+    def deployment_display_name(self) -> str:
+        # Limit the display name to 60 characters
+        display_name = f"Kiln AI fine-tuned model [ID:{self.datamodel.id}][name:{self.datamodel.name}]"[
+            :60
+        ]
+        return display_name
 
+    async def model_id_checking_status(self) -> str | None:
         # Model ID != fine tune ID on Fireworks. Model is the result of the tune job. Call status to get it.
         status, model_id = await self._status()
         if status.status != FineTuneStatusType.completed:
-            return
+            return None
         if not model_id or not isinstance(model_id, str):
-            return
+            return None
+        return model_id
+
+    async def _deploy_serverless(self) -> bool:
+        # Now we "deploy" the model using PEFT serverless.
+        # A bit complicated: most fireworks deploys are server based.
+        # However, a Lora can be serverless (PEFT).
+        # By calling the deploy endpoint WITHOUT first creating a deployment ID, it will only deploy if it can be done serverless.
+        # https://docs.fireworks.ai/models/deploying#deploying-to-serverless
+        # This endpoint will return 400 if already deployed with code 9, so we consider that a success.
+
+        api_key, account_id = self.api_key_and_account_id()
 
         url = f"https://api.fireworks.ai/v1/accounts/{account_id}/deployedModels"
-
-
-
-
+        model_id = await self.model_id_checking_status()
+        if not model_id:
+            logger.error(
+                "Model ID not found - can't deploy model to Fireworks serverless"
+            )
+            return False
+
         payload = {
-            "displayName":
+            "displayName": self.deployment_display_name(),
             "model": model_id,
         }
         headers = {
@@ -320,4 +358,120 @@ class FireworksFinetune(BaseFinetuneAdapter):
                     self.datamodel.save_to_file()
                 return True
 
+        logger.error(
+            f"Failed to deploy model to Fireworks serverless: [{response.status_code}] {response.text}"
+        )
         return False
+
+    async def _check_or_deploy_server(self) -> bool:
+        """
+        Check if the model is already deployed. If not, deploy it to a dedicated server.
+        """
+
+        # Check if the model is already deployed
+        # If it's fine_tune_model_id is set, it might be deployed. However, Fireworks deletes them over time so we need to check.
+        if self.datamodel.fine_tune_model_id:
+            deployments = await self._fetch_all_deployments()
+            for deployment in deployments:
+                if deployment[
+                    "baseModel"
+                ] == self.datamodel.fine_tune_model_id and deployment["state"] in [
+                    "READY",
+                    "CREATING",
+                ]:
+                    return True
+
+        # If the model is not deployed, deploy it
+        return await self._deploy_server()
+
+    async def _deploy_server(self) -> bool:
+        # For models that are not serverless, we just need to deploy the model to a server.
+        # We use a scale-to-zero on-demand deployment. If you stop using it, it
+        # will scale to zero and charges will stop.
+        model_id = await self.model_id_checking_status()
+        if not model_id:
+            logger.error("Model ID not found - can't deploy model to Fireworks server")
+            return False
+
+        api_key, account_id = self.api_key_and_account_id()
+        url = f"https://api.fireworks.ai/v1/accounts/{account_id}/deployments"
+
+        payload = {
+            "displayName": self.deployment_display_name(),
+            "description": "Deployed by Kiln AI",
+            # Allow scale to zero
+            "minReplicaCount": 0,
+            "autoscalingPolicy": {
+                "scaleUpWindow": "30s",
+                "scaleDownWindow": "300s",
+                # Scale to zero after 5 minutes of inactivity - this is the minimum allowed
+                "scaleToZeroWindow": "300s",
+            },
+            "baseModel": model_id,
+        }
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, json=payload, headers=headers)
+
+        if response.status_code == 200:
+            basemodel = response.json().get("baseModel")
+            if basemodel is not None and isinstance(basemodel, str):
+                self.datamodel.fine_tune_model_id = basemodel
+                if self.datamodel.path:
+                    self.datamodel.save_to_file()
+                return True
+
+        logger.error(
+            f"Failed to deploy model to Fireworks server: [{response.status_code}] {response.text}"
+        )
+        return False
+
+    async def _fetch_all_deployments(self) -> List[dict]:
+        """
+        Fetch all deployments for an account.
+        """
+        api_key, account_id = self.api_key_and_account_id()
+
+        url = f"https://api.fireworks.ai/v1/accounts/{account_id}/deployments"
+
+        params = {
+            # Note: filter param does not work for baseModel, which would have been ideal, and ideally would have been documented. Instead we'll fetch all and filter.
+            # Max page size
+            "pageSize": 200,
+        }
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+        }
+
+        deployments = []
+
+        # Paginate through all deployments
+        async with httpx.AsyncClient() as client:
+            while True:
+                response = await client.get(url, params=params, headers=headers)
+                json = response.json()
+                if "deployments" not in json or not isinstance(
+                    json["deployments"], list
+                ):
+                    raise ValueError(
+                        f"Invalid response from Fireworks. Expected list of deployments in 'deployments' key: [{response.status_code}] {response.text}"
+                    )
+                deployments.extend(json["deployments"])
+                next_page_token = json.get("nextPageToken")
+                if (
+                    next_page_token
+                    and isinstance(next_page_token, str)
+                    and len(next_page_token) > 0
+                ):
+                    params = {
+                        "pageSize": 200,
+                        "pageToken": next_page_token,
+                    }
+                else:
+                    break
+
+        return deployments

kiln_ai/adapters/fine_tune/test_base_finetune.py
CHANGED
@@ -261,15 +261,6 @@ async def test_create_and_start_no_parent_task_path():
     )
 
 
-def test_check_valid_provider_model():
-    MockFinetune.check_valid_provider_model("openai", "gpt-4o-mini-2024-07-18")
-
-    with pytest.raises(
-        ValueError, match="Provider openai with base model gpt-99 is not available"
-    ):
-        MockFinetune.check_valid_provider_model("openai", "gpt-99")
-
-
 async def test_create_and_start_invalid_train_split(mock_dataset):
     # Test with an invalid train split name
     mock_dataset.split_contents = {"valid_train": [], "valid_test": []}

kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py
CHANGED
@@ -340,6 +340,7 @@ async def test_start_success(
     expected_mode,
     expected_format,
 ):
+    Config.shared().wandb_api_key = "test-api-key"
     mock_task.output_json_schema = output_schema
 
     fireworks_finetune.datamodel.parent = mock_task
@@ -378,6 +379,24 @@
     assert fireworks_finetune.datamodel.structured_output_mode == expected_mode
     assert fireworks_finetune.datamodel.properties["endpoint_version"] == "v2"
 
+    # check mockclent.post call values
+    assert mock_client.post.call_count == 1
+    submit_call_values = mock_client.post.call_args[1]
+    assert submit_call_values["json"]["wandbConfig"] == {
+        "enabled": True,
+        "project": "Kiln_AI",
+        "apiKey": "test-api-key",
+    }
+    assert submit_call_values["json"]["baseModel"] == "llama-v2-7b"
+    assert (
+        submit_call_values["json"]["dataset"]
+        == f"accounts/{Config.shared().fireworks_account_id}/datasets/{mock_dataset_id}"
+    )
+    assert (
+        submit_call_values["json"]["displayName"]
+        == f"Kiln AI fine-tuning [ID:{fireworks_finetune.datamodel.id}][name:{fireworks_finetune.datamodel.name}]"
+    )
+
 
 async def test_start_api_error(
     fireworks_finetune, mock_dataset, mock_task, mock_api_key
@@ -429,7 +448,7 @@ def test_available_parameters(fireworks_finetune):
     assert payload_parameters == {"loraRank": 16, "epochs": 3}
 
 
-async def
+async def test_deploy_serverless_success(fireworks_finetune, mock_api_key):
     # Mock response for successful deployment
     success_response = MagicMock(spec=httpx.Response)
     success_response.status_code = 200
@@ -448,12 +467,12 @@ async def test_deploy_success(fireworks_finetune, mock_api_key):
         mock_client.post.return_value = success_response
         mock_client_class.return_value.__aenter__.return_value = mock_client
 
-        result = await fireworks_finetune.
+        result = await fireworks_finetune._deploy_serverless()
         assert result is True
         assert fireworks_finetune.datamodel.fine_tune_model_id == "ftm-123"
 
 
-async def
+async def test_deploy_serverless_already_deployed(fireworks_finetune, mock_api_key):
     # Mock response for already deployed model
     already_deployed_response = MagicMock(spec=httpx.Response)
     already_deployed_response.status_code = 400
@@ -475,12 +494,12 @@ async def test_deploy_already_deployed(fireworks_finetune, mock_api_key):
         mock_client.post.return_value = already_deployed_response
         mock_client_class.return_value.__aenter__.return_value = mock_client
 
-        result = await fireworks_finetune.
+        result = await fireworks_finetune._deploy_serverless()
         assert result is True
         assert fireworks_finetune.datamodel.fine_tune_model_id == "ftm-123"
 
 
-async def
+async def test_deploy_serverless_failure(fireworks_finetune, mock_api_key):
     # Mock response for failed deployment
     failure_response = MagicMock(spec=httpx.Response)
     failure_response.status_code = 500
@@ -491,18 +510,28 @@ async def test_deploy_failure(fireworks_finetune, mock_api_key):
         mock_client.post.return_value = failure_response
         mock_client_class.return_value.__aenter__.return_value = mock_client
 
-        result = await fireworks_finetune.
+        result = await fireworks_finetune._deploy_serverless()
         assert result is False
 
 
-async def
+async def test_deploy_serverless_missing_credentials(fireworks_finetune):
     # Test missing API key or account ID
     with patch.object(Config, "shared") as mock_config:
         mock_config.return_value.fireworks_api_key = None
         mock_config.return_value.fireworks_account_id = None
 
         with pytest.raises(ValueError, match="Fireworks API key or account ID not set"):
-            await fireworks_finetune.
+            await fireworks_finetune._deploy_serverless()
+
+
+async def test_deploy_server_missing_credentials(fireworks_finetune):
+    # Test missing API key or account ID
+    with patch.object(Config, "shared") as mock_config:
+        mock_config.return_value.fireworks_api_key = None
+        mock_config.return_value.fireworks_account_id = None
+
+        response = await fireworks_finetune._check_or_deploy_server()
+        assert response is False
 
 
 async def test_deploy_missing_model_id(fireworks_finetune, mock_api_key):
@@ -545,3 +574,479 @@ async def test_status_with_deploy(fireworks_finetune, mock_api_key):
     # Verify message was updated due to failed deployment
     assert status.status == FineTuneStatusType.completed
     assert status.message == "Fine-tuning job completed but failed to deploy model."
+
+
+@pytest.mark.paid
+async def test_fetch_all_deployments(fireworks_finetune):
+    deployments = await fireworks_finetune._fetch_all_deployments()
+    assert isinstance(deployments, list)
+
+
+async def test_api_key_and_account_id(fireworks_finetune, mock_api_key):
+    # Test successful retrieval of API key and account ID
+    api_key, account_id = fireworks_finetune.api_key_and_account_id()
+    assert api_key == "test-api-key"
+    assert account_id == "test-account-id"
+
+
+async def test_api_key_and_account_id_missing_credentials(fireworks_finetune):
+    # Test missing API key or account ID
+    with patch.object(Config, "shared") as mock_config:
+        mock_config.return_value.fireworks_api_key = None
+        mock_config.return_value.fireworks_account_id = None
+
+        with pytest.raises(ValueError, match="Fireworks API key or account ID not set"):
+            fireworks_finetune.api_key_and_account_id()
+
+
+def test_deployment_display_name(fireworks_finetune):
+    # Test with default ID and name
+    display_name = fireworks_finetune.deployment_display_name()
+    expected = f"Kiln AI fine-tuned model [ID:{fireworks_finetune.datamodel.id}][name:test-finetune]"[
+        :60
+    ]
+    assert display_name == expected
+
+    # Test with a very long name to ensure 60 character limit
+    fireworks_finetune.datamodel.name = "x" * 100
+    display_name = fireworks_finetune.deployment_display_name()
+    assert len(display_name) == 60
+    assert display_name.startswith("Kiln AI fine-tuned model [ID:")
+
+
+async def test_model_id_checking_status_completed(fireworks_finetune):
+    # Test with completed status and valid model ID
+    status_response = (
+        FineTuneStatus(status=FineTuneStatusType.completed, message=""),
+        "model-123",
+    )
+
+    with patch.object(fireworks_finetune, "_status", return_value=status_response):
+        model_id = await fireworks_finetune.model_id_checking_status()
+        assert model_id == "model-123"
+
+
+async def test_model_id_checking_status_not_completed(fireworks_finetune):
+    # Test with non-completed status
+    status_response = (
+        FineTuneStatus(status=FineTuneStatusType.running, message=""),
+        "model-123",
+    )
+
+    with patch.object(fireworks_finetune, "_status", return_value=status_response):
+        model_id = await fireworks_finetune.model_id_checking_status()
+        assert model_id is None
+
+
+async def test_model_id_checking_status_invalid_model_id(fireworks_finetune):
+    # Test with completed status but invalid model ID
+    status_response = (
+        FineTuneStatus(status=FineTuneStatusType.completed, message=""),
+        None,
+    )
+
+    with patch.object(fireworks_finetune, "_status", return_value=status_response):
+        model_id = await fireworks_finetune.model_id_checking_status()
+        assert model_id is None
+
+    # Test with non-string model ID
+    status_response = (
+        FineTuneStatus(status=FineTuneStatusType.completed, message=""),
+        {"id": "model-123"},  # Not a string
+    )
+
+    with patch.object(fireworks_finetune, "_status", return_value=status_response):
+        model_id = await fireworks_finetune.model_id_checking_status()
+        assert model_id is None
+
+
+@pytest.mark.parametrize(
+    "base_model_id,expected_method",
+    [
+        ("accounts/fireworks/models/llama-v3p1-8b-instruct", "_deploy_serverless"),
+        ("accounts/fireworks/models/llama-v3p1-70b-instruct", "_deploy_serverless"),
+        ("some-other-model", "_check_or_deploy_server"),
+    ],
+)
+async def test_deploy_model_selection(
+    fireworks_finetune, base_model_id, expected_method, mock_api_key
+):
+    # Set the base model ID
+    fireworks_finetune.datamodel.base_model_id = base_model_id
+
+    # Mock the deployment methods
+    with (
+        patch.object(
+            fireworks_finetune, "_deploy_serverless", return_value=True
+        ) as mock_serverless,
+        patch.object(
+            fireworks_finetune, "_check_or_deploy_server", return_value=True
+        ) as mock_server,
+    ):
+        result = await fireworks_finetune._deploy()
+
+        # Verify the correct method was called based on the model
+        if expected_method == "_deploy_serverless":
+            mock_serverless.assert_called_once()
+            mock_server.assert_not_called()
+        else:
+            mock_serverless.assert_not_called()
+            mock_server.assert_called_once()
+
+        assert result is True
+
+
+async def test_fetch_all_deployments_request_error(fireworks_finetune, mock_api_key):
+    # Test with error response
+    error_response = MagicMock(spec=httpx.Response)
+    error_response.status_code = 500
+    error_response.text = "Internal Server Error"
+
+    with patch("httpx.AsyncClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.get.side_effect = Exception("API request failed")
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        with pytest.raises(Exception, match="API request failed"):
+            await fireworks_finetune._fetch_all_deployments()
+
+        # Verify API was called with correct parameters
+        mock_client.get.assert_called_once()
+        call_args = mock_client.get.call_args[1]
+        assert "params" in call_args
+        assert call_args["params"]["pageSize"] == 200
+
+
+async def test_fetch_all_deployments_standard_case(fireworks_finetune, mock_api_key):
+    # Test with single page of results
+    mock_deployments = [
+        {"id": "deploy-1", "baseModel": "model-1", "state": "READY"},
+        {"id": "deploy-2", "baseModel": "model-2", "state": "READY"},
+    ]
+
+    success_response = MagicMock(spec=httpx.Response)
+    success_response.status_code = 200
+    success_response.json.return_value = {
+        "deployments": mock_deployments,
+        "nextPageToken": None,
+    }
+
+    with patch("httpx.AsyncClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.get.return_value = success_response
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        deployments = await fireworks_finetune._fetch_all_deployments()
+
+        # Verify API was called correctly
+        mock_client.get.assert_called_once()
+
+        # Verify correct deployments were returned
+        assert deployments == mock_deployments
+        assert len(deployments) == 2
+        assert deployments[0]["id"] == "deploy-1"
+        assert deployments[1]["id"] == "deploy-2"
+
+
+async def test_fetch_all_deployments_paged_case(fireworks_finetune, mock_api_key):
+    # Test with multiple pages of results
+    mock_deployments_page1 = [
+        {"id": "deploy-1", "baseModel": "model-1", "state": "READY"},
+        {"id": "deploy-2", "baseModel": "model-2", "state": "READY"},
+    ]
+
+    mock_deployments_page2 = [
+        {"id": "deploy-3", "baseModel": "model-3", "state": "READY"},
+        {"id": "deploy-4", "baseModel": "model-4", "state": "READY"},
+    ]
+
+    page1_response = MagicMock(spec=httpx.Response)
+    page1_response.status_code = 200
+    page1_response.json.return_value = {
+        "deployments": mock_deployments_page1,
+        "nextPageToken": "page2token",
+    }
+
+    page2_response = MagicMock(spec=httpx.Response)
+    page2_response.status_code = 200
+    page2_response.json.return_value = {
+        "deployments": mock_deployments_page2,
+        "nextPageToken": None,
+    }
+
+    with patch("httpx.AsyncClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.get.side_effect = [page1_response, page2_response]
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        deployments = await fireworks_finetune._fetch_all_deployments()
+
+        # Verify API was called twice (once for each page)
+        assert mock_client.get.call_count == 2
+
+        # Verify first call had no page token
+        first_call_args = mock_client.get.call_args_list[0][1]
+        assert "pageToken" not in first_call_args["params"]
+
+        # Verify second call included the page token
+        second_call_args = mock_client.get.call_args_list[1][1]
+        assert second_call_args["params"]["pageToken"] == "page2token"
+
+        # Verify all deployments from both pages were returned
+        assert len(deployments) == 4
+        assert deployments == mock_deployments_page1 + mock_deployments_page2
+        for deployment in deployments:
+            assert deployment["id"] in [
+                "deploy-1",
+                "deploy-2",
+                "deploy-3",
+                "deploy-4",
+            ]
+
+
+async def test_deploy_server_success(fireworks_finetune, mock_api_key):
+    # Mock response for successful deployment
+    success_response = MagicMock(spec=httpx.Response)
+    success_response.status_code = 200
+    success_response.json.return_value = {"baseModel": "model-123"}
+
+    status_response = (
+        FineTuneStatus(status=FineTuneStatusType.completed, message=""),
+        "model-123",
+    )
+
+    with (
+        patch("httpx.AsyncClient") as mock_client_class,
+        patch.object(
+            fireworks_finetune, "model_id_checking_status", return_value="model-123"
+        ),
+    ):
+        mock_client = AsyncMock()
+        mock_client.post.return_value = success_response
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        result = await fireworks_finetune._deploy_server()
+
+        # Verify result
+        assert result is True
+
+        # Verify fine_tune_model_id was updated
+        assert fireworks_finetune.datamodel.fine_tune_model_id == "model-123"
+
+        # Verify API was called with correct parameters
+        mock_client.post.assert_called_once()
+        call_args = mock_client.post.call_args[1]
+        assert "json" in call_args
+        assert call_args["json"]["baseModel"] == "model-123"
+        assert call_args["json"]["minReplicaCount"] == 0
+        assert "autoscalingPolicy" in call_args["json"]
+        assert call_args["json"]["autoscalingPolicy"]["scaleToZeroWindow"] == "300s"
+
+        # load the datamodel from the file and confirm the fine_tune_model_id was updated
+        loaded_datamodel = FinetuneModel.load_from_file(
+            fireworks_finetune.datamodel.path
+        )
+        assert loaded_datamodel.fine_tune_model_id == "model-123"
+
+
+async def test_deploy_server_failure(fireworks_finetune, mock_api_key):
+    # Mock response for failed deployment
+    failure_response = MagicMock(spec=httpx.Response)
+    failure_response.status_code = 500
+    failure_response.text = "Internal Server Error"
+
+    with (
+        patch("httpx.AsyncClient") as mock_client_class,
+        patch.object(
+            fireworks_finetune, "model_id_checking_status", return_value="model-123"
+        ),
+    ):
+        mock_client = AsyncMock()
+        mock_client.post.return_value = failure_response
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        result = await fireworks_finetune._deploy_server()
+
+        # Verify result
+        assert result is False
+
+        # Verify API was called
+        mock_client.post.assert_called_once()
+
+
+async def test_deploy_server_non_200_but_valid_response(
+    fireworks_finetune, mock_api_key
+):
+    # Mock response with non-200 status but valid JSON response
+    mixed_response = MagicMock(spec=httpx.Response)
+    mixed_response.status_code = 200
+    mixed_response.json.return_value = {"not_baseModel": "something-else"}
+
+    with (
+        patch("httpx.AsyncClient") as mock_client_class,
+        patch.object(
+            fireworks_finetune, "model_id_checking_status", return_value="model-123"
+        ),
+    ):
+        mock_client = AsyncMock()
+        mock_client.post.return_value = mixed_response
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        result = await fireworks_finetune._deploy_server()
+
+        # Verify result - should fail because baseModel is missing
+        assert result is False
+
+
+async def test_deploy_server_missing_model_id(fireworks_finetune, mock_api_key):
+    # Test when model_id_checking_status returns None
+    with patch.object(
+        fireworks_finetune, "model_id_checking_status", return_value=None
+    ):
+        result = await fireworks_finetune._deploy_server()
+
+        # Verify result - should fail because model ID is missing
+        assert result is False
+
+
+@pytest.mark.parametrize(
+    "state,expected_already_deployed",
+    [
+        ("READY", True),
+        ("CREATING", True),
+        ("FAILED", False),
+    ],
+)
+async def test_check_or_deploy_server_already_deployed(
+    fireworks_finetune, mock_api_key, state, expected_already_deployed
+):
+    # Test when model is already deployed (should return True without calling _deploy_server)
+
+    # Set a fine_tune_model_id so we search for deployments
+    fireworks_finetune.datamodel.fine_tune_model_id = "model-123"
+
+    # Mock deployments including one matching our model ID
+    mock_deployments = [
+        {"id": "deploy-1", "baseModel": "different-model", "state": "READY"},
+        {"id": "deploy-2", "baseModel": "model-123", "state": state},
+    ]
+
+    with (
+        patch.object(
+            fireworks_finetune, "_fetch_all_deployments", return_value=mock_deployments
+        ) as mock_fetch,
+        patch.object(fireworks_finetune, "_deploy_server") as mock_deploy,
+    ):
+        mock_deploy.return_value = True
+        result = await fireworks_finetune._check_or_deploy_server()
+        # Even true if the model is in a non-ready state, as we'll call deploy (checked below)
+        assert result is True
+
+        if expected_already_deployed:
+            assert mock_deploy.call_count == 0
+        else:
+            assert mock_deploy.call_count == 1
+
+        # Verify _fetch_all_deployments was called
+        mock_fetch.assert_called_once()
+
+
+async def test_check_or_deploy_server_not_deployed(fireworks_finetune, mock_api_key):
+    # Test when model exists but isn't deployed (should call _deploy_server)
+
+    # Set a fine_tune_model_id so we search for deployments
+    fireworks_finetune.datamodel.fine_tune_model_id = "model-123"
+
+    # Mock deployments without our model ID
+    mock_deployments = [
+        {"id": "deploy-1", "baseModel": "different-model-1", "state": "READY"},
+        {"id": "deploy-2", "baseModel": "different-model-2", "state": "READY"},
+    ]
+
+    with (
+        patch.object(
+            fireworks_finetune, "_fetch_all_deployments", return_value=mock_deployments
+        ) as mock_fetch,
+        patch.object(
+            fireworks_finetune, "_deploy_server", return_value=True
+        ) as mock_deploy,
+    ):
+        result = await fireworks_finetune._check_or_deploy_server()
+
+        # Verify method returned True (from _deploy_server)
+        assert result is True
+
+        # Verify _fetch_all_deployments was called
+        mock_fetch.assert_called_once()
+
+        # Verify _deploy_server was called since model is not deployed
+        mock_deploy.assert_called_once()
+
+
+async def test_check_or_deploy_server_no_model_id(fireworks_finetune, mock_api_key):
+    # Test when no fine_tune_model_id exists (should skip fetch and call _deploy_server directly)
+
+    # Ensure no fine_tune_model_id is set
+    fireworks_finetune.datamodel.fine_tune_model_id = None
+
+    with (
+        patch.object(fireworks_finetune, "_fetch_all_deployments") as mock_fetch,
+        patch.object(
+            fireworks_finetune, "_deploy_server", return_value=True
+        ) as mock_deploy,
+    ):
+        result = await fireworks_finetune._check_or_deploy_server()
+
+        # Verify method returned True (from _deploy_server)
+        assert result is True
+
+        # Verify _fetch_all_deployments was NOT called
+        mock_fetch.assert_not_called()
+
+        # Verify _deploy_server was called directly
+        mock_deploy.assert_called_once()
+
+
+async def test_check_or_deploy_server_deploy_fails(fireworks_finetune, mock_api_key):
+    # Test when deployment fails
+
+    # Ensure no fine_tune_model_id is set
+    fireworks_finetune.datamodel.fine_tune_model_id = None
+
+    with (
+        patch.object(
+            fireworks_finetune, "_deploy_server", return_value=False
+        ) as mock_deploy,
+    ):
+        result = await fireworks_finetune._check_or_deploy_server()
+
+        # Verify method returned False (from _deploy_server)
+        assert result is False
+
+        # Verify _deploy_server was called
+        mock_deploy.assert_called_once()
+
+
+async def test_fetch_all_deployments_invalid_json(fireworks_finetune, mock_api_key):
+    # Test with invalid JSON response (missing 'deployments' key)
+    invalid_response = MagicMock(spec=httpx.Response)
+    invalid_response.status_code = 200
+    invalid_response.json.return_value = {
+        "some_other_key": "value",
+        # No 'deployments' key
+    }
+    invalid_response.text = '{"some_other_key": "value"}'
+
+    with patch("httpx.AsyncClient") as mock_client_class:
+        mock_client = AsyncMock()
+        mock_client.get.return_value = invalid_response
+        mock_client_class.return_value.__aenter__.return_value = mock_client
+
+        with pytest.raises(
+            ValueError,
+            match="Invalid response from Fireworks. Expected list of deployments in 'deployments' key",
+        ):
+            await fireworks_finetune._fetch_all_deployments()
+
+        # Verify API was called
+        mock_client.get.assert_called_once()

kiln_ai/adapters/fine_tune/test_together_finetune.py
CHANGED
@@ -356,6 +356,8 @@ async def test_start_success(
         model=together_finetune.datamodel.base_model_id,
         lora=True,
         suffix=f"kiln_ai_{together_finetune.datamodel.id}"[:40],
+        wandb_api_key=Config.shared().wandb_api_key,
+        wandb_project_name="Kiln_AI",
     )
 
     # Check that datamodel was updated correctly

kiln_ai/adapters/fine_tune/together_finetune.py
CHANGED
@@ -130,6 +130,8 @@ class TogetherFinetune(BaseFinetuneAdapter):
             training_file=train_file_id,
             validation_file=validation_file_id,
             model=self.datamodel.base_model_id,
+            wandb_api_key=Config.shared().wandb_api_key,
+            wandb_project_name="Kiln_AI" if Config.shared().wandb_api_key else None,
             **self._build_finetune_parameters(),
         )
 

kiln_ai/adapters/ml_model_list.py
CHANGED
@@ -133,7 +133,7 @@ class KilnModelProvider(BaseModel):
     supports_structured_output: Whether the provider supports structured output formats
     supports_data_gen: Whether the provider supports data generation
     untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable.
-    provider_finetune_id: The finetune ID for the provider, if applicable
+    provider_finetune_id: The finetune ID for the provider, if applicable. Some providers like Fireworks load these from an API.
     structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output.
     parser: A parser to use for the model, if applicable
     reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.
@@ -576,7 +576,6 @@ built_in_models: List[KilnModel] = [
             # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
             structured_output_mode=StructuredOutputMode.json_instruction_and_object,
             supports_data_gen=False,
-            provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
             model_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
         ),
         KilnModelProvider(
@@ -618,7 +617,6 @@ built_in_models: List[KilnModel] = [
             name=ModelProviderName.fireworks_ai,
             # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
             structured_output_mode=StructuredOutputMode.function_calling_weak,
-            provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
             model_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
         ),
         KilnModelProvider(
@@ -764,7 +762,6 @@ built_in_models: List[KilnModel] = [
         ),
         KilnModelProvider(
            name=ModelProviderName.fireworks_ai,
-            provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
             supports_structured_output=False,
             supports_data_gen=False,
             model_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
@@ -890,8 +887,6 @@ built_in_models: List[KilnModel] = [
         ),
         KilnModelProvider(
             name=ModelProviderName.fireworks_ai,
-            # Finetuning not live yet
-            # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
             # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
             structured_output_mode=StructuredOutputMode.function_calling_weak,
             model_id="accounts/fireworks/models/llama-v3p3-70b-instruct",

kiln_ai/adapters/provider_tools.py
CHANGED
@@ -197,8 +197,8 @@ def lite_llm_config(
     if provider is None:
         raise ValueError(f"OpenAI compatible provider {openai_provider_name} not found")
 
-    # API key optional some providers don't use it
-    api_key = provider.get("api_key")
+    # API key optional - some providers like Ollama don't use it, but LiteLLM errors without one
+    api_key = provider.get("api_key") or "NA"
     base_url = provider.get("base_url")
     if base_url is None:
         raise ValueError(

kiln_ai/adapters/test_provider_tools.py
CHANGED
@@ -550,14 +550,14 @@ def test_litellm_provider_model_success(mock_shared_config):
 
 
 def test_lite_llm_config_no_api_key(mock_shared_config):
-    """Test provider creation without API key (should work as some providers don't require it)"""
+    """Test provider creation without API key (should work as some providers don't require it, but should pass NA to LiteLLM as it requires one)"""
     model_id = "no_key_provider::gpt-4"
 
     config = lite_llm_config(model_id)
 
     assert config.provider_name == ModelProviderName.openai_compatible
     assert config.model_name == "gpt-4"
-    assert config.additional_body_options == {"api_key":
+    assert config.additional_body_options == {"api_key": "NA"}
     assert config.base_url == "https://api.nokey.com"
 
 
kiln_ai/utils/config.py
CHANGED
@@ -119,6 +119,15 @@ class Config:
                 env_var="TOGETHERAI_API_KEY",
                 sensitive=True,
             ),
+            "wandb_api_key": ConfigProperty(
+                str,
+                env_var="WANDB_API_KEY",
+                sensitive=True,
+            ),
+            "wandb_base_url": ConfigProperty(
+                str,
+                env_var="WANDB_BASE_URL",
+            ),
             "custom_models": ConfigProperty(
                 list,
                 default_lambda=lambda: [],
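Note: the two new config properties above mean the W&B key and base URL can be supplied through the environment. Below is a minimal sketch of how they are consumed; the shell exports in the comments are illustrative, and the attribute access mirrors the Config.shared().wandb_api_key calls in the fine-tune adapters earlier in this diff.

# Sketch: configure W&B reporting for fine-tune jobs via environment variables.
#   export WANDB_API_KEY=<your key>              # sensitive, stored like other provider keys
#   export WANDB_BASE_URL=<self-hosted W&B URL>  # optional
from kiln_ai.utils.config import Config

wandb_key = Config.shared().wandb_api_key  # falsy when not configured
if wandb_key:
    print("Fine-tune jobs will report to the 'Kiln_AI' W&B project")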

{kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kiln-ai
-Version: 0.
+Version: 0.14.0
 Summary: Kiln AI
 Project-URL: Homepage, https://getkiln.ai
 Project-URL: Repository, https://github.com/Kiln-AI/kiln
@@ -26,7 +26,7 @@ Requires-Dist: pydantic>=2.9.2
 Requires-Dist: pytest-benchmark>=5.1.0
 Requires-Dist: pytest-cov>=6.0.0
 Requires-Dist: pyyaml>=6.0.2
-Requires-Dist: together
+Requires-Dist: together
 Requires-Dist: typing-extensions>=4.12.2
 Description-Content-Type: text/markdown
 

{kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 kiln_ai/__init__.py,sha256=Sc4z8LRVFMwJUoc_DPVUriSXTZ6PO9MaJ80PhRbKyB8,34
 kiln_ai/adapters/__init__.py,sha256=XjGmWagEyOEVwVIAxjN5rYNsQWIEACT5DB7MMTxdPss,1005
-kiln_ai/adapters/adapter_registry.py,sha256=
-kiln_ai/adapters/ml_model_list.py,sha256=
+kiln_ai/adapters/adapter_registry.py,sha256=KmMHYQ3mxpjVLE6D-hMNWCGt6Cw9JvnFn6nMb48GE8Y,9166
+kiln_ai/adapters/ml_model_list.py,sha256=f_z1daFR_w4-ccJ4OWwqlIMY0ILFJt4X5LdQb3AMt_c,58592
 kiln_ai/adapters/ollama_tools.py,sha256=uObtLWfqKb9RXHN-TGGw2Y1FQlEMe0u8FgszI0zQn6U,3550
 kiln_ai/adapters/prompt_builders.py,sha256=LYHTIaisQMBFtWDRIGo1QJgOsmQ-NBpQ8fI4eImHxaQ,15269
-kiln_ai/adapters/provider_tools.py,sha256=
+kiln_ai/adapters/provider_tools.py,sha256=UL3XEnnxs1TrbqPPxxHSvnL7aBd84ggh38lI0yEsX6A,14725
 kiln_ai/adapters/run_output.py,sha256=RAi2Qp6dmqJVNm3CxbNTdAuhitHfH5NiUGbf6ygUP-k,257
 kiln_ai/adapters/test_adapter_registry.py,sha256=eDLHqv9mwgdde221pa47bTV87vCXwkUyjqsas-iFUrY,6123
 kiln_ai/adapters/test_generate_docs.py,sha256=M-uKcgF3hQmlEFOJ0o7DyL-9RgitGzkfROV-Dxtooec,2770
 kiln_ai/adapters/test_ollama_tools.py,sha256=xAUzL0IVmmXadVehJu1WjqbhpKEYGAgGt3pWx7hrubc,2514
 kiln_ai/adapters/test_prompt_adaptors.py,sha256=J1ZGZ8GG7SxP3_J3Zw0e6XmZY4NyPmUGX3IPgjh2LD8,7767
 kiln_ai/adapters/test_prompt_builders.py,sha256=5Xvfr-oQg_LLrle6UqfpRHWcPUYa8ywG3aL1rM7q1Jw,22054
-kiln_ai/adapters/test_provider_tools.py,sha256=
+kiln_ai/adapters/test_provider_tools.py,sha256=mzMubpUupQu8pXhjDTj0_Kgrr-xcu_crj9xpcgcAzzA,26671
 kiln_ai/adapters/data_gen/__init__.py,sha256=QTZWaf7kq5BorhPvexJfwDEKmjRmIbhwW9ei8LW2SIs,276
 kiln_ai/adapters/data_gen/data_gen_prompts.py,sha256=kudjHnAz7L3q0k_NLyTlaIV7M0uRFrxXNcfcnjOE2uc,5810
 kiln_ai/adapters/data_gen/data_gen_task.py,sha256=0PuYCcj09BtpgNj23mKj_L45mKZBdV5VreUeZ-Tj_xM,6642
@@ -19,24 +19,24 @@ kiln_ai/adapters/data_gen/test_data_gen_task.py,sha256=cRKUKMvC0uVompbmPTKwbnQ_N
 kiln_ai/adapters/eval/__init__.py,sha256=0ptbK0ZxWuraxGn_WMgmE1tcaq0k5t-g-52kVohvWCg,693
 kiln_ai/adapters/eval/base_eval.py,sha256=jVXMiVBC07ZnLEuZVAjUAYewsnuV99put39n_GZcG1M,7261
 kiln_ai/adapters/eval/eval_runner.py,sha256=h3DvRFM5J5LDJqaLzNJ-q9i5LRycv2J9Ev5nw1mUDUQ,10806
-kiln_ai/adapters/eval/g_eval.py,sha256=
+kiln_ai/adapters/eval/g_eval.py,sha256=d3UcBsZWeDt7cWp4uvDcfG7qdGLsGaZEBsIEqkpiWh4,15253
 kiln_ai/adapters/eval/registry.py,sha256=gZ_s0VgEx79Fswkgi1tS4yOl7lzpkvUBJZ62RldhM_w,626
 kiln_ai/adapters/eval/test_base_eval.py,sha256=_1CiOUOiBt1R_gGYMcRblrPkHf-H4uIlvfcHj5-Wh7o,10724
 kiln_ai/adapters/eval/test_eval_runner.py,sha256=82WPE_frNRTSQ2lylqT0inkqcDgM72nWt8GEuoDkJ7w,18568
-kiln_ai/adapters/eval/test_g_eval.py,sha256
+kiln_ai/adapters/eval/test_g_eval.py,sha256=-Stx7E0D-WAH1HWrRSp48CiGsf-no1SHeFF9IqVXeMI,16433
 kiln_ai/adapters/eval/test_g_eval_data.py,sha256=8caiZfLWnXVX8alrBPrH7L7gqqSS9vO7u6PzcHurQcA,27769
 kiln_ai/adapters/fine_tune/__init__.py,sha256=DxdTR60chwgck1aEoVYWyfWi6Ed2ZkdJj0lar-SEAj4,257
-kiln_ai/adapters/fine_tune/base_finetune.py,sha256=
+kiln_ai/adapters/fine_tune/base_finetune.py,sha256=ORTclQTQYksMWPu7vNoD7wBzOIqNVK0YOwFEnvsKPWA,5759
 kiln_ai/adapters/fine_tune/dataset_formatter.py,sha256=qRhSSkMhTWn13OMb6LKPVwAU7uY4bB49GDiVSuhDkNg,14449
 kiln_ai/adapters/fine_tune/finetune_registry.py,sha256=CvcEVxtKwjgCMA-oYH9Tpjn1DVWmMzgHpXJOZ0YQA8k,610
-kiln_ai/adapters/fine_tune/fireworks_finetune.py,sha256=
+kiln_ai/adapters/fine_tune/fireworks_finetune.py,sha256=OlXp8j6Afwvk6-ySwA3Q7iuqBlKO7VLeAfNCnB3pZPI,19963
 kiln_ai/adapters/fine_tune/openai_finetune.py,sha256=Dz9E_0BWfrIkvv8ArZe-RKPwbIKPZ3v8rfbc3JELyTY,8571
-kiln_ai/adapters/fine_tune/test_base_finetune.py,sha256=
+kiln_ai/adapters/fine_tune/test_base_finetune.py,sha256=sjuDgJDA_dynGRelx9_wXdssaxAYIuEG-Z8NzRx9Hl0,10559
 kiln_ai/adapters/fine_tune/test_dataset_formatter.py,sha256=T3jbFZooLVBaGCE0LUVxwPxzM3l8IY41zUj3jPk-Zi8,24027
-kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py,sha256=
+kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py,sha256=oLyLEG4TwW452lV2mvUo-wImLxzSwOuoKKeYFuGh3k8,36744
 kiln_ai/adapters/fine_tune/test_openai_finetune.py,sha256=H63Xk2PNHbt5Ev5IQpdR9JZ4uz-Huo2gfuC4mHHqe0w,20011
-kiln_ai/adapters/fine_tune/test_together_finetune.py,sha256=
-kiln_ai/adapters/fine_tune/together_finetune.py,sha256=
+kiln_ai/adapters/fine_tune/test_together_finetune.py,sha256=BUJFsyq_g77gU0JN3hg6FMBvqb0DIyTeAek-wxomKIg,18090
+kiln_ai/adapters/fine_tune/together_finetune.py,sha256=EbMPsTyKMubfwOalkFLiNFlMFIRKxLibzMTyLeUkle4,14010
 kiln_ai/adapters/model_adapters/__init__.py,sha256=m5GRtOHwVVvp_XDOss8c1X3NFf1wQQlC2eBgI4tXQhM,212
 kiln_ai/adapters/model_adapters/base_adapter.py,sha256=ifPJMg0nEKamfOSmBIsnp_MRFfBs47FLeQrLbav34yA,9872
 kiln_ai/adapters/model_adapters/litellm_adapter.py,sha256=c4J_tIpM96KWS2qzoPaQmBj7X7mHyRMShdkmEh7_EHM,16129
@@ -89,7 +89,7 @@ kiln_ai/datamodel/test_prompt_id.py,sha256=ihyXVPQi0dSLGnBM7rTXRnVaiWXhh7HJmSy4n
 kiln_ai/datamodel/test_registry.py,sha256=PhS4anLi5Bf_023obuTlO5DALhtPB8WIc_bX12Yg6Po,2705
 kiln_ai/datamodel/test_task.py,sha256=FYyoEqJXQIy8rcBsLTdki4-1z9COnZQk1-aoS3ZoNuU,5307
 kiln_ai/utils/__init__.py,sha256=PTD0MwBCKAMIOGsTAwsFaJOusTJJoRFTfOGqRvCaU-E,142
-kiln_ai/utils/config.py,sha256=
+kiln_ai/utils/config.py,sha256=kAgb_4nSnb-IWbLVS4FBygYmnHypJADjDEk_Fh-eLeg,8479
 kiln_ai/utils/dataset_import.py,sha256=HvTCdK9OO8WE3Runn8_Vsks5KpCTckGIzAA7JKe-cWI,6956
 kiln_ai/utils/exhaustive_error.py,sha256=TkkRixIAR3CPEKHeAJzyv0mtxp6BxUBKMvobA3vzQug,262
 kiln_ai/utils/formatting.py,sha256=VtB9oag0lOGv17dwT7OPX_3HzBfaU9GsLH-iLete0yM,97
@@ -97,7 +97,7 @@ kiln_ai/utils/name_generator.py,sha256=v26TgpCwQbhQFcZvzgjZvURinjrOyyFhxpsI6NQrH
 kiln_ai/utils/test_config.py,sha256=Jw3nMFeIgZUsZDRJJY2HpB-2EkR2NoZ-rDe_o9oA7ws,9174
 kiln_ai/utils/test_dataset_import.py,sha256=ZZOt7zqtaEIlMMx0VNXyRegDvnVqbWY2bcz-iMY_Oag,17427
 kiln_ai/utils/test_name_geneator.py,sha256=9-hSTBshyakqlPbFnNcggwLrL7lcPTitauBYHg9jFWI,1513
-kiln_ai-0.
-kiln_ai-0.
-kiln_ai-0.
-kiln_ai-0.
+kiln_ai-0.14.0.dist-info/METADATA,sha256=EjgZOnknE7P9uW5BsIFJZYQAN-aUQ817SAEXjtqtjK0,12231
+kiln_ai-0.14.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kiln_ai-0.14.0.dist-info/licenses/LICENSE.txt,sha256=_NA5pnTYgRRr4qH6lE3X-TuZJ8iRcMUi5ASoGr-lEx8,1209
+kiln_ai-0.14.0.dist-info/RECORD,,

{kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/WHEEL
File without changes

{kiln_ai-0.13.0.dist-info → kiln_ai-0.14.0.dist-info}/licenses/LICENSE.txt
File without changes