kiln-ai 0.11.1__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.

Note: this version of kiln-ai was flagged as a potentially problematic release.

Files changed (80)
  1. kiln_ai/adapters/__init__.py +4 -0
  2. kiln_ai/adapters/adapter_registry.py +163 -39
  3. kiln_ai/adapters/data_gen/data_gen_task.py +18 -0
  4. kiln_ai/adapters/eval/__init__.py +28 -0
  5. kiln_ai/adapters/eval/base_eval.py +164 -0
  6. kiln_ai/adapters/eval/eval_runner.py +270 -0
  7. kiln_ai/adapters/eval/g_eval.py +368 -0
  8. kiln_ai/adapters/eval/registry.py +16 -0
  9. kiln_ai/adapters/eval/test_base_eval.py +325 -0
  10. kiln_ai/adapters/eval/test_eval_runner.py +641 -0
  11. kiln_ai/adapters/eval/test_g_eval.py +498 -0
  12. kiln_ai/adapters/eval/test_g_eval_data.py +4 -0
  13. kiln_ai/adapters/fine_tune/base_finetune.py +16 -2
  14. kiln_ai/adapters/fine_tune/finetune_registry.py +2 -0
  15. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +4 -1
  16. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +1 -1
  17. kiln_ai/adapters/fine_tune/test_openai_finetune.py +1 -1
  18. kiln_ai/adapters/fine_tune/test_together_finetune.py +531 -0
  19. kiln_ai/adapters/fine_tune/together_finetune.py +325 -0
  20. kiln_ai/adapters/ml_model_list.py +758 -163
  21. kiln_ai/adapters/model_adapters/__init__.py +2 -4
  22. kiln_ai/adapters/model_adapters/base_adapter.py +61 -43
  23. kiln_ai/adapters/model_adapters/litellm_adapter.py +391 -0
  24. kiln_ai/adapters/model_adapters/litellm_config.py +13 -0
  25. kiln_ai/adapters/model_adapters/test_base_adapter.py +22 -13
  26. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +407 -0
  27. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +41 -19
  28. kiln_ai/adapters/model_adapters/test_structured_output.py +59 -35
  29. kiln_ai/adapters/ollama_tools.py +3 -3
  30. kiln_ai/adapters/parsers/r1_parser.py +19 -14
  31. kiln_ai/adapters/parsers/test_r1_parser.py +17 -5
  32. kiln_ai/adapters/prompt_builders.py +80 -42
  33. kiln_ai/adapters/provider_tools.py +50 -58
  34. kiln_ai/adapters/repair/repair_task.py +9 -21
  35. kiln_ai/adapters/repair/test_repair_task.py +6 -6
  36. kiln_ai/adapters/run_output.py +3 -0
  37. kiln_ai/adapters/test_adapter_registry.py +26 -29
  38. kiln_ai/adapters/test_generate_docs.py +4 -4
  39. kiln_ai/adapters/test_ollama_tools.py +0 -1
  40. kiln_ai/adapters/test_prompt_adaptors.py +47 -33
  41. kiln_ai/adapters/test_prompt_builders.py +91 -31
  42. kiln_ai/adapters/test_provider_tools.py +26 -81
  43. kiln_ai/datamodel/__init__.py +50 -952
  44. kiln_ai/datamodel/basemodel.py +2 -0
  45. kiln_ai/datamodel/datamodel_enums.py +60 -0
  46. kiln_ai/datamodel/dataset_filters.py +114 -0
  47. kiln_ai/datamodel/dataset_split.py +170 -0
  48. kiln_ai/datamodel/eval.py +298 -0
  49. kiln_ai/datamodel/finetune.py +105 -0
  50. kiln_ai/datamodel/json_schema.py +7 -1
  51. kiln_ai/datamodel/project.py +23 -0
  52. kiln_ai/datamodel/prompt.py +37 -0
  53. kiln_ai/datamodel/prompt_id.py +83 -0
  54. kiln_ai/datamodel/strict_mode.py +24 -0
  55. kiln_ai/datamodel/task.py +181 -0
  56. kiln_ai/datamodel/task_output.py +328 -0
  57. kiln_ai/datamodel/task_run.py +164 -0
  58. kiln_ai/datamodel/test_basemodel.py +19 -11
  59. kiln_ai/datamodel/test_dataset_filters.py +71 -0
  60. kiln_ai/datamodel/test_dataset_split.py +32 -8
  61. kiln_ai/datamodel/test_datasource.py +22 -2
  62. kiln_ai/datamodel/test_eval_model.py +635 -0
  63. kiln_ai/datamodel/test_example_models.py +9 -13
  64. kiln_ai/datamodel/test_json_schema.py +23 -0
  65. kiln_ai/datamodel/test_models.py +2 -2
  66. kiln_ai/datamodel/test_prompt_id.py +129 -0
  67. kiln_ai/datamodel/test_task.py +159 -0
  68. kiln_ai/utils/config.py +43 -1
  69. kiln_ai/utils/dataset_import.py +232 -0
  70. kiln_ai/utils/test_dataset_import.py +596 -0
  71. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/METADATA +86 -6
  72. kiln_ai-0.13.0.dist-info/RECORD +103 -0
  73. kiln_ai/adapters/model_adapters/langchain_adapters.py +0 -302
  74. kiln_ai/adapters/model_adapters/openai_compatible_config.py +0 -11
  75. kiln_ai/adapters/model_adapters/openai_model_adapter.py +0 -246
  76. kiln_ai/adapters/model_adapters/test_langchain_adapter.py +0 -350
  77. kiln_ai/adapters/model_adapters/test_openai_model_adapter.py +0 -225
  78. kiln_ai-0.11.1.dist-info/RECORD +0 -76
  79. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/WHEEL +0 -0
  80. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -11,7 +11,6 @@ from kiln_ai.datamodel import (
     Finetune,
     Project,
     Task,
-    TaskDeterminism,
     TaskOutput,
     TaskOutputRating,
     TaskOutputRatingType,
@@ -125,7 +124,6 @@ def test_structured_output_workflow(tmp_path):
         name="Structured Output Task",
         parent=project,
         instruction="Generate a JSON object with name and age",
-        determinism=TaskDeterminism.semantic_match,
         output_json_schema=json.dumps(
             {
                 "type": "object",
@@ -142,7 +140,7 @@ def test_structured_output_workflow(tmp_path):
 
     # Create runs
     runs = []
-    for source in DataSourceType:
+    for source in [DataSourceType.human, DataSourceType.synthetic]:
         for _ in range(2):
             task_run = TaskRun(
                 input="Generate info for John Doe",
@@ -157,7 +155,7 @@ def test_structured_output_workflow(tmp_path):
                         "adapter_name": "TestAdapter",
                         "model_name": "GPT-4",
                         "model_provider": "OpenAI",
-                        "prompt_builder_name": "TestPromptBuilder",
+                        "prompt_id": "simple_prompt_builder",
                     },
                 ),
                 parent=task,
@@ -216,9 +214,9 @@ def test_structured_output_workflow(tmp_path):
 
     assert loaded_task.name == "Structured Output Task"
     assert len(loaded_task.requirements) == 2
-    assert len(loaded_task.runs()) == 5
-
     loaded_runs = loaded_task.runs()
+    assert len(loaded_runs) == 5
+
     for task_run in loaded_runs:
         output = task_run.output
         assert output.rating is not None
@@ -472,7 +470,7 @@ def test_valid_synthetic_task_output():
                "adapter_name": "TestAdapter",
                "model_name": "GPT-4",
                "model_provider": "OpenAI",
-               "prompt_builder_name": "TestPromptBuilder",
+               "prompt_id": "simple_prompt_builder",
            },
        ),
    )
@@ -480,7 +478,7 @@ def test_valid_synthetic_task_output():
    assert output.source.properties["adapter_name"] == "TestAdapter"
    assert output.source.properties["model_name"] == "GPT-4"
    assert output.source.properties["model_provider"] == "OpenAI"
-   assert output.source.properties["prompt_builder_name"] == "TestPromptBuilder"
+   assert output.source.properties["prompt_id"] == "simple_prompt_builder"
 
 
 def test_invalid_synthetic_task_output_missing_keys():
@@ -509,23 +507,21 @@ def test_invalid_synthetic_task_output_empty_values():
                "adapter_name": "TestAdapter",
                "model_name": "",
                "model_provider": "OpenAI",
-               "prompt_builder_name": "TestPromptBuilder",
+               "prompt_id": "simple_prompt_builder",
            },
        ),
    )
 
 
 def test_invalid_synthetic_task_output_non_string_values():
-    with pytest.raises(
-        ValidationError, match="'prompt_builder_name' must be of type str"
-    ):
+    with pytest.raises(ValidationError, match="'prompt_id' must be of type str"):
        DataSource(
            type=DataSourceType.synthetic,
            properties={
                "adapter_name": "TestAdapter",
                "model_name": "GPT-4",
                "model_provider": "OpenAI",
-               "prompt_builder_name": 123,
+               "prompt_id": 123,
            },
        )
 
@@ -4,6 +4,7 @@ from pydantic import BaseModel
 from kiln_ai.datamodel.json_schema import (
     JsonObjectSchema,
     schema_from_json_str,
+    string_to_json_key,
     validate_schema,
 )
 
@@ -123,3 +124,25 @@ def test_triangle_schema():
     validate_schema({"a": 1, "b": 2, "c": 3}, json_triangle_schema)
     with pytest.raises(Exception):
         validate_schema({"a": 1, "b": 2, "c": "3"}, json_triangle_schema)
+
+
+@pytest.mark.parametrize(
+    "input_str,expected",
+    [
+        ("hello world", "hello_world"),
+        ("Hello World", "hello_world"),
+        ("hello_world", "hello_world"),
+        ("HELLO WORLD", "hello_world"),
+        ("hello123", "hello123"),
+        ("hello-world", "helloworld"),
+        ("hello!@#$%^&*()world", "helloworld"),
+        (" hello world ", "hello__world"),
+        ("hello__world", "hello__world"),
+        ("", ""),
+        ("!@#$%", ""),
+        ("snake_case_string", "snake_case_string"),
+        ("camelCaseString", "camelcasestring"),
+    ],
+)
+def test_string_to_json_key(input_str: str, expected: str):
+    assert string_to_json_key(input_str) == expected
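The new test pins down the expected behaviour of string_to_json_key. A rough equivalent written only from those expectations, not from the actual implementation in kiln_ai.datamodel.json_schema:

import re

def string_to_json_key_sketch(name: str) -> str:
    # Inferred from the parametrized cases above: strip surrounding
    # whitespace, lowercase, turn spaces into underscores, and drop
    # any character that is not alphanumeric or an underscore.
    # (The exact whitespace handling is an assumption.)
    lowered = name.strip().lower().replace(" ", "_")
    return re.sub(r"[^a-z0-9_]", "", lowered)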
@@ -385,7 +385,7 @@ def test_task_run_input_source_validation(tmp_path):
     assert task_run.input_source is not None
 
     # Test 3: Creating without input_source should fail when strict mode is on
-    with patch("kiln_ai.datamodel.strict_mode", return_value=True):
+    with patch("kiln_ai.datamodel.task_run.strict_mode", return_value=True):
         with pytest.raises(ValueError) as exc_info:
             task_run = TaskRun(
                 input="test input 3",
@@ -442,7 +442,7 @@ def test_task_output_source_validation(tmp_path):
     assert task_output.source is not None
 
     # Test 3: Creating without source should fail when strict mode is on
-    with patch("kiln_ai.datamodel.strict_mode", return_value=True):
+    with patch("kiln_ai.datamodel.task_output.strict_mode", return_value=True):
         with pytest.raises(ValueError) as exc_info:
             task_output = TaskOutput(
                 output="test output 3",
@@ -0,0 +1,129 @@
+import pytest
+from pydantic import BaseModel, ValidationError
+
+from kiln_ai.datamodel import (
+    PromptGenerators,
+    PromptId,
+)
+from kiln_ai.datamodel.prompt_id import is_frozen_prompt
+
+
+# Test model to validate the PromptId type
+class ModelTester(BaseModel):
+    prompt_id: PromptId
+
+
+def test_valid_prompt_generator_names():
+    """Test that valid prompt generator names are accepted"""
+    for generator in PromptGenerators:
+        model = ModelTester(prompt_id=generator.value)
+        assert model.prompt_id == generator.value
+
+
+def test_valid_saved_prompt_id():
+    """Test that valid saved prompt IDs are accepted"""
+    valid_id = "id::prompt_789"
+    model = ModelTester(prompt_id=valid_id)
+    assert model.prompt_id == valid_id
+
+
+def test_valid_fine_tune_prompt_id():
+    """Test that valid fine-tune prompt IDs are accepted"""
+    valid_id = "fine_tune_prompt::ft_123456"
+    model = ModelTester(prompt_id=valid_id)
+    assert model.prompt_id == valid_id
+
+
+@pytest.mark.parametrize(
+    "invalid_id",
+    [
+        pytest.param("id::project_123::task_456", id="missing_prompt_id"),
+        pytest.param("id::task_456::prompt_789", id="too_many_parts"),
+        pytest.param("id::", id="empty_parts"),
+    ],
+)
+def test_invalid_saved_prompt_id_format(invalid_id):
+    """Test that invalid saved prompt ID formats are rejected"""
+    with pytest.raises(ValidationError, match="Invalid saved prompt ID"):
+        ModelTester(prompt_id=invalid_id)
+
+
+@pytest.mark.parametrize(
+    "invalid_id,expected_error",
+    [
+        ("fine_tune_prompt::", "Invalid fine-tune prompt ID: fine_tune_prompt::"),
+        ("fine_tune_prompt", "Invalid prompt ID: fine_tune_prompt"),
+    ],
+)
+def test_invalid_fine_tune_prompt_id_format(invalid_id, expected_error):
+    """Test that invalid fine-tune prompt ID formats are rejected"""
+    with pytest.raises(ValidationError, match=expected_error):
+        ModelTester(prompt_id=invalid_id)
+
+
+def test_completely_invalid_formats():
+    """Test that completely invalid formats are rejected"""
+    invalid_ids = [
+        "",  # Empty string
+        "invalid_format",  # Random string
+        "id:wrong_format",  # Almost correct but wrong separator
+        "fine_tune:wrong_format",  # Almost correct but wrong prefix
+        ":::",  # Just separators
+    ]
+
+    for invalid_id in invalid_ids:
+        with pytest.raises(ValidationError, match="Invalid prompt ID"):
+            ModelTester(prompt_id=invalid_id)
+
+
+def test_prompt_generator_case_sensitivity():
+    """Test that prompt generator names are case sensitive"""
+    # Take first generator and modify its case
+    first_generator = next(iter(PromptGenerators)).value
+    wrong_case = first_generator.upper()
+    if wrong_case == first_generator:
+        wrong_case = first_generator.lower()
+
+    with pytest.raises(ValidationError):
+        ModelTester(prompt_id=wrong_case)
+
+
+@pytest.mark.parametrize(
+    "valid_id",
+    [
+        "task_run_config::project_123::task_456::config_123",  # Valid task run config prompt ID
+    ],
+)
+def test_valid_task_run_config_prompt_id(valid_id):
+    """Test that valid eval prompt IDs are accepted"""
+    model = ModelTester(prompt_id=valid_id)
+    assert model.prompt_id == valid_id
+
+
+@pytest.mark.parametrize(
+    "invalid_id,expected_error",
+    [
+        ("task_run_config::", "Invalid task run config prompt ID"),
+        ("task_run_config::p1", "Invalid task run config prompt ID"),
+        ("task_run_config::p1::t1", "Invalid task run config prompt ID"),
+        ("task_run_config::p1::t1::c1::extra", "Invalid task run config prompt ID"),
+    ],
+)
+def test_invalid_eval_prompt_id_format(invalid_id, expected_error):
+    """Test that invalid eval prompt ID formats are rejected"""
+    with pytest.raises(ValidationError, match=expected_error):
+        ModelTester(prompt_id=invalid_id)
+
+
+@pytest.mark.parametrize(
+    "id,should_be_frozen",
+    [
+        ("simple_prompt_builder", False),
+        ("id::prompt_123", True),
+        ("task_run_config::p1::t1", True),
+        ("fine_tune_prompt::ft_123", True),
+    ],
+)
+def test_is_frozen_prompt(id, should_be_frozen):
+    """Test that the is_frozen_prompt function works"""
+    assert is_frozen_prompt(id) == should_be_frozen
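Taken together, these tests describe four accepted PromptId shapes: a prompt generator name (e.g. simple_prompt_builder), id::<prompt_id>, fine_tune_prompt::<fine_tune_id>, and task_run_config::<project_id>::<task_id>::<config_id>. A sketch of is_frozen_prompt that is consistent with the last parametrized case, not the package's actual implementation:

def is_frozen_prompt_sketch(prompt_id: str) -> bool:
    # Per the test cases above, anything pointing at a saved, fine-tune,
    # or task-run-config prompt counts as "frozen"; generator names do not.
    return prompt_id.startswith(("id::", "task_run_config::", "fine_tune_prompt::"))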
@@ -0,0 +1,159 @@
+import pytest
+from pydantic import ValidationError
+
+from kiln_ai.datamodel.datamodel_enums import TaskOutputRatingType
+from kiln_ai.datamodel.prompt_id import PromptGenerators
+from kiln_ai.datamodel.task import RunConfig, RunConfigProperties, Task, TaskRunConfig
+from kiln_ai.datamodel.task_output import normalize_rating
+
+
+def test_runconfig_valid_creation():
+    task = Task(id="task1", name="Test Task", instruction="Do something")
+
+    config = RunConfig(
+        task=task,
+        model_name="gpt-4",
+        model_provider_name="openai",
+        prompt_id=PromptGenerators.SIMPLE,
+    )
+
+    assert config.task == task
+    assert config.model_name == "gpt-4"
+    assert config.model_provider_name == "openai"
+    assert config.prompt_id == PromptGenerators.SIMPLE  # Check default value
+
+
+def test_runconfig_missing_required_fields():
+    with pytest.raises(ValidationError) as exc_info:
+        RunConfig()
+
+    errors = exc_info.value.errors()
+    assert (
+        len(errors) == 4
+    )  # task, model_name, model_provider_name, and prompt_id are required
+    assert any(error["loc"][0] == "task" for error in errors)
+    assert any(error["loc"][0] == "model_name" for error in errors)
+    assert any(error["loc"][0] == "model_provider_name" for error in errors)
+    assert any(error["loc"][0] == "prompt_id" for error in errors)
+
+
+def test_runconfig_custom_prompt_id():
+    task = Task(id="task1", name="Test Task", instruction="Do something")
+
+    config = RunConfig(
+        task=task,
+        model_name="gpt-4",
+        model_provider_name="openai",
+        prompt_id=PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT,
+    )
+
+    assert config.prompt_id == PromptGenerators.SIMPLE_CHAIN_OF_THOUGHT
+
+
+@pytest.fixture
+def sample_task():
+    return Task(name="Test Task", instruction="Test instruction")
+
+
+@pytest.fixture
+def sample_run_config_props(sample_task):
+    return RunConfigProperties(
+        model_name="gpt-4",
+        model_provider_name="openai",
+        prompt_id=PromptGenerators.SIMPLE,
+    )
+
+
+def test_task_run_config_valid_creation(sample_task, sample_run_config_props):
+    config = TaskRunConfig(
+        name="Test Config",
+        description="Test description",
+        run_config_properties=sample_run_config_props,
+        parent=sample_task,
+    )
+
+    assert config.name == "Test Config"
+    assert config.description == "Test description"
+    assert config.run_config_properties == sample_run_config_props
+    assert config.parent_task() == sample_task
+
+
+def test_task_run_config_minimal_creation(sample_task, sample_run_config_props):
+    # Test creation with only required fields
+    config = TaskRunConfig(
+        name="Test Config",
+        run_config_properties=sample_run_config_props,
+        parent=sample_task,
+    )
+
+    assert config.name == "Test Config"
+    assert config.description is None
+    assert config.run_config_properties == sample_run_config_props
+
+
+def test_task_run_config_missing_required_fields(sample_task):
+    # Test missing name
+    with pytest.raises(ValidationError) as exc_info:
+        TaskRunConfig(
+            run_config_properties=RunConfigProperties(
+                task=sample_task, model_name="gpt-4", model_provider_name="openai"
+            ),
+            parent=sample_task,
+        )
+    assert "Field required" in str(exc_info.value)
+
+    # Test missing run_config
+    with pytest.raises(ValidationError) as exc_info:
+        TaskRunConfig(name="Test Config", parent=sample_task)
+    assert "Field required" in str(exc_info.value)
+
+
+def test_task_run_config_missing_task_in_run_config(sample_task):
+    with pytest.raises(
+        ValidationError, match="Input should be a valid dictionary or instance of Task"
+    ):
+        # Create a run config without a task
+        RunConfig(
+            model_name="gpt-4",
+            model_provider_name="openai",
+            task=None,  # type: ignore
+        )
+
+
+@pytest.mark.parametrize(
+    "rating_type,rating,expected",
+    [
+        (TaskOutputRatingType.five_star, 1, 0),
+        (TaskOutputRatingType.five_star, 2, 0.25),
+        (TaskOutputRatingType.five_star, 3, 0.5),
+        (TaskOutputRatingType.five_star, 4, 0.75),
+        (TaskOutputRatingType.five_star, 5, 1),
+        (TaskOutputRatingType.pass_fail, 0, 0),
+        (TaskOutputRatingType.pass_fail, 1, 1),
+        (TaskOutputRatingType.pass_fail, 0.5, 0.5),
+        (TaskOutputRatingType.pass_fail_critical, -1, 0),
+        (TaskOutputRatingType.pass_fail_critical, 0, 0.5),
+        (TaskOutputRatingType.pass_fail_critical, 1, 1),
+        (TaskOutputRatingType.pass_fail_critical, 0.5, 0.75),
+    ],
+)
+def test_normalize_rating(rating_type, rating, expected):
+    assert normalize_rating(rating, rating_type) == expected
+
+
+@pytest.mark.parametrize(
+    "rating_type,rating",
+    [
+        (TaskOutputRatingType.five_star, 0),
+        (TaskOutputRatingType.five_star, 6),
+        (TaskOutputRatingType.pass_fail, -0.5),
+        (TaskOutputRatingType.pass_fail, 1.5),
+        (TaskOutputRatingType.pass_fail_critical, -1.5),
+        (TaskOutputRatingType.pass_fail_critical, 1.5),
+        (TaskOutputRatingType.custom, 0),
+        (TaskOutputRatingType.custom, 99),
+    ],
+)
+def test_normalize_rating_errors(rating_type, rating):
+    with pytest.raises(ValueError):
+        normalize_rating(rating, rating_type)
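The normalize_rating cases above imply a mapping onto [0, 1]: five-star ratings map linearly via (rating - 1) / 4, pass/fail ratings are already in [0, 1], and pass/fail-critical ratings map [-1, 1] via (rating + 1) / 2, with out-of-range and custom ratings rejected. A sketch matching those expectations, not the implementation in kiln_ai.datamodel.task_output:

# Sketch only: rating types and ranges are inferred from the parametrized tests.
def normalize_rating_sketch(rating: float, rating_type: str) -> float:
    if rating_type == "five_star" and 1 <= rating <= 5:
        return (rating - 1) / 4
    if rating_type == "pass_fail" and 0 <= rating <= 1:
        return rating
    if rating_type == "pass_fail_critical" and -1 <= rating <= 1:
        return (rating + 1) / 2
    raise ValueError(f"Unsupported rating {rating} for type {rating_type}")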
kiln_ai/utils/config.py CHANGED
@@ -78,10 +78,47 @@ class Config:
             str,
             env_var="FIREWORKS_ACCOUNT_ID",
         ),
+        "anthropic_api_key": ConfigProperty(
+            str,
+            env_var="ANTHROPIC_API_KEY",
+            sensitive=True,
+        ),
+        "gemini_api_key": ConfigProperty(
+            str,
+            env_var="GEMINI_API_KEY",
+            sensitive=True,
+        ),
         "projects": ConfigProperty(
             list,
             default_lambda=lambda: [],
         ),
+        "azure_openai_api_key": ConfigProperty(
+            str,
+            env_var="AZURE_OPENAI_API_KEY",
+            sensitive=True,
+        ),
+        "azure_openai_endpoint": ConfigProperty(
+            str,
+            env_var="AZURE_OPENAI_ENDPOINT",
+        ),
+        "huggingface_api_key": ConfigProperty(
+            str,
+            env_var="HUGGINGFACE_API_KEY",
+            sensitive=True,
+        ),
+        "vertex_project_id": ConfigProperty(
+            str,
+            env_var="VERTEX_PROJECT_ID",
+        ),
+        "vertex_location": ConfigProperty(
+            str,
+            env_var="VERTEX_LOCATION",
+        ),
+        "together_api_key": ConfigProperty(
+            str,
+            env_var="TOGETHERAI_API_KEY",
+            sensitive=True,
+        ),
         "custom_models": ConfigProperty(
             list,
             default_lambda=lambda: [],
@@ -142,10 +179,15 @@ class Config:
         raise AttributeError(f"Config has no attribute '{name}'")
 
     @classmethod
-    def settings_path(cls, create=True):
+    def settings_dir(cls, create=True):
         settings_dir = os.path.join(Path.home(), ".kiln_ai")
         if create and not os.path.exists(settings_dir):
             os.makedirs(settings_dir)
+        return settings_dir
+
+    @classmethod
+    def settings_path(cls, create=True):
+        settings_dir = cls.settings_dir(create)
         return os.path.join(settings_dir, "settings.yaml")
 
     @classmethod
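The new ConfigProperty entries mean the added providers can be configured through environment variables, and settings_path() is now layered on the new settings_dir() helper. A quick sketch under those assumptions; the variable names come from the diff above, the values are placeholders:

import os

from kiln_ai.utils.config import Config

# Placeholder values; only the environment variable names are taken
# from the new ConfigProperty entries above.
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-placeholder"
os.environ["GEMINI_API_KEY"] = "placeholder"
os.environ["AZURE_OPENAI_API_KEY"] = "placeholder"
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://example.openai.azure.com"
os.environ["TOGETHERAI_API_KEY"] = "placeholder"

# Both resolve under ~/.kiln_ai; pass create=False to avoid creating the
# directory as a side effect.
print(Config.settings_dir(create=False))
print(Config.settings_path(create=False))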