kiln-ai 0.19.0__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff compares publicly released versions of the package as published to their public registry. It is provided for informational purposes only.


Files changed (158)
  1. kiln_ai/adapters/__init__.py +8 -2
  2. kiln_ai/adapters/adapter_registry.py +43 -208
  3. kiln_ai/adapters/chat/chat_formatter.py +8 -12
  4. kiln_ai/adapters/chat/test_chat_formatter.py +6 -2
  5. kiln_ai/adapters/chunkers/__init__.py +13 -0
  6. kiln_ai/adapters/chunkers/base_chunker.py +42 -0
  7. kiln_ai/adapters/chunkers/chunker_registry.py +16 -0
  8. kiln_ai/adapters/chunkers/fixed_window_chunker.py +39 -0
  9. kiln_ai/adapters/chunkers/helpers.py +23 -0
  10. kiln_ai/adapters/chunkers/test_base_chunker.py +63 -0
  11. kiln_ai/adapters/chunkers/test_chunker_registry.py +28 -0
  12. kiln_ai/adapters/chunkers/test_fixed_window_chunker.py +346 -0
  13. kiln_ai/adapters/chunkers/test_helpers.py +75 -0
  14. kiln_ai/adapters/data_gen/test_data_gen_task.py +9 -3
  15. kiln_ai/adapters/docker_model_runner_tools.py +119 -0
  16. kiln_ai/adapters/embedding/__init__.py +0 -0
  17. kiln_ai/adapters/embedding/base_embedding_adapter.py +44 -0
  18. kiln_ai/adapters/embedding/embedding_registry.py +32 -0
  19. kiln_ai/adapters/embedding/litellm_embedding_adapter.py +199 -0
  20. kiln_ai/adapters/embedding/test_base_embedding_adapter.py +283 -0
  21. kiln_ai/adapters/embedding/test_embedding_registry.py +166 -0
  22. kiln_ai/adapters/embedding/test_litellm_embedding_adapter.py +1149 -0
  23. kiln_ai/adapters/eval/base_eval.py +2 -2
  24. kiln_ai/adapters/eval/eval_runner.py +9 -3
  25. kiln_ai/adapters/eval/g_eval.py +2 -2
  26. kiln_ai/adapters/eval/test_base_eval.py +2 -4
  27. kiln_ai/adapters/eval/test_g_eval.py +4 -5
  28. kiln_ai/adapters/extractors/__init__.py +18 -0
  29. kiln_ai/adapters/extractors/base_extractor.py +72 -0
  30. kiln_ai/adapters/extractors/encoding.py +20 -0
  31. kiln_ai/adapters/extractors/extractor_registry.py +44 -0
  32. kiln_ai/adapters/extractors/extractor_runner.py +112 -0
  33. kiln_ai/adapters/extractors/litellm_extractor.py +386 -0
  34. kiln_ai/adapters/extractors/test_base_extractor.py +244 -0
  35. kiln_ai/adapters/extractors/test_encoding.py +54 -0
  36. kiln_ai/adapters/extractors/test_extractor_registry.py +181 -0
  37. kiln_ai/adapters/extractors/test_extractor_runner.py +181 -0
  38. kiln_ai/adapters/extractors/test_litellm_extractor.py +1192 -0
  39. kiln_ai/adapters/fine_tune/__init__.py +1 -1
  40. kiln_ai/adapters/fine_tune/openai_finetune.py +14 -4
  41. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +2 -2
  42. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +2 -6
  43. kiln_ai/adapters/fine_tune/test_openai_finetune.py +108 -111
  44. kiln_ai/adapters/fine_tune/test_together_finetune.py +2 -6
  45. kiln_ai/adapters/ml_embedding_model_list.py +192 -0
  46. kiln_ai/adapters/ml_model_list.py +761 -37
  47. kiln_ai/adapters/model_adapters/base_adapter.py +51 -21
  48. kiln_ai/adapters/model_adapters/litellm_adapter.py +380 -138
  49. kiln_ai/adapters/model_adapters/test_base_adapter.py +193 -17
  50. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +407 -2
  51. kiln_ai/adapters/model_adapters/test_litellm_adapter_tools.py +1103 -0
  52. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +5 -5
  53. kiln_ai/adapters/model_adapters/test_structured_output.py +113 -5
  54. kiln_ai/adapters/ollama_tools.py +69 -12
  55. kiln_ai/adapters/parsers/__init__.py +1 -1
  56. kiln_ai/adapters/provider_tools.py +205 -47
  57. kiln_ai/adapters/rag/deduplication.py +49 -0
  58. kiln_ai/adapters/rag/progress.py +252 -0
  59. kiln_ai/adapters/rag/rag_runners.py +844 -0
  60. kiln_ai/adapters/rag/test_deduplication.py +195 -0
  61. kiln_ai/adapters/rag/test_progress.py +785 -0
  62. kiln_ai/adapters/rag/test_rag_runners.py +2376 -0
  63. kiln_ai/adapters/remote_config.py +80 -8
  64. kiln_ai/adapters/repair/test_repair_task.py +12 -9
  65. kiln_ai/adapters/run_output.py +3 -0
  66. kiln_ai/adapters/test_adapter_registry.py +657 -85
  67. kiln_ai/adapters/test_docker_model_runner_tools.py +305 -0
  68. kiln_ai/adapters/test_ml_embedding_model_list.py +429 -0
  69. kiln_ai/adapters/test_ml_model_list.py +251 -1
  70. kiln_ai/adapters/test_ollama_tools.py +340 -1
  71. kiln_ai/adapters/test_prompt_adaptors.py +13 -6
  72. kiln_ai/adapters/test_prompt_builders.py +1 -1
  73. kiln_ai/adapters/test_provider_tools.py +254 -8
  74. kiln_ai/adapters/test_remote_config.py +651 -58
  75. kiln_ai/adapters/vector_store/__init__.py +1 -0
  76. kiln_ai/adapters/vector_store/base_vector_store_adapter.py +83 -0
  77. kiln_ai/adapters/vector_store/lancedb_adapter.py +389 -0
  78. kiln_ai/adapters/vector_store/test_base_vector_store.py +160 -0
  79. kiln_ai/adapters/vector_store/test_lancedb_adapter.py +1841 -0
  80. kiln_ai/adapters/vector_store/test_vector_store_registry.py +199 -0
  81. kiln_ai/adapters/vector_store/vector_store_registry.py +33 -0
  82. kiln_ai/datamodel/__init__.py +39 -34
  83. kiln_ai/datamodel/basemodel.py +170 -1
  84. kiln_ai/datamodel/chunk.py +158 -0
  85. kiln_ai/datamodel/datamodel_enums.py +28 -0
  86. kiln_ai/datamodel/embedding.py +64 -0
  87. kiln_ai/datamodel/eval.py +1 -1
  88. kiln_ai/datamodel/external_tool_server.py +298 -0
  89. kiln_ai/datamodel/extraction.py +303 -0
  90. kiln_ai/datamodel/json_schema.py +25 -10
  91. kiln_ai/datamodel/project.py +40 -1
  92. kiln_ai/datamodel/rag.py +79 -0
  93. kiln_ai/datamodel/registry.py +0 -15
  94. kiln_ai/datamodel/run_config.py +62 -0
  95. kiln_ai/datamodel/task.py +2 -77
  96. kiln_ai/datamodel/task_output.py +6 -1
  97. kiln_ai/datamodel/task_run.py +41 -0
  98. kiln_ai/datamodel/test_attachment.py +649 -0
  99. kiln_ai/datamodel/test_basemodel.py +4 -4
  100. kiln_ai/datamodel/test_chunk_models.py +317 -0
  101. kiln_ai/datamodel/test_dataset_split.py +1 -1
  102. kiln_ai/datamodel/test_embedding_models.py +448 -0
  103. kiln_ai/datamodel/test_eval_model.py +6 -6
  104. kiln_ai/datamodel/test_example_models.py +175 -0
  105. kiln_ai/datamodel/test_external_tool_server.py +691 -0
  106. kiln_ai/datamodel/test_extraction_chunk.py +206 -0
  107. kiln_ai/datamodel/test_extraction_model.py +470 -0
  108. kiln_ai/datamodel/test_rag.py +641 -0
  109. kiln_ai/datamodel/test_registry.py +8 -3
  110. kiln_ai/datamodel/test_task.py +15 -47
  111. kiln_ai/datamodel/test_tool_id.py +320 -0
  112. kiln_ai/datamodel/test_vector_store.py +320 -0
  113. kiln_ai/datamodel/tool_id.py +105 -0
  114. kiln_ai/datamodel/vector_store.py +141 -0
  115. kiln_ai/tools/__init__.py +8 -0
  116. kiln_ai/tools/base_tool.py +82 -0
  117. kiln_ai/tools/built_in_tools/__init__.py +13 -0
  118. kiln_ai/tools/built_in_tools/math_tools.py +124 -0
  119. kiln_ai/tools/built_in_tools/test_math_tools.py +204 -0
  120. kiln_ai/tools/mcp_server_tool.py +95 -0
  121. kiln_ai/tools/mcp_session_manager.py +246 -0
  122. kiln_ai/tools/rag_tools.py +157 -0
  123. kiln_ai/tools/test_base_tools.py +199 -0
  124. kiln_ai/tools/test_mcp_server_tool.py +457 -0
  125. kiln_ai/tools/test_mcp_session_manager.py +1585 -0
  126. kiln_ai/tools/test_rag_tools.py +848 -0
  127. kiln_ai/tools/test_tool_registry.py +562 -0
  128. kiln_ai/tools/tool_registry.py +85 -0
  129. kiln_ai/utils/__init__.py +3 -0
  130. kiln_ai/utils/async_job_runner.py +62 -17
  131. kiln_ai/utils/config.py +24 -2
  132. kiln_ai/utils/env.py +15 -0
  133. kiln_ai/utils/filesystem.py +14 -0
  134. kiln_ai/utils/filesystem_cache.py +60 -0
  135. kiln_ai/utils/litellm.py +94 -0
  136. kiln_ai/utils/lock.py +100 -0
  137. kiln_ai/utils/mime_type.py +38 -0
  138. kiln_ai/utils/open_ai_types.py +94 -0
  139. kiln_ai/utils/pdf_utils.py +38 -0
  140. kiln_ai/utils/project_utils.py +17 -0
  141. kiln_ai/utils/test_async_job_runner.py +151 -35
  142. kiln_ai/utils/test_config.py +138 -1
  143. kiln_ai/utils/test_env.py +142 -0
  144. kiln_ai/utils/test_filesystem_cache.py +316 -0
  145. kiln_ai/utils/test_litellm.py +206 -0
  146. kiln_ai/utils/test_lock.py +185 -0
  147. kiln_ai/utils/test_mime_type.py +66 -0
  148. kiln_ai/utils/test_open_ai_types.py +131 -0
  149. kiln_ai/utils/test_pdf_utils.py +73 -0
  150. kiln_ai/utils/test_uuid.py +111 -0
  151. kiln_ai/utils/test_validation.py +524 -0
  152. kiln_ai/utils/uuid.py +9 -0
  153. kiln_ai/utils/validation.py +90 -0
  154. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/METADATA +12 -5
  155. kiln_ai-0.21.0.dist-info/RECORD +211 -0
  156. kiln_ai-0.19.0.dist-info/RECORD +0 -115
  157. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/WHEEL +0 -0
  158. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -119,14 +119,16 @@ async def test_mock_returning_run(tmp_path):
         choices=[{"message": {"content": "mock response"}}],
     )
 
+    run_config = RunConfigProperties(
+        model_name="custom_model",
+        model_provider_name="ollama",
+        prompt_id="simple_prompt_builder",
+        structured_output_mode="json_schema",
+    )
+
     adapter = LiteLlmAdapter(
         config=LiteLlmConfig(
-            run_config_properties=RunConfigProperties(
-                model_name="custom_model",
-                model_provider_name="ollama",
-                prompt_id="simple_prompt_builder",
-                structured_output_mode="json_schema",
-            ),
+            run_config_properties=run_config,
             base_url="http://localhost:11434",
             additional_body_options={"api_key": "test_key"},
         ),
@@ -140,7 +142,9 @@ async def test_mock_returning_run(tmp_path):
     assert run.id is not None
     assert run.input == "You are a mock, send me the response!"
     assert run.output.output == "mock response"
+    assert run.input_source is not None
     assert "created_by" in run.input_source.properties
+    assert run.output.source is not None
     assert run.output.source.properties == {
         "adapter_name": "kiln_openai_compatible_adapter",
         "model_name": "custom_model",
@@ -150,6 +154,9 @@ async def test_mock_returning_run(tmp_path):
         "temperature": 1.0,
         "top_p": 1.0,
     }
+    assert run.output.source.run_config is not None
+    saved_run_config = run.output.source.run_config.model_dump()
+    assert saved_run_config == run_config.model_dump()
 
 
 @pytest.mark.paid
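
These three hunks make one behavioral point: the adapter now persists the full run configuration on the saved output's source, so it can be compared round-trip against the config the adapter was created with. Below is a minimal sketch condensing the new assertions, assuming this test file's existing imports (RunConfigProperties, LiteLlmAdapter, LiteLlmConfig) and its mocked completion call; the invoke() entry point is an assumption, since the invocation line sits outside the hunks shown.

# Sketch only: condenses the new assertions above into one test body.
async def test_run_config_round_trip(tmp_path):
    run_config = RunConfigProperties(
        model_name="custom_model",
        model_provider_name="ollama",
        prompt_id="simple_prompt_builder",
        structured_output_mode="json_schema",
    )
    adapter = LiteLlmAdapter(
        config=LiteLlmConfig(
            run_config_properties=run_config,
            base_url="http://localhost:11434",
            additional_body_options={"api_key": "test_key"},
        )
    )

    # invoke() is assumed here; the actual invocation line is not in the diff.
    run = await adapter.invoke("You are a mock, send me the response!")

    # Pydantic's model_dump() yields plain dicts, so the persisted config on
    # the run's output source can be compared field-for-field with the input.
    assert run.output.source is not None
    assert run.output.source.run_config is not None
    assert run.output.source.run_config.model_dump() == run_config.model_dump()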
@@ -359,7 +359,7 @@ def test_prompt_builder_from_id(task_with_examples):
 
     with pytest.raises(
         ValueError,
-        match="Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id'",
+        match=r"Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id'",
     ):
         prompt_builder_from_id("fine_tune_prompt::123", task)
 
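
The only change here is the r-prefix on the match pattern. pytest applies match= with re.search, so the value is a regular expression, and the raw-string prefix marks it as one; the unescaped dots still match any character, and re.escape would be the strict alternative. A quick illustration of those semantics:

import re

# pytest.raises(..., match=p) passes when re.search(p, str(excinfo.value))
# finds a match, so the pattern is a regex, not a literal string.
msg = "Invalid fine-tune ID format. Expected 'project_id::task_id::fine_tune_id'"
assert re.search(r"Invalid fine-tune ID format", msg)

# re.escape() is the strict form when an exact-text match is intended.
assert re.search(re.escape(msg), msg)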
@@ -2,6 +2,8 @@ from unittest.mock import AsyncMock, Mock, patch
 
 import pytest
 
+from kiln_ai.adapters.adapter_registry import litellm_core_provider_config
+from kiln_ai.adapters.docker_model_runner_tools import DockerModelRunnerConnection
 from kiln_ai.adapters.ml_model_list import (
     KilnModel,
     ModelName,
@@ -10,6 +12,7 @@ from kiln_ai.adapters.ml_model_list import (
 )
 from kiln_ai.adapters.ollama_tools import OllamaConnection
 from kiln_ai.adapters.provider_tools import (
+    LiteLlmCoreConfig,
     builtin_model_from,
     check_provider_warnings,
     core_provider,
@@ -18,7 +21,7 @@ from kiln_ai.adapters.provider_tools import (
     finetune_provider_model,
     get_model_and_provider,
     kiln_model_provider_from,
-    lite_llm_config_for_openai_compatible,
+    lite_llm_core_config_for_provider,
     lite_llm_provider_model,
     parse_custom_model_id,
     provider_enabled,
@@ -603,7 +606,7 @@ def test_openai_compatible_provider_config(mock_shared_config):
     """Test successful creation of an OpenAI compatible provider"""
     model_id = "test_provider::gpt-4"
 
-    config = lite_llm_config_for_openai_compatible(
+    config = litellm_core_provider_config(
         RunConfigProperties(
             model_name=model_id,
             model_provider_name=ModelProviderName.openai_compatible,
@@ -638,10 +641,10 @@ def test_lite_llm_config_no_api_key(mock_shared_config):
     """Test provider creation without API key (should work as some providers don't require it, but should pass NA to LiteLLM as it requires one)"""
     model_id = "no_key_provider::gpt-4"
 
-    config = lite_llm_config_for_openai_compatible(
+    config = litellm_core_provider_config(
         RunConfigProperties(
             model_name=model_id,
-            model_provider_name=ModelProviderName.openai,
+            model_provider_name=ModelProviderName.openai_compatible,
             prompt_id="simple_prompt_builder",
             structured_output_mode="json_schema",
         )
@@ -659,7 +662,7 @@
 def test_lite_llm_config_invalid_id():
     """Test handling of invalid model ID format"""
     with pytest.raises(ValueError) as exc_info:
-        lite_llm_config_for_openai_compatible(
+        litellm_core_provider_config(
             RunConfigProperties(
                 model_name="invalid-id-format",
                 model_provider_name=ModelProviderName.openai_compatible,
@@ -677,7 +680,7 @@ def test_lite_llm_config_no_providers(mock_shared_config):
     mock_shared_config.return_value.openai_compatible_providers = None
 
     with pytest.raises(ValueError) as exc_info:
-        lite_llm_config_for_openai_compatible(
+        litellm_core_provider_config(
             RunConfigProperties(
                 model_name="test_provider::gpt-4",
                 model_provider_name=ModelProviderName.openai_compatible,
@@ -691,7 +694,7 @@
 def test_lite_llm_config_provider_not_found(mock_shared_config):
     """Test handling of non-existent provider"""
     with pytest.raises(ValueError) as exc_info:
-        lite_llm_config_for_openai_compatible(
+        litellm_core_provider_config(
             RunConfigProperties(
                 model_name="unknown_provider::gpt-4",
                 model_provider_name=ModelProviderName.openai_compatible,
@@ -714,7 +717,7 @@ def test_lite_llm_config_no_base_url(mock_shared_config):
     ]
 
     with pytest.raises(ValueError) as exc_info:
-        lite_llm_config_for_openai_compatible(
+        litellm_core_provider_config(
             RunConfigProperties(
                 model_name="test_provider::gpt-4",
                 model_provider_name=ModelProviderName.openai_compatible,
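
Across the preceding hunks, lite_llm_config_for_openai_compatible is replaced by litellm_core_provider_config, now imported from kiln_ai.adapters.adapter_registry (see the import hunk above), with the same call shape: it takes RunConfigProperties whose model name uses the "provider_name::model_name" format for OpenAI-compatible providers. A hedged sketch of a call, using only names visible in this diff; the RunConfigProperties import path is an assumption.

from kiln_ai.adapters.adapter_registry import litellm_core_provider_config
from kiln_ai.adapters.ml_model_list import ModelProviderName  # import path assumed
from kiln_ai.datamodel import RunConfigProperties  # import path assumed

# model_name encodes "<configured_provider>::<model>" for openai_compatible
# providers; per the tests above, invalid formats raise ValueError.
config = litellm_core_provider_config(
    RunConfigProperties(
        model_name="test_provider::gpt-4",
        model_provider_name=ModelProviderName.openai_compatible,
        prompt_id="simple_prompt_builder",
        structured_output_mode="json_schema",
    )
)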
@@ -931,3 +934,246 @@ def test_finetune_provider_model_vertex_ai(mock_project, mock_task, mock_finetun
     # Verify the model_id is transformed into openai/endpoint_id format
     assert provider.model_id == "openai/456"
     assert provider.structured_output_mode == StructuredOutputMode.json_mode
+
+
+@pytest.fixture
+def mock_config_for_lite_llm_core_config():
+    with patch("kiln_ai.adapters.provider_tools.Config") as mock:
+        config_instance = Mock()
+        mock.shared.return_value = config_instance
+
+        # Set up all the config values
+        config_instance.open_router_api_key = "test-openrouter-key"
+        config_instance.open_ai_api_key = "test-openai-key"
+        config_instance.groq_api_key = "test-groq-key"
+        config_instance.bedrock_access_key = "test-aws-access-key"
+        config_instance.bedrock_secret_key = "test-aws-secret-key"
+        config_instance.ollama_base_url = "http://test-ollama:11434"
+        config_instance.fireworks_api_key = "test-fireworks-key"
+        config_instance.anthropic_api_key = "test-anthropic-key"
+        config_instance.gemini_api_key = "test-gemini-key"
+        config_instance.vertex_project_id = "test-vertex-project"
+        config_instance.vertex_location = "us-central1"
+        config_instance.together_api_key = "test-together-key"
+        config_instance.azure_openai_api_key = "test-azure-key"
+        config_instance.azure_openai_endpoint = "https://test.openai.azure.com"
+        config_instance.huggingface_api_key = "test-hf-key"
+
+        yield mock
+
+
+@pytest.mark.parametrize(
+    "provider_name,expected_config",
+    [
+        (
+            ModelProviderName.openrouter,
+            LiteLlmCoreConfig(
+                base_url="https://openrouter.ai/api/v1",
+                additional_body_options={
+                    "api_key": "test-openrouter-key",
+                },
+                default_headers={
+                    "HTTP-Referer": "https://kiln.tech/openrouter",
+                    "X-Title": "KilnAI",
+                },
+            ),
+        ),
+        (
+            ModelProviderName.openai,
+            LiteLlmCoreConfig(additional_body_options={"api_key": "test-openai-key"}),
+        ),
+        (
+            ModelProviderName.groq,
+            LiteLlmCoreConfig(additional_body_options={"api_key": "test-groq-key"}),
+        ),
+        (
+            ModelProviderName.amazon_bedrock,
+            LiteLlmCoreConfig(
+                additional_body_options={
+                    "aws_access_key_id": "test-aws-access-key",
+                    "aws_secret_access_key": "test-aws-secret-key",
+                    "aws_region_name": "us-west-2",
+                },
+            ),
+        ),
+        (
+            ModelProviderName.ollama,
+            LiteLlmCoreConfig(
+                base_url="http://test-ollama:11434/v1",
+                additional_body_options={"api_key": "NA"},
+            ),
+        ),
+        (
+            ModelProviderName.fireworks_ai,
+            LiteLlmCoreConfig(
+                additional_body_options={"api_key": "test-fireworks-key"}
+            ),
+        ),
+        (
+            ModelProviderName.anthropic,
+            LiteLlmCoreConfig(
+                additional_body_options={"api_key": "test-anthropic-key"}
+            ),
+        ),
+        (
+            ModelProviderName.gemini_api,
+            LiteLlmCoreConfig(additional_body_options={"api_key": "test-gemini-key"}),
+        ),
+        (
+            ModelProviderName.vertex,
+            LiteLlmCoreConfig(
+                additional_body_options={
+                    "vertex_project": "test-vertex-project",
+                    "vertex_location": "us-central1",
+                },
+            ),
+        ),
+        (
+            ModelProviderName.together_ai,
+            LiteLlmCoreConfig(additional_body_options={"api_key": "test-together-key"}),
+        ),
+        (
+            ModelProviderName.azure_openai,
+            LiteLlmCoreConfig(
+                base_url="https://test.openai.azure.com",
+                additional_body_options={
+                    "api_key": "test-azure-key",
+                    "api_version": "2025-02-01-preview",
+                },
+            ),
+        ),
+        (
+            ModelProviderName.huggingface,
+            LiteLlmCoreConfig(additional_body_options={"api_key": "test-hf-key"}),
+        ),
+        (ModelProviderName.kiln_fine_tune, None),
+        (ModelProviderName.kiln_custom_registry, None),
+    ],
+)
+def test_lite_llm_core_config_for_provider(
+    mock_config_for_lite_llm_core_config, provider_name, expected_config
+):
+    config = lite_llm_core_config_for_provider(provider_name)
+    assert config == expected_config
+
+
+def test_lite_llm_core_config_for_provider_openai_compatible(
+    mock_shared_config,
+):
+    config = lite_llm_core_config_for_provider(
+        ModelProviderName.openai_compatible, "no_key_provider"
+    )
+    assert config is not None
+    assert config.base_url == "https://api.nokey.com"
+    assert config.additional_body_options == {"api_key": "NA"}
+
+
+def test_lite_llm_core_config_for_provider_openai_compatible_with_openai_compatible_provider_name(
+    mock_shared_config,
+):
+    with pytest.raises(
+        ValueError, match="OpenAI compatible provider requires a provider name"
+    ):
+        lite_llm_core_config_for_provider(ModelProviderName.openai_compatible)
+
+
+def test_lite_llm_core_config_incorrect_openai_compatible_provider_name(
+    mock_shared_config,
+):
+    with pytest.raises(
+        ValueError,
+        match="OpenAI compatible provider provider_that_does_not_exist_in_compatible_openai_providers not found",
+    ):
+        lite_llm_core_config_for_provider(
+            ModelProviderName.openai_compatible,
+            "provider_that_does_not_exist_in_compatible_openai_providers",
+        )
+
+
+def test_lite_llm_core_config_for_provider_with_string(
+    mock_config_for_lite_llm_core_config,
+):
+    # test with a string instead of an enum
+    config = lite_llm_core_config_for_provider("openai")
+    assert config == LiteLlmCoreConfig(
+        additional_body_options={"api_key": "test-openai-key"}
+    )
+
+
+def test_lite_llm_core_config_for_provider_unknown_provider():
+    with pytest.raises(ValueError, match="Unhandled enum value: unknown_provider"):
+        lite_llm_core_config_for_provider("unknown_provider")
+
+
+@patch.dict("os.environ", {"OPENROUTER_BASE_URL": "https://custom-openrouter.com"})
+def test_lite_llm_core_config_for_provider_openrouter_custom_url(
+    mock_config_for_lite_llm_core_config,
+):
+    config = lite_llm_core_config_for_provider(ModelProviderName.openrouter)
+    assert config is not None
+    assert config.base_url == "https://custom-openrouter.com"
+
+
+def test_lite_llm_core_config_for_provider_ollama_default_url(
+    mock_config_for_lite_llm_core_config,
+):
+    # Override the mock to return None for ollama_base_url
+    mock_config_for_lite_llm_core_config.shared.return_value.ollama_base_url = None
+
+    config = lite_llm_core_config_for_provider(ModelProviderName.ollama)
+    assert config is not None
+    assert config.base_url == "http://localhost:11434/v1"
+
+
+@pytest.mark.asyncio
+async def test_provider_enabled_docker_model_runner_success():
+    """Test provider_enabled for Docker Model Runner with successful connection"""
+    with patch(
+        "kiln_ai.adapters.provider_tools.get_docker_model_runner_connection",
+        new_callable=AsyncMock,
+    ) as mock_get_docker:
+        # Mock successful Docker Model Runner connection with models
+        mock_get_docker.return_value = DockerModelRunnerConnection(
+            message="Connected",
+            supported_models=["llama-3.2-3b-instruct"],
+            untested_models=[],
+        )
+
+        result = await provider_enabled(ModelProviderName.docker_model_runner)
+        assert result is True
+
+
+@pytest.mark.asyncio
+async def test_provider_enabled_docker_model_runner_no_models():
+    """Test provider_enabled for Docker Model Runner with no models"""
+    with patch(
+        "kiln_ai.adapters.provider_tools.get_docker_model_runner_connection",
+        new_callable=AsyncMock,
+    ) as mock_get_docker:
+        # Mock Docker Model Runner connection but with no models
+        mock_get_docker.return_value = DockerModelRunnerConnection(
+            message="Connected but no models", supported_models=[], untested_models=[]
+        )
+
+        result = await provider_enabled(ModelProviderName.docker_model_runner)
+        assert result is False
+
+
+@pytest.mark.asyncio
+async def test_provider_enabled_docker_model_runner_connection_error():
+    """Test provider_enabled for Docker Model Runner with connection error"""
+    with patch(
+        "kiln_ai.adapters.provider_tools.get_docker_model_runner_connection",
+        new_callable=AsyncMock,
+    ) as mock_get_docker:
+        # Mock Docker Model Runner connection failure
+        mock_get_docker.side_effect = Exception("Connection failed")
+
+        result = await provider_enabled(ModelProviderName.docker_model_runner)
+        assert result is False
+
+
+def test_provider_name_from_id_docker_model_runner():
+    """Test provider_name_from_id for Docker Model Runner"""
+    result = provider_name_from_id(ModelProviderName.docker_model_runner)
+    assert result == "Docker Model Runner"
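
Taken together, the parametrized cases document the contract of the new lite_llm_core_config_for_provider helper: built-in providers resolve to a LiteLlmCoreConfig (API keys pulled from shared config, with optional base_url and default_headers), providers with no core config return None, openai_compatible requires a second argument naming the configured provider, and unknown names raise ValueError. A hedged usage sketch under those assumptions, using only names visible in this diff:

from kiln_ai.adapters.ml_model_list import ModelProviderName  # import path assumed
from kiln_ai.adapters.provider_tools import lite_llm_core_config_for_provider

# Built-in provider: key comes from shared config. The result may be None for
# providers without a core config (kiln_fine_tune, kiln_custom_registry).
openai_cfg = lite_llm_core_config_for_provider(ModelProviderName.openai)

# OpenAI-compatible providers additionally need the configured provider's
# name, matching an entry in openai_compatible_providers.
compat_cfg = lite_llm_core_config_for_provider(
    ModelProviderName.openai_compatible, "no_key_provider"
)
if compat_cfg is not None:
    print(compat_cfg.base_url, compat_cfg.additional_body_options)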