kiln-ai 0.8.1__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kiln-ai might be problematic. Click here for more details.

Files changed (88)
  1. kiln_ai/adapters/__init__.py +7 -7
  2. kiln_ai/adapters/adapter_registry.py +81 -10
  3. kiln_ai/adapters/data_gen/data_gen_task.py +21 -3
  4. kiln_ai/adapters/data_gen/test_data_gen_task.py +23 -3
  5. kiln_ai/adapters/eval/base_eval.py +164 -0
  6. kiln_ai/adapters/eval/eval_runner.py +267 -0
  7. kiln_ai/adapters/eval/g_eval.py +367 -0
  8. kiln_ai/adapters/eval/registry.py +16 -0
  9. kiln_ai/adapters/eval/test_base_eval.py +324 -0
  10. kiln_ai/adapters/eval/test_eval_runner.py +640 -0
  11. kiln_ai/adapters/eval/test_g_eval.py +497 -0
  12. kiln_ai/adapters/eval/test_g_eval_data.py +4 -0
  13. kiln_ai/adapters/fine_tune/base_finetune.py +5 -1
  14. kiln_ai/adapters/fine_tune/dataset_formatter.py +310 -65
  15. kiln_ai/adapters/fine_tune/fireworks_finetune.py +47 -32
  16. kiln_ai/adapters/fine_tune/openai_finetune.py +12 -11
  17. kiln_ai/adapters/fine_tune/test_base_finetune.py +19 -0
  18. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +472 -129
  19. kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py +114 -22
  20. kiln_ai/adapters/fine_tune/test_openai_finetune.py +125 -14
  21. kiln_ai/adapters/ml_model_list.py +434 -93
  22. kiln_ai/adapters/model_adapters/__init__.py +18 -0
  23. kiln_ai/adapters/model_adapters/base_adapter.py +250 -0
  24. kiln_ai/adapters/model_adapters/langchain_adapters.py +309 -0
  25. kiln_ai/adapters/model_adapters/openai_compatible_config.py +10 -0
  26. kiln_ai/adapters/model_adapters/openai_model_adapter.py +289 -0
  27. kiln_ai/adapters/model_adapters/test_base_adapter.py +199 -0
  28. kiln_ai/adapters/{test_langchain_adapter.py → model_adapters/test_langchain_adapter.py} +105 -97
  29. kiln_ai/adapters/model_adapters/test_openai_model_adapter.py +216 -0
  30. kiln_ai/adapters/{test_saving_adapter_results.py → model_adapters/test_saving_adapter_results.py} +80 -30
  31. kiln_ai/adapters/{test_structured_output.py → model_adapters/test_structured_output.py} +125 -46
  32. kiln_ai/adapters/ollama_tools.py +0 -1
  33. kiln_ai/adapters/parsers/__init__.py +10 -0
  34. kiln_ai/adapters/parsers/base_parser.py +12 -0
  35. kiln_ai/adapters/parsers/json_parser.py +37 -0
  36. kiln_ai/adapters/parsers/parser_registry.py +19 -0
  37. kiln_ai/adapters/parsers/r1_parser.py +69 -0
  38. kiln_ai/adapters/parsers/test_json_parser.py +81 -0
  39. kiln_ai/adapters/parsers/test_parser_registry.py +32 -0
  40. kiln_ai/adapters/parsers/test_r1_parser.py +144 -0
  41. kiln_ai/adapters/prompt_builders.py +193 -49
  42. kiln_ai/adapters/provider_tools.py +91 -36
  43. kiln_ai/adapters/repair/repair_task.py +18 -19
  44. kiln_ai/adapters/repair/test_repair_task.py +7 -7
  45. kiln_ai/adapters/run_output.py +11 -0
  46. kiln_ai/adapters/test_adapter_registry.py +177 -0
  47. kiln_ai/adapters/test_generate_docs.py +69 -0
  48. kiln_ai/adapters/test_ollama_tools.py +0 -1
  49. kiln_ai/adapters/test_prompt_adaptors.py +25 -18
  50. kiln_ai/adapters/test_prompt_builders.py +265 -44
  51. kiln_ai/adapters/test_provider_tools.py +268 -46
  52. kiln_ai/datamodel/__init__.py +51 -772
  53. kiln_ai/datamodel/basemodel.py +31 -11
  54. kiln_ai/datamodel/datamodel_enums.py +58 -0
  55. kiln_ai/datamodel/dataset_filters.py +114 -0
  56. kiln_ai/datamodel/dataset_split.py +170 -0
  57. kiln_ai/datamodel/eval.py +298 -0
  58. kiln_ai/datamodel/finetune.py +105 -0
  59. kiln_ai/datamodel/json_schema.py +14 -3
  60. kiln_ai/datamodel/model_cache.py +8 -3
  61. kiln_ai/datamodel/project.py +23 -0
  62. kiln_ai/datamodel/prompt.py +37 -0
  63. kiln_ai/datamodel/prompt_id.py +83 -0
  64. kiln_ai/datamodel/strict_mode.py +24 -0
  65. kiln_ai/datamodel/task.py +181 -0
  66. kiln_ai/datamodel/task_output.py +321 -0
  67. kiln_ai/datamodel/task_run.py +164 -0
  68. kiln_ai/datamodel/test_basemodel.py +80 -2
  69. kiln_ai/datamodel/test_dataset_filters.py +71 -0
  70. kiln_ai/datamodel/test_dataset_split.py +127 -6
  71. kiln_ai/datamodel/test_datasource.py +3 -2
  72. kiln_ai/datamodel/test_eval_model.py +635 -0
  73. kiln_ai/datamodel/test_example_models.py +34 -17
  74. kiln_ai/datamodel/test_json_schema.py +23 -0
  75. kiln_ai/datamodel/test_model_cache.py +24 -0
  76. kiln_ai/datamodel/test_model_perf.py +125 -0
  77. kiln_ai/datamodel/test_models.py +131 -2
  78. kiln_ai/datamodel/test_prompt_id.py +129 -0
  79. kiln_ai/datamodel/test_task.py +159 -0
  80. kiln_ai/utils/config.py +6 -1
  81. kiln_ai/utils/exhaustive_error.py +6 -0
  82. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/METADATA +45 -7
  83. kiln_ai-0.12.0.dist-info/RECORD +100 -0
  84. kiln_ai/adapters/base_adapter.py +0 -191
  85. kiln_ai/adapters/langchain_adapters.py +0 -256
  86. kiln_ai-0.8.1.dist-info/RECORD +0 -58
  87. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/WHEEL +0 -0
  88. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -1,256 +0,0 @@
1
- import os
2
- from os import getenv
3
- from typing import Any, Dict
4
-
5
- from langchain_aws import ChatBedrockConverse
6
- from langchain_core.language_models import LanguageModelInput
7
- from langchain_core.language_models.chat_models import BaseChatModel
8
- from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
9
- from langchain_core.messages.base import BaseMessage
10
- from langchain_core.runnables import Runnable
11
- from langchain_fireworks import ChatFireworks
12
- from langchain_groq import ChatGroq
13
- from langchain_ollama import ChatOllama
14
- from langchain_openai import ChatOpenAI
15
- from pydantic import BaseModel
16
-
17
- import kiln_ai.datamodel as datamodel
18
- from kiln_ai.adapters.ollama_tools import (
19
- get_ollama_connection,
20
- ollama_base_url,
21
- ollama_model_installed,
22
- )
23
- from kiln_ai.utils.config import Config
24
-
25
- from .base_adapter import AdapterInfo, BaseAdapter, BasePromptBuilder, RunOutput
26
- from .ml_model_list import KilnModelProvider, ModelProviderName
27
- from .provider_tools import kiln_model_provider_from
28
-
29
- LangChainModelType = BaseChatModel | Runnable[LanguageModelInput, Dict | BaseModel]
30
-
31
-
32
- class LangchainAdapter(BaseAdapter):
33
- _model: LangChainModelType | None = None
34
-
35
- def __init__(
36
- self,
37
- kiln_task: datamodel.Task,
38
- custom_model: BaseChatModel | None = None,
39
- model_name: str | None = None,
40
- provider: str | None = None,
41
- prompt_builder: BasePromptBuilder | None = None,
42
- tags: list[str] | None = None,
43
- ):
44
- super().__init__(kiln_task, prompt_builder=prompt_builder, tags=tags)
45
- if custom_model is not None:
46
- self._model = custom_model
47
-
48
- # Attempt to infer model provider and name from custom model
49
- self.model_provider = "custom.langchain:" + custom_model.__class__.__name__
50
- self.model_name = "custom.langchain:unknown_model"
51
- if hasattr(custom_model, "model_name") and isinstance(
52
- getattr(custom_model, "model_name"), str
53
- ):
54
- self.model_name = "custom.langchain:" + getattr(
55
- custom_model, "model_name"
56
- )
57
- if hasattr(custom_model, "model") and isinstance(
58
- getattr(custom_model, "model"), str
59
- ):
60
- self.model_name = "custom.langchain:" + getattr(custom_model, "model")
61
- elif model_name is not None:
62
- self.model_name = model_name
63
- self.model_provider = provider or "custom.langchain.default_provider"
64
- else:
65
- raise ValueError(
66
- "model_name and provider must be provided if custom_model is not provided"
67
- )
68
-
69
- async def model(self) -> LangChainModelType:
70
- # cached model
71
- if self._model:
72
- return self._model
73
-
74
- self._model = await langchain_model_from(self.model_name, self.model_provider)
75
-
76
- if self.has_structured_output():
77
- if not hasattr(self._model, "with_structured_output") or not callable(
78
- getattr(self._model, "with_structured_output")
79
- ):
80
- raise ValueError(
81
- f"model {self._model} does not support structured output, cannot use output_json_schema"
82
- )
83
- # Langchain expects title/description to be at top level, on top of json schema
84
- output_schema = self.kiln_task.output_schema()
85
- if output_schema is None:
86
- raise ValueError(
87
- f"output_json_schema is not valid json: {self.kiln_task.output_json_schema}"
88
- )
89
- output_schema["title"] = "task_response"
90
- output_schema["description"] = "A response from the task"
91
- with_structured_output_options = await get_structured_output_options(
92
- self.model_name, self.model_provider
93
- )
94
- self._model = self._model.with_structured_output(
95
- output_schema,
96
- include_raw=True,
97
- **with_structured_output_options,
98
- )
99
- return self._model
100
-
101
- async def _run(self, input: Dict | str) -> RunOutput:
102
- model = await self.model()
103
- chain = model
104
- intermediate_outputs = {}
105
-
106
- prompt = self.build_prompt()
107
- user_msg = self.prompt_builder.build_user_message(input)
108
- messages = [
109
- SystemMessage(content=prompt),
110
- HumanMessage(content=user_msg),
111
- ]
112
-
113
- # COT with structured output
114
- cot_prompt = self.prompt_builder.chain_of_thought_prompt()
115
- if cot_prompt and self.has_structured_output():
116
- # Base model (without structured output) used for COT message
117
- base_model = await langchain_model_from(
118
- self.model_name, self.model_provider
119
- )
120
- messages.append(
121
- SystemMessage(content=cot_prompt),
122
- )
123
-
124
- cot_messages = [*messages]
125
- cot_response = await base_model.ainvoke(cot_messages)
126
- intermediate_outputs["chain_of_thought"] = cot_response.content
127
- messages.append(AIMessage(content=cot_response.content))
128
- messages.append(
129
- SystemMessage(content="Considering the above, return a final result.")
130
- )
131
- elif cot_prompt:
132
- messages.append(SystemMessage(content=cot_prompt))
133
-
134
- response = await chain.ainvoke(messages)
135
-
136
- if self.has_structured_output():
137
- if (
138
- not isinstance(response, dict)
139
- or "parsed" not in response
140
- or not isinstance(response["parsed"], dict)
141
- ):
142
- raise RuntimeError(f"structured response not returned: {response}")
143
- structured_response = response["parsed"]
144
- return RunOutput(
145
- output=self._munge_response(structured_response),
146
- intermediate_outputs=intermediate_outputs,
147
- )
148
- else:
149
- if not isinstance(response, BaseMessage):
150
- raise RuntimeError(f"response is not a BaseMessage: {response}")
151
- text_content = response.content
152
- if not isinstance(text_content, str):
153
- raise RuntimeError(f"response is not a string: {text_content}")
154
- return RunOutput(
155
- output=text_content,
156
- intermediate_outputs=intermediate_outputs,
157
- )
158
-
159
- def adapter_info(self) -> AdapterInfo:
160
- return AdapterInfo(
161
- model_name=self.model_name,
162
- model_provider=self.model_provider,
163
- adapter_name="kiln_langchain_adapter",
164
- prompt_builder_name=self.prompt_builder.__class__.prompt_builder_name(),
165
- )
166
-
167
- def _munge_response(self, response: Dict) -> Dict:
168
- # Mistral Large tool calling format is a bit different. Convert to standard format.
169
- if (
170
- "name" in response
171
- and response["name"] == "task_response"
172
- and "arguments" in response
173
- ):
174
- return response["arguments"]
175
- return response
176
-
177
-
178
- async def get_structured_output_options(
179
- model_name: str, model_provider: str
180
- ) -> Dict[str, Any]:
181
- finetune_provider = await kiln_model_provider_from(model_name, model_provider)
182
- if finetune_provider and finetune_provider.adapter_options.get("langchain"):
183
- return finetune_provider.adapter_options["langchain"].get(
184
- "with_structured_output_options", {}
185
- )
186
- return {}
187
-
188
-
189
- async def langchain_model_from(
190
- name: str, provider_name: str | None = None
191
- ) -> BaseChatModel:
192
- provider = await kiln_model_provider_from(name, provider_name)
193
- return await langchain_model_from_provider(provider, name)
194
-
195
-
196
- async def langchain_model_from_provider(
197
- provider: KilnModelProvider, model_name: str
198
- ) -> BaseChatModel:
199
- if provider.name == ModelProviderName.openai:
200
- api_key = Config.shared().open_ai_api_key
201
- return ChatOpenAI(**provider.provider_options, openai_api_key=api_key) # type: ignore[arg-type]
202
- elif provider.name == ModelProviderName.openai_compatible:
203
- # See provider_tools.py for how base_url, key and other parameters are set
204
- return ChatOpenAI(**provider.provider_options) # type: ignore[arg-type]
205
- elif provider.name == ModelProviderName.groq:
206
- api_key = Config.shared().groq_api_key
207
- if api_key is None:
208
- raise ValueError(
209
- "Attempted to use Groq without an API key set. "
210
- "Get your API key from https://console.groq.com/keys"
211
- )
212
- return ChatGroq(**provider.provider_options, groq_api_key=api_key) # type: ignore[arg-type]
213
- elif provider.name == ModelProviderName.amazon_bedrock:
214
- api_key = Config.shared().bedrock_access_key
215
- secret_key = Config.shared().bedrock_secret_key
216
- # langchain doesn't allow passing these, so ugly hack to set env vars
217
- os.environ["AWS_ACCESS_KEY_ID"] = api_key
218
- os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key
219
- return ChatBedrockConverse(
220
- **provider.provider_options,
221
- )
222
- elif provider.name == ModelProviderName.fireworks_ai:
223
- api_key = Config.shared().fireworks_api_key
224
- return ChatFireworks(**provider.provider_options, api_key=api_key)
225
- elif provider.name == ModelProviderName.ollama:
226
- # Ollama model naming is pretty flexible. We try a few versions of the model name
227
- potential_model_names = []
228
- if "model" in provider.provider_options:
229
- potential_model_names.append(provider.provider_options["model"])
230
- if "model_aliases" in provider.provider_options:
231
- potential_model_names.extend(provider.provider_options["model_aliases"])
232
-
233
- # Get the list of models Ollama supports
234
- ollama_connection = await get_ollama_connection()
235
- if ollama_connection is None:
236
- raise ValueError("Failed to connect to Ollama. Ensure Ollama is running.")
237
-
238
- for model_name in potential_model_names:
239
- if ollama_model_installed(ollama_connection, model_name):
240
- return ChatOllama(model=model_name, base_url=ollama_base_url())
241
-
242
- raise ValueError(f"Model {model_name} not installed on Ollama")
243
- elif provider.name == ModelProviderName.openrouter:
244
- api_key = Config.shared().open_router_api_key
245
- base_url = getenv("OPENROUTER_BASE_URL") or "https://openrouter.ai/api/v1"
246
- return ChatOpenAI(
247
- **provider.provider_options,
248
- openai_api_key=api_key, # type: ignore[arg-type]
249
- openai_api_base=base_url, # type: ignore[arg-type]
250
- default_headers={
251
- "HTTP-Referer": "https://getkiln.ai/openrouter",
252
- "X-Title": "KilnAI",
253
- },
254
- )
255
- else:
256
- raise ValueError(f"Invalid model or provider: {model_name} - {provider.name}")
@@ -1,58 +0,0 @@
1
- kiln_ai/__init__.py,sha256=Sc4z8LRVFMwJUoc_DPVUriSXTZ6PO9MaJ80PhRbKyB8,34
2
- kiln_ai/adapters/__init__.py,sha256=8-YlnTh3gsaPeEArFVLIqGE7-tbssI42fub4OQBp_DA,970
3
- kiln_ai/adapters/adapter_registry.py,sha256=zO-0_CWF3ZGA-1420_0Uwq976o3-7WXxEY_aTeu0PzQ,688
4
- kiln_ai/adapters/base_adapter.py,sha256=POSdMrZFqd0IJnLpVoyc1w9CGhdNtePZyQPgdBBRUpQ,6276
5
- kiln_ai/adapters/langchain_adapters.py,sha256=S9VZ9JLBDEue-vh00iNv4wM1rdBQRNnF0ubeOFLAdZc,10861
6
- kiln_ai/adapters/ml_model_list.py,sha256=Fl8PUlecibRjcWkKFwfge4cFz7jusVMeK35ewaWw8ac,25446
7
- kiln_ai/adapters/ollama_tools.py,sha256=0Of6ySbJ2d4j--9laOL6QKgRUQSrqX8dJUIrz20n59s,3561
8
- kiln_ai/adapters/prompt_builders.py,sha256=Mdu-f1mC9hWIDwoF7Qwd9F99GDx6oNGvtEZN-SrOsNM,10325
9
- kiln_ai/adapters/provider_tools.py,sha256=m7X93DFbnYnw5H2HDumFJKpTKmeau-GZLv-SUmssJZ0,12381
10
- kiln_ai/adapters/test_langchain_adapter.py,sha256=QiVdCUJJ_uEzD0uA0jYMC3ZO4NTGJLm9iWTwvQfdFxI,12037
11
- kiln_ai/adapters/test_ollama_tools.py,sha256=2KwYVaj3ySV3ld-z51TCGbJEMdb3MZj2eoEicIWz3Q4,2552
12
- kiln_ai/adapters/test_prompt_adaptors.py,sha256=Mc0oSYgDLxfP2u3GVR_iDWaYctTQ8Ug1u6UGvWA90lM,7494
13
- kiln_ai/adapters/test_prompt_builders.py,sha256=sU0bSBZa9Y4Q-mmkDf3HbQ0MNSWk5o9bC9sNgtnBokk,14598
14
- kiln_ai/adapters/test_provider_tools.py,sha256=S1PSXd5MJnPvBe7Hq4FijptB0lbmym2E6iztncAvuUg,20752
15
- kiln_ai/adapters/test_saving_adapter_results.py,sha256=SYYh2xY1zmeKhFHfWAuEY4pEiLd8SitSV5ewGOTmaOI,6447
16
- kiln_ai/adapters/test_structured_output.py,sha256=9Mgng-HOXiZ_WcJG5cpMWhtsdJt8Rn-7qIouBWvWVoU,9324
17
- kiln_ai/adapters/data_gen/__init__.py,sha256=QTZWaf7kq5BorhPvexJfwDEKmjRmIbhwW9ei8LW2SIs,276
18
- kiln_ai/adapters/data_gen/data_gen_prompts.py,sha256=kudjHnAz7L3q0k_NLyTlaIV7M0uRFrxXNcfcnjOE2uc,5810
19
- kiln_ai/adapters/data_gen/data_gen_task.py,sha256=vwjC47YDrsl4GtBJpK6FWh07TGd8CalhZOX4p4YBX8w,5904
20
- kiln_ai/adapters/data_gen/test_data_gen_task.py,sha256=TC_n1iWgfLp87q7eNE3ZunVCuk_J25vfw-ohi2qtnp0,9668
21
- kiln_ai/adapters/fine_tune/__init__.py,sha256=DxdTR60chwgck1aEoVYWyfWi6Ed2ZkdJj0lar-SEAj4,257
22
- kiln_ai/adapters/fine_tune/base_finetune.py,sha256=-3hyWZXImJomaZeAME6mxbjifQDAn7hwlgTm8VVkxkg,5861
23
- kiln_ai/adapters/fine_tune/dataset_formatter.py,sha256=DzmUaCaUalTYaX2aNtnb_oucb5ZghI13RDVwtxECMUU,6340
24
- kiln_ai/adapters/fine_tune/finetune_registry.py,sha256=H1B-opCTlIyd9JlIFTKsY_ctxUX9ziEc49_gnmg1SZg,483
25
- kiln_ai/adapters/fine_tune/fireworks_finetune.py,sha256=B5o_-A0_Y_QYtgUXZWhKAjR1MeCXvZWz5scZZuK3pMg,13303
26
- kiln_ai/adapters/fine_tune/openai_finetune.py,sha256=WJKczDN7CA1TJnIokzZu7hbcZiOv9JIRA1scv1zDe8o,8312
27
- kiln_ai/adapters/fine_tune/test_base_finetune.py,sha256=YOCdQCL5Q0kpBiaU3hccafknCg0kIFRyp16lttR2Io0,9843
28
- kiln_ai/adapters/fine_tune/test_dataset_formatter.py,sha256=7atbHb4kFtgSmHQMNrSnNpH2ZO8drpnfwKWCsx1p8mM,11127
29
- kiln_ai/adapters/fine_tune/test_fireworks_tinetune.py,sha256=Y6r5BxsevFeEUHJikfFLeeG6fbPvLOxQpqIMpn-SpvU,15272
30
- kiln_ai/adapters/fine_tune/test_openai_finetune.py,sha256=EF-f0JbVaPiVXF0eBYbwTKdi5thA45s-XbVB0iUBI00,16629
31
- kiln_ai/adapters/repair/__init__.py,sha256=dOO9MEpEhjiwzDVFg3MNfA2bKMPlax9iekDatpTkX8E,217
32
- kiln_ai/adapters/repair/repair_task.py,sha256=L7WTFEpfaGpWXHPQf7BTNL0wiDPbeBIVqn7qNV_SeZc,3354
33
- kiln_ai/adapters/repair/test_repair_task.py,sha256=JBcyqyQYWniiUo4FSle9kUEsnbTsl5JN1LTRN1SRnrE,7940
34
- kiln_ai/datamodel/__init__.py,sha256=zhiyzeEgaE3IYunmv6KEjX43Biby6uxBjBMMEMAjE1g,28337
35
- kiln_ai/datamodel/basemodel.py,sha256=zWyoYgsA2tmP55jl9H18xQ0yl9vM98aTOFJTUnW5ulU,20984
36
- kiln_ai/datamodel/json_schema.py,sha256=l4BIq1ItLHgcSHqsqDOchegLLHY48U4yR0SP2aMb4i0,2449
37
- kiln_ai/datamodel/model_cache.py,sha256=d8VjPp0p5BhrGSkx9soKyxO6VWW-bcesNSJI21ySvmA,4369
38
- kiln_ai/datamodel/registry.py,sha256=XwGFXJFKZtOpR1Z9ven6SftggfADdZRm8TFxCEVtfUQ,957
39
- kiln_ai/datamodel/test_basemodel.py,sha256=r40jWaW1073ZdIhHe-GHFE8jJDD9ocauItInOsK8pWU,15234
40
- kiln_ai/datamodel/test_dataset_split.py,sha256=Ug-vbga-opGN_LF51Mszx5NN4wXbx3MIP1LiNzIn5Nw,7264
41
- kiln_ai/datamodel/test_datasource.py,sha256=GAiZz31qezVVPwFqnt8wHMu15WvtlV89jw8C1Ue6YNI,3165
42
- kiln_ai/datamodel/test_example_models.py,sha256=9Jhc0bvbM4hCjJGiQNgWH5rwyIsGuneAD8h4o1P3zAY,20356
43
- kiln_ai/datamodel/test_json_schema.py,sha256=vdLnTQxxrcmuSrf6iOmkrmpfh7JnxqIw4B4dbDAAcZ4,3199
44
- kiln_ai/datamodel/test_model_cache.py,sha256=9HvK2etVZJyepdlRz5ja7u1CnyzhsV4_BupJF77yBxE,7285
45
- kiln_ai/datamodel/test_models.py,sha256=t2Uthl559QioTyFAbQUk4BD3PqAywl3u1RSh4tHiMP0,15071
46
- kiln_ai/datamodel/test_nested_save.py,sha256=xciCddqvPyKyoyjC5Lx_3Kh1t4LJv1xYRAPazR3SRcs,5588
47
- kiln_ai/datamodel/test_output_rating.py,sha256=zvPIp2shAgCs2RQBgwYoL09fRA3krHvgAqUa91RlWR0,15125
48
- kiln_ai/datamodel/test_registry.py,sha256=PhS4anLi5Bf_023obuTlO5DALhtPB8WIc_bX12Yg6Po,2705
49
- kiln_ai/utils/__init__.py,sha256=PTD0MwBCKAMIOGsTAwsFaJOusTJJoRFTfOGqRvCaU-E,142
50
- kiln_ai/utils/config.py,sha256=u289b2AHuQoPup_vILTSpgsO29fxJyU8zy8BwADAtvs,6859
51
- kiln_ai/utils/formatting.py,sha256=VtB9oag0lOGv17dwT7OPX_3HzBfaU9GsLH-iLete0yM,97
52
- kiln_ai/utils/name_generator.py,sha256=v26TgpCwQbhQFcZvzgjZvURinjrOyyFhxpsI6NQrHKc,1914
53
- kiln_ai/utils/test_config.py,sha256=Jw3nMFeIgZUsZDRJJY2HpB-2EkR2NoZ-rDe_o9oA7ws,9174
54
- kiln_ai/utils/test_name_geneator.py,sha256=9-hSTBshyakqlPbFnNcggwLrL7lcPTitauBYHg9jFWI,1513
55
- kiln_ai-0.8.1.dist-info/METADATA,sha256=hcnd6e-iYHbE9GsI0W2deM8O6UgZOXl70VI1j8kIJrg,9050
56
- kiln_ai-0.8.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
57
- kiln_ai-0.8.1.dist-info/licenses/LICENSE.txt,sha256=_NA5pnTYgRRr4qH6lE3X-TuZJ8iRcMUi5ASoGr-lEx8,1209
58
- kiln_ai-0.8.1.dist-info/RECORD,,