kiln-ai 0.8.1__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of kiln-ai has been flagged as potentially problematic.

Files changed (88):
  1. kiln_ai/adapters/__init__.py +7 -7
  2. kiln_ai/adapters/adapter_registry.py +81 -10
  3. kiln_ai/adapters/data_gen/data_gen_task.py +21 -3
  4. kiln_ai/adapters/data_gen/test_data_gen_task.py +23 -3
  5. kiln_ai/adapters/eval/base_eval.py +164 -0
  6. kiln_ai/adapters/eval/eval_runner.py +267 -0
  7. kiln_ai/adapters/eval/g_eval.py +367 -0
  8. kiln_ai/adapters/eval/registry.py +16 -0
  9. kiln_ai/adapters/eval/test_base_eval.py +324 -0
  10. kiln_ai/adapters/eval/test_eval_runner.py +640 -0
  11. kiln_ai/adapters/eval/test_g_eval.py +497 -0
  12. kiln_ai/adapters/eval/test_g_eval_data.py +4 -0
  13. kiln_ai/adapters/fine_tune/base_finetune.py +5 -1
  14. kiln_ai/adapters/fine_tune/dataset_formatter.py +310 -65
  15. kiln_ai/adapters/fine_tune/fireworks_finetune.py +47 -32
  16. kiln_ai/adapters/fine_tune/openai_finetune.py +12 -11
  17. kiln_ai/adapters/fine_tune/test_base_finetune.py +19 -0
  18. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +472 -129
  19. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +114 -22
  20. kiln_ai/adapters/fine_tune/test_openai_finetune.py +125 -14
  21. kiln_ai/adapters/ml_model_list.py +434 -93
  22. kiln_ai/adapters/model_adapters/__init__.py +18 -0
  23. kiln_ai/adapters/model_adapters/base_adapter.py +250 -0
  24. kiln_ai/adapters/model_adapters/langchain_adapters.py +309 -0
  25. kiln_ai/adapters/model_adapters/openai_compatible_config.py +10 -0
  26. kiln_ai/adapters/model_adapters/openai_model_adapter.py +289 -0
  27. kiln_ai/adapters/model_adapters/test_base_adapter.py +199 -0
  28. kiln_ai/adapters/{test_langchain_adapter.py → model_adapters/test_langchain_adapter.py} +105 -97
  29. kiln_ai/adapters/model_adapters/test_openai_model_adapter.py +216 -0
  30. kiln_ai/adapters/{test_saving_adapter_results.py → model_adapters/test_saving_adapter_results.py} +80 -30
  31. kiln_ai/adapters/{test_structured_output.py → model_adapters/test_structured_output.py} +125 -46
  32. kiln_ai/adapters/ollama_tools.py +0 -1
  33. kiln_ai/adapters/parsers/__init__.py +10 -0
  34. kiln_ai/adapters/parsers/base_parser.py +12 -0
  35. kiln_ai/adapters/parsers/json_parser.py +37 -0
  36. kiln_ai/adapters/parsers/parser_registry.py +19 -0
  37. kiln_ai/adapters/parsers/r1_parser.py +69 -0
  38. kiln_ai/adapters/parsers/test_json_parser.py +81 -0
  39. kiln_ai/adapters/parsers/test_parser_registry.py +32 -0
  40. kiln_ai/adapters/parsers/test_r1_parser.py +144 -0
  41. kiln_ai/adapters/prompt_builders.py +193 -49
  42. kiln_ai/adapters/provider_tools.py +91 -36
  43. kiln_ai/adapters/repair/repair_task.py +18 -19
  44. kiln_ai/adapters/repair/test_repair_task.py +7 -7
  45. kiln_ai/adapters/run_output.py +11 -0
  46. kiln_ai/adapters/test_adapter_registry.py +177 -0
  47. kiln_ai/adapters/test_generate_docs.py +69 -0
  48. kiln_ai/adapters/test_ollama_tools.py +0 -1
  49. kiln_ai/adapters/test_prompt_adaptors.py +25 -18
  50. kiln_ai/adapters/test_prompt_builders.py +265 -44
  51. kiln_ai/adapters/test_provider_tools.py +268 -46
  52. kiln_ai/datamodel/__init__.py +51 -772
  53. kiln_ai/datamodel/basemodel.py +31 -11
  54. kiln_ai/datamodel/datamodel_enums.py +58 -0
  55. kiln_ai/datamodel/dataset_filters.py +114 -0
  56. kiln_ai/datamodel/dataset_split.py +170 -0
  57. kiln_ai/datamodel/eval.py +298 -0
  58. kiln_ai/datamodel/finetune.py +105 -0
  59. kiln_ai/datamodel/json_schema.py +14 -3
  60. kiln_ai/datamodel/model_cache.py +8 -3
  61. kiln_ai/datamodel/project.py +23 -0
  62. kiln_ai/datamodel/prompt.py +37 -0
  63. kiln_ai/datamodel/prompt_id.py +83 -0
  64. kiln_ai/datamodel/strict_mode.py +24 -0
  65. kiln_ai/datamodel/task.py +181 -0
  66. kiln_ai/datamodel/task_output.py +321 -0
  67. kiln_ai/datamodel/task_run.py +164 -0
  68. kiln_ai/datamodel/test_basemodel.py +80 -2
  69. kiln_ai/datamodel/test_dataset_filters.py +71 -0
  70. kiln_ai/datamodel/test_dataset_split.py +127 -6
  71. kiln_ai/datamodel/test_datasource.py +3 -2
  72. kiln_ai/datamodel/test_eval_model.py +635 -0
  73. kiln_ai/datamodel/test_example_models.py +34 -17
  74. kiln_ai/datamodel/test_json_schema.py +23 -0
  75. kiln_ai/datamodel/test_model_cache.py +24 -0
  76. kiln_ai/datamodel/test_model_perf.py +125 -0
  77. kiln_ai/datamodel/test_models.py +131 -2
  78. kiln_ai/datamodel/test_prompt_id.py +129 -0
  79. kiln_ai/datamodel/test_task.py +159 -0
  80. kiln_ai/utils/config.py +6 -1
  81. kiln_ai/utils/exhaustive_error.py +6 -0
  82. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/METADATA +45 -7
  83. kiln_ai-0.12.0.dist-info/RECORD +100 -0
  84. kiln_ai/adapters/base_adapter.py +0 -191
  85. kiln_ai/adapters/langchain_adapters.py +0 -256
  86. kiln_ai-0.8.1.dist-info/RECORD +0 -58
  87. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/WHEEL +0 -0
  88. {kiln_ai-0.8.1.dist-info → kiln_ai-0.12.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -3,6 +3,8 @@ from typing import Dict, List
 
 from pydantic import BaseModel
 
+from kiln_ai.datamodel import StructuredOutputMode
+
 """
 Provides model configuration and management for various LLM providers and models.
 This module handles the integration with different AI model providers and their respective models,
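The new top-level import is StructuredOutputMode, which effectively replaces the old per-adapter adapter_options plumbing removed later in this diff. As a rough sketch of its shape, inferred purely from the values referenced in this file (the real definition is importable from kiln_ai.datamodel and may carry more members):

    from enum import Enum

    class StructuredOutputMode(str, Enum):
        # Inferred members: only the values referenced in this diff are listed.
        default = "default"
        json_schema = "json_schema"  # pass a JSON schema to the provider API
        function_calling = "function_calling"  # request output via a tool/function call
        function_calling_weak = "function_calling_weak"  # function calling without strict=True
        json_mode = "json_mode"  # provider JSON mode, no schema enforcement
        json_instructions = "json_instructions"  # schema described in the prompt
        json_instruction_and_object = "json_instruction_and_object"  # prompt instructions plus JSON mode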
@@ -40,6 +42,9 @@ class ModelFamily(str, Enum):
     claude = "claude"
     mixtral = "mixtral"
     qwen = "qwen"
+    deepseek = "deepseek"
+    dolphin = "dolphin"
+    grok = "grok"
 
 
 # Where models have instruct and raw versions, instruct is default and raw is specified
@@ -60,6 +65,7 @@ class ModelName(str, Enum):
     gpt_4o_mini = "gpt_4o_mini"
     gpt_4o = "gpt_4o"
     phi_3_5 = "phi_3_5"
+    phi_4 = "phi_4"
     mistral_large = "mistral_large"
     mistral_nemo = "mistral_nemo"
     gemma_2_2b = "gemma_2_2b"
@@ -67,13 +73,35 @@ class ModelName(str, Enum):
     gemma_2_27b = "gemma_2_27b"
     claude_3_5_haiku = "claude_3_5_haiku"
     claude_3_5_sonnet = "claude_3_5_sonnet"
+    claude_3_7_sonnet = "claude_3_7_sonnet"
+    claude_3_7_sonnet_thinking = "claude_3_7_sonnet_thinking"
     gemini_1_5_flash = "gemini_1_5_flash"
     gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
     gemini_1_5_pro = "gemini_1_5_pro"
+    gemini_2_0_flash = "gemini_2_0_flash"
     nemotron_70b = "nemotron_70b"
     mixtral_8x7b = "mixtral_8x7b"
     qwen_2p5_7b = "qwen_2p5_7b"
     qwen_2p5_72b = "qwen_2p5_72b"
+    deepseek_3 = "deepseek_3"
+    deepseek_r1 = "deepseek_r1"
+    mistral_small_3 = "mistral_small_3"
+    deepseek_r1_distill_qwen_32b = "deepseek_r1_distill_qwen_32b"
+    deepseek_r1_distill_llama_70b = "deepseek_r1_distill_llama_70b"
+    deepseek_r1_distill_qwen_14b = "deepseek_r1_distill_qwen_14b"
+    deepseek_r1_distill_qwen_1p5b = "deepseek_r1_distill_qwen_1p5b"
+    deepseek_r1_distill_qwen_7b = "deepseek_r1_distill_qwen_7b"
+    deepseek_r1_distill_llama_8b = "deepseek_r1_distill_llama_8b"
+    dolphin_2_9_8x22b = "dolphin_2_9_8x22b"
+    grok_2 = "grok_2"
+
+
+class ModelParserID(str, Enum):
+    """
+    Enumeration of supported model parsers.
+    """
+
+    r1_thinking = "r1_thinking"
 
 
 class KilnModelProvider(BaseModel):
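ModelParserID.r1_thinking corresponds to the new parser added under kiln_ai/adapters/parsers/r1_parser.py in this release. Conceptually, an R1-style parser splits the <think>...</think> block from the final answer. A hypothetical standalone version, for illustration only (the package's actual parser has its own API):

    import re

    # Hypothetical helper, not the package's implementation.
    def split_r1_thinking(response: str) -> tuple[str | None, str]:
        # Returns (thinking, answer): thinking is the <think>...</think> body
        # if present; answer is everything after the closing tag.
        match = re.search(r"<think>(.*?)</think>", response, re.DOTALL)
        if not match:
            return None, response.strip()
        return match.group(1).strip(), response[match.end():].strip()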
@@ -87,7 +115,9 @@ class KilnModelProvider(BaseModel):
         untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable.
         provider_finetune_id: The finetune ID for the provider, if applicable
         provider_options: Additional provider-specific configuration options
-        adapter_options: Additional options specific to the adapter. Top level key should be adapter ID.
+        structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output.
+        parser: A parser to use for the model, if applicable
+        reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.
     """
 
     name: ModelProviderName
@@ -96,7 +126,18 @@ class KilnModelProvider(BaseModel):
     untested_model: bool = False
     provider_finetune_id: str | None = None
     provider_options: Dict = {}
-    adapter_options: Dict = {}
+    structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
+    parser: ModelParserID | None = None
+    reasoning_capable: bool = False
+    supports_logprobs: bool = False
+
+    # TODO P1: Need a more generalized way to handle custom provider parameters.
+    # Making them quite declarative here for now, isolating provider specific logic
+    # to this file. Later I should be able to override anything in this file via config.
+    r1_openrouter_options: bool = False
+    require_openrouter_reasoning: bool = False
+    logprobs_openrouter_options: bool = False
+    openrouter_skip_required_parameters: bool = False
 
 
 class KilnModel(BaseModel):
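With these fields, everything an adapter needs to call a model correctly is declared on the provider entry itself. For example, the DeepSeek R1 Fireworks entry added later in this diff combines a parser, a structured output mode, and the reasoning flag in one declaration:

    KilnModelProvider(
        name=ModelProviderName.fireworks_ai,
        provider_options={"model": "accounts/fireworks/models/deepseek-r1"},
        parser=ModelParserID.r1_thinking,
        structured_output_mode=StructuredOutputMode.json_instructions,
        reasoning_capable=True,
    )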
@@ -115,7 +156,6 @@ class KilnModel(BaseModel):
     name: str
     friendly_name: str
     providers: List[KilnModelProvider]
-    supports_structured_output: bool = True
 
 
 built_in_models: List[KilnModel] = [
@@ -129,10 +169,15 @@ built_in_models: List[KilnModel] = [
                 name=ModelProviderName.openai,
                 provider_options={"model": "gpt-4o-mini"},
                 provider_finetune_id="gpt-4o-mini-2024-07-18",
+                structured_output_mode=StructuredOutputMode.json_schema,
+                supports_logprobs=True,
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 provider_options={"model": "openai/gpt-4o-mini"},
+                structured_output_mode=StructuredOutputMode.json_schema,
+                supports_logprobs=True,
+                logprobs_openrouter_options=True,
             ),
         ],
     ),
@@ -146,10 +191,15 @@ built_in_models: List[KilnModel] = [
                 name=ModelProviderName.openai,
                 provider_options={"model": "gpt-4o"},
                 provider_finetune_id="gpt-4o-2024-08-06",
+                structured_output_mode=StructuredOutputMode.json_schema,
+                supports_logprobs=True,
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
-                provider_options={"model": "openai/gpt-4o-2024-08-06"},
+                provider_options={"model": "openai/gpt-4o"},
+                structured_output_mode=StructuredOutputMode.json_schema,
+                supports_logprobs=True,
+                logprobs_openrouter_options=True,
             ),
         ],
     ),
@@ -161,6 +211,7 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                 provider_options={"model": "anthropic/claude-3-5-haiku"},
             ),
         ],
@@ -173,10 +224,40 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                 provider_options={"model": "anthropic/claude-3.5-sonnet"},
             ),
         ],
     ),
+    # Claude 3.7 Sonnet
+    KilnModel(
+        family=ModelFamily.claude,
+        name=ModelName.claude_3_7_sonnet,
+        friendly_name="Claude 3.7 Sonnet",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.function_calling,
+                provider_options={"model": "anthropic/claude-3.7-sonnet"},
+            ),
+        ],
+    ),
+    # Claude 3.7 Sonnet Thinking
+    KilnModel(
+        family=ModelFamily.claude,
+        name=ModelName.claude_3_7_sonnet_thinking,
+        friendly_name="Claude 3.7 Sonnet Thinking",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                provider_options={"model": "anthropic/claude-3.7-sonnet:thinking"},
+                reasoning_capable=True,
+                # For reasoning models, we need to use json_instructions with OpenRouter
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                require_openrouter_reasoning=True,
+            ),
+        ],
+    ),
     # Gemini 1.5 Pro
     KilnModel(
         family=ModelFamily.gemini,
@@ -185,9 +266,8 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
-                supports_structured_output=False,  # it should, but doesn't work on openrouter
-                supports_data_gen=False,  # doesn't work on openrouter
                 provider_options={"model": "google/gemini-pro-1.5"},
+                structured_output_mode=StructuredOutputMode.json_schema,
             ),
         ],
     ),
@@ -199,8 +279,8 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
-                supports_data_gen=False,
                 provider_options={"model": "google/gemini-flash-1.5"},
+                structured_output_mode=StructuredOutputMode.json_schema,
             ),
         ],
     ),
@@ -212,9 +292,21 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
-                supports_structured_output=False,
-                supports_data_gen=False,
                 provider_options={"model": "google/gemini-flash-1.5-8b"},
+                structured_output_mode=StructuredOutputMode.json_mode,
+            ),
+        ],
+    ),
+    # Gemini 2.0 Flash
+    KilnModel(
+        family=ModelFamily.gemini,
+        name=ModelName.gemini_2_0_flash,
+        friendly_name="Gemini 2.0 Flash",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_schema,
+                provider_options={"model": "google/gemini-2.0-flash-001"},
             ),
         ],
     ),
@@ -244,7 +336,7 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.amazon_bedrock,
-                supports_structured_output=False,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 supports_data_gen=False,
                 provider_options={
                     "model": "meta.llama3-1-8b-instruct-v1:0",
@@ -253,7 +345,7 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
-                supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={
                     "model": "llama3.1:8b",
                     "model_aliases": ["llama3.1"],  # 8b is default
@@ -261,14 +353,14 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
-                supports_structured_output=False,
                 supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.function_calling,
                 provider_options={"model": "meta-llama/llama-3.1-8b-instruct"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
-                supports_structured_output=False,
-                supports_data_gen=False,
+                # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
+                structured_output_mode=StructuredOutputMode.json_mode,
                 provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p1-8b-instruct"
@@ -282,14 +374,9 @@ built_in_models: List[KilnModel] = [
         name=ModelName.llama_3_1_70b,
         friendly_name="Llama 3.1 70B",
         providers=[
-            KilnModelProvider(
-                name=ModelProviderName.groq,
-                provider_options={"model": "llama-3.1-70b-versatile"},
-            ),
             KilnModelProvider(
                 name=ModelProviderName.amazon_bedrock,
-                # AWS 70b not working as well as the others.
-                supports_structured_output=False,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 supports_data_gen=False,
                 provider_options={
                     "model": "meta.llama3-1-70b-instruct-v1:0",
@@ -298,14 +385,22 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                supports_data_gen=False,
+                # Need to not pass "strict=True" to the function call to get this to work with logprobs for some reason. Openrouter issue.
+                structured_output_mode=StructuredOutputMode.function_calling_weak,
                 provider_options={"model": "meta-llama/llama-3.1-70b-instruct"},
+                supports_logprobs=True,
+                logprobs_openrouter_options=True,
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "llama3.1:70b"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
+                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
+                structured_output_mode=StructuredOutputMode.function_calling,
                 provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p1-70b-instruct"
@@ -321,6 +416,7 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.amazon_bedrock,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 supports_data_gen=False,
                 provider_options={
                     "model": "meta.llama3-1-405b-instruct-v1:0",
@@ -329,15 +425,18 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "llama3.1:405b"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.function_calling,
                 provider_options={"model": "meta-llama/llama-3.1-405b-instruct"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
                 # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
+                structured_output_mode=StructuredOutputMode.function_calling,
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p1-405b-instruct"
                 },
@@ -353,6 +452,7 @@ built_in_models: List[KilnModel] = [
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 provider_options={"model": "mistralai/mistral-nemo"},
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
             ),
         ],
     ),
@@ -364,6 +464,7 @@ built_in_models: List[KilnModel] = [
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.amazon_bedrock,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={
                     "model": "mistral.mistral-large-2407-v1:0",
                     "region_name": "us-west-2",  # only in west-2
@@ -371,10 +472,12 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "mistralai/mistral-large"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "mistral-large"},
             ),
         ],
@@ -385,10 +488,16 @@ built_in_models: List[KilnModel] = [
         name=ModelName.llama_3_2_1b,
         friendly_name="Llama 3.2 1B",
         providers=[
+            KilnModelProvider(
+                name=ModelProviderName.groq,
+                provider_options={"model": "llama-3.2-1b-preview"},
+                supports_data_gen=False,
+            ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 supports_structured_output=False,
                 supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                 provider_options={"model": "meta-llama/llama-3.2-1b-instruct"},
             ),
             KilnModelProvider(
@@ -397,15 +506,6 @@ built_in_models: List[KilnModel] = [
                 supports_data_gen=False,
                 provider_options={"model": "llama3.2:1b"},
             ),
-            KilnModelProvider(
-                name=ModelProviderName.fireworks_ai,
-                provider_finetune_id="accounts/fireworks/models/llama-v3p2-1b-instruct",
-                supports_structured_output=False,
-                supports_data_gen=False,
-                provider_options={
-                    "model": "accounts/fireworks/models/llama-v3p2-1b-instruct"
-                },
-            ),
         ],
     ),
     # Llama 3.2 3B
@@ -414,23 +514,27 @@ built_in_models: List[KilnModel] = [
         name=ModelName.llama_3_2_3b,
         friendly_name="Llama 3.2 3B",
         providers=[
+            KilnModelProvider(
+                name=ModelProviderName.groq,
+                provider_options={"model": "llama-3.2-3b-preview"},
+                supports_data_gen=False,
+            ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 supports_structured_output=False,
                 supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "meta-llama/llama-3.2-3b-instruct"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
-                supports_structured_output=False,
                 supports_data_gen=False,
                 provider_options={"model": "llama3.2"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
                 provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
-                supports_structured_output=False,
-                supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_mode,
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p2-3b-instruct"
                 },
@@ -443,19 +547,18 @@ built_in_models: List[KilnModel] = [
         name=ModelName.llama_3_2_11b,
         friendly_name="Llama 3.2 11B",
         providers=[
+            KilnModelProvider(
+                name=ModelProviderName.groq,
+                provider_options={"model": "llama-3.2-11b-vision-preview"},
+            ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "meta-llama/llama-3.2-11b-vision-instruct"},
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
-                supports_structured_output=False,
-                supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "llama3.2-vision"},
             ),
             KilnModelProvider(
@@ -464,11 +567,7 @@ built_in_models: List[KilnModel] = [
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct"
                 },
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
+                structured_output_mode=StructuredOutputMode.json_mode,
             ),
         ],
     ),
@@ -478,17 +577,18 @@ built_in_models: List[KilnModel] = [
         name=ModelName.llama_3_2_90b,
         friendly_name="Llama 3.2 90B",
         providers=[
+            KilnModelProvider(
+                name=ModelProviderName.groq,
+                provider_options={"model": "llama-3.2-90b-vision-preview"},
+            ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "meta-llama/llama-3.2-90b-vision-instruct"},
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "llama3.2-vision:90b"},
             ),
             KilnModelProvider(
@@ -497,11 +597,7 @@ built_in_models: List[KilnModel] = [
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct"
                 },
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
+                structured_output_mode=StructuredOutputMode.json_mode,
             ),
         ],
     ),
@@ -514,14 +610,10 @@ built_in_models: List[KilnModel] = [
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 provider_options={"model": "meta-llama/llama-3.3-70b-instruct"},
-                # Openrouter not supporing tools yet. Once they do probably can remove. JSON mode sometimes works, but not consistently.
+                structured_output_mode=StructuredOutputMode.json_schema,
+                # Openrouter not working with json_schema or tools. JSON_schema sometimes works so force that, but not consistently so still not recommended.
                 supports_structured_output=False,
                 supports_data_gen=False,
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
             ),
             KilnModelProvider(
                 name=ModelProviderName.groq,
@@ -531,14 +623,15 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 provider_options={"model": "llama3.3"},
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
                 # Finetuning not live yet
                 # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
-                supports_structured_output=True,
-                supports_data_gen=True,
+                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
+                structured_output_mode=StructuredOutputMode.function_calling,
                 provider_options={
                     "model": "accounts/fireworks/models/llama-v3p3-70b-instruct"
                 },
@@ -550,10 +643,10 @@ built_in_models: List[KilnModel] = [
         family=ModelFamily.phi,
         name=ModelName.phi_3_5,
         friendly_name="Phi 3.5",
-        supports_structured_output=False,
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
                 supports_structured_output=False,
                 supports_data_gen=False,
                 provider_options={"model": "phi3.5"},
@@ -563,28 +656,47 @@ built_in_models: List[KilnModel] = [
                 supports_structured_output=False,
                 supports_data_gen=False,
                 provider_options={"model": "microsoft/phi-3.5-mini-128k-instruct"},
+                structured_output_mode=StructuredOutputMode.json_schema,
             ),
             KilnModelProvider(
                 name=ModelProviderName.fireworks_ai,
-                supports_structured_output=False,
-                supports_data_gen=False,
                 # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
+                structured_output_mode=StructuredOutputMode.json_mode,
+                supports_data_gen=False,
                 provider_options={
                     "model": "accounts/fireworks/models/phi-3-vision-128k-instruct"
                 },
             ),
         ],
     ),
+    # Phi 4
+    KilnModel(
+        family=ModelFamily.phi,
+        name=ModelName.phi_4,
+        friendly_name="Phi 4",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
+                provider_options={"model": "phi4"},
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                # JSON mode not consistent enough to enable in UI
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+                supports_data_gen=False,
+                provider_options={"model": "microsoft/phi-4"},
+            ),
+        ],
+    ),
     # Gemma 2 2.6b
     KilnModel(
         family=ModelFamily.gemma,
         name=ModelName.gemma_2_2b,
         friendly_name="Gemma 2 2B",
-        supports_structured_output=False,
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.ollama,
-                supports_structured_output=False,
                 supports_data_gen=False,
                 provider_options={
                     "model": "gemma2:2b",
@@ -597,7 +709,6 @@ built_in_models: List[KilnModel] = [
         family=ModelFamily.gemma,
         name=ModelName.gemma_2_9b,
         friendly_name="Gemma 2 9B",
-        supports_structured_output=False,
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.ollama,
@@ -608,6 +719,7 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                 supports_data_gen=False,
                 provider_options={"model": "google/gemma-2-9b-it"},
             ),
@@ -619,7 +731,6 @@ built_in_models: List[KilnModel] = [
         family=ModelFamily.gemma,
         name=ModelName.gemma_2_27b,
         friendly_name="Gemma 2 27B",
-        supports_structured_output=False,
         providers=[
             KilnModelProvider(
                 name=ModelProviderName.ollama,
@@ -630,6 +741,7 @@ built_in_models: List[KilnModel] = [
             ),
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                 supports_data_gen=False,
                 provider_options={"model": "google/gemma-2-27b-it"},
             ),
@@ -644,16 +756,11 @@ built_in_models: List[KilnModel] = [
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 provider_options={"model": "mistralai/mixtral-8x7b-instruct"},
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
+                supports_data_gen=False,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
-                supports_structured_output=False,
-                supports_data_gen=False,
                 provider_options={"model": "mixtral"},
             ),
         ],
@@ -667,14 +774,7 @@ built_in_models: List[KilnModel] = [
             KilnModelProvider(
                 name=ModelProviderName.openrouter,
                 provider_options={"model": "qwen/qwen-2.5-7b-instruct"},
-                # Tool calls not supported. JSON doesn't error, but fails.
-                supports_structured_output=False,
-                supports_data_gen=False,
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
@@ -694,11 +794,7 @@ built_in_models: List[KilnModel] = [
                 # Not consistent with structure data. Works sometimes but not often
                 supports_structured_output=False,
                 supports_data_gen=False,
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
-                },
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
             ),
             KilnModelProvider(
                 name=ModelProviderName.ollama,
@@ -711,11 +807,256 @@ built_in_models: List[KilnModel] = [
                 },
                 # Fireworks will start tuning, but it never finishes.
                 # provider_finetune_id="accounts/fireworks/models/qwen2p5-72b-instruct",
-                adapter_options={
-                    "langchain": {
-                        "with_structured_output_options": {"method": "json_mode"}
-                    }
+                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
+                structured_output_mode=StructuredOutputMode.function_calling,
+            ),
+        ],
+    ),
+    # Mistral Small 3
+    KilnModel(
+        family=ModelFamily.mistral,
+        name=ModelName.mistral_small_3,
+        friendly_name="Mistral Small 3",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+                provider_options={"model": "mistralai/mistral-small-24b-instruct-2501"},
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                provider_options={"model": "mistral-small:24b"},
+            ),
+        ],
+    ),
+    # DeepSeek 3
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_3,
+        friendly_name="DeepSeek V3",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                provider_options={"model": "deepseek/deepseek-chat"},
+                structured_output_mode=StructuredOutputMode.function_calling,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.fireworks_ai,
+                provider_options={"model": "accounts/fireworks/models/deepseek-v3"},
+                structured_output_mode=StructuredOutputMode.json_mode,
+                supports_structured_output=True,
+                supports_data_gen=False,
+            ),
+        ],
+    ),
+    # DeepSeek R1
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1,
+        friendly_name="DeepSeek R1",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                provider_options={"model": "deepseek/deepseek-r1"},
+                # No custom parser -- openrouter implemented it themselves
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                reasoning_capable=True,
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.fireworks_ai,
+                provider_options={"model": "accounts/fireworks/models/deepseek-r1"},
+                parser=ModelParserID.r1_thinking,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                reasoning_capable=True,
+            ),
+            KilnModelProvider(
+                # I want your RAM
+                name=ModelProviderName.ollama,
+                provider_options={"model": "deepseek-r1:671b"},
+                parser=ModelParserID.r1_thinking,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                reasoning_capable=True,
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Qwen 32B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_qwen_32b,
+        friendly_name="DeepSeek R1 Distill Qwen 32B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-32b"},
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:32b"},
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Llama 70B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_llama_70b,
+        friendly_name="DeepSeek R1 Distill Llama 70B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek/deepseek-r1-distill-llama-70b"},
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                supports_data_gen=False,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:70b"},
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Qwen 14B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_qwen_14b,
+        friendly_name="DeepSeek R1 Distill Qwen 14B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                supports_data_gen=False,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-14b"},
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+                openrouter_skip_required_parameters=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                supports_data_gen=False,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:14b"},
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Llama 8B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_llama_8b,
+        friendly_name="DeepSeek R1 Distill Llama 8B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                supports_data_gen=False,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek/deepseek-r1-distill-llama-8b"},
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+                openrouter_skip_required_parameters=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                supports_data_gen=False,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:8b"},
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Qwen 7B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_qwen_7b,
+        friendly_name="DeepSeek R1 Distill Qwen 7B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                supports_data_gen=False,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:7b"},
+            ),
+        ],
+    ),
+    # DeepSeek R1 Distill Qwen 1.5B
+    KilnModel(
+        family=ModelFamily.deepseek,
+        name=ModelName.deepseek_r1_distill_qwen_1p5b,
+        friendly_name="DeepSeek R1 Distill Qwen 1.5B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                supports_structured_output=False,
+                supports_data_gen=False,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-1.5b"},
+                r1_openrouter_options=True,
+                require_openrouter_reasoning=True,
+                openrouter_skip_required_parameters=True,
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                supports_data_gen=False,
+                parser=ModelParserID.r1_thinking,
+                reasoning_capable=True,
+                structured_output_mode=StructuredOutputMode.json_instructions,
+                provider_options={"model": "deepseek-r1:1.5b"},
+            ),
+        ],
+    ),
+    # Dolphin 2.9 Mixtral 8x22B
+    KilnModel(
+        family=ModelFamily.dolphin,
+        name=ModelName.dolphin_2_9_8x22b,
+        friendly_name="Dolphin 2.9 8x22B",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.ollama,
+                structured_output_mode=StructuredOutputMode.json_schema,
+                supports_data_gen=True,
+                provider_options={"model": "dolphin-mixtral:8x22b"},
+            ),
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                provider_options={
+                    "model": "cognitivecomputations/dolphin-mixtral-8x22b"
                 },
+                supports_data_gen=True,
+                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+            ),
+        ],
+    ),
+    # Grok 2
+    KilnModel(
+        family=ModelFamily.grok,
+        name=ModelName.grok_2,
+        friendly_name="Grok 2",
+        providers=[
+            KilnModelProvider(
+                name=ModelProviderName.openrouter,
+                provider_options={"model": "x-ai/grok-2-1212"},
+                supports_structured_output=True,
+                supports_data_gen=True,
+                structured_output_mode=StructuredOutputMode.json_schema,
             ),
         ],
     ),
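For reference, the expanded built_in_models list is plain data, so downstream code can select a provider entry with a simple scan. A hypothetical lookup for illustration only (Kiln's own resolution presumably lives in provider_tools.py and adapter_registry.py, both changed in this release):

    # Hypothetical helper, not part of the package API.
    def get_provider(model_name: ModelName, provider: ModelProviderName) -> KilnModelProvider | None:
        for model in built_in_models:
            if model.name == model_name:
                for p in model.providers:
                    if p.name == provider:
                        return p
        return None

    r1 = get_provider(ModelName.deepseek_r1, ModelProviderName.ollama)
    assert r1 is not None and r1.reasoning_capable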