kiln-ai 0.11.1__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of kiln-ai might be problematic.

Files changed (80)
  1. kiln_ai/adapters/__init__.py +4 -0
  2. kiln_ai/adapters/adapter_registry.py +163 -39
  3. kiln_ai/adapters/data_gen/data_gen_task.py +18 -0
  4. kiln_ai/adapters/eval/__init__.py +28 -0
  5. kiln_ai/adapters/eval/base_eval.py +164 -0
  6. kiln_ai/adapters/eval/eval_runner.py +270 -0
  7. kiln_ai/adapters/eval/g_eval.py +368 -0
  8. kiln_ai/adapters/eval/registry.py +16 -0
  9. kiln_ai/adapters/eval/test_base_eval.py +325 -0
  10. kiln_ai/adapters/eval/test_eval_runner.py +641 -0
  11. kiln_ai/adapters/eval/test_g_eval.py +498 -0
  12. kiln_ai/adapters/eval/test_g_eval_data.py +4 -0
  13. kiln_ai/adapters/fine_tune/base_finetune.py +16 -2
  14. kiln_ai/adapters/fine_tune/finetune_registry.py +2 -0
  15. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +4 -1
  16. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +1 -1
  17. kiln_ai/adapters/fine_tune/test_openai_finetune.py +1 -1
  18. kiln_ai/adapters/fine_tune/test_together_finetune.py +531 -0
  19. kiln_ai/adapters/fine_tune/together_finetune.py +325 -0
  20. kiln_ai/adapters/ml_model_list.py +758 -163
  21. kiln_ai/adapters/model_adapters/__init__.py +2 -4
  22. kiln_ai/adapters/model_adapters/base_adapter.py +61 -43
  23. kiln_ai/adapters/model_adapters/litellm_adapter.py +391 -0
  24. kiln_ai/adapters/model_adapters/litellm_config.py +13 -0
  25. kiln_ai/adapters/model_adapters/test_base_adapter.py +22 -13
  26. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +407 -0
  27. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +41 -19
  28. kiln_ai/adapters/model_adapters/test_structured_output.py +59 -35
  29. kiln_ai/adapters/ollama_tools.py +3 -3
  30. kiln_ai/adapters/parsers/r1_parser.py +19 -14
  31. kiln_ai/adapters/parsers/test_r1_parser.py +17 -5
  32. kiln_ai/adapters/prompt_builders.py +80 -42
  33. kiln_ai/adapters/provider_tools.py +50 -58
  34. kiln_ai/adapters/repair/repair_task.py +9 -21
  35. kiln_ai/adapters/repair/test_repair_task.py +6 -6
  36. kiln_ai/adapters/run_output.py +3 -0
  37. kiln_ai/adapters/test_adapter_registry.py +26 -29
  38. kiln_ai/adapters/test_generate_docs.py +4 -4
  39. kiln_ai/adapters/test_ollama_tools.py +0 -1
  40. kiln_ai/adapters/test_prompt_adaptors.py +47 -33
  41. kiln_ai/adapters/test_prompt_builders.py +91 -31
  42. kiln_ai/adapters/test_provider_tools.py +26 -81
  43. kiln_ai/datamodel/__init__.py +50 -952
  44. kiln_ai/datamodel/basemodel.py +2 -0
  45. kiln_ai/datamodel/datamodel_enums.py +60 -0
  46. kiln_ai/datamodel/dataset_filters.py +114 -0
  47. kiln_ai/datamodel/dataset_split.py +170 -0
  48. kiln_ai/datamodel/eval.py +298 -0
  49. kiln_ai/datamodel/finetune.py +105 -0
  50. kiln_ai/datamodel/json_schema.py +7 -1
  51. kiln_ai/datamodel/project.py +23 -0
  52. kiln_ai/datamodel/prompt.py +37 -0
  53. kiln_ai/datamodel/prompt_id.py +83 -0
  54. kiln_ai/datamodel/strict_mode.py +24 -0
  55. kiln_ai/datamodel/task.py +181 -0
  56. kiln_ai/datamodel/task_output.py +328 -0
  57. kiln_ai/datamodel/task_run.py +164 -0
  58. kiln_ai/datamodel/test_basemodel.py +19 -11
  59. kiln_ai/datamodel/test_dataset_filters.py +71 -0
  60. kiln_ai/datamodel/test_dataset_split.py +32 -8
  61. kiln_ai/datamodel/test_datasource.py +22 -2
  62. kiln_ai/datamodel/test_eval_model.py +635 -0
  63. kiln_ai/datamodel/test_example_models.py +9 -13
  64. kiln_ai/datamodel/test_json_schema.py +23 -0
  65. kiln_ai/datamodel/test_models.py +2 -2
  66. kiln_ai/datamodel/test_prompt_id.py +129 -0
  67. kiln_ai/datamodel/test_task.py +159 -0
  68. kiln_ai/utils/config.py +43 -1
  69. kiln_ai/utils/dataset_import.py +232 -0
  70. kiln_ai/utils/test_dataset_import.py +596 -0
  71. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/METADATA +86 -6
  72. kiln_ai-0.13.0.dist-info/RECORD +103 -0
  73. kiln_ai/adapters/model_adapters/langchain_adapters.py +0 -302
  74. kiln_ai/adapters/model_adapters/openai_compatible_config.py +0 -11
  75. kiln_ai/adapters/model_adapters/openai_model_adapter.py +0 -246
  76. kiln_ai/adapters/model_adapters/test_langchain_adapter.py +0 -350
  77. kiln_ai/adapters/model_adapters/test_openai_model_adapter.py +0 -225
  78. kiln_ai-0.11.1.dist-info/RECORD +0 -76
  79. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/WHEEL +0 -0
  80. {kiln_ai-0.11.1.dist-info → kiln_ai-0.13.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -1,5 +1,5 @@
  from enum import Enum
- from typing import Dict, List
+ from typing import Dict, List, Literal

  from pydantic import BaseModel

@@ -26,6 +26,12 @@ class ModelProviderName(str, Enum):
  kiln_fine_tune = "kiln_fine_tune"
  kiln_custom_registry = "kiln_custom_registry"
  openai_compatible = "openai_compatible"
+ anthropic = "anthropic"
+ gemini_api = "gemini_api"
+ azure_openai = "azure_openai"
+ huggingface = "huggingface"
+ vertex = "vertex"
+ together_ai = "together_ai"


  class ModelFamily(str, Enum):
@@ -43,6 +49,8 @@ class ModelFamily(str, Enum):
  mixtral = "mixtral"
  qwen = "qwen"
  deepseek = "deepseek"
+ dolphin = "dolphin"
+ grok = "grok"


  # Where models have instruct and raw versions, instruct is default and raw is specified
@@ -62,15 +70,29 @@ class ModelName(str, Enum):
  llama_3_3_70b = "llama_3_3_70b"
  gpt_4o_mini = "gpt_4o_mini"
  gpt_4o = "gpt_4o"
+ gpt_o1_low = "gpt_o1_low"
+ gpt_o1_medium = "gpt_o1_medium"
+ gpt_o1_high = "gpt_o1_high"
+ gpt_o3_mini_low = "gpt_o3_mini_low"
+ gpt_o3_mini_medium = "gpt_o3_mini_medium"
+ gpt_o3_mini_high = "gpt_o3_mini_high"
  phi_3_5 = "phi_3_5"
  phi_4 = "phi_4"
+ phi_4_5p6b = "phi_4_5p6b"
+ phi_4_mini = "phi_4_mini"
  mistral_large = "mistral_large"
  mistral_nemo = "mistral_nemo"
  gemma_2_2b = "gemma_2_2b"
  gemma_2_9b = "gemma_2_9b"
  gemma_2_27b = "gemma_2_27b"
+ gemma_3_1b = "gemma_3_1b"
+ gemma_3_4b = "gemma_3_4b"
+ gemma_3_12b = "gemma_3_12b"
+ gemma_3_27b = "gemma_3_27b"
  claude_3_5_haiku = "claude_3_5_haiku"
  claude_3_5_sonnet = "claude_3_5_sonnet"
+ claude_3_7_sonnet = "claude_3_7_sonnet"
+ claude_3_7_sonnet_thinking = "claude_3_7_sonnet_thinking"
  gemini_1_5_flash = "gemini_1_5_flash"
  gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
  gemini_1_5_pro = "gemini_1_5_pro"
@@ -78,7 +100,9 @@ class ModelName(str, Enum):
  nemotron_70b = "nemotron_70b"
  mixtral_8x7b = "mixtral_8x7b"
  qwen_2p5_7b = "qwen_2p5_7b"
+ qwen_2p5_14b = "qwen_2p5_14b"
  qwen_2p5_72b = "qwen_2p5_72b"
+ qwq_32b = "qwq_32b"
  deepseek_3 = "deepseek_3"
  deepseek_r1 = "deepseek_r1"
  mistral_small_3 = "mistral_small_3"
@@ -88,6 +112,8 @@ class ModelName(str, Enum):
  deepseek_r1_distill_qwen_1p5b = "deepseek_r1_distill_qwen_1p5b"
  deepseek_r1_distill_qwen_7b = "deepseek_r1_distill_qwen_7b"
  deepseek_r1_distill_llama_8b = "deepseek_r1_distill_llama_8b"
+ dolphin_2_9_8x22b = "dolphin_2_9_8x22b"
+ grok_2 = "grok_2"


  class ModelParserID(str, Enum):
@@ -108,21 +134,32 @@ class KilnModelProvider(BaseModel):
  supports_data_gen: Whether the provider supports data generation
  untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable.
  provider_finetune_id: The finetune ID for the provider, if applicable
- provider_options: Additional provider-specific configuration options
  structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output.
  parser: A parser to use for the model, if applicable
  reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.
  """

  name: ModelProviderName
+ model_id: str | None = None
  supports_structured_output: bool = True
  supports_data_gen: bool = True
  untested_model: bool = False
  provider_finetune_id: str | None = None
- provider_options: Dict = {}
  structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
  parser: ModelParserID | None = None
  reasoning_capable: bool = False
+ supports_logprobs: bool = False
+
+ # TODO P1: Need a more generalized way to handle custom provider parameters.
+ # Making them quite declarative here for now, isolating provider specific logic
+ # to this file. Later I should be able to override anything in this file via config.
+ r1_openrouter_options: bool = False
+ require_openrouter_reasoning: bool = False
+ logprobs_openrouter_options: bool = False
+ openrouter_skip_required_parameters: bool = False
+ thinking_level: Literal["low", "medium", "high"] | None = None
+ ollama_model_aliases: List[str] | None = None
+ anthropic_extended_thinking: bool = False


  class KilnModel(BaseModel):
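The hunk above replaces the free-form provider_options dict with a typed model_id plus declarative, provider-specific flags. For illustration only, a hypothetical entry using the new fields (values invented, not taken from the package) would read:

KilnModelProvider(
    name=ModelProviderName.openrouter,
    model_id="vendor/example-model",  # in 0.11.1: provider_options={"model": ...}
    structured_output_mode=StructuredOutputMode.json_schema,
    supports_logprobs=True,  # new capability flag; callers can gate logprobs on it
    logprobs_openrouter_options=True,  # new OpenRouter-specific logprobs handling
)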
@@ -152,14 +189,21 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openai,
- provider_options={"model": "gpt-4o-mini"},
+ model_id="gpt-4o-mini",
  provider_finetune_id="gpt-4o-mini-2024-07-18",
  structured_output_mode=StructuredOutputMode.json_schema,
+ supports_logprobs=True,
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "openai/gpt-4o-mini"},
+ model_id="openai/gpt-4o-mini",
  structured_output_mode=StructuredOutputMode.json_schema,
+ supports_logprobs=True,
+ logprobs_openrouter_options=True,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="gpt-4o-mini",
  ),
  ],
  ),
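Both GPT-4o mini provider entries above now declare supports_logprobs=True. A minimal sketch of how a caller could gate on the flag (helper name assumed, not the package's actual call site; logprobs/top_logprobs are the standard OpenAI-style request parameters):

def logprob_kwargs(provider: KilnModelProvider, top_k: int = 5) -> dict:
    # Only request logprobs where the provider entry declares support.
    if not provider.supports_logprobs:
        return {}
    return {"logprobs": True, "top_logprobs": top_k}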
@@ -171,14 +215,141 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openai,
- provider_options={"model": "gpt-4o"},
+ model_id="gpt-4o",
  provider_finetune_id="gpt-4o-2024-08-06",
  structured_output_mode=StructuredOutputMode.json_schema,
+ supports_logprobs=True,
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "openai/gpt-4o"},
+ model_id="openai/gpt-4o",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ supports_logprobs=True,
+ logprobs_openrouter_options=True,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="gpt-4o",
+ ),
+ ],
+ ),
+ # GPT o3 Mini Low
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o3_mini_low,
+ friendly_name="GPT o3 Mini - Low",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o3-mini",
+ thinking_level="low",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o3-mini",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="low",
+ ),
+ ],
+ ),
+ # GPT o3 Mini Medium
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o3_mini_medium,
+ friendly_name="GPT o3 Mini - Medium",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o3-mini",
+ thinking_level="medium",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o3-mini",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="medium",
+ ),
+ ],
+ ),
+ # GPT o3 Mini High
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o3_mini_high,
+ friendly_name="GPT o3 Mini - High",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o3-mini",
+ thinking_level="high",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o3-mini",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="high",
+ ),
+ ],
+ ),
+ # GPT o1 Low
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o1_low,
+ friendly_name="GPT o1 - Low",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o1",
+ thinking_level="low",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o1",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="low",
+ ),
+ ],
+ ),
+ # GPT o1 Medium
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o1_medium,
+ friendly_name="GPT o1 - Medium",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o1",
+ thinking_level="medium",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o1",
  structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="medium",
+ ),
+ ],
+ ),
+ # GPT o1 High
+ KilnModel(
+ family=ModelFamily.gpt,
+ name=ModelName.gpt_o1_high,
+ friendly_name="GPT o1 - High",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openai,
+ model_id="o1",
+ thinking_level="high",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.azure_openai,
+ model_id="o1",
+ structured_output_mode=StructuredOutputMode.json_schema,
+ thinking_level="high",
  ),
  ],
  ),
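Each reasoning effort ships as its own ModelName entry: the three o3-mini variants (and likewise the o1 variants) share a single underlying model_id and differ only in the new thinking_level field. A sketch of how that field plausibly maps onto a request (reasoning_effort is the OpenAI parameter name for o-series effort; its use here is an assumption, not confirmed by this diff):

def reasoning_kwargs(provider: KilnModelProvider) -> dict:
    # Translate the declarative thinking_level into an o-series effort parameter.
    if provider.thinking_level is None:
        return {}
    return {"reasoning_effort": provider.thinking_level}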
@@ -191,7 +362,17 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={"model": "anthropic/claude-3-5-haiku"},
+ model_id="anthropic/claude-3-5-haiku",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.anthropic,
+ model_id="claude-3-5-haiku-20241022",
+ structured_output_mode=StructuredOutputMode.function_calling,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="claude-3-5-haiku",
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
  ),
  ],
  ),
@@ -204,50 +385,58 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={"model": "anthropic/claude-3.5-sonnet"},
+ model_id="anthropic/claude-3.5-sonnet",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.anthropic,
+ model_id="claude-3-5-sonnet-20241022",
+ structured_output_mode=StructuredOutputMode.function_calling,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="claude-3-5-sonnet",
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
  ),
  ],
  ),
- # DeepSeek 3
+ # Claude 3.7 Sonnet
  KilnModel(
- family=ModelFamily.deepseek,
- name=ModelName.deepseek_3,
- friendly_name="DeepSeek v3",
+ family=ModelFamily.claude,
+ name=ModelName.claude_3_7_sonnet,
+ friendly_name="Claude 3.7 Sonnet",
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "deepseek/deepseek-chat"},
+ structured_output_mode=StructuredOutputMode.function_calling,
+ model_id="anthropic/claude-3.7-sonnet",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.anthropic,
+ model_id="claude-3-7-sonnet-20250219",
  structured_output_mode=StructuredOutputMode.function_calling,
  ),
  ],
  ),
- # DeepSeek R1
+ # Claude 3.7 Sonnet Thinking
  KilnModel(
- family=ModelFamily.deepseek,
- name=ModelName.deepseek_r1,
- friendly_name="DeepSeek R1",
+ family=ModelFamily.claude,
+ name=ModelName.claude_3_7_sonnet_thinking,
+ friendly_name="Claude 3.7 Sonnet Thinking",
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "deepseek/deepseek-r1"},
- # No custom parser -- openrouter implemented it themselves
- structured_output_mode=StructuredOutputMode.json_instructions,
+ model_id="anthropic/claude-3.7-sonnet:thinking",
  reasoning_capable=True,
- ),
- KilnModelProvider(
- name=ModelProviderName.fireworks_ai,
- provider_options={"model": "accounts/fireworks/models/deepseek-r1"},
- parser=ModelParserID.r1_thinking,
+ # For reasoning models, we need to use json_instructions with OpenRouter
  structured_output_mode=StructuredOutputMode.json_instructions,
- reasoning_capable=True,
+ require_openrouter_reasoning=True,
  ),
  KilnModelProvider(
- # I want your RAM
- name=ModelProviderName.ollama,
- provider_options={"model": "deepseek-r1:671b"},
- parser=ModelParserID.r1_thinking,
- structured_output_mode=StructuredOutputMode.json_instructions,
+ name=ModelProviderName.anthropic,
  reasoning_capable=True,
+ model_id="claude-3-7-sonnet-20250219",
+ anthropic_extended_thinking=True,
+ structured_output_mode=StructuredOutputMode.json_instructions,
  ),
  ],
  ),
@@ -259,8 +448,18 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "google/gemini-pro-1.5"},
- structured_output_mode=StructuredOutputMode.json_schema,
+ model_id="google/gemini-pro-1.5",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.gemini_api,
+ model_id="gemini-1.5-pro",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="gemini-1.5-pro",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  ],
  ),
@@ -272,8 +471,18 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "google/gemini-flash-1.5"},
- structured_output_mode=StructuredOutputMode.json_schema,
+ model_id="google/gemini-flash-1.5",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.gemini_api,
+ model_id="gemini-1.5-flash",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="gemini-1.5-flash",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  ],
  ),
@@ -285,8 +494,15 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "google/gemini-flash-1.5-8b"},
- structured_output_mode=StructuredOutputMode.json_mode,
+ model_id="google/gemini-flash-1.5-8b",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.gemini_api,
+ model_id="gemini-1.5-flash-8b",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_data_gen=False,
  ),
  ],
  ),
@@ -298,8 +514,18 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "google/gemini-2.0-flash-001"},
+ model_id="google/gemini-2.0-flash-001",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.gemini_api,
+ model_id="gemini-2.0-flash",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="gemini-2.0-flash",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  ],
  ),
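The Gemini entries above consistently move from json_schema (or json_mode) to json_instruction_and_object. Judging from the enum names and comments elsewhere in this diff ("JSON mode not ideal (no schema)"), the modes trade provider-side schema enforcement for schema instructions in the prompt. An illustrative dispatch, inferred from those names rather than taken from the package source:

def response_format_params(mode: StructuredOutputMode, schema: dict) -> dict:
    if mode == StructuredOutputMode.json_schema:
        # Provider enforces the full schema (OpenAI-style structured outputs).
        return {"response_format": {"type": "json_schema", "json_schema": {"name": "output", "schema": schema}}}
    if mode in (StructuredOutputMode.json_instruction_and_object, StructuredOutputMode.json_mode):
        # Schema is described in the prompt; provider only guarantees valid JSON.
        return {"response_format": {"type": "json_object"}}
    # json_instructions and the function_calling modes take other paths.
    return {}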
@@ -313,7 +539,7 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.openrouter,
  supports_structured_output=False,
  supports_data_gen=False,
- provider_options={"model": "nvidia/llama-3.1-nemotron-70b-instruct"},
+ model_id="nvidia/llama-3.1-nemotron-70b-instruct",
  ),
  ],
  ),
@@ -325,39 +551,40 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.groq,
- provider_options={"model": "llama-3.1-8b-instant"},
+ model_id="llama-3.1-8b-instant",
  ),
  KilnModelProvider(
  name=ModelProviderName.amazon_bedrock,
  structured_output_mode=StructuredOutputMode.json_schema,
- supports_data_gen=False,
- provider_options={
- "model": "meta.llama3-1-8b-instruct-v1:0",
- "region_name": "us-west-2", # Llama 3.1 only in west-2
- },
+ supports_structured_output=False,
+ model_id="meta.llama3-1-8b-instruct-v1:0",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={
- "model": "llama3.1:8b",
- "model_aliases": ["llama3.1"], # 8b is default
- },
+ model_id="llama3.1:8b",
+ ollama_model_aliases=["llama3.1"], # 8b is default
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  supports_data_gen=False,
  structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={"model": "meta-llama/llama-3.1-8b-instruct"},
+ model_id="meta-llama/llama-3.1-8b-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
- structured_output_mode=StructuredOutputMode.json_mode,
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_data_gen=False,
  provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p1-8b-instruct"
- },
+ model_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+ supports_data_gen=False,
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ provider_finetune_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
  ),
  ],
  ),
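The Ollama entry above carries the "# 8b is default" note: the same local model answers to both the llama3.1:8b tag and the bare llama3.1 tag, which is what the new ollama_model_aliases field records. A small sketch of the matching this presumably enables (helper name assumed, not from the package):

def matches_ollama_tag(provider: KilnModelProvider, tag: str) -> bool:
    # True when a locally pulled Ollama tag corresponds to this provider entry.
    if provider.model_id == tag:
        return True
    return tag in (provider.ollama_model_aliases or [])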
@@ -371,30 +598,35 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.amazon_bedrock,
  structured_output_mode=StructuredOutputMode.json_schema,
  supports_data_gen=False,
- provider_options={
- "model": "meta.llama3-1-70b-instruct-v1:0",
- "region_name": "us-west-2", # Llama 3.1 only in west-2
- },
+ model_id="meta.llama3-1-70b-instruct-v1:0",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  supports_data_gen=False,
- structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={"model": "meta-llama/llama-3.1-70b-instruct"},
+ # Need to not pass "strict=True" to the function call to get this to work with logprobs for some reason. Openrouter issue.
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ model_id="meta-llama/llama-3.1-70b-instruct",
+ supports_logprobs=True,
+ logprobs_openrouter_options=True,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "llama3.1:70b"},
+ model_id="llama3.1:70b",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
- structured_output_mode=StructuredOutputMode.function_calling,
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
  provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p1-70b-instruct"
- },
+ model_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ supports_data_gen=False,
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ provider_finetune_id="meta-llama/Meta-Llama-3.1-70B-Instruct",
  ),
  ],
  ),
@@ -408,28 +640,29 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.amazon_bedrock,
  structured_output_mode=StructuredOutputMode.json_schema,
  supports_data_gen=False,
- provider_options={
- "model": "meta.llama3-1-405b-instruct-v1:0",
- "region_name": "us-west-2", # Llama 3.1 only in west-2
- },
+ model_id="meta.llama3-1-405b-instruct-v1:0",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "llama3.1:405b"},
+ model_id="llama3.1:405b",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={"model": "meta-llama/llama-3.1-405b-instruct"},
+ model_id="meta-llama/llama-3.1-405b-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
- structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p1-405b-instruct"
- },
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ model_id="accounts/fireworks/models/llama-v3p1-405b-instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ supports_data_gen=False,
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
  ),
  ],
  ),
@@ -441,7 +674,7 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "mistralai/mistral-nemo"},
+ model_id="mistralai/mistral-nemo",
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  ],
@@ -454,21 +687,18 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.amazon_bedrock,
- structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={
- "model": "mistral.mistral-large-2407-v1:0",
- "region_name": "us-west-2", # only in west-2
- },
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ model_id="mistral.mistral-large-2407-v1:0",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "mistralai/mistral-large"},
+ model_id="mistralai/mistral-large",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "mistral-large"},
+ model_id="mistral-large",
  ),
  ],
  ),
@@ -480,7 +710,8 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.groq,
- provider_options={"model": "llama-3.2-1b-preview"},
+ model_id="llama-3.2-1b-preview",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  supports_data_gen=False,
  ),
  KilnModelProvider(
@@ -488,13 +719,23 @@ built_in_models: List[KilnModel] = [
  supports_structured_output=False,
  supports_data_gen=False,
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
- provider_options={"model": "meta-llama/llama-3.2-1b-instruct"},
+ model_id="meta-llama/llama-3.2-1b-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  supports_structured_output=False,
  supports_data_gen=False,
- provider_options={"model": "llama3.2:1b"},
+ model_id="llama3.2:1b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.huggingface,
+ model_id="meta-llama/Llama-3.2-1B-Instruct",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ provider_finetune_id="meta-llama/Llama-3.2-1B-Instruct",
  ),
  ],
  ),
@@ -506,7 +747,7 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.groq,
- provider_options={"model": "llama-3.2-3b-preview"},
+ model_id="llama-3.2-3b-preview",
  supports_data_gen=False,
  ),
  KilnModelProvider(
@@ -514,20 +755,32 @@ built_in_models: List[KilnModel] = [
  supports_structured_output=False,
  supports_data_gen=False,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "meta-llama/llama-3.2-3b-instruct"},
+ model_id="meta-llama/llama-3.2-3b-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  supports_data_gen=False,
- provider_options={"model": "llama3.2"},
+ model_id="llama3.2",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
- structured_output_mode=StructuredOutputMode.json_mode,
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p2-3b-instruct"
- },
+ supports_structured_output=False,
+ supports_data_gen=False,
+ model_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.huggingface,
+ model_id="meta-llama/Llama-3.2-3B-Instruct",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Llama-3.2-3B-Instruct-Turbo",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ provider_finetune_id="meta-llama/Llama-3.2-3B-Instruct",
  ),
  ],
  ),
@@ -539,25 +792,39 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.groq,
- provider_options={"model": "llama-3.2-11b-vision-preview"},
+ model_id="llama-3.2-11b-vision-preview",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "meta-llama/llama-3.2-11b-vision-instruct"},
+ # Best mode, but fails to often to enable without warning
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_structured_output=False,
+ supports_data_gen=False,
+ model_id="meta-llama/llama-3.2-11b-vision-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "llama3.2-vision"},
+ model_id="llama3.2-vision",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct"
- },
- structured_output_mode=StructuredOutputMode.json_mode,
+ model_id="accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.huggingface,
+ model_id="meta-llama/Llama-3.2-11B-Vision-Instruct",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+ supports_structured_output=False,
+ supports_data_gen=False,
  ),
  ],
  ),
@@ -569,25 +836,30 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.groq,
- provider_options={"model": "llama-3.2-90b-vision-preview"},
+ model_id="llama-3.2-90b-vision-preview",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "meta-llama/llama-3.2-90b-vision-instruct"},
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ model_id="meta-llama/llama-3.2-90b-vision-instruct",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "llama3.2-vision:90b"},
+ model_id="llama3.2-vision:90b",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct"
- },
- structured_output_mode=StructuredOutputMode.json_mode,
+ model_id="accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ supports_structured_output=False,
+ supports_data_gen=False,
  ),
  ],
  ),
@@ -599,7 +871,7 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "meta-llama/llama-3.3-70b-instruct"},
+ model_id="meta-llama/llama-3.3-70b-instruct",
  structured_output_mode=StructuredOutputMode.json_schema,
  # Openrouter not working with json_schema or tools. JSON_schema sometimes works so force that, but not consistently so still not recommended.
  supports_structured_output=False,
@@ -609,22 +881,32 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.groq,
  supports_structured_output=True,
  supports_data_gen=True,
- provider_options={"model": "llama-3.3-70b-versatile"},
+ model_id="llama-3.3-70b-versatile",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "llama3.3"},
+ model_id="llama3.3",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # Finetuning not live yet
  # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
  # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
- structured_output_mode=StructuredOutputMode.function_calling,
- provider_options={
- "model": "accounts/fireworks/models/llama-v3p3-70b-instruct"
- },
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ model_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.vertex,
+ model_id="meta/llama-3.3-70b-instruct-maas",
+ # Doesn't work, TODO to debug
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="meta-llama/Llama-3.3-70B-Instruct-Turbo",
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
  ),
  ],
  ),
@@ -639,23 +921,21 @@ built_in_models: List[KilnModel] = [
  structured_output_mode=StructuredOutputMode.json_schema,
  supports_structured_output=False,
  supports_data_gen=False,
- provider_options={"model": "phi3.5"},
+ model_id="phi3.5",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  supports_structured_output=False,
  supports_data_gen=False,
- provider_options={"model": "microsoft/phi-3.5-mini-128k-instruct"},
+ model_id="microsoft/phi-3.5-mini-128k-instruct",
  structured_output_mode=StructuredOutputMode.json_schema,
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
  # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
- structured_output_mode=StructuredOutputMode.json_mode,
+ supports_structured_output=False,
  supports_data_gen=False,
- provider_options={
- "model": "accounts/fireworks/models/phi-3-vision-128k-instruct"
- },
+ model_id="accounts/fireworks/models/phi-3-vision-128k-instruct",
  ),
  ],
  ),
@@ -663,19 +943,45 @@ built_in_models: List[KilnModel] = [
  KilnModel(
  family=ModelFamily.phi,
  name=ModelName.phi_4,
- friendly_name="Phi 4",
+ friendly_name="Phi 4 - 14B",
  providers=[
  KilnModelProvider(
  name=ModelProviderName.ollama,
  structured_output_mode=StructuredOutputMode.json_schema,
- provider_options={"model": "phi4"},
+ model_id="phi4",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  # JSON mode not consistent enough to enable in UI
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  supports_data_gen=False,
- provider_options={"model": "microsoft/phi-4"},
+ model_id="microsoft/phi-4",
+ ),
+ ],
+ ),
+ # Phi 4 5.6B
+ KilnModel(
+ family=ModelFamily.phi,
+ name=ModelName.phi_4_5p6b,
+ friendly_name="Phi 4 - 5.6B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ model_id="microsoft/phi-4-multimodal-instruct",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ ],
+ ),
+ # Phi 4 Mini
+ KilnModel(
+ family=ModelFamily.phi,
+ name=ModelName.phi_4_mini,
+ friendly_name="Phi 4 Mini - 3.8B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="phi4-mini",
  ),
  ],
  ),
@@ -688,9 +994,7 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.ollama,
  supports_data_gen=False,
- provider_options={
- "model": "gemma2:2b",
- },
+ model_id="gemma2:2b",
  ),
  ],
  ),
@@ -703,15 +1007,15 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.ollama,
  supports_data_gen=False,
- provider_options={
- "model": "gemma2:9b",
- },
+ model_id="gemma2:9b",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
+ # Best mode, but fails to often to enable without warning
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_structured_output=False,
  supports_data_gen=False,
- provider_options={"model": "google/gemma-2-9b-it"},
+ model_id="google/gemma-2-9b-it",
  ),
  # fireworks AI errors - not allowing system role. Exclude until resolved.
  ],
@@ -725,15 +1029,93 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.ollama,
  supports_data_gen=False,
- provider_options={
- "model": "gemma2:27b",
- },
+ model_id="gemma2:27b",
  ),
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  supports_data_gen=False,
- provider_options={"model": "google/gemma-2-27b-it"},
+ model_id="google/gemma-2-27b-it",
+ ),
+ ],
+ ),
+ # Gemma 3 1B
+ KilnModel(
+ family=ModelFamily.gemma,
+ name=ModelName.gemma_3_1b,
+ friendly_name="Gemma 3 1B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="gemma3:1b",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ # TODO: swap to non-free model when available (more reliable)
+ model_id="google/gemma-3-1b-it:free",
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ ],
+ ),
+ # Gemma 3 4B
+ KilnModel(
+ family=ModelFamily.gemma,
+ name=ModelName.gemma_3_4b,
+ friendly_name="Gemma 3 4B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="gemma3:4b",
+ ollama_model_aliases=["gemma3"],
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ # TODO: swap to non-free model when available (more reliable)
+ model_id="google/gemma-3-4b-it:free",
+ ),
+ ],
+ ),
+ # Gemma 3 12B
+ KilnModel(
+ family=ModelFamily.gemma,
+ name=ModelName.gemma_3_12b,
+ friendly_name="Gemma 3 12B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="gemma3:12b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ # TODO: swap to non-free model when available (more reliable)
+ model_id="google/gemma-3-12b-it:free",
+ ),
+ ],
+ ),
+ # Gemma 3 27B
+ KilnModel(
+ family=ModelFamily.gemma,
+ name=ModelName.gemma_3_27b,
+ friendly_name="Gemma 3 27B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="gemma3:27b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ model_id="google/gemma-3-27b-it",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.huggingface,
+ model_id="google/gemma-3-27b-it",
+ structured_output_mode=StructuredOutputMode.json_instructions,
  ),
  ],
  ),
@@ -745,13 +1127,58 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "mistralai/mixtral-8x7b-instruct"},
+ model_id="mistralai/mixtral-8x7b-instruct",
  supports_data_gen=False,
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
- provider_options={"model": "mixtral"},
+ model_id="mixtral",
+ ),
+ ],
+ ),
+ # QwQ 32B
+ KilnModel(
+ family=ModelFamily.qwen,
+ name=ModelName.qwq_32b,
+ friendly_name="QwQ 32B (Qwen Reasoning)",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ model_id="qwen/qwq-32b",
+ reasoning_capable=True,
+ require_openrouter_reasoning=True,
+ r1_openrouter_options=True,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.fireworks_ai,
+ model_id="accounts/fireworks/models/qwq-32b",
+ reasoning_capable=True,
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="qwq",
+ reasoning_capable=True,
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.groq,
+ model_id="qwen-qwq-32b",
+ reasoning_capable=True,
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="Qwen/QwQ-32B",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
+ reasoning_capable=True,
  ),
  ],
  ),
@@ -763,12 +1190,29 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "qwen/qwen-2.5-7b-instruct"},
+ model_id="qwen/qwen-2.5-7b-instruct",
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
- provider_options={"model": "qwen2.5"},
+ model_id="qwen2.5",
+ ),
+ ],
+ ),
+ # Qwen 2.5 14B
+ KilnModel(
+ family=ModelFamily.qwen,
+ name=ModelName.qwen_2p5_14b,
+ friendly_name="Qwen 2.5 14B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ provider_finetune_id="Qwen/Qwen2.5-14B-Instruct",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ model_id="qwen2.5:14b",
+ supports_data_gen=False,
  ),
  ],
  ),
@@ -780,7 +1224,7 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
- provider_options={"model": "qwen/qwen-2.5-72b-instruct"},
+ model_id="qwen/qwen-2.5-72b-instruct",
  # Not consistent with structure data. Works sometimes but not often
  supports_structured_output=False,
  supports_data_gen=False,
@@ -788,17 +1232,17 @@ built_in_models: List[KilnModel] = [
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
- provider_options={"model": "qwen2.5:72b"},
+ model_id="qwen2.5:72b",
  ),
  KilnModelProvider(
  name=ModelProviderName.fireworks_ai,
- provider_options={
- "model": "accounts/fireworks/models/qwen2p5-72b-instruct"
- },
- # Fireworks will start tuning, but it never finishes.
- # provider_finetune_id="accounts/fireworks/models/qwen2p5-72b-instruct",
+ model_id="accounts/fireworks/models/qwen2p5-72b-instruct",
  # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
- structured_output_mode=StructuredOutputMode.function_calling,
+ structured_output_mode=StructuredOutputMode.function_calling_weak,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ provider_finetune_id="Qwen/Qwen2.5-72B-Instruct",
  ),
  ],
  ),
@@ -811,11 +1255,75 @@ built_in_models: List[KilnModel] = [
  KilnModelProvider(
  name=ModelProviderName.openrouter,
  structured_output_mode=StructuredOutputMode.json_instruction_and_object,
- provider_options={"model": "mistralai/mistral-small-24b-instruct-2501"},
+ model_id="mistralai/mistral-small-24b-instruct-2501",
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
- provider_options={"model": "mistral-small:24b"},
+ model_id="mistral-small:24b",
+ ),
+ ],
+ ),
+ # DeepSeek 3
+ KilnModel(
+ family=ModelFamily.deepseek,
+ name=ModelName.deepseek_3,
+ friendly_name="DeepSeek V3",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ model_id="deepseek/deepseek-chat",
+ structured_output_mode=StructuredOutputMode.function_calling,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.fireworks_ai,
+ model_id="accounts/fireworks/models/deepseek-v3",
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ supports_structured_output=True,
+ supports_data_gen=False,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-V3",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ ),
+ ],
+ ),
+ # DeepSeek R1
+ KilnModel(
+ family=ModelFamily.deepseek,
+ name=ModelName.deepseek_r1,
+ friendly_name="DeepSeek R1",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ model_id="deepseek/deepseek-r1",
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ reasoning_capable=True,
+ r1_openrouter_options=True,
+ require_openrouter_reasoning=True,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.fireworks_ai,
+ model_id="accounts/fireworks/models/deepseek-r1",
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ reasoning_capable=True,
+ ),
+ KilnModelProvider(
+ # I want your RAM
+ name=ModelProviderName.ollama,
+ model_id="deepseek-r1:671b",
+ parser=ModelParserID.r1_thinking,
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ reasoning_capable=True,
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-R1",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
+ reasoning_capable=True,
  ),
  ],
  ),
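The DeepSeek R1 providers pair reasoning_capable=True with parser=ModelParserID.r1_thinking; per the KilnModelProvider docstring earlier in this diff, such models return thinking and the final answer in a single completion, with the thinking wrapped in <think> tags. A minimal sketch of what an R1-style parser does (illustrative only; the release's actual implementation lives in kiln_ai/adapters/parsers/r1_parser.py, also changed in this version):

import re

def split_r1_output(raw: str) -> tuple[str | None, str]:
    # Separate the <think>...</think> reasoning block from the final answer text.
    match = re.search(r"<think>(.*?)</think>", raw, re.DOTALL)
    if match is None:
        return None, raw.strip()
    return match.group(1).strip(), raw[match.end():].strip()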
@@ -829,14 +1337,24 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.openrouter,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek/deepseek-r1-distill-qwen-32b"},
+ model_id="deepseek/deepseek-r1-distill-qwen-32b",
+ r1_openrouter_options=True,
+ parser=ModelParserID.r1_thinking,
+ require_openrouter_reasoning=True,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:32b"},
+ model_id="deepseek-r1:32b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
+ reasoning_capable=True,
  ),
  ],
  ),
@@ -850,7 +1368,10 @@ built_in_models: List[KilnModel] = [
  name=ModelProviderName.openrouter,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek/deepseek-r1-distill-llama-70b"},
+ model_id="deepseek/deepseek-r1-distill-llama-70b",
+ r1_openrouter_options=True,
+ require_openrouter_reasoning=True,
+ parser=ModelParserID.r1_thinking,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
@@ -858,7 +1379,13 @@ built_in_models: List[KilnModel] = [
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:70b"},
+ model_id="deepseek-r1:70b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
  ),
  ],
  ),
@@ -873,7 +1400,11 @@ built_in_models: List[KilnModel] = [
  supports_data_gen=False,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek/deepseek-r1-distill-qwen-14b"},
+ model_id="deepseek/deepseek-r1-distill-qwen-14b",
+ r1_openrouter_options=True,
+ require_openrouter_reasoning=True,
+ openrouter_skip_required_parameters=True,
+ parser=ModelParserID.r1_thinking,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
@@ -881,7 +1412,13 @@ built_in_models: List[KilnModel] = [
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:14b"},
+ model_id="deepseek-r1:14b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
  ),
  ],
  ),
@@ -893,18 +1430,26 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.openrouter,
+ supports_structured_output=False,
  supports_data_gen=False,
  reasoning_capable=True,
+ # Best mode, but fails to often to enable without warning
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek/deepseek-r1-distill-llama-8b"},
+ model_id="deepseek/deepseek-r1-distill-llama-8b",
+ r1_openrouter_options=True,
+ require_openrouter_reasoning=True,
+ openrouter_skip_required_parameters=True,
+ parser=ModelParserID.r1_thinking,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
+ supports_structured_output=False,
  supports_data_gen=False,
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
+ # Best mode, but fails to often to enable without warning
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:8b"},
+ model_id="deepseek-r1:8b",
  ),
  ],
  ),
@@ -916,11 +1461,13 @@ built_in_models: List[KilnModel] = [
  providers=[
  KilnModelProvider(
  name=ModelProviderName.ollama,
+ # Best mode, but fails to often to enable without warning
+ supports_structured_output=False,
  supports_data_gen=False,
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:7b"},
+ model_id="deepseek-r1:7b",
  ),
  ],
  ),
@@ -936,15 +1483,63 @@ built_in_models: List[KilnModel] = [
  supports_data_gen=False,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek/deepseek-r1-distill-qwen-1.5b"},
+ model_id="deepseek/deepseek-r1-distill-qwen-1.5b",
+ r1_openrouter_options=True,
+ require_openrouter_reasoning=True,
+ openrouter_skip_required_parameters=True,
+ parser=ModelParserID.r1_thinking,
  ),
  KilnModelProvider(
  name=ModelProviderName.ollama,
+ supports_structured_output=False,
  supports_data_gen=False,
  parser=ModelParserID.r1_thinking,
  reasoning_capable=True,
  structured_output_mode=StructuredOutputMode.json_instructions,
- provider_options={"model": "deepseek-r1:1.5b"},
+ model_id="deepseek-r1:1.5b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.together_ai,
+ model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ structured_output_mode=StructuredOutputMode.json_instructions,
+ parser=ModelParserID.r1_thinking,
+ supports_structured_output=False,
+ supports_data_gen=False,
+ ),
+ ],
+ ),
+ # Dolphin 2.9 Mixtral 8x22B
+ KilnModel(
+ family=ModelFamily.dolphin,
+ name=ModelName.dolphin_2_9_8x22b,
+ friendly_name="Dolphin 2.9 8x22B",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.ollama,
+ structured_output_mode=StructuredOutputMode.json_schema,
+ supports_data_gen=True,
+ model_id="dolphin-mixtral:8x22b",
+ ),
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ supports_data_gen=True,
+ structured_output_mode=StructuredOutputMode.json_instruction_and_object,
+ model_id="cognitivecomputations/dolphin-mixtral-8x22b",
+ ),
+ ],
+ ),
+ # Grok 2
+ KilnModel(
+ family=ModelFamily.grok,
+ name=ModelName.grok_2,
+ friendly_name="Grok 2",
+ providers=[
+ KilnModelProvider(
+ name=ModelProviderName.openrouter,
+ model_id="x-ai/grok-2-1212",
+ supports_structured_output=True,
+ supports_data_gen=True,
+ structured_output_mode=StructuredOutputMode.json_schema,
  ),
  ],
  ),