kiln-ai 0.19.0__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic by the registry; see the registry listing for details.

Files changed (158)
  1. kiln_ai/adapters/__init__.py +8 -2
  2. kiln_ai/adapters/adapter_registry.py +43 -208
  3. kiln_ai/adapters/chat/chat_formatter.py +8 -12
  4. kiln_ai/adapters/chat/test_chat_formatter.py +6 -2
  5. kiln_ai/adapters/chunkers/__init__.py +13 -0
  6. kiln_ai/adapters/chunkers/base_chunker.py +42 -0
  7. kiln_ai/adapters/chunkers/chunker_registry.py +16 -0
  8. kiln_ai/adapters/chunkers/fixed_window_chunker.py +39 -0
  9. kiln_ai/adapters/chunkers/helpers.py +23 -0
  10. kiln_ai/adapters/chunkers/test_base_chunker.py +63 -0
  11. kiln_ai/adapters/chunkers/test_chunker_registry.py +28 -0
  12. kiln_ai/adapters/chunkers/test_fixed_window_chunker.py +346 -0
  13. kiln_ai/adapters/chunkers/test_helpers.py +75 -0
  14. kiln_ai/adapters/data_gen/test_data_gen_task.py +9 -3
  15. kiln_ai/adapters/docker_model_runner_tools.py +119 -0
  16. kiln_ai/adapters/embedding/__init__.py +0 -0
  17. kiln_ai/adapters/embedding/base_embedding_adapter.py +44 -0
  18. kiln_ai/adapters/embedding/embedding_registry.py +32 -0
  19. kiln_ai/adapters/embedding/litellm_embedding_adapter.py +199 -0
  20. kiln_ai/adapters/embedding/test_base_embedding_adapter.py +283 -0
  21. kiln_ai/adapters/embedding/test_embedding_registry.py +166 -0
  22. kiln_ai/adapters/embedding/test_litellm_embedding_adapter.py +1149 -0
  23. kiln_ai/adapters/eval/base_eval.py +2 -2
  24. kiln_ai/adapters/eval/eval_runner.py +9 -3
  25. kiln_ai/adapters/eval/g_eval.py +2 -2
  26. kiln_ai/adapters/eval/test_base_eval.py +2 -4
  27. kiln_ai/adapters/eval/test_g_eval.py +4 -5
  28. kiln_ai/adapters/extractors/__init__.py +18 -0
  29. kiln_ai/adapters/extractors/base_extractor.py +72 -0
  30. kiln_ai/adapters/extractors/encoding.py +20 -0
  31. kiln_ai/adapters/extractors/extractor_registry.py +44 -0
  32. kiln_ai/adapters/extractors/extractor_runner.py +112 -0
  33. kiln_ai/adapters/extractors/litellm_extractor.py +386 -0
  34. kiln_ai/adapters/extractors/test_base_extractor.py +244 -0
  35. kiln_ai/adapters/extractors/test_encoding.py +54 -0
  36. kiln_ai/adapters/extractors/test_extractor_registry.py +181 -0
  37. kiln_ai/adapters/extractors/test_extractor_runner.py +181 -0
  38. kiln_ai/adapters/extractors/test_litellm_extractor.py +1192 -0
  39. kiln_ai/adapters/fine_tune/__init__.py +1 -1
  40. kiln_ai/adapters/fine_tune/openai_finetune.py +14 -4
  41. kiln_ai/adapters/fine_tune/test_dataset_formatter.py +2 -2
  42. kiln_ai/adapters/fine_tune/test_fireworks_finetune.py +2 -6
  43. kiln_ai/adapters/fine_tune/test_openai_finetune.py +108 -111
  44. kiln_ai/adapters/fine_tune/test_together_finetune.py +2 -6
  45. kiln_ai/adapters/ml_embedding_model_list.py +192 -0
  46. kiln_ai/adapters/ml_model_list.py +761 -37
  47. kiln_ai/adapters/model_adapters/base_adapter.py +51 -21
  48. kiln_ai/adapters/model_adapters/litellm_adapter.py +380 -138
  49. kiln_ai/adapters/model_adapters/test_base_adapter.py +193 -17
  50. kiln_ai/adapters/model_adapters/test_litellm_adapter.py +407 -2
  51. kiln_ai/adapters/model_adapters/test_litellm_adapter_tools.py +1103 -0
  52. kiln_ai/adapters/model_adapters/test_saving_adapter_results.py +5 -5
  53. kiln_ai/adapters/model_adapters/test_structured_output.py +113 -5
  54. kiln_ai/adapters/ollama_tools.py +69 -12
  55. kiln_ai/adapters/parsers/__init__.py +1 -1
  56. kiln_ai/adapters/provider_tools.py +205 -47
  57. kiln_ai/adapters/rag/deduplication.py +49 -0
  58. kiln_ai/adapters/rag/progress.py +252 -0
  59. kiln_ai/adapters/rag/rag_runners.py +844 -0
  60. kiln_ai/adapters/rag/test_deduplication.py +195 -0
  61. kiln_ai/adapters/rag/test_progress.py +785 -0
  62. kiln_ai/adapters/rag/test_rag_runners.py +2376 -0
  63. kiln_ai/adapters/remote_config.py +80 -8
  64. kiln_ai/adapters/repair/test_repair_task.py +12 -9
  65. kiln_ai/adapters/run_output.py +3 -0
  66. kiln_ai/adapters/test_adapter_registry.py +657 -85
  67. kiln_ai/adapters/test_docker_model_runner_tools.py +305 -0
  68. kiln_ai/adapters/test_ml_embedding_model_list.py +429 -0
  69. kiln_ai/adapters/test_ml_model_list.py +251 -1
  70. kiln_ai/adapters/test_ollama_tools.py +340 -1
  71. kiln_ai/adapters/test_prompt_adaptors.py +13 -6
  72. kiln_ai/adapters/test_prompt_builders.py +1 -1
  73. kiln_ai/adapters/test_provider_tools.py +254 -8
  74. kiln_ai/adapters/test_remote_config.py +651 -58
  75. kiln_ai/adapters/vector_store/__init__.py +1 -0
  76. kiln_ai/adapters/vector_store/base_vector_store_adapter.py +83 -0
  77. kiln_ai/adapters/vector_store/lancedb_adapter.py +389 -0
  78. kiln_ai/adapters/vector_store/test_base_vector_store.py +160 -0
  79. kiln_ai/adapters/vector_store/test_lancedb_adapter.py +1841 -0
  80. kiln_ai/adapters/vector_store/test_vector_store_registry.py +199 -0
  81. kiln_ai/adapters/vector_store/vector_store_registry.py +33 -0
  82. kiln_ai/datamodel/__init__.py +39 -34
  83. kiln_ai/datamodel/basemodel.py +170 -1
  84. kiln_ai/datamodel/chunk.py +158 -0
  85. kiln_ai/datamodel/datamodel_enums.py +28 -0
  86. kiln_ai/datamodel/embedding.py +64 -0
  87. kiln_ai/datamodel/eval.py +1 -1
  88. kiln_ai/datamodel/external_tool_server.py +298 -0
  89. kiln_ai/datamodel/extraction.py +303 -0
  90. kiln_ai/datamodel/json_schema.py +25 -10
  91. kiln_ai/datamodel/project.py +40 -1
  92. kiln_ai/datamodel/rag.py +79 -0
  93. kiln_ai/datamodel/registry.py +0 -15
  94. kiln_ai/datamodel/run_config.py +62 -0
  95. kiln_ai/datamodel/task.py +2 -77
  96. kiln_ai/datamodel/task_output.py +6 -1
  97. kiln_ai/datamodel/task_run.py +41 -0
  98. kiln_ai/datamodel/test_attachment.py +649 -0
  99. kiln_ai/datamodel/test_basemodel.py +4 -4
  100. kiln_ai/datamodel/test_chunk_models.py +317 -0
  101. kiln_ai/datamodel/test_dataset_split.py +1 -1
  102. kiln_ai/datamodel/test_embedding_models.py +448 -0
  103. kiln_ai/datamodel/test_eval_model.py +6 -6
  104. kiln_ai/datamodel/test_example_models.py +175 -0
  105. kiln_ai/datamodel/test_external_tool_server.py +691 -0
  106. kiln_ai/datamodel/test_extraction_chunk.py +206 -0
  107. kiln_ai/datamodel/test_extraction_model.py +470 -0
  108. kiln_ai/datamodel/test_rag.py +641 -0
  109. kiln_ai/datamodel/test_registry.py +8 -3
  110. kiln_ai/datamodel/test_task.py +15 -47
  111. kiln_ai/datamodel/test_tool_id.py +320 -0
  112. kiln_ai/datamodel/test_vector_store.py +320 -0
  113. kiln_ai/datamodel/tool_id.py +105 -0
  114. kiln_ai/datamodel/vector_store.py +141 -0
  115. kiln_ai/tools/__init__.py +8 -0
  116. kiln_ai/tools/base_tool.py +82 -0
  117. kiln_ai/tools/built_in_tools/__init__.py +13 -0
  118. kiln_ai/tools/built_in_tools/math_tools.py +124 -0
  119. kiln_ai/tools/built_in_tools/test_math_tools.py +204 -0
  120. kiln_ai/tools/mcp_server_tool.py +95 -0
  121. kiln_ai/tools/mcp_session_manager.py +246 -0
  122. kiln_ai/tools/rag_tools.py +157 -0
  123. kiln_ai/tools/test_base_tools.py +199 -0
  124. kiln_ai/tools/test_mcp_server_tool.py +457 -0
  125. kiln_ai/tools/test_mcp_session_manager.py +1585 -0
  126. kiln_ai/tools/test_rag_tools.py +848 -0
  127. kiln_ai/tools/test_tool_registry.py +562 -0
  128. kiln_ai/tools/tool_registry.py +85 -0
  129. kiln_ai/utils/__init__.py +3 -0
  130. kiln_ai/utils/async_job_runner.py +62 -17
  131. kiln_ai/utils/config.py +24 -2
  132. kiln_ai/utils/env.py +15 -0
  133. kiln_ai/utils/filesystem.py +14 -0
  134. kiln_ai/utils/filesystem_cache.py +60 -0
  135. kiln_ai/utils/litellm.py +94 -0
  136. kiln_ai/utils/lock.py +100 -0
  137. kiln_ai/utils/mime_type.py +38 -0
  138. kiln_ai/utils/open_ai_types.py +94 -0
  139. kiln_ai/utils/pdf_utils.py +38 -0
  140. kiln_ai/utils/project_utils.py +17 -0
  141. kiln_ai/utils/test_async_job_runner.py +151 -35
  142. kiln_ai/utils/test_config.py +138 -1
  143. kiln_ai/utils/test_env.py +142 -0
  144. kiln_ai/utils/test_filesystem_cache.py +316 -0
  145. kiln_ai/utils/test_litellm.py +206 -0
  146. kiln_ai/utils/test_lock.py +185 -0
  147. kiln_ai/utils/test_mime_type.py +66 -0
  148. kiln_ai/utils/test_open_ai_types.py +131 -0
  149. kiln_ai/utils/test_pdf_utils.py +73 -0
  150. kiln_ai/utils/test_uuid.py +111 -0
  151. kiln_ai/utils/test_validation.py +524 -0
  152. kiln_ai/utils/uuid.py +9 -0
  153. kiln_ai/utils/validation.py +90 -0
  154. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/METADATA +12 -5
  155. kiln_ai-0.21.0.dist-info/RECORD +211 -0
  156. kiln_ai-0.19.0.dist-info/RECORD +0 -115
  157. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/WHEEL +0 -0
  158. {kiln_ai-0.19.0.dist-info → kiln_ai-0.21.0.dist-info}/licenses/LICENSE.txt +0 -0
kiln_ai/tools/test_mcp_server_tool.py (new file)
@@ -0,0 +1,457 @@
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from mcp.types import (
+    CallToolResult,
+    ContentBlock,
+    ImageContent,
+    ListToolsResult,
+    TextContent,
+    Tool,
+)
+
+from kiln_ai.datamodel.external_tool_server import ExternalToolServer, ToolServerType
+from kiln_ai.datamodel.tool_id import MCP_REMOTE_TOOL_ID_PREFIX
+from kiln_ai.tools.mcp_server_tool import MCPServerTool
+
+
+class TestMCPServerTool:
+    """Unit tests for MCPServerTool."""
+
+    @pytest.mark.asyncio
+    async def test_constructor(self):
+        """Test MCPServerTool initialization."""
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            description="Test server",
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+
+        tool = MCPServerTool(server, "test_tool")
+
+        # Check ID pattern - uses server's generated ID, not name
+        tool_id = await tool.id()
+        assert tool_id.startswith(MCP_REMOTE_TOOL_ID_PREFIX)
+        assert tool_id.endswith("::test_tool")
+        assert await tool.name() == "test_tool"
+        # Note: description() now loads properties, so we can't test "Not Loaded" state
+        # Instead we verify that _tool is initially None before properties are loaded
+        assert tool._tool_server_model == server
+        assert tool._tool is None
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_run_success(self, mock_session_manager):
+        """Test successful run() execution."""
+        # Setup mocks
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        result_content = [TextContent(type="text", text="Success result")]
+        call_result = CallToolResult(content=result_content, isError=False)  # type: ignore
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        result = await tool.run(param1="value1", param2="value2")
+
+        assert result == "Success result"
+        mock_session.call_tool.assert_called_once_with(
+            name="test_tool", arguments={"param1": "value1", "param2": "value2"}
+        )
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_run_empty_content(self, mock_session_manager):
+        """Test run() with empty content raises ValueError."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        call_result = CallToolResult(
+            content=list[ContentBlock]([]),
+            isError=False,  # type: ignore
+        )
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        with pytest.raises(ValueError, match="Tool returned no content"):
+            await tool.run()
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_run_non_text_content_error(self, mock_session_manager):
+        """Test run() raises error when first content is not TextContent."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        result_content = [
+            ImageContent(type="image", data="base64data", mimeType="image/png")
+        ]
+        call_result = CallToolResult(content=result_content, isError=False)  # type: ignore
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        with pytest.raises(ValueError, match="First block must be a text block"):
+            await tool.run()
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_run_error_result(self, mock_session_manager):
+        """Test run() raises error when tool returns isError=True."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        result_content = [TextContent(type="text", text="Error occurred")]
+        call_result = CallToolResult(
+            content=list[ContentBlock](result_content),
+            isError=True,  # type: ignore
+        )
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        with pytest.raises(ValueError, match="Tool test_tool returned an error"):
+            await tool.run()
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_run_multiple_content_blocks_error(self, mock_session_manager):
+        """Test run() raises error when tool returns multiple content blocks."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        result_content = [
+            TextContent(type="text", text="First block"),
+            TextContent(type="text", text="Second block"),
+        ]
+        call_result = CallToolResult(content=result_content, isError=False)  # type: ignore
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        with pytest.raises(
+            ValueError, match="Tool returned multiple content blocks, expected one"
+        ):
+            await tool.run()
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_call_tool_success(self, mock_session_manager):
+        """Test _call_tool() method."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        result_content = [TextContent(type="text", text="Async result")]
+        call_result = CallToolResult(content=result_content, isError=False)  # type: ignore
+        mock_session.call_tool.return_value = call_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        result = await tool._call_tool(arg1="test", arg2=123)
+
+        assert result == call_result
+        mock_session.call_tool.assert_called_once_with(
+            name="test_tool", arguments={"arg1": "test", "arg2": 123}
+        )
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_get_tool_success(self, mock_session_manager):
+        """Test _get_tool() method finds tool successfully."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        # Mock tools list
+        target_tool = Tool(
+            name="target_tool",
+            description="Target tool description",
+            inputSchema={"type": "object", "properties": {"param": {"type": "string"}}},
+        )
+        other_tool = Tool(name="other_tool", description="Other tool", inputSchema={})
+
+        tools_result = ListToolsResult(tools=[other_tool, target_tool])
+        mock_session.list_tools.return_value = tools_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "target_tool")
+
+        result = await tool._get_tool("target_tool")
+
+        assert result == target_tool
+        mock_session.list_tools.assert_called_once()
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_get_tool_not_found(self, mock_session_manager):
+        """Test _get_tool() raises error when tool not found."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        # Mock tools list without target tool
+        other_tool = Tool(name="other_tool", description="Other tool", inputSchema={})
+        tools_result = ListToolsResult(tools=[other_tool])
+        mock_session.list_tools.return_value = tools_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "missing_tool")
+
+        with pytest.raises(ValueError, match="Tool missing_tool not found"):
+            await tool._get_tool("missing_tool")
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_load_tool_properties_success(self, mock_session_manager):
+        """Test _load_tool_properties() updates tool properties."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        # Mock tool with properties
+        tool_def = Tool(
+            name="test_tool",
+            description="Loaded tool description",
+            inputSchema={"type": "object", "properties": {"param": {"type": "string"}}},
+        )
+        tools_result = ListToolsResult(tools=[tool_def])
+        mock_session.list_tools.return_value = tools_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        # Verify initial state - _tool is None before loading
+        assert tool._tool is None
+
+        # After loading properties, verify state
+        description = await tool.description()
+        assert description == "Loaded tool description"
+        assert tool._parameters_schema == {
+            "type": "object",
+            "properties": {"param": {"type": "string"}},
+        }
+        assert tool._tool == tool_def
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_load_tool_properties_no_description(self, mock_session_manager):
+        """Test _load_tool_properties() handles missing description."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        # Mock tool without description
+        tool_def = Tool(name="test_tool", description=None, inputSchema={})
+        tools_result = ListToolsResult(tools=[tool_def])
+        mock_session.list_tools.return_value = tools_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        await tool._load_tool_properties()
+
+        assert await tool.description() == "N/A"
+
+    @pytest.mark.asyncio
+    @patch("kiln_ai.tools.mcp_server_tool.MCPSessionManager")
+    async def test_load_tool_properties_no_input_schema(self, mock_session_manager):
+        """Test _load_tool_properties() handles missing inputSchema."""
+        mock_session = AsyncMock()
+        mock_session_manager.shared.return_value.mcp_client.return_value.__aenter__.return_value = mock_session
+
+        # Mock tool without inputSchema - actually test with empty dict since None is not allowed
+        tool_def = Tool(name="test_tool", description="Test tool", inputSchema={})
+        tools_result = ListToolsResult(tools=[tool_def])
+        mock_session.list_tools.return_value = tools_result
+
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        await tool._load_tool_properties()
+
+        # Should be empty object for now, our JSON schema validation will fail if properties are missing
+        assert tool._parameters_schema == {"type": "object", "properties": {}}
+
+    @pytest.mark.asyncio
+    async def test_toolcall_definition(self):
+        """Test toolcall_definition() returns proper OpenAI format."""
+        server = ExternalToolServer(
+            name="test_server",
+            type=ToolServerType.remote_mcp,
+            properties={
+                "server_url": "https://example.com",
+                "headers": {},
+            },
+        )
+        tool = MCPServerTool(server, "test_tool")
+
+        # Update properties to test the definition
+        tool._description = "Test tool description"
+        tool._parameters_schema = {
+            "type": "object",
+            "properties": {
+                "param1": {"type": "string", "description": "First parameter"}
+            },
+            "required": ["param1"],
+        }
+        # Mark tool as loaded to avoid triggering _load_tool_properties()
+        from mcp.types import Tool as MCPTool
+
+        tool._tool = MCPTool(
+            name="test_tool", description="Test tool description", inputSchema={}
+        )
+
+        definition = await tool.toolcall_definition()
+
+        expected = {
+            "type": "function",
+            "function": {
+                "name": "test_tool",
+                "description": "Test tool description",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "param1": {"type": "string", "description": "First parameter"}
+                    },
+                    "required": ["param1"],
+                },
+            },
+        }
+
+        assert definition == expected
+
+
+class TestMCPServerToolIntegration:
+    """Integration tests for MCPServerTool using real services."""
+
+    external_tool_server = ExternalToolServer(
+        name="postman_echo",
+        type=ToolServerType.remote_mcp,
+        description="Postman Echo MCP Server for testing",
+        properties={
+            "server_url": "https://postman-echo-mcp.fly.dev/",
+            "headers": {},
+        },
+    )
+
+    @pytest.mark.skip(
+        reason="Skipping integration test since it requires calling a real MCP server"
+    )
+    async def test_call_tool_success(self):
+        """Test successful call_tool execution."""
+        # Create MCP server using Postman Echo MCP server with 'echo' tool
+        tool = MCPServerTool(self.external_tool_server, "echo")
+
+        test_message = "Hello, world!"
+        result = await tool._call_tool(message=test_message)
+
+        # First block should be TextContent
+        assert len(result.content) > 0
+        text_content = result.content[0]
+        assert isinstance(text_content, TextContent)
+        assert (
+            text_content.text == "Tool echo: " + test_message
+        )  # 'Tool echo: Hello, world!'
+
+    @pytest.mark.skip(
+        reason="Skipping integration test since it requires calling a real MCP server"
+    )
+    def test_tool_run(self):
+        tool = MCPServerTool(self.external_tool_server, "echo")
+        test_message = "Hello, world!"
+
+        run_result = tool.run(message=test_message)
+        assert run_result == "Tool echo: " + test_message
+
+    @pytest.mark.skip(
+        reason="Skipping integration test since it requires calling a real MCP server"
+    )
+    async def test_get_tool(self):
+        tool = MCPServerTool(self.external_tool_server, "echo")
+        mcp_tool = await tool._get_tool("echo")
+        assert mcp_tool.name == "echo"
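
Taken together, these tests outline the public surface of the new MCPServerTool adapter: it is constructed from an ExternalToolServer record plus a tool name, lazily fetches the tool's description and parameter schema from the remote MCP server, and exposes an async run() that returns the text of a single content block. The following is a minimal usage sketch based only on the calls exercised in the tests above; it is not part of the package diff, the server URL and tool name are placeholders, and it only runs against a reachable remote MCP server.

# Usage sketch for MCPServerTool (assumed from the tests above, not from package docs).
import asyncio

from kiln_ai.datamodel.external_tool_server import ExternalToolServer, ToolServerType
from kiln_ai.tools.mcp_server_tool import MCPServerTool


async def main() -> None:
    # Describe the remote MCP server the tool lives on.
    server = ExternalToolServer(
        name="example_server",
        type=ToolServerType.remote_mcp,
        description="Example remote MCP server",
        properties={
            "server_url": "https://example.com",  # placeholder, replace with a real MCP endpoint
            "headers": {},
        },
    )

    # Wrap one named tool exposed by that server ("echo" mirrors the integration tests).
    tool = MCPServerTool(server, "echo")

    # description() and toolcall_definition() load the tool's metadata from the
    # server on first use, per test_load_tool_properties_success above.
    print(await tool.description())
    print(await tool.toolcall_definition())

    # run() forwards keyword arguments to the MCP tool and returns the text of
    # the single TextContent block in the result.
    result = await tool.run(message="Hello, world!")
    print(result)


if __name__ == "__main__":
    asyncio.run(main())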