deepset-mcp 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepset_mcp/__init__.py +3 -4
- deepset_mcp/api/__init__.py +3 -0
- deepset_mcp/api/client.py +126 -107
- deepset_mcp/api/custom_components/__init__.py +3 -0
- deepset_mcp/api/custom_components/models.py +7 -8
- deepset_mcp/api/custom_components/protocols.py +4 -3
- deepset_mcp/api/custom_components/resource.py +39 -13
- deepset_mcp/api/haystack_service/__init__.py +3 -0
- deepset_mcp/api/haystack_service/protocols.py +21 -0
- deepset_mcp/api/haystack_service/resource.py +46 -0
- deepset_mcp/api/indexes/__init__.py +3 -0
- deepset_mcp/api/indexes/models.py +23 -11
- deepset_mcp/api/indexes/protocols.py +13 -4
- deepset_mcp/api/indexes/resource.py +86 -22
- deepset_mcp/api/integrations/__init__.py +4 -0
- deepset_mcp/api/integrations/models.py +4 -13
- deepset_mcp/api/integrations/protocols.py +3 -3
- deepset_mcp/api/integrations/resource.py +5 -5
- deepset_mcp/api/pipeline/__init__.py +1 -15
- deepset_mcp/api/pipeline/models.py +66 -28
- deepset_mcp/api/pipeline/protocols.py +6 -10
- deepset_mcp/api/pipeline/resource.py +101 -58
- deepset_mcp/api/pipeline_template/__init__.py +3 -0
- deepset_mcp/api/pipeline_template/models.py +12 -23
- deepset_mcp/api/pipeline_template/protocols.py +11 -5
- deepset_mcp/api/pipeline_template/resource.py +51 -39
- deepset_mcp/api/protocols.py +13 -11
- deepset_mcp/api/secrets/__init__.py +3 -0
- deepset_mcp/api/secrets/models.py +2 -8
- deepset_mcp/api/secrets/protocols.py +4 -3
- deepset_mcp/api/secrets/resource.py +32 -7
- deepset_mcp/api/shared_models.py +111 -1
- deepset_mcp/api/transport.py +30 -58
- deepset_mcp/api/user/__init__.py +3 -0
- deepset_mcp/api/workspace/__init__.py +1 -3
- deepset_mcp/api/workspace/models.py +4 -8
- deepset_mcp/api/workspace/protocols.py +3 -3
- deepset_mcp/api/workspace/resource.py +5 -9
- deepset_mcp/config.py +1 -1
- deepset_mcp/main.py +5 -20
- deepset_mcp/mcp/__init__.py +10 -0
- deepset_mcp/{server.py → mcp/server.py} +8 -18
- deepset_mcp/{store.py → mcp/store.py} +3 -3
- deepset_mcp/{tool_factory.py → mcp/tool_factory.py} +20 -37
- deepset_mcp/mcp/tool_models.py +57 -0
- deepset_mcp/{tool_registry.py → mcp/tool_registry.py} +16 -6
- deepset_mcp/{tools/tokonomics → tokonomics}/__init__.py +3 -1
- deepset_mcp/{tools/tokonomics → tokonomics}/decorators.py +2 -2
- deepset_mcp/{tools/tokonomics → tokonomics}/explorer.py +1 -1
- deepset_mcp/tools/__init__.py +58 -0
- deepset_mcp/tools/custom_components.py +7 -4
- deepset_mcp/tools/haystack_service.py +64 -22
- deepset_mcp/tools/haystack_service_models.py +40 -0
- deepset_mcp/tools/indexes.py +131 -32
- deepset_mcp/tools/object_store.py +1 -1
- deepset_mcp/tools/pipeline.py +40 -10
- deepset_mcp/tools/pipeline_template.py +35 -18
- deepset_mcp/tools/secrets.py +29 -13
- deepset_mcp/tools/workspace.py +2 -2
- deepset_mcp-0.0.8.dist-info/METADATA +100 -0
- deepset_mcp-0.0.8.dist-info/RECORD +74 -0
- deepset_mcp/api/README.md +0 -536
- deepset_mcp/api/pipeline/log_level.py +0 -13
- deepset_mcp/tool_models.py +0 -42
- deepset_mcp-0.0.6.dist-info/METADATA +0 -807
- deepset_mcp-0.0.6.dist-info/RECORD +0 -75
- /deepset_mcp/{tools/tokonomics → tokonomics}/object_store.py +0 -0
- {deepset_mcp-0.0.6.dist-info → deepset_mcp-0.0.8.dist-info}/WHEEL +0 -0
- {deepset_mcp-0.0.6.dist-info → deepset_mcp-0.0.8.dist-info}/entry_points.txt +0 -0
- {deepset_mcp-0.0.6.dist-info → deepset_mcp-0.0.8.dist-info}/licenses/LICENSE +0 -0
deepset_mcp/tools/indexes.py
CHANGED
@@ -2,24 +2,53 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import yaml
+from pydantic import BaseModel
+
 from deepset_mcp.api.exceptions import BadRequestError, ResourceNotFoundError, UnexpectedAPIError
-from deepset_mcp.api.indexes.models import Index
-from deepset_mcp.api.pipeline import PipelineValidationResult
+from deepset_mcp.api.indexes.models import Index
+from deepset_mcp.api.pipeline.models import PipelineValidationResult
 from deepset_mcp.api.protocols import AsyncClientProtocol
+from deepset_mcp.api.shared_models import PaginatedResponse
+
+
+class IndexValidationResultWithYaml(BaseModel):
+    """Model for index validation result that includes the original YAML."""
+
+    validation_result: PipelineValidationResult
+    "Result of validating the index configuration"
+    yaml_config: str
+    "Original YAML configuration that was validated"
+
+
+class IndexOperationWithErrors(BaseModel):
+    """Model for index operations that complete with validation errors."""
 
+    message: str
+    "Descriptive message about the index operation"
+    validation_result: PipelineValidationResult
+    "Validation errors encountered during the operation"
+    index: Index
+    "Index object after the operation completed"
 
-async def list_indexes(*, client: AsyncClientProtocol, workspace: str) -> IndexList | str:
-    """Use this to list available indexes on the deepset platform in your workspace.
 
-
-    :
+async def list_indexes(
+    *, client: AsyncClientProtocol, workspace: str, after: str | None = None
+) -> PaginatedResponse[Index] | str:
+    """Retrieves a list of all indexes available within the currently configured deepset workspace.
+
+    :param client: The async client for API communication.
+    :param workspace: The workspace name.
+    :param after: The cursor to fetch the next page of results.
+        If there are more results to fetch, the cursor will appear as `next_cursor` on the response.
+    :returns: List of indexes or error message.
     """
     try:
-
-    except ResourceNotFoundError
-        return f"
-
-
+        return await client.indexes(workspace=workspace).list(after=after)
+    except ResourceNotFoundError:
+        return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+    except (BadRequestError, UnexpectedAPIError) as e:
+        return f"Failed to list indexes: {e}"
 
 
 async def get_index(*, client: AsyncClientProtocol, workspace: str, index_name: str) -> Index | str:
@@ -37,6 +66,33 @@ async def get_index(*, client: AsyncClientProtocol, workspace: str, index_name:
     return response
 
 
+async def validate_index(
+    *, client: AsyncClientProtocol, workspace: str, yaml_configuration: str
+) -> IndexValidationResultWithYaml | str:
+    """Validates the provided index YAML configuration against the deepset API.
+
+    :param client: The async client for API communication.
+    :param workspace: The workspace name.
+    :param yaml_configuration: The YAML configuration to validate.
+    :returns: Validation result with original YAML or error message.
+    """
+    if not yaml_configuration or not yaml_configuration.strip():
+        return "You need to provide a YAML configuration to validate."
+
+    try:
+        yaml.safe_load(yaml_configuration)
+    except yaml.YAMLError as e:
+        return f"Invalid YAML provided: {e}"
+
+    try:
+        response = await client.indexes(workspace=workspace).validate(yaml_configuration)
+        return IndexValidationResultWithYaml(validation_result=response, yaml_config=yaml_configuration)
+    except ResourceNotFoundError:
+        return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+    except (BadRequestError, UnexpectedAPIError) as e:
+        return f"Failed to validate index: {e}"
+
+
 async def create_index(
     *,
     client: AsyncClientProtocol,
@@ -55,7 +111,7 @@ async def create_index(
     """
     try:
         result = await client.indexes(workspace=workspace).create(
-
+            index_name=index_name, yaml_config=yaml_configuration, description=description
         )
     except ResourceNotFoundError:
         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
@@ -72,35 +128,78 @@ async def update_index(
     client: AsyncClientProtocol,
     workspace: str,
     index_name: str,
-
-
-
-
+    original_config_snippet: str,
+    replacement_config_snippet: str,
+    skip_validation_errors: bool = True,
+) -> Index | IndexOperationWithErrors | str:
+    """
+    Updates an index configuration in the specified workspace with a replacement configuration snippet.
 
-    This function
-
+    This function validates the replacement configuration snippet before applying it to the index.
+    If the validation fails and skip_validation_errors is False, it returns error messages.
+    Otherwise, the replacement snippet is used to update the index's configuration.
 
-    :param client:
-    :param workspace:
-    :param index_name:
-    :param
-    :param
+    :param client: The async client for API communication.
+    :param workspace: The workspace name.
+    :param index_name: Name of the index to update.
+    :param original_config_snippet: The configuration snippet to replace.
+    :param replacement_config_snippet: The new configuration snippet.
+    :param skip_validation_errors: If True (default), updates the index even if validation fails.
+        If False, stops update when validation fails.
+    :returns: Updated index or error message.
     """
-    if not updated_index_name and not yaml_configuration:
-        return "You must provide either a new name or a new configuration to update the index."
-
     try:
-
-
+        original_index = await client.indexes(workspace=workspace).get(index_name=index_name)
+    except ResourceNotFoundError:
+        return f"There is no index named '{index_name}'. Did you mean to create it?"
+    except (BadRequestError, UnexpectedAPIError) as e:
+        return f"Failed to fetch index '{index_name}': {e}"
+
+    if original_index.yaml_config is None:
+        return f"The index '{index_name}' does not have a YAML configuration."
+
+    occurrences = original_index.yaml_config.count(original_config_snippet)
+
+    if occurrences == 0:
+        return f"No occurrences of the provided configuration snippet were found in the index '{index_name}'."
+
+    if occurrences > 1:
+        return (
+            f"Multiple occurrences ({occurrences}) of the provided configuration snippet were found in the index "
+            f"'{index_name}'. Specify a more precise snippet to proceed with the update."
         )
+
+    updated_yaml_configuration = original_index.yaml_config.replace(
+        original_config_snippet, replacement_config_snippet, 1
+    )
+
+    try:
+        validation_response = await client.indexes(workspace=workspace).validate(updated_yaml_configuration)
+
+        if not validation_response.valid and not skip_validation_errors:
+            error_messages = [f"{error.code}: {error.message}" for error in validation_response.errors]
+            return "Index validation failed:\n" + "\n".join(error_messages)
+
+        await client.indexes(workspace=workspace).update(index_name=index_name, yaml_config=updated_yaml_configuration)
+
+        # Get the full index after update
+        index = await client.indexes(workspace=workspace).get(index_name)
+
+        # If validation failed but we proceeded anyway, return the special model
+        if not validation_response.valid:
+            return IndexOperationWithErrors(
+                message="The operation completed with errors", validation_result=validation_response, index=index
+            )
+
+        # Otherwise return just the index
+        return index
+
     except ResourceNotFoundError:
         return f"There is no index named '{index_name}'. Did you mean to create it?"
    except BadRequestError as e:
-        return f"Failed to update index '{index_name}': {e}"
+        return f"Failed to update the index '{index_name}': {e}"
     except UnexpectedAPIError as e:
-        return f"Failed to update index '{index_name}': {e}"
-
-    return {"message": f"Index '{index_name}' updated successfully.", "index": result}
+        return f"Failed to update the index '{index_name}': {e}"
 
 
 async def deploy_index(

deepset_mcp/tools/pipeline.py
CHANGED
@@ -5,30 +5,33 @@
 import asyncio
 
 import yaml
+from pydantic import BaseModel
 
 from deepset_mcp.api.exceptions import BadRequestError, ResourceNotFoundError, UnexpectedAPIError
-from deepset_mcp.api.pipeline.log_level import LogLevel
 from deepset_mcp.api.pipeline.models import (
     DeepsetPipeline,
     DeepsetSearchResponse,
-
-
-    PipelineOperationWithErrors,
+    LogLevel,
+    PipelineLog,
     PipelineValidationResult,
-    PipelineValidationResultWithYaml,
 )
 from deepset_mcp.api.protocols import AsyncClientProtocol
+from deepset_mcp.api.shared_models import PaginatedResponse
 
 
-async def list_pipelines(
+async def list_pipelines(
+    *, client: AsyncClientProtocol, workspace: str, after: str | None = None
+) -> PaginatedResponse[DeepsetPipeline] | str:
     """Retrieves a list of all pipeline available within the currently configured deepset workspace.
 
     :param client: The async client for API communication.
     :param workspace: The workspace name.
+    :param after: The cursor to fetch the next page of results.
+        If there are more results to fetch, the cursor will appear as `next_cursor` on the response.
     :returns: List of pipelines or error message.
     """
     try:
-        return await client.pipelines(workspace=workspace).list()
+        return await client.pipelines(workspace=workspace).list(after=after)
     except ResourceNotFoundError:
         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
     except (BadRequestError, UnexpectedAPIError) as e:
@@ -51,6 +54,15 @@ async def get_pipeline(*, client: AsyncClientProtocol, workspace: str, pipeline_
         return f"Failed to fetch pipeline '{pipeline_name}': {e}"
 
 
+class PipelineValidationResultWithYaml(BaseModel):
+    """Model for pipeline validation result that includes the original YAML."""
+
+    validation_result: PipelineValidationResult
+    "Result of validating the pipeline configuration"
+    yaml_config: str
+    "Original YAML configuration that was validated"
+
+
 async def validate_pipeline(
     *, client: AsyncClientProtocol, workspace: str, yaml_configuration: str
 ) -> PipelineValidationResultWithYaml | str:
@@ -78,6 +90,17 @@ async def validate_pipeline(
         return f"Failed to validate pipeline: {e}"
 
 
+class PipelineOperationWithErrors(BaseModel):
+    """Model for pipeline operations that complete with validation errors."""
+
+    message: str
+    "Descriptive message about the pipeline operation"
+    validation_result: PipelineValidationResult
+    "Validation errors encountered during the operation"
+    pipeline: DeepsetPipeline
+    "Pipeline object after the operation completed"
+
+
 async def create_pipeline(
     *,
     client: AsyncClientProtocol,
@@ -207,8 +230,14 @@ async def update_pipeline(
 
 
 async def get_pipeline_logs(
-    *,
-
+    *,
+    client: AsyncClientProtocol,
+    workspace: str,
+    pipeline_name: str,
+    limit: int = 30,
+    level: LogLevel | None = None,
+    after: str | None = None,
+) -> PaginatedResponse[PipelineLog] | str:
     """Fetches logs for a specific pipeline.
 
     Retrieves log entries for the specified pipeline, with optional filtering by log level.
@@ -219,12 +248,13 @@ async def get_pipeline_logs(
     :param pipeline_name: Name of the pipeline to fetch logs for.
     :param limit: Maximum number of log entries to return (default: 30).
     :param level: Filter logs by level. If None, returns all levels.
+    :param after: The cursor to fetch the next page of results.
 
     :returns: Pipeline logs or error message.
     """
     try:
         return await client.pipelines(workspace=workspace).get_logs(
-            pipeline_name=pipeline_name, limit=limit, level=level
+            pipeline_name=pipeline_name, limit=limit, level=level, after=after
         )
     except ResourceNotFoundError:
         return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."

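Both `list_pipelines` and `get_pipeline_logs` now take an `after` cursor and return a `PaginatedResponse`. A minimal pagination sketch, assuming the response exposes `data` and `next_cursor` as used elsewhere in this diff:

```python
from deepset_mcp.api.pipeline.models import DeepsetPipeline
from deepset_mcp.api.protocols import AsyncClientProtocol
from deepset_mcp.tools.pipeline import list_pipelines


async def collect_all_pipelines(client: AsyncClientProtocol, workspace: str) -> list[DeepsetPipeline]:
    pipelines: list[DeepsetPipeline] = []
    cursor: str | None = None
    while True:
        page = await list_pipelines(client=client, workspace=workspace, after=cursor)
        if isinstance(page, str):  # the tools return an error message instead of raising
            raise RuntimeError(page)
        pipelines.extend(page.data)
        cursor = page.next_cursor  # assumed attribute per the docstrings in this diff
        if cursor is None:  # no further pages
            break
    return pipelines
```
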

deepset_mcp/tools/pipeline_template.py
CHANGED
@@ -3,16 +3,15 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import numpy as np
+from pydantic import BaseModel
 
 from deepset_mcp.api.exceptions import ResourceNotFoundError, UnexpectedAPIError
 from deepset_mcp.api.pipeline_template.models import (
     PipelineTemplate,
-    PipelineTemplateList,
-    PipelineTemplateSearchResult,
-    PipelineTemplateSearchResults,
     PipelineType,
 )
 from deepset_mcp.api.protocols import AsyncClientProtocol
+from deepset_mcp.api.shared_models import PaginatedResponse
 from deepset_mcp.tools.model_protocol import ModelProtocol
 
 
@@ -21,27 +20,22 @@ async def list_templates(
     client: AsyncClientProtocol,
     workspace: str,
     limit: int = 100,
-    field: str = "created_at",
-    order: str = "DESC",
     pipeline_type: PipelineType | str | None = None,
-
+    # after: str | None = None TODO
+) -> PaginatedResponse[PipelineTemplate] | str:
     """Retrieves a list of all available pipeline and indexing templates.
 
     :param client: The async client for API requests.
     :param workspace: The workspace to list templates from.
     :param limit: Maximum number of templates to return (default: 100).
-    :param field: Field to sort by (default: "created_at").
-    :param order: Sort order, either "ASC" or "DESC" (default: "DESC").
     :param pipeline_type: The type of pipeline to return.
 
     :returns: List of pipeline templates or error message.
     """
     try:
-        return await client.pipeline_templates(workspace=workspace).
+        return await client.pipeline_templates(workspace=workspace).list(
             limit=limit,
-
-            order=order,
-            filter=f"pipeline_type eq '{pipeline_type}'" if pipeline_type else None,
+            filter=f"pipeline_type eq '{pipeline_type}'" if pipeline_type else None,  # TODO: after=after
         )
     except ResourceNotFoundError:
         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
@@ -66,6 +60,26 @@ async def get_template(*, client: AsyncClientProtocol, workspace: str, template_
         return f"Failed to fetch pipeline template '{template_name}': {e}"
 
 
+class PipelineTemplateSearchResult(BaseModel):
+    """Model representing a search result for pipeline templates."""
+
+    template: PipelineTemplate
+    "Pipeline template that matched the search criteria"
+    similarity_score: float
+    "Relevance score indicating how well the template matches the search"
+
+
+class PipelineTemplateSearchResults(BaseModel):
+    """Response model for pipeline template search results."""
+
+    results: list[PipelineTemplateSearchResult]
+    "List of pipeline templates matching the search criteria"
+    query: str
+    "Original search query string"
+    total_found: int
+    "Total number of templates found matching the search criteria"
+
+
 async def search_templates(
     *,
     client: AsyncClientProtocol,
@@ -87,18 +101,21 @@ async def search_templates(
     :returns: Search results with similarity scores or error message.
     """
     try:
-        response = await client.pipeline_templates(workspace=workspace).
-
-
+        response = await client.pipeline_templates(workspace=workspace).list()
+        templates = response.data
+
+        # Filter by pipeline_type if specified
+        if pipeline_type:
+            templates = [t for t in templates if t.pipeline_type == pipeline_type]
     except UnexpectedAPIError as e:
         return f"Failed to retrieve pipeline templates: {e}"
 
-    if not
+    if not templates:
         return PipelineTemplateSearchResults(results=[], query=query, total_found=0)
 
     # Extract text for embedding from all templates
     template_texts: list[tuple[str, str]] = [
-        (template.template_name, f"{template.template_name} {template.description}") for template in
+        (template.template_name, f"{template.template_name} {template.description}") for template in templates
     ]
     template_names: list[str] = [t[0] for t in template_texts]
 
@@ -122,7 +139,7 @@ async def search_templates(
     search_results = []
     for template_name, sim in top_templates:
         # Find the template object by name
-        template = next((t for t in
+        template = next((t for t in templates if t.template_name == template_name), None)
         if template:
             search_results.append(PipelineTemplateSearchResult(template=template, similarity_score=float(sim)))
 

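With the sorting parameters gone, `list_templates` filters server-side only by `pipeline_type`, expressed as an OData-style `pipeline_type eq '...'` filter. A short sketch, assuming `PipelineType.INDEXING` is a valid enum member (a plain string is also accepted by the signature):

```python
from deepset_mcp.api.pipeline_template.models import PipelineType
from deepset_mcp.api.protocols import AsyncClientProtocol
from deepset_mcp.tools.pipeline_template import list_templates


async def show_indexing_templates(client: AsyncClientProtocol, workspace: str) -> None:
    result = await list_templates(
        client=client,
        workspace=workspace,
        limit=20,
        pipeline_type=PipelineType.INDEXING,  # assumed enum member; a plain string is also accepted
    )
    if isinstance(result, str):
        print(result)  # error message returned by the tool
        return
    for template in result.data:
        print(template.template_name)
```
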
deepset_mcp/tools/secrets.py
CHANGED
@@ -13,19 +13,29 @@ class EnvironmentSecret(BaseModel):
     """Model representing a secret or an integration."""
 
     name: str
+    "Human-readable name of the secret or integration"
     id: str
+    "Unique identifier for the secret or integration"
     invalid: bool | None = None
+    "Whether the secret or integration is invalid (None for secrets)"
 
 
 class EnvironmentSecretList(BaseModel):
     """Model representing a list of secrets and integrations."""
 
     data: list[EnvironmentSecret]
+    "List of secrets and integrations for the current page"
     has_more: bool
+    "Whether there are more items available beyond this page"
     total: int
+    "Total number of secrets and integrations"
+    next_cursor: str | None = None
+    "Cursor for fetching the next page of results"
 
 
-async def list_secrets(
+async def list_secrets(
+    *, client: AsyncClientProtocol, limit: int = 10, after: str | None = None
+) -> EnvironmentSecretList | str:
     """Lists all secrets available in the user's deepset organization.
 
     Use this tool to retrieve a list of secrets with their names and IDs.
@@ -33,29 +43,35 @@ async def list_secrets(*, client: AsyncClientProtocol, limit: int = 10) -> Envir
 
     :param client: The deepset API client
     :param limit: Maximum number of secrets to return (default: 10)
+    :param after: The cursor to fetch the next page of results
 
     :returns: List of secrets or error message
     """
     try:
-        secrets_list = await client.secrets().list(limit=limit)
-        integrations_list = await client.integrations().list()
+        secrets_list = await client.secrets().list(limit=limit, after=after)
 
         env_secrets = [EnvironmentSecret(name=secret.name, id=secret.secret_id) for secret in secrets_list.data]
-
-
-
-
-
-
-
-
+
+        # Only fetch integrations if no cursor is provided (first page)
+        # This optimizes performance by skipping the integrations call for subsequent pages
+        if after is None:
+            integrations_list = await client.integrations().list()
+            for integration in integrations_list:
+                env_vars = TOKEN_DOMAIN_MAPPING.get(integration.provider_domain, [])
+                for env_var in env_vars:
+                    env_secrets.append(
+                        EnvironmentSecret(
+                            name=env_var,
+                            id=str(integration.model_registry_token_id),
+                            invalid=integration.invalid,
+                        )
                     )
-            )
 
         return EnvironmentSecretList(
             data=env_secrets,
             has_more=secrets_list.has_more,
             total=len(env_secrets),
+            next_cursor=secrets_list.next_cursor,
         )
     except ResourceNotFoundError as e:
         return f"Error: {str(e)}"
@@ -82,7 +98,7 @@ async def get_secret(*, client: AsyncClientProtocol, secret_id: str) -> Environm
     except ResourceNotFoundError:
         try:
             integrations_list = await client.integrations().list()
-            for integration in integrations_list
+            for integration in integrations_list:
                 if str(integration.model_registry_token_id) == secret_id:
                     env_vars = TOKEN_DOMAIN_MAPPING.get(integration.provider_domain, [])
                     if env_vars:

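Because integrations are merged into the result only when `after` is None, later pages of `list_secrets` contain plain secrets alone. A sketch that walks all pages, assuming the usual tool convention of returning an error string instead of raising:

```python
from deepset_mcp.api.protocols import AsyncClientProtocol
from deepset_mcp.tools.secrets import list_secrets


async def print_all_secret_names(client: AsyncClientProtocol) -> None:
    cursor: str | None = None
    while True:
        page = await list_secrets(client=client, limit=10, after=cursor)
        if isinstance(page, str):  # the tools return error text instead of raising
            print(page)
            return
        for secret in page.data:
            print(secret.name, secret.id)
        cursor = page.next_cursor
        if cursor is None:  # no further pages
            break
```
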
deepset_mcp/tools/workspace.py
CHANGED
@@ -7,10 +7,10 @@
 from deepset_mcp.api.exceptions import BadRequestError, ResourceNotFoundError, UnexpectedAPIError
 from deepset_mcp.api.protocols import AsyncClientProtocol
 from deepset_mcp.api.shared_models import NoContentResponse
-from deepset_mcp.api.workspace.models import Workspace
+from deepset_mcp.api.workspace.models import Workspace
 
 
-async def list_workspaces(*, client: AsyncClientProtocol) ->
+async def list_workspaces(*, client: AsyncClientProtocol) -> list[Workspace] | str:
     """Retrieves a list of all workspaces available to the user.
 
     This tool provides an overview of all workspaces that the user has access to.


deepset_mcp-0.0.8.dist-info/METADATA
ADDED
@@ -0,0 +1,100 @@
+Metadata-Version: 2.4
+Name: deepset-mcp
+Version: 0.0.8
+Summary: Collection of MCP tools and Agents to work with the deepset AI platform. Create, debug or learn about pipelines on the platform. Useable from the CLI, Cursor, Claude Code, or other MCP clients.
+Project-URL: Homepage, https://deepset.ai
+Author-email: Mathis Lucka <mathis.lucka@deepset.ai>, Tanay Soni <tanay.soni@deepset.ai>
+License-Expression: Apache-2.0
+License-File: LICENSE
+Keywords: Agents,Haystack,LLM,MCP,deepset,pipelines
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: Freely Distributable
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.11
+Requires-Dist: fastapi
+Requires-Dist: glom
+Requires-Dist: httpx
+Requires-Dist: mcp>=1.10.1
+Requires-Dist: model2vec
+Requires-Dist: numpy
+Requires-Dist: orjson
+Requires-Dist: pydantic>=2.0.0
+Requires-Dist: pyjwt[crypto]
+Requires-Dist: pyyaml
+Requires-Dist: rich
+Requires-Dist: typer
+Provides-Extra: redis
+Requires-Dist: redis>=4.0.0; extra == 'redis'
+Description-Content-Type: text/markdown
+
+# deepset-mcp
+
+**The official MCP server and Python SDK for the deepset AI platform**
+
+deepset-mcp enables AI agents to build and debug pipelines on the [deepset AI platform](https://www.deepset.ai/products-and-services/deepset-ai-platform) through 30+ specialized tools. It also provides a Python SDK for programmatic access to many platform resources.
+
+## Documentation
+
+📖 **[View the full documentation](https://deepset-ai.github.io/deepset-mcp-server/)**
+
+## Quick Links
+
+- 🔗 **[deepset AI Platform](https://www.deepset.ai/products-and-services/deepset-ai-platform)**
+- 📚 **[Installation Guide](https://deepset-ai.github.io/deepset-mcp-server/installation/)**
+- 🛠️ **[MCP Server Guide](https://deepset-ai.github.io/deepset-mcp-server/guides/mcp_server/)**
+- 🐍 **[Python SDK Guide](https://deepset-ai.github.io/deepset-mcp-server/guides/api_sdk/)**
+
+## Development
+
+### Installation
+
+Install the project using [uv](https://docs.astral.sh/uv/):
+
+```bash
+# Install uv first
+pipx install uv
+
+# Install project with all dependencies
+uv sync --locked --all-extras --all-groups
+```
+
+### Code Quality & Testing
+
+Run code quality checks and tests using the Makefile:
+
+```bash
+# Install dependencies
+make install
+
+# Code quality
+make lint # Run ruff linting
+make format # Format code with ruff
+make types # Run mypy type checking
+
+# Testing
+make test # Run unit tests (default)
+make test-unit # Run unit tests only
+make test-integration # Run integration tests
+make test-all # Run all tests
+
+# Clean up
+make clean # Remove cache files
+```
+
+### Documentation
+
+Documentation is built using [MkDocs](https://www.mkdocs.org/) with the Material theme:
+
+- Configuration: `mkdocs.yml`
+- Content: `docs/` directory
+- Auto-generated API docs via [mkdocstrings](https://mkdocstrings.github.io/)
+- Deployed via GitHub Pages (automated via GitHub Actions on push to main branch)
+