deepset_mcp-0.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. deepset_mcp/__init__.py +0 -0
  2. deepset_mcp/agents/__init__.py +0 -0
  3. deepset_mcp/agents/debugging/__init__.py +0 -0
  4. deepset_mcp/agents/debugging/debugging_agent.py +37 -0
  5. deepset_mcp/agents/debugging/system_prompt.md +214 -0
  6. deepset_mcp/agents/generalist/__init__.py +0 -0
  7. deepset_mcp/agents/generalist/generalist_agent.py +38 -0
  8. deepset_mcp/agents/generalist/system_prompt.md +241 -0
  9. deepset_mcp/api/README.md +536 -0
  10. deepset_mcp/api/__init__.py +0 -0
  11. deepset_mcp/api/client.py +277 -0
  12. deepset_mcp/api/custom_components/__init__.py +0 -0
  13. deepset_mcp/api/custom_components/models.py +25 -0
  14. deepset_mcp/api/custom_components/protocols.py +17 -0
  15. deepset_mcp/api/custom_components/resource.py +56 -0
  16. deepset_mcp/api/exceptions.py +70 -0
  17. deepset_mcp/api/haystack_service/__init__.py +0 -0
  18. deepset_mcp/api/haystack_service/protocols.py +13 -0
  19. deepset_mcp/api/haystack_service/resource.py +55 -0
  20. deepset_mcp/api/indexes/__init__.py +0 -0
  21. deepset_mcp/api/indexes/models.py +63 -0
  22. deepset_mcp/api/indexes/protocols.py +53 -0
  23. deepset_mcp/api/indexes/resource.py +138 -0
  24. deepset_mcp/api/integrations/__init__.py +1 -0
  25. deepset_mcp/api/integrations/models.py +49 -0
  26. deepset_mcp/api/integrations/protocols.py +27 -0
  27. deepset_mcp/api/integrations/resource.py +57 -0
  28. deepset_mcp/api/pipeline/__init__.py +17 -0
  29. deepset_mcp/api/pipeline/log_level.py +9 -0
  30. deepset_mcp/api/pipeline/models.py +235 -0
  31. deepset_mcp/api/pipeline/protocols.py +83 -0
  32. deepset_mcp/api/pipeline/resource.py +378 -0
  33. deepset_mcp/api/pipeline_template/__init__.py +0 -0
  34. deepset_mcp/api/pipeline_template/models.py +56 -0
  35. deepset_mcp/api/pipeline_template/protocols.py +17 -0
  36. deepset_mcp/api/pipeline_template/resource.py +88 -0
  37. deepset_mcp/api/protocols.py +122 -0
  38. deepset_mcp/api/secrets/__init__.py +0 -0
  39. deepset_mcp/api/secrets/models.py +16 -0
  40. deepset_mcp/api/secrets/protocols.py +29 -0
  41. deepset_mcp/api/secrets/resource.py +112 -0
  42. deepset_mcp/api/shared_models.py +17 -0
  43. deepset_mcp/api/transport.py +336 -0
  44. deepset_mcp/api/user/__init__.py +0 -0
  45. deepset_mcp/api/user/protocols.py +11 -0
  46. deepset_mcp/api/user/resource.py +38 -0
  47. deepset_mcp/api/workspace/__init__.py +7 -0
  48. deepset_mcp/api/workspace/models.py +23 -0
  49. deepset_mcp/api/workspace/protocols.py +41 -0
  50. deepset_mcp/api/workspace/resource.py +94 -0
  51. deepset_mcp/benchmark/README.md +425 -0
  52. deepset_mcp/benchmark/__init__.py +1 -0
  53. deepset_mcp/benchmark/agent_configs/debugging_agent.yml +10 -0
  54. deepset_mcp/benchmark/agent_configs/generalist_agent.yml +6 -0
  55. deepset_mcp/benchmark/dp_validation_error_analysis/__init__.py +0 -0
  56. deepset_mcp/benchmark/dp_validation_error_analysis/eda.ipynb +757 -0
  57. deepset_mcp/benchmark/dp_validation_error_analysis/prepare_interaction_data.ipynb +167 -0
  58. deepset_mcp/benchmark/dp_validation_error_analysis/preprocessing_utils.py +213 -0
  59. deepset_mcp/benchmark/runner/__init__.py +0 -0
  60. deepset_mcp/benchmark/runner/agent_benchmark_runner.py +561 -0
  61. deepset_mcp/benchmark/runner/agent_loader.py +110 -0
  62. deepset_mcp/benchmark/runner/cli.py +39 -0
  63. deepset_mcp/benchmark/runner/cli_agent.py +373 -0
  64. deepset_mcp/benchmark/runner/cli_index.py +71 -0
  65. deepset_mcp/benchmark/runner/cli_pipeline.py +73 -0
  66. deepset_mcp/benchmark/runner/cli_tests.py +226 -0
  67. deepset_mcp/benchmark/runner/cli_utils.py +61 -0
  68. deepset_mcp/benchmark/runner/config.py +73 -0
  69. deepset_mcp/benchmark/runner/config_loader.py +64 -0
  70. deepset_mcp/benchmark/runner/interactive.py +140 -0
  71. deepset_mcp/benchmark/runner/models.py +203 -0
  72. deepset_mcp/benchmark/runner/repl.py +67 -0
  73. deepset_mcp/benchmark/runner/setup_actions.py +238 -0
  74. deepset_mcp/benchmark/runner/streaming.py +360 -0
  75. deepset_mcp/benchmark/runner/teardown_actions.py +196 -0
  76. deepset_mcp/benchmark/runner/tracing.py +21 -0
  77. deepset_mcp/benchmark/tasks/chat_rag_answers_wrong_format.yml +16 -0
  78. deepset_mcp/benchmark/tasks/documents_output_wrong.yml +13 -0
  79. deepset_mcp/benchmark/tasks/jinja_str_instead_of_complex_type.yml +11 -0
  80. deepset_mcp/benchmark/tasks/jinja_syntax_error.yml +11 -0
  81. deepset_mcp/benchmark/tasks/missing_output_mapping.yml +14 -0
  82. deepset_mcp/benchmark/tasks/no_query_input.yml +13 -0
  83. deepset_mcp/benchmark/tasks/pipelines/chat_agent_jinja_str.yml +141 -0
  84. deepset_mcp/benchmark/tasks/pipelines/chat_agent_jinja_syntax.yml +141 -0
  85. deepset_mcp/benchmark/tasks/pipelines/chat_rag_answers_wrong_format.yml +181 -0
  86. deepset_mcp/benchmark/tasks/pipelines/chat_rag_missing_output_mapping.yml +189 -0
  87. deepset_mcp/benchmark/tasks/pipelines/rag_documents_wrong_format.yml +193 -0
  88. deepset_mcp/benchmark/tasks/pipelines/rag_no_query_input.yml +191 -0
  89. deepset_mcp/benchmark/tasks/pipelines/standard_index.yml +167 -0
  90. deepset_mcp/initialize_embedding_model.py +12 -0
  91. deepset_mcp/main.py +133 -0
  92. deepset_mcp/prompts/deepset_copilot_prompt.md +271 -0
  93. deepset_mcp/prompts/deepset_debugging_agent.md +214 -0
  94. deepset_mcp/store.py +5 -0
  95. deepset_mcp/tool_factory.py +473 -0
  96. deepset_mcp/tools/__init__.py +0 -0
  97. deepset_mcp/tools/custom_components.py +52 -0
  98. deepset_mcp/tools/doc_search.py +83 -0
  99. deepset_mcp/tools/haystack_service.py +358 -0
  100. deepset_mcp/tools/haystack_service_models.py +97 -0
  101. deepset_mcp/tools/indexes.py +129 -0
  102. deepset_mcp/tools/model_protocol.py +16 -0
  103. deepset_mcp/tools/pipeline.py +335 -0
  104. deepset_mcp/tools/pipeline_template.py +116 -0
  105. deepset_mcp/tools/secrets.py +45 -0
  106. deepset_mcp/tools/tokonomics/__init__.py +73 -0
  107. deepset_mcp/tools/tokonomics/decorators.py +396 -0
  108. deepset_mcp/tools/tokonomics/explorer.py +347 -0
  109. deepset_mcp/tools/tokonomics/object_store.py +177 -0
  110. deepset_mcp/tools/workspace.py +61 -0
  111. deepset_mcp-0.0.2.dist-info/METADATA +288 -0
  112. deepset_mcp-0.0.2.dist-info/RECORD +114 -0
  113. deepset_mcp-0.0.2.dist-info/WHEEL +4 -0
  114. deepset_mcp-0.0.2.dist-info/entry_points.txt +3 -0
deepset_mcp/tools/pipeline.py
@@ -0,0 +1,335 @@
+ import asyncio
+
+ import yaml
+
+ from deepset_mcp.api.exceptions import BadRequestError, ResourceNotFoundError, UnexpectedAPIError
+ from deepset_mcp.api.pipeline.log_level import LogLevel
+ from deepset_mcp.api.pipeline.models import (
+     DeepsetPipeline,
+     DeepsetSearchResponse,
+     PipelineList,
+     PipelineLogList,
+     PipelineOperationWithErrors,
+     PipelineValidationResult,
+     PipelineValidationResultWithYaml,
+ )
+ from deepset_mcp.api.protocols import AsyncClientProtocol
+
+
+ async def list_pipelines(*, client: AsyncClientProtocol, workspace: str) -> PipelineList | str:
+     """Retrieves a list of all pipelines available within the currently configured deepset workspace.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :returns: List of pipelines or error message.
+     """
+     try:
+         return await client.pipelines(workspace=workspace).list()
+     except ResourceNotFoundError:
+         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+     except (BadRequestError, UnexpectedAPIError) as e:
+         return f"Failed to list pipelines: {e}"
+
+
+ async def get_pipeline(*, client: AsyncClientProtocol, workspace: str, pipeline_name: str) -> DeepsetPipeline | str:
+     """Fetches detailed configuration information for a specific pipeline, identified by its unique `pipeline_name`.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: The name of the pipeline to fetch.
+     :returns: Pipeline details or error message.
+     """
+     try:
+         return await client.pipelines(workspace=workspace).get(pipeline_name=pipeline_name)
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."
+     except (BadRequestError, UnexpectedAPIError) as e:
+         return f"Failed to fetch pipeline '{pipeline_name}': {e}"
+
+
+ async def validate_pipeline(
+     *, client: AsyncClientProtocol, workspace: str, yaml_configuration: str
+ ) -> PipelineValidationResultWithYaml | str:
+     """Validates the provided pipeline YAML configuration against the deepset API.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param yaml_configuration: The YAML configuration to validate.
+     :returns: Validation result with original YAML or error message.
+     """
+     if not yaml_configuration or not yaml_configuration.strip():
+         return "You need to provide a YAML configuration to validate."
+
+     try:
+         yaml.safe_load(yaml_configuration)
+     except yaml.YAMLError as e:
+         return f"Invalid YAML provided: {e}"
+
+     try:
+         response = await client.pipelines(workspace=workspace).validate(yaml_configuration)
+         return PipelineValidationResultWithYaml(validation_result=response, yaml_config=yaml_configuration)
+     except ResourceNotFoundError:
+         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+     except (BadRequestError, UnexpectedAPIError) as e:
+         return f"Failed to validate pipeline: {e}"
+
+
+ async def create_pipeline(
+     *,
+     client: AsyncClientProtocol,
+     workspace: str,
+     pipeline_name: str,
+     yaml_configuration: str,
+     skip_validation_errors: bool = True,
+ ) -> DeepsetPipeline | PipelineOperationWithErrors | str:
+     """Creates a new pipeline within the currently configured deepset workspace.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: Name of the pipeline to create.
+     :param yaml_configuration: YAML configuration for the pipeline.
+     :param skip_validation_errors: If True (default), creates the pipeline even if validation fails.
+         If False, stops creation when validation fails.
+     :returns: Created pipeline or error message.
+     """
+     try:
+         validation_response = await client.pipelines(workspace=workspace).validate(yaml_configuration)
+
+         if not validation_response.valid and not skip_validation_errors:
+             error_messages = [f"{error.code}: {error.message}" for error in validation_response.errors]
+             return "Pipeline validation failed:\n" + "\n".join(error_messages)
+
+         await client.pipelines(workspace=workspace).create(name=pipeline_name, yaml_config=yaml_configuration)
+
+         # Get the full pipeline after creation
+         pipeline = await client.pipelines(workspace=workspace).get(pipeline_name)
+
+         # If validation failed but we proceeded anyway, return the special model
+         if not validation_response.valid:
+             return PipelineOperationWithErrors(
+                 message="The operation completed with errors", validation_result=validation_response, pipeline=pipeline
+             )
+
+         # Otherwise return just the pipeline
+         return pipeline
+
+     except ResourceNotFoundError:
+         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+     except BadRequestError as e:
+         return f"Failed to create pipeline '{pipeline_name}': {e}"
+     except UnexpectedAPIError as e:
+         return f"Failed to create pipeline '{pipeline_name}': {e}"
+
+
+ async def update_pipeline(
+     *,
+     client: AsyncClientProtocol,
+     workspace: str,
+     pipeline_name: str,
+     original_config_snippet: str,
+     replacement_config_snippet: str,
+     skip_validation_errors: bool = True,
+ ) -> DeepsetPipeline | PipelineOperationWithErrors | str:
+     """
+     Updates a pipeline configuration in the specified workspace with a replacement configuration snippet.
+
+     This function validates the replacement configuration snippet before applying it to the pipeline.
+     If the validation fails and skip_validation_errors is False, it returns error messages.
+     Otherwise, the replacement snippet is used to update the pipeline's configuration.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: Name of the pipeline to update.
+     :param original_config_snippet: The configuration snippet to replace.
+     :param replacement_config_snippet: The new configuration snippet.
+     :param skip_validation_errors: If True (default), updates the pipeline even if validation fails.
+         If False, stops update when validation fails.
+     :returns: Updated pipeline or error message.
+     """
+     try:
+         original_pipeline = await client.pipelines(workspace=workspace).get(pipeline_name=pipeline_name)
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}'. Did you mean to create it?"
+     except (BadRequestError, UnexpectedAPIError) as e:
+         return f"Failed to fetch pipeline '{pipeline_name}': {e}"
+
+     if original_pipeline.yaml_config is None:
+         return f"The pipeline '{pipeline_name}' does not have a YAML configuration."
+
+     occurrences = original_pipeline.yaml_config.count(original_config_snippet)
+
+     if occurrences == 0:
+         return f"No occurrences of the provided configuration snippet were found in the pipeline '{pipeline_name}'."
+
+     if occurrences > 1:
+         return (
+             f"Multiple occurrences ({occurrences}) of the provided configuration snippet were found in the pipeline "
+             f"'{pipeline_name}'. Specify a more precise snippet to proceed with the update."
+         )
+
+     updated_yaml_configuration = original_pipeline.yaml_config.replace(
+         original_config_snippet, replacement_config_snippet, 1
+     )
+
+     try:
+         validation_response = await client.pipelines(workspace=workspace).validate(updated_yaml_configuration)
+
+         if not validation_response.valid and not skip_validation_errors:
+             error_messages = [f"{error.code}: {error.message}" for error in validation_response.errors]
+             return "Pipeline validation failed:\n" + "\n".join(error_messages)
+
+         await client.pipelines(workspace=workspace).update(
+             pipeline_name=pipeline_name, yaml_config=updated_yaml_configuration
+         )
+
+         # Get the full pipeline after update
+         pipeline = await client.pipelines(workspace=workspace).get(pipeline_name)
+
+         # If validation failed but we proceeded anyway, return the special model
+         if not validation_response.valid:
+             return PipelineOperationWithErrors(
+                 message="The operation completed with errors", validation_result=validation_response, pipeline=pipeline
+             )
+
+         # Otherwise return just the pipeline
+         return pipeline
+
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}'. Did you mean to create it?"
+     except BadRequestError as e:
+         return f"Failed to update the pipeline '{pipeline_name}': {e}"
+     except UnexpectedAPIError as e:
+         return f"Failed to update the pipeline '{pipeline_name}': {e}"
+
+
+ async def get_pipeline_logs(
+     *, client: AsyncClientProtocol, workspace: str, pipeline_name: str, limit: int = 30, level: LogLevel | None = None
+ ) -> PipelineLogList | str:
+     """Fetches logs for a specific pipeline.
+
+     Retrieves log entries for the specified pipeline, with optional filtering by log level.
+     This is useful for debugging pipeline issues or monitoring pipeline execution.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: Name of the pipeline to fetch logs for.
+     :param limit: Maximum number of log entries to return (default: 30).
+     :param level: Filter logs by level. If None, returns all levels.
+
+     :returns: Pipeline logs or error message.
+     """
+     try:
+         return await client.pipelines(workspace=workspace).get_logs(
+             pipeline_name=pipeline_name, limit=limit, level=level
+         )
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."
+     except BadRequestError as e:
+         return f"Failed to fetch logs for pipeline '{pipeline_name}': {e}"
+     except UnexpectedAPIError as e:
+         return f"Failed to fetch logs for pipeline '{pipeline_name}': {e}"
+
+
+ async def deploy_pipeline(
+     *,
+     client: AsyncClientProtocol,
+     workspace: str,
+     pipeline_name: str,
+     wait_for_deployment: bool = False,
+     timeout_seconds: float = 600,
+     poll_interval: float = 10,
+ ) -> PipelineValidationResult | str:
+     """Deploys a pipeline to production.
+
+     This function attempts to deploy the specified pipeline in the given workspace.
+     If the deployment fails due to validation errors, it returns a validation result.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: Name of the pipeline to deploy.
+     :param wait_for_deployment: If True, waits for the pipeline to reach DEPLOYED status.
+     :param timeout_seconds: Maximum time to wait for deployment when wait_for_deployment is True (default: 600.0).
+     :param poll_interval: Time between status checks in seconds when wait_for_deployment is True (default: 10.0).
+
+     :returns: Deployment validation result or error message.
+     """
+     try:
+         deployment_result = await client.pipelines(workspace=workspace).deploy(pipeline_name=pipeline_name)
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."
+     except BadRequestError as e:
+         return f"Failed to deploy pipeline '{pipeline_name}': {e}"
+     except UnexpectedAPIError as e:
+         return f"Failed to deploy pipeline '{pipeline_name}': {e}"
+
+     if not deployment_result.valid:
+         return deployment_result
+
+     # If not waiting for deployment, return success immediately
+     if not wait_for_deployment:
+         return deployment_result
+
+     start_time = asyncio.get_event_loop().time()
+
+     while True:
+         current_time = asyncio.get_event_loop().time()
+         if current_time - start_time > timeout_seconds:
+             return (
+                 f"Pipeline '{pipeline_name}' deployment initiated successfully, but did not reach DEPLOYED status "
+                 f"within {timeout_seconds} seconds. You can check the pipeline status manually."
+             )
+
+         try:
+             # Get the current pipeline status
+             pipeline = await client.pipelines(workspace=workspace).get(pipeline_name=pipeline_name, include_yaml=False)
+
+             if pipeline.status == "DEPLOYED":
+                 return deployment_result  # Return the successful validation result
+             elif pipeline.status == "FAILED":
+                 return f"Pipeline '{pipeline_name}' deployment failed. Current status: FAILED."
+
+             # Wait before next poll
+             await asyncio.sleep(poll_interval)
+
+         except Exception as e:
+             return f"Pipeline '{pipeline_name}' deployment initiated, but failed to check deployment status: {e}"
+
+
+ async def search_pipeline(
+     *, client: AsyncClientProtocol, workspace: str, pipeline_name: str, query: str
+ ) -> DeepsetSearchResponse | str:
+     """Searches using a pipeline.
+
+     Uses the specified pipeline to perform a search with the given query.
+     Before executing the search, checks if the pipeline is deployed (status = DEPLOYED).
+     Returns search results.
+
+     :param client: The async client for API communication.
+     :param workspace: The workspace name.
+     :param pipeline_name: Name of the pipeline to use for search.
+     :param query: The search query to execute.
+
+     :returns: Search results or error message.
+     """
+     try:
+         # First, check if the pipeline exists and get its status
+         pipeline = await client.pipelines(workspace=workspace).get(pipeline_name=pipeline_name)
+
+         # Check if pipeline is deployed
+         if pipeline.status != "DEPLOYED":
+             return (
+                 f"Pipeline '{pipeline_name}' is not deployed (current status: {pipeline.status}). "
+                 f"Please deploy the pipeline first using the deploy_pipeline tool before attempting to search."
+             )
+
+         # Execute the search
+         return await client.pipelines(workspace=workspace).search(pipeline_name=pipeline_name, query=query)
+
+     except ResourceNotFoundError:
+         return f"There is no pipeline named '{pipeline_name}' in workspace '{workspace}'."
+     except BadRequestError as e:
+         return f"Failed to search using pipeline '{pipeline_name}': {e}"
+     except UnexpectedAPIError as e:
+         return f"Failed to search using pipeline '{pipeline_name}': {e}"
+     except Exception as e:
+         return f"An unexpected error occurred while searching with pipeline '{pipeline_name}': {str(e)}"
deepset_mcp/tools/pipeline_template.py
@@ -0,0 +1,116 @@
+ import numpy as np
+
+ from deepset_mcp.api.exceptions import ResourceNotFoundError, UnexpectedAPIError
+ from deepset_mcp.api.pipeline_template.models import (
+     PipelineTemplate,
+     PipelineTemplateList,
+     PipelineTemplateSearchResult,
+     PipelineTemplateSearchResults,
+ )
+ from deepset_mcp.api.protocols import AsyncClientProtocol
+ from deepset_mcp.tools.model_protocol import ModelProtocol
+
+
+ async def list_pipeline_templates(
+     *,
+     client: AsyncClientProtocol,
+     workspace: str,
+     limit: int = 100,
+     field: str = "created_at",
+     order: str = "DESC",
+     filter: str | None = None,
+ ) -> PipelineTemplateList | str:
+     """Retrieves a list of all available pipeline templates.
+
+     :param client: The async client for API requests.
+     :param workspace: The workspace to list templates from.
+     :param limit: Maximum number of templates to return (default: 100).
+     :param field: Field to sort by (default: "created_at").
+     :param order: Sort order, either "ASC" or "DESC" (default: "DESC").
+     :param filter: OData filter expression to filter templates by criteria.
+
+     :returns: List of pipeline templates or error message.
+     """
+     try:
+         return await client.pipeline_templates(workspace=workspace).list_templates(
+             limit=limit, field=field, order=order, filter=filter
+         )
+     except ResourceNotFoundError:
+         return f"There is no workspace named '{workspace}'. Did you mean to configure it?"
+     except UnexpectedAPIError as e:
+         return f"Failed to list pipeline templates: {e}"
+
+
+ async def get_pipeline_template(
+     *, client: AsyncClientProtocol, workspace: str, template_name: str
+ ) -> PipelineTemplate | str:
+     """Fetches detailed information for a specific pipeline template, identified by its `template_name`.
+
+     :param client: The async client for API requests.
+     :param workspace: The workspace to fetch the template from.
+     :param template_name: The name of the template to fetch.
+
+     :returns: Pipeline template details or error message.
+     """
+     try:
+         return await client.pipeline_templates(workspace=workspace).get_template(template_name=template_name)
+     except ResourceNotFoundError:
+         return f"There is no pipeline template named '{template_name}' in workspace '{workspace}'."
+     except UnexpectedAPIError as e:
+         return f"Failed to fetch pipeline template '{template_name}': {e}"
+
+
+ async def search_pipeline_templates(
+     *, client: AsyncClientProtocol, query: str, model: ModelProtocol, workspace: str, top_k: int = 10
+ ) -> PipelineTemplateSearchResults | str:
+     """Searches for pipeline templates based on name or description using semantic similarity.
+
+     :param client: The API client to use.
+     :param query: The search query.
+     :param model: The model to use for computing embeddings.
+     :param workspace: The workspace to search templates from.
+     :param top_k: Maximum number of results to return (default: 10).
+
+     :returns: Search results with similarity scores or error message.
+     """
+     try:
+         response = await client.pipeline_templates(workspace=workspace).list_templates(
+             filter="pipeline_type eq 'QUERY'"
+         )
+     except UnexpectedAPIError as e:
+         return f"Failed to retrieve pipeline templates: {e}"
+
+     if not response.data:
+         return PipelineTemplateSearchResults(results=[], query=query, total_found=0)
+
+     # Extract text for embedding from all templates
+     template_texts: list[tuple[str, str]] = [
+         (template.template_name, f"{template.template_name} {template.description}") for template in response.data
+     ]
+     template_names: list[str] = [t[0] for t in template_texts]
+
+     # Compute embeddings
+     query_embedding = model.encode(query)
+     template_embeddings = model.encode([text for _, text in template_texts])
+
+     query_embedding_reshaped = query_embedding.reshape(1, -1)
+
+     # Calculate the dot product between the query and all templates
+     # This gives us a similarity score for each template
+     similarities = np.dot(template_embeddings, query_embedding_reshaped.T).flatten()
+
+     # Create (template_name, similarity) pairs
+     template_similarities = list(zip(template_names, similarities, strict=False))
+
+     # Sort by similarity score in descending order
+     template_similarities.sort(key=lambda x: x[1], reverse=True)
+
+     top_templates = template_similarities[:top_k]
+     search_results = []
+     for template_name, sim in top_templates:
+         # Find the template object by name
+         template = next((t for t in response.data if t.template_name == template_name), None)
+         if template:
+             search_results.append(PipelineTemplateSearchResult(template=template, similarity_score=float(sim)))
+
+     return PipelineTemplateSearchResults(results=search_results, query=query, total_found=len(search_results))
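The ranking step in search_pipeline_templates is a plain dot product between the query embedding and each template embedding, followed by a descending sort. A small self-contained numpy illustration of just that step, with made-up 3-dimensional embeddings rather than real model output:

import numpy as np

# Hypothetical embeddings: one query vector and three template vectors.
query_embedding = np.array([0.1, 0.9, 0.2])
template_embeddings = np.array([
    [0.0, 1.0, 0.0],  # "chat-rag"
    [0.9, 0.1, 0.1],  # "doc-search"
    [0.2, 0.8, 0.3],  # "qa-basic"
])
template_names = ["chat-rag", "doc-search", "qa-basic"]

# Same operation as in search_pipeline_templates: dot product, then sort descending.
similarities = np.dot(template_embeddings, query_embedding.reshape(1, -1).T).flatten()
ranked = sorted(zip(template_names, similarities), key=lambda x: x[1], reverse=True)
print(ranked)  # highest-scoring templates first

Note that if the embedding model returns normalized vectors, this dot product equals cosine similarity; otherwise it is an unnormalized similarity score.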
deepset_mcp/tools/secrets.py
@@ -0,0 +1,45 @@
+ from deepset_mcp.api.exceptions import ResourceNotFoundError, UnexpectedAPIError
+ from deepset_mcp.api.protocols import AsyncClientProtocol
+ from deepset_mcp.api.secrets.models import Secret, SecretList
+
+
+ async def list_secrets(*, client: AsyncClientProtocol, limit: int = 10) -> SecretList | str:
+     """Lists all secrets available in the user's deepset organization.
+
+     Use this tool to retrieve a list of secrets with their names and IDs.
+     This is useful for getting an overview of all secrets before retrieving specific ones.
+
+     :param client: The deepset API client
+     :param limit: Maximum number of secrets to return (default: 10)
+
+     :returns: List of secrets or error message
+     """
+     try:
+         return await client.secrets().list(limit=limit)
+     except ResourceNotFoundError as e:
+         return f"Error: {str(e)}"
+     except UnexpectedAPIError as e:
+         return f"API Error: {str(e)}"
+     except Exception as e:
+         return f"Unexpected error: {str(e)}"
+
+
+ async def get_secret(*, client: AsyncClientProtocol, secret_id: str) -> Secret | str:
+     """Retrieves detailed information about a specific secret by its ID.
+
+     Use this tool to get information about a specific secret when you know its ID.
+     The secret value itself is not returned for security reasons, only metadata.
+
+     :param client: The deepset API client
+     :param secret_id: The unique identifier of the secret to retrieve
+
+     :returns: Secret information or error message
+     """
+     try:
+         return await client.secrets().get(secret_id=secret_id)
+     except ResourceNotFoundError as e:
+         return f"Error: {str(e)}"
+     except UnexpectedAPIError as e:
+         return f"API Error: {str(e)}"
+     except Exception as e:
+         return f"Unexpected error: {str(e)}"
deepset_mcp/tools/tokonomics/__init__.py
@@ -0,0 +1,73 @@
+ """
+ Tokonomics: Explorable and Referenceable Tools for LLM Agents.
+
+ =============================================================
+
+ A library that provides token-efficient object exploration and reference
+ passing capabilities for LLM agents.
+
+ Key Features:
+ - TTL-based object storage for temporary results
+ - Rich object exploration with multiple rendering modes
+ - Reference-based parameter passing (@obj_001.path.to.value)
+ - Type-safe decorators that preserve function signatures
+ - Configurable preview truncation and custom rendering callbacks
+
+ Usage:
+ ------
+
+ Basic explorable tool:
+
+ >>> from deepset_mcp.tools.tokonomics import explorable
+ >>>
+ >>> @explorable
+ ... def get_data():
+ ...     return {"users": [{"name": "Alice", "age": 30}]}
+ >>>
+ >>> result = get_data()
+ >>> print(result)  # Shows rich preview
+ >>> result.obj_id  # "obj_001"
+ >>> result.value   # Original data
+
+ Referenceable tool that accepts references:
+
+ >>> from deepset_mcp.tools.tokonomics import referenceable
+ >>>
+ >>> @referenceable
+ ... def process_users(users: list) -> str:
+ ...     return f"Processed {len(users)} users"
+ >>>
+ >>> # Use with direct data
+ >>> process_users([{"name": "Bob"}])
+ >>>
+ >>> # Use with reference
+ >>> process_users("@obj_001.users")
+
+ Exploration utilities:
+
+ >>> from deepset_mcp.tools.tokonomics import explore, search
+ >>>
+ >>> # Explore object structure
+ >>> explore("obj_001", mode="tree")
+ >>>
+ >>> # Search within objects
+ >>> search("obj_001", "Alice")
+ """
+
+ from .decorators import explorable, explorable_and_referenceable, referenceable
+ from .explorer import RichExplorer
+ from .object_store import Explorable, ObjectRef, ObjectStore
+
+ __all__ = [
+     # Core classes
+     "Explorable",
+     "ObjectRef",
+     "ObjectStore",
+     "RichExplorer",
+     # Decorators
+     "explorable",
+     "referenceable",
+     "explorable_and_referenceable",
+ ]
+
+ __version__ = "0.1.0"