universal-mcp 0.1.23rc2__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. universal_mcp/agentr/__init__.py +6 -0
  2. universal_mcp/agentr/agentr.py +30 -0
  3. universal_mcp/{utils/agentr.py → agentr/client.py} +22 -7
  4. universal_mcp/agentr/integration.py +104 -0
  5. universal_mcp/agentr/registry.py +91 -0
  6. universal_mcp/agentr/server.py +51 -0
  7. universal_mcp/agents/__init__.py +6 -0
  8. universal_mcp/agents/auto.py +576 -0
  9. universal_mcp/agents/base.py +88 -0
  10. universal_mcp/agents/cli.py +27 -0
  11. universal_mcp/agents/codeact/__init__.py +243 -0
  12. universal_mcp/agents/codeact/sandbox.py +27 -0
  13. universal_mcp/agents/codeact/test.py +15 -0
  14. universal_mcp/agents/codeact/utils.py +61 -0
  15. universal_mcp/agents/hil.py +104 -0
  16. universal_mcp/agents/llm.py +10 -0
  17. universal_mcp/agents/react.py +58 -0
  18. universal_mcp/agents/simple.py +40 -0
  19. universal_mcp/agents/utils.py +111 -0
  20. universal_mcp/analytics.py +44 -14
  21. universal_mcp/applications/__init__.py +42 -75
  22. universal_mcp/applications/application.py +187 -133
  23. universal_mcp/applications/sample/app.py +245 -0
  24. universal_mcp/cli.py +14 -231
  25. universal_mcp/client/oauth.py +122 -18
  26. universal_mcp/client/token_store.py +62 -3
  27. universal_mcp/client/{client.py → transport.py} +127 -48
  28. universal_mcp/config.py +189 -49
  29. universal_mcp/exceptions.py +54 -6
  30. universal_mcp/integrations/__init__.py +0 -18
  31. universal_mcp/integrations/integration.py +185 -168
  32. universal_mcp/servers/__init__.py +2 -14
  33. universal_mcp/servers/server.py +84 -258
  34. universal_mcp/stores/store.py +126 -93
  35. universal_mcp/tools/__init__.py +3 -0
  36. universal_mcp/tools/adapters.py +20 -11
  37. universal_mcp/tools/func_metadata.py +1 -1
  38. universal_mcp/tools/manager.py +38 -53
  39. universal_mcp/tools/registry.py +41 -0
  40. universal_mcp/tools/tools.py +24 -3
  41. universal_mcp/types.py +10 -0
  42. universal_mcp/utils/common.py +245 -0
  43. universal_mcp/utils/installation.py +3 -4
  44. universal_mcp/utils/openapi/api_generator.py +71 -17
  45. universal_mcp/utils/openapi/api_splitter.py +0 -1
  46. universal_mcp/utils/openapi/cli.py +669 -0
  47. universal_mcp/utils/openapi/filters.py +114 -0
  48. universal_mcp/utils/openapi/openapi.py +315 -23
  49. universal_mcp/utils/openapi/postprocessor.py +275 -0
  50. universal_mcp/utils/openapi/preprocessor.py +63 -8
  51. universal_mcp/utils/openapi/test_generator.py +287 -0
  52. universal_mcp/utils/prompts.py +634 -0
  53. universal_mcp/utils/singleton.py +4 -1
  54. universal_mcp/utils/testing.py +196 -8
  55. universal_mcp-0.1.24rc3.dist-info/METADATA +68 -0
  56. universal_mcp-0.1.24rc3.dist-info/RECORD +70 -0
  57. universal_mcp/applications/README.md +0 -122
  58. universal_mcp/client/__main__.py +0 -30
  59. universal_mcp/client/agent.py +0 -96
  60. universal_mcp/integrations/README.md +0 -25
  61. universal_mcp/servers/README.md +0 -79
  62. universal_mcp/stores/README.md +0 -74
  63. universal_mcp/tools/README.md +0 -86
  64. universal_mcp-0.1.23rc2.dist-info/METADATA +0 -283
  65. universal_mcp-0.1.23rc2.dist-info/RECORD +0 -51
  66. /universal_mcp/{utils → tools}/docstring_parser.py +0 -0
  67. {universal_mcp-0.1.23rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/WHEEL +0 -0
  68. {universal_mcp-0.1.23rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/entry_points.txt +0 -0
  69. {universal_mcp-0.1.23rc2.dist-info → universal_mcp-0.1.24rc3.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/universal_mcp/utils/openapi/postprocessor.py
@@ -0,0 +1,275 @@
+import ast
+import re
+
+import litellm
+
+
+def add_hint_tags_to_docstrings(input_path: str, output_path: str):
+    """
+    Reads a Python API client file, inspects each function, and adds appropriate tags to the docstring:
+    - 'readOnlyHint': Tool does not modify its environment (fetching, reading, etc.)
+    - 'destructiveHint': Tool may perform destructive updates
+    - 'openWorldHint': Tool interacts with external entities (3rd party APIs)
+
+    Functions can have multiple tags (e.g., 'readOnlyHint, openWorldHint').
+    Does not alter other tags in the docstring.
+    Writes the modified code to output_path.
+    """
+    with open(input_path, encoding="utf-8") as f:
+        source = f.read()
+    tree = ast.parse(source)
+
+    # Initialize counters
+    total_functions = 0
+    functions_with_http_methods = 0
+    functions_processed_by_llm = 0
+    functions_tagged = 0
+    llm_failures = 0
+
+    class DocstringTagAdder(ast.NodeTransformer):
+        def _find_http_method(self, node):
+            """Find the HTTP method used in the function body."""
+            http_methods = []
+
+            def visit_node(n):
+                if (
+                    isinstance(n, ast.Call)
+                    and isinstance(n.func, ast.Attribute)
+                    and isinstance(n.func.value, ast.Name)
+                    and n.func.value.id == "self"
+                    and n.func.attr in ["_get", "_post", "_put", "_patch", "_delete"]
+                ):
+                    http_methods.append(n.func.attr[1:])
+                for child in ast.iter_child_nodes(n):
+                    visit_node(child)
+
+            visit_node(node)
+            return http_methods[0] if http_methods else None
+
+        def visit_FunctionDef(self, node):
+            nonlocal \
+                total_functions, \
+                functions_with_http_methods, \
+                functions_processed_by_llm, \
+                functions_tagged, \
+                llm_failures
+
+            total_functions += 1
+            print(f"\n[{total_functions}] Processing function: {node.name}")
+
+            http_method = self._find_http_method(node)
+            tag_to_add = None
+
+            if http_method:
+                functions_with_http_methods += 1
+                print(f" └─ Found HTTP method: {http_method.upper()}")
+
+                # Use simple agent to decide tag
+                print(" └─ Calling LLM to determine tag...")
+                tag_to_add = self._get_tag_suggestion_from_agent(node, http_method)
+
+                if tag_to_add:
+                    functions_processed_by_llm += 1
+                    print(f" └─ LLM suggested tags: {tag_to_add}")
+                else:
+                    print(" └─ LLM failed or returned invalid response")
+            else:
+                print(" └─ No HTTP method found - skipping")
+
+            if tag_to_add:
+                docstring = ast.get_docstring(node, clean=False)
+                if docstring is not None:
+                    # Look for Tags: section in the docstring
+                    tags_match = re.search(r"Tags:\s*(.+)", docstring, re.DOTALL)
+                    if tags_match:
+                        tags_line = tags_match.group(1).strip()
+                        # Parse existing tags
+                        existing_tags = [tag.strip() for tag in tags_line.split(",")]
+
+                        # Parse new tags to add
+                        new_tags_to_add = [tag.strip() for tag in tag_to_add.split(",")]
+                        tags_to_add = [tag for tag in new_tags_to_add if tag not in existing_tags]
+
+                        if tags_to_add:
+                            # Add the new tags to the existing list
+                            new_tags_line = tags_line.rstrip() + f", {', '.join(tags_to_add)}"
+                            new_docstring = re.sub(r"(Tags:\s*)(.+)", r"\1" + new_tags_line, docstring, flags=re.DOTALL)
+                            # Replace docstring
+                            if isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Constant):
+                                node.body[0].value.value = new_docstring
+                                functions_tagged += 1
+                                print(f" └─ ✅ Tags '{', '.join(tags_to_add)}' added successfully")
+                        else:
+                            print(f" └─ ⚠️ All tags '{tag_to_add}' already exist - skipping")
+                    else:
+                        print(" └─ ⚠️ No 'Tags:' section found in docstring - skipping")
+                else:
+                    print(" └─ ⚠️ No docstring found - skipping")
+            return node
+
+        def _get_tag_suggestion_from_agent(self, node, http_method):
+            """Use a simple agent to decide which tag to add based on function context."""
+
+            function_name = node.name
+            docstring = ast.get_docstring(node, clean=False) or ""
+            parameters = [arg.arg for arg in node.args.args if arg.arg != "self"]
+
+            system_prompt = """You are an expert at analyzing API functions and determining their characteristics.
+
+Your task is to analyze each function and decide which tags to add:
+- 'readOnlyHint': Tool does not modify its environment (fetching, reading, etc.)
+- 'destructiveHint': Tool may perform destructive updates
+- 'openWorldHint': Tool interacts with external entities (3rd party APIs)
+
+IMPORTANT:
+- HTTP method alone is NOT enough to determine the tags. You must analyze the function's actual purpose.
+- Since these are all API client functions, MOST functions should have 'openWorldHint' (they interact with external APIs).
+- Only functions that are purely local operations (like reading local files) should NOT have 'openWorldHint'.
+
+Functions can have multiple tags. For example:
+- A function that reads from Gmail API: 'readOnlyHint, openWorldHint'
+- A function that deletes from GitHub API: 'destructiveHint, openWorldHint'
+- A function that only reads local files: 'readOnlyHint' (no openWorldHint)
+
+Respond with comma-separated tags (e.g., 'readOnlyHint, openWorldHint') or 'none' if no tags apply."""
+
+            user_prompt = f"""Analyze this API function and decide which tags to add:
+
+Function Name: {function_name}
+HTTP Method: {http_method}
+Parameters: {", ".join(parameters)}
+Docstring: {docstring[:1000]}...
+
+Based on this information, which tags should this function get?
+
+Think through:
+1. What does this function actually do? (from name and docstring)
+2. Does it modify its environment or just read/fetch?
+3. Does it interact with external entities (3rd party APIs)?
+4. Could it be potentially destructive?
+
+GUIDELINES for readOnlyHint (does not modify environment):
+- Functions that only READ or FETCH data
+- Functions that VALIDATE or CHECK things without saving
+- Functions that EXPORT or DOWNLOAD data
+- Functions that perform HEALTH CHECKS or PING operations
+- Functions that REFRESH tokens or sessions
+- Functions that SEARCH or FILTER data
+- Functions that GET information without changing anything
+- Functions that LIST or RETRIEVE data
+
+GUIDELINES for destructiveHint (DESTROYS or DELETES things):
+- Functions that DELETE resources or data
+- Functions that REMOVE or ERASE things
+- Functions that DESTROY or TERMINATE resources
+- Functions that CANCEL or ABORT operations
+- Functions that REVOKE or INVALIDATE things
+
+IMPORTANT:
+- A function should NOT have both readOnlyHint and destructiveHint - they are mutually exclusive.
+- Creating, sending, or updating things is NOT destructive - only deleting/destroying is destructive.
+- Functions that CREATE, SEND, UPDATE, or MODIFY should NOT get destructiveHint.
+
+GUIDELINES for openWorldHint (interacts with external entities):
+- Functions that interact with 3rd party APIs (Gmail, Outlook, Reddit, GitHub, etc.)
+- Functions that make external HTTP requests
+- Functions that connect to external services
+- Functions that interact with cloud services
+- Functions that communicate with external databases
+- Functions that call external webhooks
+- MOST API client functions will have this tag since they interact with external APIs
+
+NOT openWorldHint (local operations):
+- Functions that only read local files
+- Functions that process local data
+- Functions that work with local databases
+- Functions that manipulate local variables
+- Functions that only work with local system resources
+
+Examples:
+- Gmail API read function: 'readOnlyHint, openWorldHint'
+- Gmail API send email: 'openWorldHint' (not destructive, just sending)
+- Gmail API create draft: 'openWorldHint' (not destructive, just creating)
+- GitHub API delete repository: 'destructiveHint, openWorldHint'
+- Local file reader: 'readOnlyHint' (no openWorldHint)
+- Local data processor: 'none' (no tags)
+
+Focus on the FUNCTION'S PURPOSE, not just the HTTP method.
+
+Your answer (comma-separated tags or 'none'):"""
+
+            try:
+                response = litellm.completion(
+                    model="perplexity/sonar-pro",
+                    messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
+                    temperature=0.1,
+                    max_tokens=50,
+                )
+
+                suggested_tags = response.choices[0].message.content.strip().lower()
+
+                if suggested_tags == "none":
+                    return None
+
+                # Parse comma-separated tags
+                tag_list = [tag.strip() for tag in suggested_tags.split(",")]
+                valid_tags = []
+
+                for tag in tag_list:
+                    if tag == "readonlyhint":
+                        valid_tags.append("readOnlyHint")
+                    elif tag == "destructivehint":
+                        valid_tags.append("destructiveHint")
+                    elif tag == "openworldhint":
+                        valid_tags.append("openWorldHint")
+
+                if valid_tags:
+                    return ", ".join(valid_tags)
+                else:
+                    # If LLM gives unexpected response, return None (no tag added)
+                    return None
+
+            except Exception as e:
+                nonlocal llm_failures
+                llm_failures += 1
+                print(f" └─ ❌ LLM failed for function {function_name}: {e}")
+                # If LLM fails, return None (no tag added)
+                return None
+
+    new_tree = DocstringTagAdder().visit(tree)
+    ast.fix_missing_locations(new_tree)
+    new_source = ast.unparse(new_tree)
+
+    # Print summary statistics
+    print(f"\n{'=' * 60}")
+    print("📊 PROCESSING SUMMARY")
+    print(f"{'=' * 60}")
+    print(f"Total functions processed: {total_functions}")
+    print(f"Functions with HTTP methods: {functions_with_http_methods}")
+    print(f"Functions processed by LLM: {functions_processed_by_llm}")
+    print(f"Functions successfully tagged: {functions_tagged}")
+    print(f"LLM failures: {llm_failures}")
+    if functions_with_http_methods > 0:
+        print(
+            f"LLM success rate: {(functions_processed_by_llm / functions_with_http_methods * 100):.1f}% of HTTP functions"
+        )
+    print(f"{'=' * 60}")
+
+    # Format with Black in memory
+    try:
+        import black
+
+        formatted_content = black.format_file_contents(new_source, fast=False, mode=black.FileMode())
+        with open(output_path, "w", encoding="utf-8") as f:
+            f.write(formatted_content)
+        print(f"Black formatting applied successfully to: {output_path}")
+    except ImportError:
+        print(f"Black not installed. Skipping formatting for: {output_path}")
+        # Write unformatted version if Black is not available
+        with open(output_path, "w", encoding="utf-8") as f:
+            f.write(new_source)
+    except Exception as e:
+        print(f"Black formatting failed for {output_path}: {e}")
+        # Write unformatted version if Black formatting fails
+        with open(output_path, "w", encoding="utf-8") as f:
+            f.write(new_source)
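
For context, the new postprocessor exposes a single entry point that reads a generated client module, asks the LLM for hint tags on each HTTP-backed method, and writes the annotated source back out. A minimal usage sketch follows; the import path mirrors the new file location, the file names are placeholders, and litellm must be configured with credentials for the perplexity/sonar-pro model used above:

```python
# Hedged usage sketch, not part of the diff: file names below are placeholders,
# and litellm needs working credentials for the "perplexity/sonar-pro" model.
from universal_mcp.utils.openapi.postprocessor import add_hint_tags_to_docstrings

add_hint_tags_to_docstrings(
    input_path="app.py",          # generated API client whose docstrings get hint tags
    output_path="app_tagged.py",  # annotated copy (Black-formatted if Black is installed)
)
```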
--- a/universal_mcp/utils/openapi/preprocessor.py
+++ b/universal_mcp/utils/openapi/preprocessor.py
@@ -12,6 +12,8 @@ import typer
 import yaml
 from rich.console import Console
 
+from .filters import load_filter_config, should_process_operation
+
 console = Console()
 
 
@@ -225,7 +227,7 @@ def generate_description_llm(
     if len(param_context_str) > 1000:  # Limit context size
         param_context_str = param_context_str[:1000] + "..."
 
-    current_description = context.get("current_description", None)
+    current_description = context.get("current_description")
     if current_description and isinstance(current_description, str) and current_description.strip():
         user_prompt = f"""The current description for the API parameter named '{param_name}' located '{param_in}' for the '{method.upper()}' operation at path '{path_key}' is:\n'{current_description.strip()}'\n\nTask: Rewrite and enrich this description so it is clear, self-contained, and makes sense to a user. If the description is cut off, incomplete, or awkward, make it complete and natural. Ensure it is concise and under {MAX_DESCRIPTION_LENGTH} characters. Do not include any links, HTML, markdown, or any notes or comments about the character limit. Respond ONLY with the improved single-line description."""
         fallback_text = (
@@ -451,11 +453,12 @@ def simplify_parameter_context(parameter: dict) -> dict:
     return simplified_context
 
 
-def scan_schema_for_status(schema_data: dict):
+def scan_schema_for_status(schema_data: dict, filter_config: dict[str, str | list[str]] | None = None):
     """
     Scans the schema to report the status of descriptions/summaries
     and identify critical issues like missing parameter 'name'/'in'.
     Does NOT modify the schema or call the LLM.
+    Respects filter configuration if provided.
     """
     logger.info("\n--- Scanning Schema for Status ---")
 
@@ -526,6 +529,12 @@ def scan_schema_for_status(schema_data: dict):
                 "trace",
             ]:
                 operation_location_base = f"paths.{path_key}.{method.lower()}"
+
+                # Apply filter configuration
+                if not should_process_operation(path_key, method, filter_config):
+                    logger.debug(f"Skipping operation '{method.upper()} {path_key}' due to filter configuration.")
+                    continue
+
                 if not isinstance(operation_value, dict):
                     logger.warning(f"Operation value for '{operation_location_base}' is not a dictionary. Skipping.")
                     continue
@@ -886,12 +895,20 @@ def process_operation(
 
 
 def process_paths(
-    paths: dict, llm_model: str, enhance_all: bool, summaries_only: bool = False, operation_ids_only: bool = False
+    paths: dict,
+    llm_model: str,
+    enhance_all: bool,
+    summaries_only: bool = False,
+    operation_ids_only: bool = False,
+    filter_config: dict[str, str | list[str]] | None = None,
 ):
     if not isinstance(paths, dict):
         logger.warning("'paths' field is not a dictionary. Skipping path processing.")
         return
 
+    processed_count = 0
+    skipped_count = 0
+
     for path_key, path_value in paths.items():
         if path_key.lower().startswith("x-"):
             logger.debug(f"Skipping processing of path extension '{path_key}'.")
@@ -909,9 +926,17 @@
                     "patch",
                     "trace",
                 ]:
+                    # Apply filter configuration
+                    if not should_process_operation(path_key, method, filter_config):
+                        logger.debug(f"Skipping operation '{method.upper()} {path_key}' due to filter configuration.")
+                        skipped_count += 1
+                        continue
+
+                    logger.info(f"Processing operation: {method.upper()} {path_key}")
                     process_operation(
                         operation_value, path_key, method, llm_model, enhance_all, summaries_only, operation_ids_only
                     )
+                    processed_count += 1
                 elif method.lower().startswith("x-"):
                     logger.debug(f"Skipping processing of method extension '{method.lower()}' in path '{path_key}'.")
                     continue
@@ -928,6 +953,11 @@
         elif path_value is not None:
             logger.warning(f"Path value for '{path_key}' is not a dictionary. Skipping processing.")
 
+    if filter_config is not None:
+        logger.info(
+            f"Selective processing complete: {processed_count} operations processed, {skipped_count} operations skipped."
+        )
+
 
 def process_info_section(schema_data: dict, llm_model: str, enhance_all: bool):  # New flag
     info = schema_data.get("info")
@@ -1036,7 +1066,12 @@ def regenerate_duplicate_operation_ids(schema_data: dict, llm_model: str):
 
 
 def preprocess_schema_with_llm(
-    schema_data: dict, llm_model: str, enhance_all: bool, summaries_only: bool = False, operation_ids_only: bool = False
+    schema_data: dict,
+    llm_model: str,
+    enhance_all: bool,
+    summaries_only: bool = False,
+    operation_ids_only: bool = False,
+    filter_config: dict[str, str | list[str]] | None = None,
 ):
     """
     Processes the schema to add/enhance descriptions/summaries using an LLM.
@@ -1045,8 +1080,12 @@ def preprocess_schema_with_llm(
     If operation_ids_only is True, only missing operationIds are generated (never overwritten).
     Assumes basic schema structure validation (info, title) has already passed.
     """
+    filter_info = ""
+    if filter_config is not None:
+        filter_info = f" | Selective processing: {len(filter_config)} path specifications"
+
     logger.info(
-        f"\n--- Starting LLM Generation (enhance_all={enhance_all}, summaries_only={summaries_only}, operation_ids_only={operation_ids_only}) ---"
+        f"\n--- Starting LLM Generation (enhance_all={enhance_all}, summaries_only={summaries_only}, operation_ids_only={operation_ids_only}){filter_info} ---"
     )
 
     # Only process info section if not operation_ids_only
@@ -1054,7 +1093,7 @@
         process_info_section(schema_data, llm_model, enhance_all)
 
     paths = schema_data.get("paths")
-    process_paths(paths, llm_model, enhance_all, summaries_only, operation_ids_only)
+    process_paths(paths, llm_model, enhance_all, summaries_only, operation_ids_only, filter_config)
     # After process_paths, regenerate_duplicate_operation_ids(schema_data, llm_model)
 
 
@@ -1066,10 +1105,24 @@ def run_preprocessing(
     output_path: Path | None = None,
     model: str = "perplexity/sonar",
    debug: bool = False,
+    filter_config_path: str | None = None,
 ):
     set_logging_level("DEBUG" if debug else "INFO")
     console.print("[bold blue]--- Starting OpenAPI Schema Preprocessor ---[/bold blue]")
 
+    # Load filter configuration if provided
+    filter_config = None
+    if filter_config_path:
+        try:
+            filter_config = load_filter_config(filter_config_path)
+            console.print("[bold cyan]Selective Processing Mode Enabled[/bold cyan]")
+            console.print(f"[cyan]Filter configuration loaded from: {filter_config_path}[/cyan]")
+            console.print(f"[cyan]Will process {len(filter_config)} path specifications[/cyan]")
+            console.print()
+        except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
+            console.print(f"[red]Error loading filter configuration: {e}[/red]")
+            raise typer.Exit(1) from e
+
     if schema_path is None:
         path_str = typer.prompt(
             "Please enter the path to the OpenAPI schema file (JSON or YAML)",
@@ -1090,7 +1143,7 @@
 
     # --- Step 2: Scan and Report Status ---
     try:
-        scan_report = scan_schema_for_status(schema_data)
+        scan_report = scan_schema_for_status(schema_data, filter_config)
         report_scan_results(scan_report)
     except Exception as e:
         console.print(f"[red]An unexpected error occurred during schema scanning: {e}[/red]")
@@ -1224,7 +1277,9 @@
         f"[blue]Starting LLM generation with Enhance All: {enhance_all}, Summaries Only: {summaries_only}, OperationIds Only: {operation_ids_only}[/blue]"
     )
     try:
-        preprocess_schema_with_llm(schema_data, model, enhance_all, summaries_only, operation_ids_only)
+        preprocess_schema_with_llm(
+            schema_data, model, enhance_all, summaries_only, operation_ids_only, filter_config
+        )
         console.print("[green]LLM generation complete.[/green]")
     except Exception as e:
         console.print(f"[red]Error during LLM generation: {e}[/red]")