universal-mcp 0.1.15rc7__py3-none-any.whl → 0.1.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/__init__.py +20 -27
- universal_mcp/applications/application.py +19 -55
- universal_mcp/cli.py +10 -29
- universal_mcp/config.py +16 -48
- universal_mcp/integrations/__init__.py +1 -3
- universal_mcp/logger.py +31 -29
- universal_mcp/servers/server.py +6 -18
- universal_mcp/tools/func_metadata.py +5 -19
- universal_mcp/tools/manager.py +5 -15
- universal_mcp/tools/tools.py +4 -11
- universal_mcp/utils/agentr.py +2 -6
- universal_mcp/utils/common.py +1 -1
- universal_mcp/utils/docstring_parser.py +4 -13
- universal_mcp/utils/installation.py +67 -184
- universal_mcp/utils/openapi/api_generator.py +1 -3
- universal_mcp/utils/openapi/docgen.py +17 -54
- universal_mcp/utils/openapi/openapi.py +62 -110
- universal_mcp/utils/openapi/preprocessor.py +60 -190
- universal_mcp/utils/openapi/readme.py +3 -9
- universal_mcp-0.1.16.dist-info/METADATA +282 -0
- universal_mcp-0.1.16.dist-info/RECORD +44 -0
- universal_mcp-0.1.15rc7.dist-info/METADATA +0 -247
- universal_mcp-0.1.15rc7.dist-info/RECORD +0 -44
- {universal_mcp-0.1.15rc7.dist-info → universal_mcp-0.1.16.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.15rc7.dist-info → universal_mcp-0.1.16.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.15rc7.dist-info → universal_mcp-0.1.16.dist-info}/licenses/LICENSE +0 -0
@@ -94,9 +94,7 @@ def read_schema_file(schema_path: str) -> dict:
     # Keep this function as is
     logger.info(f"Attempting to read schema file: {schema_path}")
     if not os.path.exists(schema_path):
-        logger.critical(
-            f"Schema file not found at: {schema_path}"
-        )  # Use critical for pre-processing essential step
+        logger.critical(f"Schema file not found at: {schema_path}")  # Use critical for pre-processing essential step
         raise FileNotFoundError(f"Schema file not found at: {schema_path}")

     try:
@@ -132,9 +130,7 @@ def read_schema_file(schema_path: str) -> dict:
         logger.critical(f"Error reading schema file {schema_path}: {e}")
         raise
     except Exception as e:
-        logger.critical(
-            f"An unexpected error occurred while reading {schema_path}: {e}"
-        )
+        logger.critical(f"An unexpected error occurred while reading {schema_path}: {e}")
         traceback.print_exc(file=sys.stderr)
         raise

@@ -154,9 +150,7 @@ def write_schema_file(schema_data: dict, output_path: str):

             if file_extension == ".json":
                 json.dump(schema_data, f, indent=2, ensure_ascii=False)
-                logger.info(
-                    f"Successfully wrote processed schema as JSON to {output_path}"
-                )
+                logger.info(f"Successfully wrote processed schema as JSON to {output_path}")
             elif file_extension in [".yaml", ".yml"]:
                 yaml.dump(
                     schema_data,
@@ -165,24 +159,16 @@ def write_schema_file(schema_data: dict, output_path: str):
                     allow_unicode=True,
                     sort_keys=False,
                 )
-                logger.info(
-                    f"Successfully wrote processed schema as YAML to {output_path}"
-                )
+                logger.info(f"Successfully wrote processed schema as YAML to {output_path}")
             else:
-                logger.error(
-                    f"Unsupported output file extension '{file_extension}' for writing."
-                )
-                raise ValueError(
-                    f"Unsupported output file extension '{file_extension}'. Use .json or .yaml/.yml."
-                )
+                logger.error(f"Unsupported output file extension '{file_extension}' for writing.")
+                raise ValueError(f"Unsupported output file extension '{file_extension}'. Use .json or .yaml/.yml.")

     except OSError as e:
         logger.critical(f"Error writing schema file {output_path}: {e}")
         raise
     except Exception as e:
-        logger.critical(
-            f"An unexpected error occurred while writing {output_path}: {e}"
-        )
+        logger.critical(f"An unexpected error occurred while writing {output_path}: {e}")
         traceback.print_exc(file=sys.stderr)
         raise

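The two write_schema_file hunks above only re-wrap logging and error calls onto single lines; the underlying behavior, dispatching on the output extension between `json.dump` and `yaml.dump` and rejecting anything else, is unchanged. A minimal standalone sketch of that dispatch pattern (the function name and signature here are assumed for illustration, not the package's exact implementation):

```python
import json
import os

import yaml  # PyYAML


def write_schema(schema_data: dict, output_path: str) -> None:
    """Write a schema dict as JSON or YAML, chosen by the output file extension."""
    _, file_extension = os.path.splitext(output_path)
    file_extension = file_extension.lower()

    with open(output_path, "w", encoding="utf-8") as f:
        if file_extension == ".json":
            # Same options as in the diff: pretty-printed, unicode kept as-is.
            json.dump(schema_data, f, indent=2, ensure_ascii=False)
        elif file_extension in (".yaml", ".yml"):
            # sort_keys=False preserves the original key order of the schema.
            yaml.dump(schema_data, f, allow_unicode=True, sort_keys=False)
        else:
            raise ValueError(
                f"Unsupported output file extension '{file_extension}'. Use .json or .yaml/.yml."
            )
```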
@@ -251,15 +237,11 @@ def generate_description_llm(
         fallback_text = f"[LLM could not generate description for API '{api_title}']"  # More specific fallback

     else:
-        logger.error(
-            f"Invalid description_type '{description_type}' passed to generate_description_llm."
-        )
+        logger.error(f"Invalid description_type '{description_type}' passed to generate_description_llm.")
         return "[Invalid description type specified]"

     if not user_prompt:
-        logger.error(
-            f"User prompt was not generated for description_type '{description_type}'."
-        )
+        logger.error(f"User prompt was not generated for description_type '{description_type}'.")
         return fallback_text

     messages = [
@@ -305,12 +287,7 @@ def generate_description_llm(
             # f"{COLORS['YELLOW']}--------------------------------------------{COLORS['ENDC']}\n"
             # )

-            if (
-                response
-                and response.choices
-                and response.choices[0]
-                and response.choices[0].message
-            ):
+            if response and response.choices and response.choices[0] and response.choices[0].message:
                 response_text = response.choices[0].message.content.strip()

                 # Remove potential quotes around the response
@@ -360,9 +337,7 @@ def generate_description_llm(
                 logger.info(f"Retrying in {retry_delay} seconds...")
                 time.sleep(retry_delay)
             else:
-                logger.error(
-                    f"Max retries ({max_retries}) reached for type '{description_type}'."
-                )
+                logger.error(f"Max retries ({max_retries}) reached for type '{description_type}'.")
                 break  # Exit retry loop

     # Restore original logging level
@@ -413,21 +388,15 @@ def simplify_operation_context(operation_value: dict) -> dict:
        simplified_request_body = {}
        if "required" in original_request_body:
            simplified_request_body["required"] = original_request_body["required"]
-        if "content" in original_request_body and isinstance(
-            original_request_body["content"], dict
-        ):
-            simplified_request_body["content_types"] = list(
-                original_request_body["content"].keys()
-            )
+        if "content" in original_request_body and isinstance(original_request_body["content"], dict):
+            simplified_request_body["content_types"] = list(original_request_body["content"].keys())
        if simplified_request_body:
            simplified_context["requestBody"] = simplified_request_body

    # Include security if present (simplified)
    original_security = operation_value.get("security")
    if isinstance(original_security, list) and original_security:
-        simplified_context["security"] = (
-            original_security  # List of security requirement objects (usually small)
-        )
+        simplified_context["security"] = original_security  # List of security requirement objects (usually small)

    return simplified_context

@@ -483,9 +452,7 @@ def scan_schema_for_status(schema_data: dict):

    info_title = info.get("title")
    if not isinstance(info_title, str) or not info_title.strip():
-        error_msg = (
-            f"Critical: Required field '{info_location}.title' is missing or empty."
-        )
+        error_msg = f"Critical: Required field '{info_location}.title' is missing or empty."
        logger.critical(error_msg)
        scan_report["critical_errors"].append(error_msg)
        # Cannot proceed meaningfully without title
@@ -503,9 +470,7 @@ def scan_schema_for_status(schema_data: dict):
    # --- Check Paths ---
    paths = schema_data.get("paths")
    if not isinstance(paths, dict):
-        if (
-            paths is not None
-        ):  # Allow None if schema is empty, but warn if it's wrong type
+        if paths is not None:  # Allow None if schema is empty, but warn if it's wrong type
            logger.warning("'paths' field is not a dictionary. Skipping path scanning.")
        else:
            logger.info("'paths' field is missing or null. No operations to scan.")
@@ -517,9 +482,7 @@ def scan_schema_for_status(schema_data: dict):
                continue

            if not isinstance(path_value, dict):
-                logger.warning(
-                    f"Path value for '{path_key}' is not a dictionary. Skipping scanning for this path."
-                )
+                logger.warning(f"Path value for '{path_key}' is not a dictionary. Skipping scanning for this path.")
                continue

            for method, operation_value in path_value.items():
@@ -535,9 +498,7 @@ def scan_schema_for_status(schema_data: dict):
                ]:
                    operation_location_base = f"paths.{path_key}.{method.lower()}"
                    if not isinstance(operation_value, dict):
-                        logger.warning(
-                            f"Operation value for '{operation_location_base}' is not a dictionary. Skipping."
-                        )
+                        logger.warning(f"Operation value for '{operation_location_base}' is not a dictionary. Skipping.")
                        continue

                    # Check Operation Summary
@@ -569,30 +530,20 @@ def scan_schema_for_status(schema_data: dict):
                        param_name = parameter.get("name")
                        param_in = parameter.get("in")
                        param_location_id = (
-                            param_name
-                            if isinstance(param_name, str) and param_name.strip()
-                            else f"index {i}"
-                        )
-                        param_location_base = (
-                            f"{operation_location_base}.parameters[{param_location_id}]"
+                            param_name if isinstance(param_name, str) and param_name.strip() else f"index {i}"
                        )
+                        param_location_base = f"{operation_location_base}.parameters[{param_location_id}]"

                        # Check Parameter 'name' and 'in'
                        if not isinstance(param_name, str) or not param_name.strip():
                            error_msg = f"Missing/empty 'name' field for parameter at {param_location_base}. Cannot generate description."
-                            logger.warning(
-                                error_msg
-                            )  # Use warning as it might be fixable manually
-                            scan_report["parameters_missing_name"].append(
-                                param_location_base
-                            )
+                            logger.warning(error_msg)  # Use warning as it might be fixable manually
+                            scan_report["parameters_missing_name"].append(param_location_base)

                        if not isinstance(param_in, str) or not param_in.strip():
                            error_msg = f"Missing/empty 'in' field for parameter '{param_name}' at {param_location_base}. Cannot generate description."
                            logger.warning(error_msg)  # Use warning
-                            scan_report["parameters_missing_in"].append(
-                                param_location_base
-                            )
+                            scan_report["parameters_missing_in"].append(param_location_base)

                        # Check Parameter Description (only if name/in are present for meaningful description)
                        if (
@@ -602,14 +553,9 @@ def scan_schema_for_status(schema_data: dict):
                            and param_in.strip()
                        ):
                            param_description = parameter.get("description")
-                            if (
-                                isinstance(param_description, str)
-                                and param_description.strip()
-                            ):
+                            if isinstance(param_description, str) and param_description.strip():
                                if is_fallback_text(param_description):
-                                    scan_report["parameter_description"][
-                                        "fallback"
-                                    ] += 1
+                                    scan_report["parameter_description"]["fallback"] += 1
                                else:
                                    scan_report["parameter_description"]["present"] += 1
                            else:
@@ -625,23 +571,15 @@ def scan_schema_for_status(schema_data: dict):
                            )

                elif method.lower().startswith("x-"):
-                    logger.debug(
-                        f"Skipping scanning of method extension '{method.lower()}' in path '{path_key}'."
-                    )
+                    logger.debug(f"Skipping scanning of method extension '{method.lower()}' in path '{path_key}'.")
                    continue
                elif method.lower() == "parameters":  # Path level parameters
-                    logger.debug(
-                        f"Skipping scanning of path-level parameters in '{path_key}'."
-                    )
+                    logger.debug(f"Skipping scanning of path-level parameters in '{path_key}'.")
                    continue
                elif operation_value is not None:
-                    logger.warning(
-                        f"Unknown method '{method}' found in path '{path_key}'. Skipping scanning."
-                    )
+                    logger.warning(f"Unknown method '{method}' found in path '{path_key}'. Skipping scanning.")
                elif operation_value is None:
-                    logger.debug(
-                        f"Operation value for method '{method}' in path '{path_key}' is null. Skipping scanning."
-                    )
+                    logger.debug(f"Operation value for method '{method}' in path '{path_key}' is null. Skipping scanning.")

    logger.info("--- Scan Complete ---")
    return scan_report
@@ -649,11 +587,7 @@ def scan_schema_for_status(schema_data: dict):

 def report_scan_results(scan_report: dict):
     """Prints a formatted summary of the scan results."""
-    console = (
-        logging.getLogger().handlers[0].console
-        if hasattr(logging.getLogger().handlers[0], "console")
-        else None
-    )
+    console = logging.getLogger().handlers[0].console if hasattr(logging.getLogger().handlers[0], "console") else None
     if console is None:  # Fallback if rich console isn't attached to logger
         from rich.console import Console

@@ -665,9 +599,7 @@ def report_scan_results(scan_report: dict):
        console.print("[bold red]CRITICAL ERRORS FOUND:[/bold red]")
        for error in scan_report["critical_errors"]:
            console.print(f" [red]❌[/red] {error}")
-        console.print(
-            "[bold red]Critical errors prevent automatic generation. Please fix these manually.[/bold red]"
-        )
+        console.print("[bold red]Critical errors prevent automatic generation. Please fix these manually.[/bold red]")
        return  # Stop here if critical errors exist

    console.print("[bold yellow]Description/Summary Status:[/bold yellow]")
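The `report_scan_results` hunks reuse a rich console already attached to the root logger's first handler and only fall back to a fresh `Console()` when none is found. A small sketch of that fallback, wrapped in a hypothetical `get_console` helper (a `rich.logging.RichHandler`, for example, exposes the console it writes to as `.console`):

```python
import logging

from rich.console import Console


def get_console() -> Console:
    # Reuse the console attached to the first logging handler (e.g. a RichHandler);
    # otherwise create a standalone one, mirroring the fallback in the diff above.
    handlers = logging.getLogger().handlers
    if handlers and hasattr(handlers[0], "console"):
        return handlers[0].console
    return Console()
```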
@@ -694,9 +626,7 @@ def report_scan_results(scan_report: dict):
    missing_in = scan_report.get("parameters_missing_in", [])

    if missing_name or missing_in:
-        console.print(
-            "\n[bold red]Parameter Issues Preventing LLM Generation:[/bold red]"
-        )
+        console.print("\n[bold red]Parameter Issues Preventing LLM Generation:[/bold red]")
        console.print(
            "[yellow]Parameters below cannot have descriptions generated by LLM until 'name' and 'in' fields are fixed manually.[/yellow]"
        )
@@ -723,9 +653,7 @@ def report_scan_results(scan_report: dict):
            f"\n[bold]Total items missing or needing enhancement:[/bold] [orange1]{total_missing_or_fallback}[/orange1]"
        )
    else:
-        console.print(
-            "\n[bold green]Scan found no missing or fallback descriptions/summaries.[/bold green]"
-        )
+        console.print("\n[bold green]Scan found no missing or fallback descriptions/summaries.[/bold green]")

    console.print("[bold blue]-------------------------[/bold blue]")

@@ -739,9 +667,7 @@ def process_parameter(
     enhance_all: bool,  # New flag
 ):
     if not isinstance(parameter, dict):
-        logger.warning(
-            f"Invalid parameter object found in {operation_location_base}. Expected dictionary."
-        )
+        logger.warning(f"Invalid parameter object found in {operation_location_base}. Expected dictionary.")
         return

     if "$ref" in parameter:
@@ -762,9 +688,7 @@ def process_parameter(
    elif isinstance(param_in, str) and param_in.strip():
        param_location_id = f"{param_in.strip()}:[name missing]"

-    parameter_location_base = (
-        f"{operation_location_base}.parameters[{param_location_id}]"
-    )
+    parameter_location_base = f"{operation_location_base}.parameters[{param_location_id}]"

    # Crucial check: Cannot generate description without name/in
    if (
@@ -784,15 +708,11 @@ def process_parameter(
        enhance_all  # Generate if enhancing all
        or not isinstance(param_description, str)  # Generate if missing
        or not param_description.strip()  # Generate if empty
-        or is_fallback_text(
-            param_description
-        )  # Generate if it's previous fallback text
+        or is_fallback_text(param_description)  # Generate if it's previous fallback text
    )

    if needs_generation:
-        logger.info(
-            f"Generating description for parameter '{param_name}' at {parameter_location_base}."
-        )
+        logger.info(f"Generating description for parameter '{param_name}' at {parameter_location_base}.")

        simplified_context = simplify_parameter_context(parameter)

@@ -808,9 +728,7 @@ def process_parameter(
            },
        )
        parameter["description"] = generated_description
-        logger.debug(
-            f"Inserted description for parameter '{param_name}' at {parameter_location_base}."
-        )
+        logger.debug(f"Inserted description for parameter '{param_name}' at {parameter_location_base}.")
    else:
        logger.debug(
            f"Existing 'description' found for parameter '{param_name}' at {parameter_location_base}. Skipping generation."
@@ -818,16 +736,10 @@

    # --- Remove URLs from the parameter description ---
    current_description = parameter.get("description", "")
-    if (
-        isinstance(current_description, str)
-        and current_description
-        and not is_fallback_text(current_description)
-    ):
+    if isinstance(current_description, str) and current_description and not is_fallback_text(current_description):
        url_pattern = r"https?://[\S]+"
        modified_description = re.sub(url_pattern, "", current_description).strip()
-        modified_description = re.sub(
-            r"\s{2,}", " ", modified_description
-        ).strip()  # Collapse multiple spaces
+        modified_description = re.sub(r"\s{2,}", " ", modified_description).strip()  # Collapse multiple spaces

        if modified_description != current_description:
            parameter["description"] = modified_description
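The collapsed lines above are the description cleanup step: one `re.sub` strips URLs out of a parameter description and a second collapses the whitespace left behind. A self-contained illustration of those two calls (the sample description is invented for the example):

```python
import re

description = "The page token. See https://example.com/docs/pagination for details."

url_pattern = r"https?://[\S]+"
cleaned = re.sub(url_pattern, "", description).strip()
cleaned = re.sub(r"\s{2,}", " ", cleaned).strip()  # collapse multiple spaces

print(cleaned)  # -> "The page token. See for details."
```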
@@ -856,9 +768,7 @@ def process_operation(
    operation_location_base = f"paths.{path_key}.{method.lower()}"

    if not isinstance(operation_value, dict):
-        logger.warning(
-            f"Operation value for '{operation_location_base}' is not a dictionary. Skipping processing."
-        )
+        logger.warning(f"Operation value for '{operation_location_base}' is not a dictionary. Skipping processing.")
        return

    if method.lower().startswith("x-"):
@@ -892,9 +802,7 @@ def process_operation(
        operation_value["summary"] = generated_summary
        logger.debug(f"Inserted summary for '{operation_location_base}'.")
    else:
-        logger.debug(
-            f"Existing summary found for '{operation_location_base}'. Skipping generation."
-        )
+        logger.debug(f"Existing summary found for '{operation_location_base}'. Skipping generation.")

    # Validate final summary length (after potential generation)
    final_summary = operation_value.get("summary", "")
@@ -945,37 +853,25 @@ def process_paths(paths: dict, llm_model: str, enhance_all: bool):  # New flag
                    "patch",
                    "trace",
                ]:
-                    process_operation(
-                        operation_value, path_key, method, llm_model, enhance_all
-                    )  # Pass enhance_all
+                    process_operation(operation_value, path_key, method, llm_model, enhance_all)  # Pass enhance_all
                elif method.lower().startswith("x-"):
-                    logger.debug(
-                        f"Skipping processing of method extension '{method.lower()}' in path '{path_key}'."
-                    )
+                    logger.debug(f"Skipping processing of method extension '{method.lower()}' in path '{path_key}'.")
                    continue
                elif method.lower() == "parameters":
-                    logger.debug(
-                        f"Skipping processing of path-level parameters in '{path_key}'."
-                    )
+                    logger.debug(f"Skipping processing of path-level parameters in '{path_key}'.")
                    continue
                elif operation_value is not None:
-                    logger.warning(
-                        f"Unknown method '{method}' found in path '{path_key}'. Skipping processing."
-                    )
+                    logger.warning(f"Unknown method '{method}' found in path '{path_key}'. Skipping processing.")
                elif operation_value is None:
                    logger.debug(
                        f"Operation value for method '{method}' in path '{path_key}' is null. Skipping processing."
                    )

        elif path_value is not None:
-            logger.warning(
-                f"Path value for '{path_key}' is not a dictionary. Skipping processing."
-            )
+            logger.warning(f"Path value for '{path_key}' is not a dictionary. Skipping processing.")


-def process_info_section(
-    schema_data: dict, llm_model: str, enhance_all: bool
-):  # New flag
+def process_info_section(schema_data: dict, llm_model: str, enhance_all: bool):  # New flag
    info = schema_data.get("info")
    info_location = "info"

@@ -1020,9 +916,7 @@ def process_info_section(
        )


-def preprocess_schema_with_llm(
-    schema_data: dict, llm_model: str, enhance_all: bool
-):  # New flag
+def preprocess_schema_with_llm(schema_data: dict, llm_model: str, enhance_all: bool):  # New flag
    """
    Processes the schema to add/enhance descriptions/summaries using an LLM.
    Decides whether to generate based on the 'enhance_all' flag and existing content.
@@ -1062,9 +956,7 @@ def run_preprocessing(
    except (FileNotFoundError, yaml.YAMLError, json.JSONDecodeError, OSError) as e:
        raise typer.Exit(1) from e
    except Exception as e:
-        console.print(
-            f"[red]An unexpected error occurred while reading schema: {e}[/red]"
-        )
+        console.print(f"[red]An unexpected error occurred while reading schema: {e}[/red]")
        raise typer.Exit(1) from e

    # --- Step 2: Scan and Report Status ---
@@ -1072,9 +964,7 @@ def run_preprocessing(
        scan_report = scan_schema_for_status(schema_data)
        report_scan_results(scan_report)
    except Exception as e:
-        console.print(
-            f"[red]An unexpected error occurred during schema scanning: {e}[/red]"
-        )
+        console.print(f"[red]An unexpected error occurred during schema scanning: {e}[/red]")
        raise typer.Exit(1) from e

    # --- Step 3: Check for Critical Errors ---
@@ -1130,13 +1020,9 @@ def run_preprocessing(
            "[bold yellow]These parameters require manual fixing and cannot be generated by the LLM.[/bold yellow]"
        )
    else:
-        console.print(
-            "[bold green]Scan found no missing or fallback descriptions/summaries.[/bold green]"
-        )
+        console.print("[bold green]Scan found no missing or fallback descriptions/summaries.[/bold green]")

-    console.print(
-        "[bold blue]You can choose to enhance all existing descriptions or exit.[/bold blue]"
-    )
+    console.print("[bold blue]You can choose to enhance all existing descriptions or exit.[/bold blue]")

    prompt_options = [
        " [2] Generate/Enhance [bold]all[/bold] descriptions/summaries",
@@ -1149,14 +1035,10 @@ def run_preprocessing(
        console.print(option_text)

    while True:
-        choice = typer.prompt(
-            "Enter choice", default=default_choice, show_default=False, type=str
-        ).strip()
+        choice = typer.prompt("Enter choice", default=default_choice, show_default=False, type=str).strip()

        if choice not in valid_choices:
-            console.print(
-                "[red]Invalid choice. Please select from the options above.[/red]"
-            )
+            console.print("[red]Invalid choice. Please select from the options above.[/red]")
            continue  # Ask again

        if choice == "3":
@@ -1170,17 +1052,11 @@ def run_preprocessing(
            break  # Exit prompt loop

    perform_generation = False
-    if enhance_all:
-        perform_generation = True
-    elif (
-        choice == "1" and total_missing_or_fallback > 0
-    ):  # Chosen option 1 AND there was something missing
+    if enhance_all or choice == "1" and total_missing_or_fallback > 0:
        perform_generation = True

    if perform_generation:
-        console.print(
-            f"[blue]Starting LLM generation with Enhance All: {enhance_all}[/blue]"
-        )
+        console.print(f"[blue]Starting LLM generation with Enhance All: {enhance_all}[/blue]")
        try:
            preprocess_schema_with_llm(schema_data, model, enhance_all)
            console.print("[green]LLM generation complete.[/green]")
@@ -1199,9 +1075,7 @@ def run_preprocessing(
    if output_path is None:
        base, ext = os.path.splitext(schema_path)
        output_path = Path(f"{base}_processed{ext}")
-        console.print(
-            f"[blue]No output path specified. Defaulting to: {output_path}[/blue]"
-        )
+        console.print(f"[blue]No output path specified. Defaulting to: {output_path}[/blue]")
    else:
        console.print(f"[blue]Saving processed schema to: {output_path}[/blue]")

@@ -1211,13 +1085,9 @@ def run_preprocessing(
        # write_schema_file logs critical errors, just exit here
        raise typer.Exit(1) from e
    except Exception as e:
-        console.print(
-            f"[red]An unexpected error occurred while writing the schema: {e}[/red]"
-        )
+        console.print(f"[red]An unexpected error occurred while writing the schema: {e}[/red]")
        raise typer.Exit(1) from e

-    console.print(
-        "\n[bold green]--- Schema Processing and Saving Complete ---[/bold green]"
-    )
+    console.print("\n[bold green]--- Schema Processing and Saving Complete ---[/bold green]")
    console.print(f"Processed schema saved to: [blue]{output_path}[/blue]")
    console.print("[bold blue]Preprocessor finished successfully.[/bold blue]")
@@ -18,17 +18,13 @@ def _import_class(module_path: str, class_name: str):
        return getattr(module, class_name)
    except AttributeError as e:
        logger.error(f"Class '{class_name}' not found in module '{module_path}'")
-        raise ModuleNotFoundError(
-            f"Class '{class_name}' not found in module '{module_path}'"
-        ) from e
+        raise ModuleNotFoundError(f"Class '{class_name}' not found in module '{module_path}'") from e


 def _get_single_class_name(file_path: Path) -> str:
    with open(file_path) as file:
        tree = ast.parse(file.read(), filename=str(file_path))
-        class_defs = [
-            node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)
-        ]
+        class_defs = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
    if len(class_defs) == 1:
        logger.info(f"Auto-detected class: {class_defs[0]}")
        return class_defs[0]
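`_get_single_class_name` in the hunk above auto-detects the application class by parsing the module and collecting every `ast.ClassDef` node. A short self-contained example of that detection (the module source below is made up for illustration):

```python
import ast

source = """
class ExampleApp:
    def list_items(self):
        ...
"""

tree = ast.parse(source)
class_defs = [node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
print(class_defs)  # -> ['ExampleApp']
```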
@@ -56,9 +52,7 @@ def generate_readme(app: Path) -> Path:
        raise FileNotFoundError(f"Template directory not found: {template_dir}")

    try:
-        env = Environment(
-            loader=FileSystemLoader(template_dir), autoescape=select_autoescape()
-        )
+        env = Environment(loader=FileSystemLoader(template_dir), autoescape=select_autoescape())
        template = env.get_template("README.md.j2")
    except Exception as e:
        logger.error(f"Error loading template: {e}")