universal-mcp 0.1.8rc3__py3-none-any.whl → 0.1.9rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/__init__.py +7 -2
- universal_mcp/applications/ahrefs/README.md +76 -0
- universal_mcp/applications/ahrefs/__init__.py +0 -0
- universal_mcp/applications/ahrefs/app.py +2291 -0
- universal_mcp/applications/application.py +202 -87
- universal_mcp/applications/cal_com_v2/README.md +175 -0
- universal_mcp/applications/cal_com_v2/__init__.py +0 -0
- universal_mcp/applications/cal_com_v2/app.py +4735 -0
- universal_mcp/applications/calendly/app.py +0 -12
- universal_mcp/applications/clickup/README.md +160 -0
- universal_mcp/applications/clickup/__init__.py +0 -0
- universal_mcp/applications/clickup/app.py +4359 -0
- universal_mcp/applications/coda/app.py +0 -33
- universal_mcp/applications/e2b/app.py +2 -28
- universal_mcp/applications/figma/README.md +74 -0
- universal_mcp/applications/figma/__init__.py +0 -0
- universal_mcp/applications/figma/app.py +1261 -0
- universal_mcp/applications/firecrawl/app.py +2 -32
- universal_mcp/applications/google_calendar/app.py +0 -11
- universal_mcp/applications/google_docs/app.py +0 -18
- universal_mcp/applications/google_drive/app.py +0 -17
- universal_mcp/applications/google_mail/app.py +0 -16
- universal_mcp/applications/google_sheet/app.py +0 -18
- universal_mcp/applications/hashnode/app.py +77 -0
- universal_mcp/applications/hashnode/prompt.md +21 -0
- universal_mcp/applications/mailchimp/README.md +306 -0
- universal_mcp/applications/mailchimp/__init__.py +0 -0
- universal_mcp/applications/mailchimp/app.py +8883 -0
- universal_mcp/applications/markitdown/app.py +2 -2
- universal_mcp/applications/perplexity/app.py +0 -35
- universal_mcp/applications/replicate/README.md +53 -0
- universal_mcp/applications/replicate/app.py +969 -0
- universal_mcp/applications/resend/app.py +0 -18
- universal_mcp/applications/retell_ai/README.md +46 -0
- universal_mcp/applications/retell_ai/__init__.py +0 -0
- universal_mcp/applications/retell_ai/app.py +316 -0
- universal_mcp/applications/rocketlane/README.md +42 -0
- universal_mcp/applications/rocketlane/__init__.py +0 -0
- universal_mcp/applications/rocketlane/app.py +180 -0
- universal_mcp/applications/serpapi/app.py +2 -28
- universal_mcp/applications/spotify/README.md +116 -0
- universal_mcp/applications/spotify/__init__.py +0 -0
- universal_mcp/applications/spotify/app.py +2231 -0
- universal_mcp/applications/supabase/README.md +112 -0
- universal_mcp/applications/supabase/__init__.py +0 -0
- universal_mcp/applications/supabase/app.py +2644 -0
- universal_mcp/applications/tavily/app.py +0 -20
- universal_mcp/applications/wrike/app.py +0 -12
- universal_mcp/applications/youtube/app.py +0 -18
- universal_mcp/integrations/agentr.py +27 -4
- universal_mcp/integrations/integration.py +14 -6
- universal_mcp/servers/server.py +3 -6
- universal_mcp/stores/store.py +7 -0
- universal_mcp/tools/tools.py +2 -2
- universal_mcp/utils/docstring_parser.py +171 -104
- universal_mcp/utils/installation.py +199 -8
- {universal_mcp-0.1.8rc3.dist-info → universal_mcp-0.1.9rc1.dist-info}/METADATA +2 -1
- universal_mcp-0.1.9rc1.dist-info/RECORD +106 -0
- universal_mcp-0.1.8rc3.dist-info/RECORD +0 -75
- {universal_mcp-0.1.8rc3.dist-info → universal_mcp-0.1.9rc1.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.8rc3.dist-info → universal_mcp-0.1.9rc1.dist-info}/entry_points.txt +0 -0
universal_mcp/applications/tavily/app.py
CHANGED
@@ -7,25 +7,6 @@ class TavilyApp(APIApplication):
         name = "tavily"
         self.base_url = "https://api.tavily.com"
         super().__init__(name=name, integration=integration)
-        self.api_key = None
-
-    def _get_headers(self):
-        if not self.api_key:
-            credentials = self.integration.get_credentials()
-            if not credentials:
-                raise ValueError("No credentials found")
-            api_key = (
-                credentials.get("api_key")
-                or credentials.get("API_KEY")
-                or credentials.get("apiKey")
-            )
-            if not api_key:
-                raise ValueError("No API key found")
-            self.api_key = api_key
-        return {
-            "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json",
-        }

     def search(self, query: str) -> str:
         """
@@ -44,7 +25,6 @@ class TavilyApp(APIApplication):
         Tags:
             search, ai, web, query, important, api-client, text-processing
         """
-        self.validate()
         url = f"{self.base_url}/search"
         payload = {
             "query": query,
universal_mcp/applications/wrike/app.py
CHANGED
@@ -19,18 +19,6 @@ class WrikeApp(APIApplication):
         super().__init__(name="wrike", integration=integration, **kwargs)
         self.base_url = "https://www.wrike.com/api/v4"

-    def _get_headers(self):
-        if not self.integration:
-            raise ValueError("Integration not configured for WrikeApp")
-        credentials = self.integration.get_credentials()
-
-        if "headers" in credentials:
-            return credentials["headers"]
-        return {
-            "Authorization": f"Bearer {credentials['access_token']}",
-            "Content-Type": "application/json",
-        }
-
     def get_contacts(self, deleted=None, fields=None, metadata=None) -> Any:
         """
         Retrieves a list of contacts from the server, with optional filtering and field selection.
universal_mcp/applications/youtube/app.py
CHANGED
@@ -1,9 +1,6 @@
 from typing import Any

-from loguru import logger
-
 from universal_mcp.applications import APIApplication
-from universal_mcp.exceptions import NotAuthorizedError
 from universal_mcp.integrations import Integration


@@ -22,21 +19,6 @@ class YoutubeApp(APIApplication):
         super().__init__(name="youtube", integration=integration, **kwargs)
         self.base_url = "https://www.googleapis.com/youtube/v3"

-    def _get_headers(self):
-        if not self.integration:
-            raise ValueError("Integration not configured for YoutubeApp")
-        credentials = self.integration.get_credentials()
-        if not credentials:
-            logger.warning("No Google credentials found via integration.")
-            action = self.integration.authorize()
-            raise NotAuthorizedError(action)
-        if "headers" in credentials:
-            return credentials["headers"]
-        return {
-            "Authorization": f"Bearer {credentials['access_token']}",
-            "Content-Type": "application/json",
-        }
-
     def get_jobs_job_reports(
         self,
         jobId,
universal_mcp/integrations/agentr.py
CHANGED
@@ -29,7 +29,10 @@ class AgentRIntegration(Integration):
                 "API key for AgentR is missing. Please visit https://agentr.dev to create an API key, then set it as AGENTR_API_KEY environment variable."
             )
             raise ValueError("AgentR API key required - get one at https://agentr.dev")
-        self.base_url = os.getenv("AGENTR_BASE_URL", "https://api.agentr.dev")
+        self.base_url = os.getenv("AGENTR_BASE_URL", "https://api.agentr.dev").rstrip(
+            "/"
+        )
+        self._credentials = None

     def set_credentials(self, credentials: dict | None = None):
         """Set credentials for the integration.
@@ -43,9 +46,9 @@ class AgentRIntegration(Integration):
             str: Authorization URL from authorize() method
         """
         return self.authorize()
-        # raise NotImplementedError("AgentR Integration does not support setting credentials. Visit the authorize url to set credentials.")

-
+    @property
+    def credentials(self):
         """Get credentials for the integration from the AgentR API.

         Makes API request to retrieve stored credentials for this integration.
@@ -57,16 +60,36 @@ class AgentRIntegration(Integration):
            NotAuthorizedError: If credentials are not found (404 response)
            HTTPError: For other API errors
        """
+        if self._credentials is not None:
+            return self._credentials
        response = httpx.get(
            f"{self.base_url}/api/{self.name}/credentials/",
            headers={"accept": "application/json", "X-API-KEY": self.api_key},
        )
        if response.status_code == 404:
+            logger.warning(
+                f"No credentials found for {self.name}. Requesting authorization..."
+            )
            action = self.authorize()
            raise NotAuthorizedError(action)
        response.raise_for_status()
        data = response.json()
-
+        self._credentials = data
+        return self._credentials
+
+    def get_credentials(self):
+        """Get credentials for the integration from the AgentR API.
+
+        Makes API request to retrieve stored credentials for this integration.
+
+        Returns:
+            dict: Credentials data from API response
+
+        Raises:
+            NotAuthorizedError: If credentials are not found (404 response)
+            HTTPError: For other API errors
+        """
+        return self.credentials

     def authorize(self):
         """Get authorization URL for the integration.
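The practical effect of the new cached credentials property (with get_credentials() now a thin wrapper around it) is that the AgentR API is queried at most once per integration instance. A minimal sketch of the expected behavior, assuming a valid AGENTR_API_KEY in the environment; the constructor signature shown here is an assumption, only the caching logic comes from the diff above:

import os

from universal_mcp.integrations import AgentRIntegration

os.environ["AGENTR_API_KEY"] = "<your-agentr-key>"  # hypothetical key
integration = AgentRIntegration(name="github")      # constructor signature assumed

first = integration.credentials         # first access: GET {base_url}/api/github/credentials/
second = integration.get_credentials()  # served from the cached self._credentials, no second request
assert first is second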
universal_mcp/integrations/integration.py
CHANGED
@@ -91,9 +91,22 @@ class ApiKeyIntegration(Integration):
     """

     def __init__(self, name: str, store: BaseStore | None = None, **kwargs):
+        self.type = "api_key"
         sanitized_name = sanitize_api_key_name(name)
         super().__init__(sanitized_name, store, **kwargs)
         logger.info(f"Initializing API Key Integration: {name} with store: {store}")
+        self._api_key: str | None = None
+
+    @property
+    def api_key(self) -> str | None:
+        if not self._api_key:
+            try:
+                credentials = self.store.get(self.name)
+                self._api_key = credentials
+            except KeyNotFoundError as e:
+                action = self.authorize()
+                raise NotAuthorizedError(action) from e
+        return self._api_key

     def get_credentials(self) -> dict[str, str]:
         """Get API key credentials.
@@ -104,12 +117,7 @@ class ApiKeyIntegration(Integration):
         Raises:
             NotAuthorizedError: If API key is not found.
         """
-
-            credentials = self.store.get(self.name)
-        except KeyNotFoundError as e:
-            action = self.authorize()
-            raise NotAuthorizedError(action) from e
-        return {"api_key": credentials}
+        return {"api_key": self.api_key}

     def set_credentials(self, credentials: dict[str, Any]) -> None:
         """Set API key credentials.
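For context, a small sketch of how the lazy api_key property is expected to behave: the key is read from the configured store on first access, cached on the instance, and get_credentials() simply wraps it. The stand-in store and the import path are assumptions; the property and get_credentials() behavior is taken from the diff above.

from universal_mcp.integrations.integration import ApiKeyIntegration  # import path assumed


class SingleKeyStore:
    """Hypothetical stand-in for a BaseStore; returns one stored key for any name."""

    def __init__(self, value: str):
        self._value = value

    def get(self, name: str) -> str:
        return self._value


integration = ApiKeyIntegration("tavily", store=SingleKeyStore("tvly-example-key"))
print(integration.get_credentials())  # {'api_key': 'tvly-example-key'}, read from the store once and cached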
universal_mcp/servers/server.py
CHANGED
@@ -9,8 +9,7 @@ from loguru import logger
 from mcp.server.fastmcp import FastMCP
 from mcp.types import TextContent

-from universal_mcp.
-from universal_mcp.applications import Application, app_from_slug
+from universal_mcp.applications import BaseApplication, app_from_slug
 from universal_mcp.config import AppConfig, ServerConfig, StoreConfig
 from universal_mcp.integrations import AgentRIntegration, integration_from_config
 from universal_mcp.stores import BaseStore, store_from_config
@@ -130,7 +129,7 @@ class LocalServer(BaseServer):
         self.add_tool(store.delete)
         return store

-    def _load_app(self, app_config: AppConfig) ->
+    def _load_app(self, app_config: AppConfig) -> BaseApplication | None:
         """Load a single application with its integration.

         Args:
@@ -145,7 +144,6 @@ class LocalServer(BaseServer):
                 if app_config.integration
                 else None
             )
-            analytics.track_app_loaded(app_config.name)  # Track app loading
             return app_from_slug(app_config.name)(integration=integration)
         except Exception as e:
             logger.error(f"Failed to load app {app_config.name}: {e}", exc_info=True)
@@ -204,7 +202,7 @@ class AgentRServer(BaseServer):
             logger.error(f"Failed to fetch apps from AgentR: {e}", exc_info=True)
             raise

-    def _load_app(self, app_config: AppConfig) ->
+    def _load_app(self, app_config: AppConfig) -> BaseApplication | None:
         """Load a single application with AgentR integration.

         Args:
@@ -221,7 +219,6 @@ class AgentRServer(BaseServer):
                 if app_config.integration
                 else None
             )
-            analytics.track_app_loaded(app_config.name)  # Track app loading
             return app_from_slug(app_config.name)(integration=integration)
         except Exception as e:
             logger.error(f"Failed to load app {app_config.name}: {e}", exc_info=True)
universal_mcp/stores/store.py
CHANGED
universal_mcp/tools/tools.py
CHANGED
@@ -9,7 +9,7 @@ from loguru import logger
 from pydantic import BaseModel, Field

 from universal_mcp.analytics import analytics
-from universal_mcp.applications
+from universal_mcp.applications import BaseApplication
 from universal_mcp.exceptions import NotAuthorizedError, ToolError
 from universal_mcp.utils.docstring_parser import parse_docstring

@@ -253,7 +253,7 @@ class ToolManager:

     def register_tools_from_app(
         self,
-        app:
+        app: BaseApplication,
         tools: list[str] | None = None,
         tags: list[str] | None = None,
     ) -> None:
universal_mcp/utils/docstring_parser.py
CHANGED
@@ -4,16 +4,23 @@ from typing import Any

 def parse_docstring(docstring: str | None) -> dict[str, Any]:
     """
-    Parses a
+    Parses a Python docstring into structured components: summary, arguments,
+    return value, raised exceptions, and custom tags.
+
+    Supports multi-line descriptions for each section. Recognizes common section
+    headers like 'Args:', 'Returns:', 'Raises:', 'Tags:', etc. Also attempts
+    to parse key-value pairs within 'Args:' and 'Raises:' sections.

     Args:
-        docstring: The docstring to parse.
+        docstring: The docstring string to parse, or None.

     Returns:
-        A dictionary
-        '
-        '
-        '
+        A dictionary containing the parsed components:
+        - 'summary': The first paragraph of the docstring.
+        - 'args': A dictionary mapping argument names to their descriptions.
+        - 'returns': The description of the return value.
+        - 'raises': A dictionary mapping exception types to their descriptions.
+        - 'tags': A list of strings found in the 'Tags:' section.
     """
     if not docstring:
         return {"summary": "", "args": {}, "returns": "", "raises": {}, "tags": []}
@@ -22,123 +29,183 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
     if not lines:
         return {"summary": "", "args": {}, "returns": "", "raises": {}, "tags": []}

-    summary =
-
-
-
-
-
-
-
+    summary: str = ""
+    summary_lines: list[str] = []
+    args: dict[str, str] = {}
+    returns: str = ""
+    raises: dict[str, str] = {}
+    tags: list[str] = []
+
+    current_section: str | None = None
+    current_key: str | None = None
+    current_desc_lines: list[str] = []
+
+    # Pattern to capture item key and the start of its description
+    # Matches "key:" or "key (type):" followed by description
     key_pattern = re.compile(r"^\s*([\w\.]+)\s*(?:\(.*\))?:\s*(.*)")

     def finalize_current_item():
-        """
-        nonlocal returns, tags
+        """Processes the collected current_desc_lines and assigns them."""
+        nonlocal returns, tags, args, raises
         desc = " ".join(current_desc_lines).strip()
+
         if current_section == "args" and current_key:
-
+            if desc:
+                args[current_key] = desc
         elif current_section == "raises" and current_key:
-
+            if desc:
+                raises[current_key] = desc
         elif current_section == "returns":
             returns = desc
-
-
-            tags
+        elif current_section == "tags":
+            # Tags section content is treated as a comma-separated list
+            tags.clear()  # Clear existing tags in case of multiple tag sections (unlikely but safe)
+            tags.extend([tag.strip() for tag in desc.split(",") if tag.strip()])
+        # 'other' sections are ignored in the final output
+
+    def check_for_section_header(line: str) -> tuple[bool, str | None, str]:
+        """Checks if a line is a recognized section header."""
+        stripped_lower = line.strip().lower()
+        section_type: str | None = None
+        header_content = ""

-
-
+        if stripped_lower in ("args:", "arguments:", "parameters:"):
+            section_type = "args"
+        elif stripped_lower in ("returns:", "yields:"):
+            section_type = "returns"
+        elif stripped_lower in ("raises:", "errors:", "exceptions:"):
+            section_type = "raises"
+        elif stripped_lower in ("tags:",):
+            section_type = "tags"
+        # Allow "Raises Description:" or "Tags content:"
+        elif stripped_lower.startswith(("raises ", "errors ", "exceptions ")):
+            section_type = "raises"
+            # Capture content after header word and potential colon/space
+            parts = re.split(r"[:\s]+", line.strip(), maxsplit=1)  # B034: Use keyword maxsplit
+            if len(parts) > 1:
+                header_content = parts[1].strip()
+        elif stripped_lower.startswith(("tags",)):
+            section_type = "tags"
+            # Capture content after header word and potential colon/space
+            parts = re.split(r"[:\s]+", line.strip(), maxsplit=1)  # B034: Use keyword maxsplit
+            if len(parts) > 1:
+                header_content = parts[1].strip()
+
+
+        # Identify other known sections, but don't store their content
+        elif stripped_lower.endswith(":") and stripped_lower[:-1] in (
+            "attributes", "see also", "example", "examples", "notes", "todo", "fixme", "warning", "warnings"
+        ):
+            section_type = "other"
+
+        return section_type is not None, section_type, header_content
+
+
+    in_summary = True
+
+    for line in lines:
         stripped_line = line.strip()
         original_indentation = len(line) - len(line.lstrip(" "))

-
-
-
-
+        is_new_section_header, new_section_type_this_line, header_content_this_line = check_for_section_header(line)
+
+        should_finalize_previous = False
+
+        # --- Summary Handling ---
+        if in_summary:
+            if not stripped_line or is_new_section_header:
+                # Empty line or section header marks the end of the summary
+                in_summary = False
+                summary = " ".join(summary_lines).strip()
+                summary_lines = []  # Clear summary_lines after finalizing summary
+
+                if not stripped_line:
+                    # If the line was just empty, continue to the next line
+                    # The new_section_header check will happen on the next iteration if it exists
+                    continue
+                # If it was a header, fall through to section handling below
+
+            else:
+                # Still in summary, append line
+                summary_lines.append(stripped_line)
+                continue  # Process next line
+
+
+        # --- Section and Item Handling ---
+
+        # Decide if the previous item/section block should be finalized BEFORE processing the current line
+        # Finalize if:
+        # 1. A new section header is encountered.
+        # 2. An empty line is encountered AFTER we've started collecting content for an item or section.
+        # 3. In 'args' or 'raises', we encounter a line that looks like a new key: value pair, or a non-indented line.
+        # 4. In 'returns', 'tags', or 'other', we encounter a non-indented line after collecting content.
+        if is_new_section_header or (not stripped_line and (current_desc_lines or current_key is not None)) or \
+           (current_section in ["args", "raises"] and current_key is not None and (key_pattern.match(line) or (original_indentation == 0 and stripped_line))) or \
+           (current_section in ["returns", "tags", "other"] and current_desc_lines and original_indentation == 0 and stripped_line):
+            should_finalize_previous = True
+        elif current_section in ["args", "raises"] and current_key is not None:
+            # Inside args/raises, processing an item (current_key is set)
+            pass  # Logic moved to the combined if statement
+        elif current_section in ["returns", "tags", "other"] and current_desc_lines:
+            # Inside returns/tags/other, collecting description lines
+            pass  # Logic moved to the combined if statement
+
+        # If finalizing the previous item/section
+        if should_finalize_previous:
+            finalize_current_item()
+            # Reset state after finalizing the previous item/section block
+            # If it was a new section header, reset everything
+            # If it was an end-of-item/block signal within a section, reset key and description lines
+            # (The condition for resetting key here is complex but matches the original logic)
+            if is_new_section_header or (current_section in ["args", "raises"] and current_key is not None and not key_pattern.match(line) and (not stripped_line or original_indentation == 0)):
+                current_key = None
+            current_desc_lines = []  # Always clear description lines

-
-            new_section_type = "args"
-            is_new_section_header = True
-        elif section_line in ("returns:", "yields:"):
-            new_section_type = "returns"
-            is_new_section_header = True
-        elif section_line.startswith(("raises ", "raises:", "errors:", "exceptions:")):
-            new_section_type = "raises"
-            is_new_section_header = True
-        elif section_line.startswith(
-            ("tags:", "tags")
-        ):  # Match "Tags:" or "Tags" potentially followed by content
-            new_section_type = "tags"
-            is_new_section_header = True
-            if ":" in stripped_line:
-                header_content = stripped_line.split(":", 1)[1].strip()
-        elif section_line.endswith(":") and section_line[:-1] in (
-            "attributes",
-            "see also",
-            "example",
-            "examples",
-            "notes",
-        ):
-            new_section_type = "other"
-            is_new_section_header = True
+        # --- Process the current line ---

-
+        # If the current line is a section header
         if is_new_section_header:
-
-
-
-
-
-            if original_indentation == 0 and stripped_line:
-                finalize_previous = True
-            # SIM102 applied: Combine nested if/elif
-            elif (
-                not stripped_line
-                and current_desc_lines
-                and current_section in ["args", "raises", "returns", "tags"]
-                and (current_section not in ["args", "raises"] or current_key)
-            ):
-                finalize_previous = True
-
-            if finalize_previous:
-                finalize_current_item()
-                current_key = None
-                current_desc_lines = []
-                if not is_new_section_header or new_section_type == "other":
-                    current_section = None
-
-            if is_new_section_header and new_section_type != "other":
-                current_section = new_section_type
-                # If Tags header had content, start accumulating it
-                if new_section_type == "tags" and header_content:
-                    current_desc_lines.append(header_content)
-            # Don't process the header line itself further
-            continue
+            current_section = new_section_type_this_line
+            if header_content_this_line:
+                # Add content immediately following the header on the same line
+                current_desc_lines.append(header_content_this_line)
+            continue  # Move to the next line, header is processed

+        # If the line is empty, and not a section header (handled above), skip it
         if not stripped_line:
             continue

+        # If we are inside a section, process the line's content
         if current_section == "args" or current_section == "raises":
             match = key_pattern.match(line)
             if match:
+                # Found a new key: value item within args/raises
                 current_key = match.group(1)
-                current_desc_lines = [match.group(2).strip()]
-            elif
-
-
-
+                current_desc_lines = [match.group(2).strip()]  # Start new description
+            elif current_key is not None:
+                # Not a new key, but processing an existing item - append to description
+                current_desc_lines.append(stripped_line)
+            # Lines that don't match key_pattern and occur when current_key is None
+            # within args/raises are effectively ignored by this block, which seems
+            # consistent with needing a key: description format.
+
+        elif current_section in ["returns", "tags", "other"]:
+            # In these sections, all non-empty, non-header lines are description lines
+            current_desc_lines.append(stripped_line)
+
+    # --- Finalization after loop ---
+    # Finalize any pending item/section block that was being collected
+    finalize_current_item()

-
-
-
+    # If the docstring only had a summary (no empty line or section header)
+    # ensure the summary is captured. This check is technically redundant
+    # because summary is finalized upon hitting the first empty line or header,
+    # or falls through to the final finalize call if neither occurs.
+    # Keeping it for clarity, though the logic flow should cover it.
+    if in_summary:
+        summary = " ".join(summary_lines).strip()

-        elif current_section == "tags":
-            if (
-                original_indentation > 0 or not current_desc_lines
-            ):  # Indented or first line
-                current_desc_lines.append(stripped_line)

-    finalize_current_item()
     return {
         "summary": summary,
         "args": args,
@@ -147,9 +214,9 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
         "tags": tags,
     }

-
 docstring_example = """
-    Starts a crawl job for a given URL using Firecrawl.
+    Starts a crawl job for a given URL using Firecrawl.
+    Returns the job ID immediately.

     Args:
         url: The starting URL for the crawl.
@@ -163,17 +230,17 @@ docstring_example = """
         or a string containing an error message on failure. This description
         can also span multiple lines.

-
+    Raises:
         ValueError: If the URL is invalid.
-
+        ConnectionError: If connection fails.

     Tags:
         crawl, async_job, start, api, long_tag_example , another
         , final_tag
-"""
+    """

 if __name__ == "__main__":
-    parsed = parse_docstring(docstring_example)
     import json

-
+    parsed = parse_docstring(docstring_example)
+    print(json.dumps(parsed, indent=4))