mcp-scoring-engine 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_scoring_engine-0.1.0/.gitignore +10 -0
- mcp_scoring_engine-0.1.0/LICENSE +21 -0
- mcp_scoring_engine-0.1.0/PKG-INFO +40 -0
- mcp_scoring_engine-0.1.0/README.md +13 -0
- mcp_scoring_engine-0.1.0/pyproject.toml +51 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/__init__.py +72 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/badges.py +228 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/classification.py +186 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/flags.py +234 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/__init__.py +0 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/github_client.py +126 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/health.py +154 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/protocol.py +422 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/reliability.py +44 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/probes/static.py +675 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/scoring.py +402 -0
- mcp_scoring_engine-0.1.0/src/mcp_scoring_engine/types.py +151 -0
- mcp_scoring_engine-0.1.0/tests/__init__.py +0 -0
- mcp_scoring_engine-0.1.0/tests/conftest.py +127 -0
- mcp_scoring_engine-0.1.0/tests/test_badges.py +121 -0
- mcp_scoring_engine-0.1.0/tests/test_classification.py +94 -0
- mcp_scoring_engine-0.1.0/tests/test_flags.py +198 -0
- mcp_scoring_engine-0.1.0/tests/test_integration.py +167 -0
- mcp_scoring_engine-0.1.0/tests/test_probes_reliability.py +52 -0
- mcp_scoring_engine-0.1.0/tests/test_scoring.py +258 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Brightwing Systems LLC
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: mcp-scoring-engine
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Scoring engine for MCP server quality assessment
|
|
5
|
+
Project-URL: Homepage, https://patchworkmcp.com
|
|
6
|
+
Project-URL: Repository, https://github.com/Brightwing-Systems-LLC/mcp-scoring-engine
|
|
7
|
+
Author-email: Brightwing Systems LLC <support@brightwingsystems.com>
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: ai,mcp,model-context-protocol,quality,scoring
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
17
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
18
|
+
Requires-Python: >=3.12
|
|
19
|
+
Requires-Dist: httpx
|
|
20
|
+
Requires-Dist: httpx-sse
|
|
21
|
+
Requires-Dist: mcp>=1.26
|
|
22
|
+
Provides-Extra: dev
|
|
23
|
+
Requires-Dist: pytest-asyncio>=1.0; extra == 'dev'
|
|
24
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
25
|
+
Requires-Dist: respx>=0.22; extra == 'dev'
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
|
|
28
|
+
# mcp-scoring-engine
|
|
29
|
+
|
|
30
|
+
Standalone scoring engine for MCP server quality assessment. Used by [PatchworkMCP](https://patchworkmcp.com) to evaluate and grade Model Context Protocol servers.
|
|
31
|
+
|
|
32
|
+
## Installation
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
pip install mcp-scoring-engine
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## License
|
|
39
|
+
|
|
40
|
+
MIT
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# mcp-scoring-engine
|
|
2
|
+
|
|
3
|
+
Standalone scoring engine for MCP server quality assessment. Used by [PatchworkMCP](https://patchworkmcp.com) to evaluate and grade Model Context Protocol servers.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install mcp-scoring-engine
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## License
|
|
12
|
+
|
|
13
|
+
MIT
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "mcp-scoring-engine"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Scoring engine for MCP server quality assessment"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "MIT"
|
|
11
|
+
requires-python = ">=3.12"
|
|
12
|
+
authors = [
|
|
13
|
+
{ name = "Brightwing Systems LLC", email = "support@brightwingsystems.com" },
|
|
14
|
+
]
|
|
15
|
+
keywords = ["mcp", "scoring", "quality", "model-context-protocol", "ai"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Development Status :: 3 - Alpha",
|
|
18
|
+
"Intended Audience :: Developers",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Programming Language :: Python :: 3.12",
|
|
22
|
+
"Programming Language :: Python :: 3.13",
|
|
23
|
+
"Topic :: Software Development :: Libraries",
|
|
24
|
+
]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"mcp>=1.26",
|
|
27
|
+
"httpx",
|
|
28
|
+
"httpx-sse",
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
[project.optional-dependencies]
|
|
32
|
+
dev = [
|
|
33
|
+
"pytest>=8.0",
|
|
34
|
+
"pytest-asyncio>=1.0",
|
|
35
|
+
"respx>=0.22",
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
[project.urls]
|
|
39
|
+
Homepage = "https://patchworkmcp.com"
|
|
40
|
+
Repository = "https://github.com/Brightwing-Systems-LLC/mcp-scoring-engine"
|
|
41
|
+
|
|
42
|
+
[tool.hatch.build.targets.wheel]
|
|
43
|
+
packages = ["src/mcp_scoring_engine"]
|
|
44
|
+
|
|
45
|
+
[tool.pytest.ini_options]
|
|
46
|
+
asyncio_mode = "auto"
|
|
47
|
+
testpaths = ["tests"]
|
|
48
|
+
|
|
49
|
+
[tool.ruff]
|
|
50
|
+
line-length = 100
|
|
51
|
+
target-version = "py312"
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""MCP Scoring Engine — standalone quality assessment for MCP servers.
|
|
2
|
+
|
|
3
|
+
Public API for scoring, probing, and classifying MCP servers.
|
|
4
|
+
No Django, no database — pure Python.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .badges import generate_badges
|
|
8
|
+
from .classification import classify_server
|
|
9
|
+
from .flags import detect_flags, FlagContext
|
|
10
|
+
from .probes.health import probe_server, probe_server_stdio
|
|
11
|
+
from .probes.protocol import deep_probe_server, deep_probe_server_stdio
|
|
12
|
+
from .probes.reliability import compute_reliability_score
|
|
13
|
+
from .probes.static import analyze_repo
|
|
14
|
+
from .scoring import (
|
|
15
|
+
compute_score,
|
|
16
|
+
extract_publisher,
|
|
17
|
+
GRADE_THRESHOLDS,
|
|
18
|
+
is_verified_publisher,
|
|
19
|
+
score_to_grade,
|
|
20
|
+
VERIFIED_PUBLISHERS,
|
|
21
|
+
WEIGHT_MAINTENANCE,
|
|
22
|
+
WEIGHT_PROTOCOL,
|
|
23
|
+
WEIGHT_RELIABILITY,
|
|
24
|
+
WEIGHT_SCHEMA_DOCS,
|
|
25
|
+
WEIGHT_SECURITY,
|
|
26
|
+
)
|
|
27
|
+
from .types import (
|
|
28
|
+
Badge,
|
|
29
|
+
DeepProbeResult,
|
|
30
|
+
FastProbeResult,
|
|
31
|
+
Flag,
|
|
32
|
+
ReliabilityData,
|
|
33
|
+
ScoreResult,
|
|
34
|
+
ServerInfo,
|
|
35
|
+
StaticAnalysis,
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
__all__ = [
|
|
39
|
+
# Types
|
|
40
|
+
"Badge",
|
|
41
|
+
"DeepProbeResult",
|
|
42
|
+
"FastProbeResult",
|
|
43
|
+
"Flag",
|
|
44
|
+
"FlagContext",
|
|
45
|
+
"ReliabilityData",
|
|
46
|
+
"ScoreResult",
|
|
47
|
+
"ServerInfo",
|
|
48
|
+
"StaticAnalysis",
|
|
49
|
+
# Scoring
|
|
50
|
+
"compute_score",
|
|
51
|
+
"score_to_grade",
|
|
52
|
+
"is_verified_publisher",
|
|
53
|
+
"extract_publisher",
|
|
54
|
+
"GRADE_THRESHOLDS",
|
|
55
|
+
"VERIFIED_PUBLISHERS",
|
|
56
|
+
"WEIGHT_SCHEMA_DOCS",
|
|
57
|
+
"WEIGHT_PROTOCOL",
|
|
58
|
+
"WEIGHT_RELIABILITY",
|
|
59
|
+
"WEIGHT_MAINTENANCE",
|
|
60
|
+
"WEIGHT_SECURITY",
|
|
61
|
+
# Probes
|
|
62
|
+
"probe_server",
|
|
63
|
+
"probe_server_stdio",
|
|
64
|
+
"deep_probe_server",
|
|
65
|
+
"deep_probe_server_stdio",
|
|
66
|
+
"analyze_repo",
|
|
67
|
+
"compute_reliability_score",
|
|
68
|
+
# Classification & Flags & Badges
|
|
69
|
+
"classify_server",
|
|
70
|
+
"detect_flags",
|
|
71
|
+
"generate_badges",
|
|
72
|
+
]
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
"""Structured badge generation for MCP server scoring.
|
|
2
|
+
|
|
3
|
+
Generates badge pills grouped by scoring category. Each badge conveys
|
|
4
|
+
a quick status signal: good (green), neutral (gray), warning (orange),
|
|
5
|
+
critical (red).
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
|
|
12
|
+
from .types import Badge, DeepProbeResult, ReliabilityData, ServerInfo, StaticAnalysis
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def generate_badges(
    server: ServerInfo,
    static_result: StaticAnalysis | None = None,
    deep_probe: DeepProbeResult | None = None,
    reliability: ReliabilityData | None = None,
    flags: list[dict] | None = None,
) -> dict:
    """Generate badge groups keyed by scoring category.

    Returns ``{"schema": [...], "protocol": [...], ...}`` where each value
    is a list of serialized badge dicts. Missing inputs simply yield
    fewer badges — no group is ever omitted.
    """
    grouped = {
        "schema": _schema_badges(server, static_result, flags),
        "protocol": _protocol_badges(deep_probe),
        "reliability": _reliability_badges(server, reliability),
        "maintenance": _maintenance_badges(static_result),
        "security": _security_badges(server),
    }

    serialized: dict = {}
    for category, group in grouped.items():
        serialized[category] = [_serialize(badge) for badge in group]
    return serialized
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _serialize(badge: Badge) -> dict:
    """Convert a Badge into its JSON-friendly dict form."""
    return {field: getattr(badge, field) for field in ("key", "label", "level")}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _schema_badges(
    server: ServerInfo,
    static_result: StaticAnalysis | None,
    flags: list[dict] | None,
) -> list[Badge]:
    """Badges for documentation, provenance, and description quality."""
    out: list = []

    if static_result:
        details = static_result.details or {}

        docs = details.get("documentation_coverage", {}).get("checks", {})
        if docs.get("has_readme"):
            out.append(Badge("has_readme", "README", "good"))
        elif docs:
            # Checks ran but no README was found — a hard failure.
            out.append(Badge("no_readme", "No README", "critical"))

        # Simple presence checks; badge key matches the check key.
        for key, label in (
            ("has_changelog", "Changelog"),
            ("has_examples", "Examples"),
            ("has_contributing", "Contributing Guide"),
            ("has_docs_dir", "Docs Dir"),
        ):
            if docs.get(key):
                out.append(Badge(key, label, "good"))

        prov = details.get("provenance", {}).get("checks", {})
        if prov.get("has_security_policy"):
            out.append(Badge("security_policy", "SECURITY.md", "good"))
        if prov.get("has_code_of_conduct"):
            out.append(Badge("code_of_conduct", "Code of Conduct", "good"))
        # Tri-state: True / False / unknown (no badge when unknown).
        ns_match = prov.get("namespace_owner_match")
        if ns_match is True:
            out.append(Badge("namespace_match", "Namespace Match", "good"))
        elif ns_match is False:
            out.append(Badge("namespace_mismatch", "Namespace Mismatch", "warning"))
        if prov.get("has_installable_package"):
            out.append(Badge("installable", "Installable", "good"))

        desc = details.get("description_quality", {}).get("checks", {})
        if desc.get("has_usage_section"):
            out.append(Badge("usage_docs", "Usage Docs", "good"))
        if desc.get("has_code_examples"):
            out.append(Badge("code_examples", "Code Examples", "good"))

    if flags:
        # Flags may arrive serialized (dicts) or as Flag objects.
        flag_keys = {f["key"] if isinstance(f, dict) else f.key for f in flags}
        if "TEMPLATE_DESCRIPTION" in flag_keys:
            out.append(Badge("template_desc", "Template Description", "warning"))

    return out
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _protocol_badges(deep_probe: DeepProbeResult | None) -> list[Badge]:
    """Badges describing live protocol probe results."""
    if not deep_probe:
        return []

    out = []
    out.append(
        Badge("reachable", "Reachable", "good")
        if deep_probe.is_reachable
        else Badge("unreachable", "Unreachable", "critical")
    )

    # Tri-state: True / False / unknown (no badge when unknown).
    valid = deep_probe.schema_valid
    if valid is True:
        out.append(Badge("schema_valid", "Schema Valid", "good"))
    elif valid is False:
        out.append(Badge("schema_invalid", "Schema Invalid", "warning"))

    tools = deep_probe.tools_count
    if tools is not None and tools > 0:
        out.append(Badge("has_tools", f"{tools} Tools", "good"))

    err_score = deep_probe.error_handling_score
    if err_score is not None:
        # Mid-range scores (40-69) intentionally get no badge.
        if err_score >= 70:
            out.append(Badge("good_errors", "Good Error Handling", "good"))
        elif err_score < 40:
            out.append(Badge("poor_errors", "Poor Error Handling", "warning"))

    if deep_probe.auth_discovery_valid is True:
        out.append(Badge("auth_discovery", "Auth Discovery", "good"))

    return out
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def _reliability_badges(
    server: ServerInfo, reliability: ReliabilityData | None
) -> list[Badge]:
    """Badges for measured uptime and latency; notes local-only servers."""
    out = []

    uptime = reliability.uptime_pct if reliability else None
    if uptime is not None:
        if uptime >= 99.0:
            out.append(Badge("high_uptime", "99%+ Uptime", "good"))
        elif uptime >= 95.0:
            out.append(Badge("good_uptime", "95%+ Uptime", "good"))
        elif uptime >= 80.0:
            out.append(Badge("degraded_uptime", "Degraded Uptime", "warning"))
        else:
            out.append(Badge("low_uptime", "Low Uptime", "critical"))

    p50 = reliability.latency_p50_ms if reliability else None
    if p50 is not None:
        if p50 < 200:
            out.append(Badge("low_latency", "Fast (<200ms)", "good"))
        elif p50 < 500:
            out.append(Badge("med_latency", "Moderate Latency", "neutral"))
        elif p50 < 1000:
            out.append(Badge("high_latency", "Slow (>500ms)", "warning"))
        else:
            out.append(Badge("very_high_latency", "Very Slow (>1s)", "critical"))

    # Local (stdio) servers have no uptime/latency story to tell.
    if not server.is_remote:
        out.append(Badge("local_only", "Local Only", "neutral"))

    return out
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _maintenance_badges(static_result: StaticAnalysis | None) -> list[Badge]:
    """Badges for repo activity, releases, CI, licensing, and versioning."""
    if not static_result:
        return []

    out = []
    details = static_result.details or {}

    pulse = details.get("maintenance_pulse", {})
    days = pulse.get("days_since_last_push")
    if days is not None:
        # Ascending recency buckets; falls through to "abandoned" past a year.
        for limit, key, label, level in (
            (30, "active", "Active Development", "good"),
            (180, "moderate", "Moderate Activity", "neutral"),
            (365, "stale", "Stale", "warning"),
        ):
            if days <= limit:
                out.append(Badge(key, label, level))
                break
        else:
            out.append(Badge("abandoned", "Possibly Abandoned", "critical"))

    releases = pulse.get("release_count", 0)
    if releases >= 3:
        out.append(Badge("regular_releases", "Regular Releases", "good"))
    elif releases >= 1:
        out.append(Badge("has_releases", "Has Releases", "good"))

    deps = details.get("dependency_health", {}).get("checks", {})
    if deps.get("has_ci"):
        out.append(Badge("has_ci", "CI/CD", "good"))
    if deps.get("has_lock_file"):
        out.append(Badge("lock_file", "Lock File", "good"))
    if deps.get("has_dependency_automation"):
        out.append(Badge("dep_automation", "Dep Automation", "good"))

    lic = details.get("license_clarity", {})
    spdx = lic.get("spdx_id")
    if spdx and spdx != "NOASSERTION":
        # The SPDX identifier itself (e.g. "MIT") is the badge label.
        out.append(Badge("licensed", spdx, "good"))
    elif lic.get("issue") == "no_license_detected":
        out.append(Badge("no_license", "No License", "warning"))

    if details.get("version_hygiene", {}).get("semver_ratio", 0) >= 0.8:
        out.append(Badge("semver", "Semver", "good"))

    return out
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _security_badges(server: ServerInfo) -> list[Badge]:
    """Badges for secret requirements, transport type, and package publication."""
    out = []

    meta = server.registry_metadata or {}
    env_vars = meta.get("env_vars", [])

    # Heuristic: env var names that usually hold credentials.
    looks_secret = re.compile(
        r"(api[_-]?key|secret|token|password|auth|credential|private[_-]?key)",
        re.IGNORECASE,
    ).search
    secret_count = sum(1 for var in env_vars if looks_secret(var))

    if secret_count == 0:
        out.append(Badge("no_secrets", "No Secrets Required", "good"))
    elif secret_count <= 2:
        out.append(Badge("few_secrets", f"{secret_count} Secret(s)", "neutral"))
    else:
        out.append(Badge("many_secrets", f"{secret_count} Secrets", "warning"))

    if server.is_remote:
        out.append(Badge("remote", "Remote Endpoint", "neutral"))
    else:
        out.append(Badge("stdio", "STDIO Only", "good"))

    if server.npm_url or server.pypi_url or server.dockerhub_url:
        out.append(Badge("published", "Published Package", "good"))

    return out
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
"""Server categorization engine.
|
|
2
|
+
|
|
3
|
+
Classifies servers into categories based on name, description, namespace,
|
|
4
|
+
and repo URL analysis. Also identifies target platforms.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
|
|
11
|
+
from .types import ServerInfo
|
|
12
|
+
|
|
13
|
+
# Target platforms: keyword → (platform_label, category)
TARGET_PLATFORMS = {
    # AI/ML
    "openai": ("OpenAI", "ai_ml"),
    "gpt": ("OpenAI", "ai_ml"),
    "chatgpt": ("OpenAI", "ai_ml"),
    "anthropic": ("Anthropic", "ai_ml"),
    "claude": ("Anthropic", "ai_ml"),
    "huggingface": ("Hugging Face", "ai_ml"),
    "hugging-face": ("Hugging Face", "ai_ml"),
    "ollama": ("Ollama", "ai_ml"),
    "langchain": ("LangChain", "ai_ml"),
    "cohere": ("Cohere", "ai_ml"),
    "replicate": ("Replicate", "ai_ml"),
    "stable-diffusion": ("Stable Diffusion", "ai_ml"),
    "midjourney": ("Midjourney", "ai_ml"),
    "vertex-ai": ("Google Vertex AI", "ai_ml"),
    "bedrock": ("AWS Bedrock", "ai_ml"),
    "gemini": ("Google Gemini", "ai_ml"),
    # Database
    "postgres": ("PostgreSQL", "database"),
    "postgresql": ("PostgreSQL", "database"),
    "mysql": ("MySQL", "database"),
    "mongodb": ("MongoDB", "database"),
    "redis": ("Redis", "database"),
    "sqlite": ("SQLite", "database"),
    "supabase": ("Supabase", "database"),
    "firebase": ("Firebase", "database"),
    "dynamodb": ("DynamoDB", "database"),
    "elasticsearch": ("Elasticsearch", "database"),
    "pinecone": ("Pinecone", "database"),
    "qdrant": ("Qdrant", "database"),
    "chromadb": ("ChromaDB", "database"),
    "weaviate": ("Weaviate", "database"),
    "neon": ("Neon", "database"),
    "turso": ("Turso", "database"),
    # DevTools
    "github": ("GitHub", "devtools"),
    "gitlab": ("GitLab", "devtools"),
    "bitbucket": ("Bitbucket", "devtools"),
    "docker": ("Docker", "devtools"),
    "kubernetes": ("Kubernetes", "devtools"),
    "terraform": ("Terraform", "devtools"),
    "npm": ("npm", "devtools"),
    "jira": ("Jira", "devtools"),
    "linear": ("Linear", "devtools"),
    "sentry": ("Sentry", "devtools"),
    "jest": ("Jest", "devtools"),
    "eslint": ("ESLint", "devtools"),
    "prettier": ("Prettier", "devtools"),
    # Cloud
    "aws": ("AWS", "cloud"),
    "gcp": ("Google Cloud", "cloud"),
    "azure": ("Azure", "cloud"),
    "cloudflare": ("Cloudflare", "cloud"),
    "vercel": ("Vercel", "cloud"),
    "netlify": ("Netlify", "cloud"),
    "heroku": ("Heroku", "cloud"),
    "digitalocean": ("DigitalOcean", "cloud"),
    "fly.io": ("Fly.io", "cloud"),
    "railway": ("Railway", "cloud"),
    # Communication
    "slack": ("Slack", "communication"),
    "discord": ("Discord", "communication"),
    "telegram": ("Telegram", "communication"),
    "email": ("Email", "communication"),
    "twilio": ("Twilio", "communication"),
    "sendgrid": ("SendGrid", "communication"),
    "whatsapp": ("WhatsApp", "communication"),
    # Productivity
    "notion": ("Notion", "productivity"),
    "obsidian": ("Obsidian", "productivity"),
    "google-docs": ("Google Docs", "productivity"),
    "google-drive": ("Google Drive", "productivity"),
    "google-sheets": ("Google Sheets", "productivity"),
    "airtable": ("Airtable", "productivity"),
    "todoist": ("Todoist", "productivity"),
    "trello": ("Trello", "productivity"),
    "asana": ("Asana", "productivity"),
    "calendar": ("Calendar", "productivity"),
    # Search
    "brave-search": ("Brave Search", "search"),
    "google-search": ("Google Search", "search"),
    "tavily": ("Tavily", "search"),
    "exa": ("Exa", "search"),
    "perplexity": ("Perplexity", "search"),
    "wikipedia": ("Wikipedia", "search"),
    "arxiv": ("arXiv", "search"),
    # Monitoring
    "datadog": ("Datadog", "monitoring"),
    "grafana": ("Grafana", "monitoring"),
    "prometheus": ("Prometheus", "monitoring"),
    "pagerduty": ("PagerDuty", "monitoring"),
    "newrelic": ("New Relic", "monitoring"),
    # Data
    "snowflake": ("Snowflake", "data"),
    "bigquery": ("BigQuery", "data"),
    "dbt": ("dbt", "data"),
    "pandas": ("pandas", "data"),
    "jupyter": ("Jupyter", "data"),
    # Finance
    "stripe": ("Stripe", "finance"),
    "plaid": ("Plaid", "finance"),
    "coinbase": ("Coinbase", "finance"),
    # Media
    "youtube": ("YouTube", "media"),
    "spotify": ("Spotify", "media"),
    "figma": ("Figma", "media"),
    "canva": ("Canva", "media"),
    # E-Commerce
    "shopify": ("Shopify", "ecommerce"),
    "woocommerce": ("WooCommerce", "ecommerce"),
    # Browser
    "puppeteer": ("Puppeteer", "browser"),
    "playwright": ("Playwright", "browser"),
    "selenium": ("Selenium", "browser"),
    "browserbase": ("Browserbase", "browser"),
    "fetch": ("Web Fetch", "browser"),
    "scraper": ("Web Scraper", "browser"),
    "scraping": ("Web Scraping", "browser"),
    "crawl": ("Web Crawler", "browser"),
}

# Word-boundary patterns, precompiled once at import time instead of on every
# classify_server() call. A keyword only counts when delimited (start/end,
# whitespace, -, _, /, .), so e.g. "exa" does not fire inside "example".
_PLATFORM_MATCHERS = [
    (
        re.compile(
            r"(?:^|[\s\-_/])(" + re.escape(keyword) + r")(?:[\s\-_/.]|$)",
            re.IGNORECASE,
        ),
        label,
        category,
    )
    for keyword, (label, category) in TARGET_PLATFORMS.items()
]


def classify_server(server: ServerInfo) -> tuple[str, list[str]]:
    """Classify a server into a category and identify target platforms.

    Uses 4-tier matching: name, description, registry namespace, repo URL.
    Higher-signal fields (name, namespace, repo path) carry more voting
    weight than free-text description.

    Returns (category, sorted list of target platform labels); category is
    "other" when nothing matches.
    """
    targets_found: set[str] = set()
    category_votes: dict[str, int] = {}

    name_lower = (server.name or "").lower()
    desc_lower = (server.description or "").lower()

    # NOTE(review): this extracts the segment *after* the last slash, which
    # for ids like "io.github.owner/server" is the server name rather than
    # the namespace — confirm the intended field.
    namespace = ""
    rid = server.registry_id or ""
    if "/" in rid:
        namespace = rid.rsplit("/", 1)[-1].lower()

    repo_path = ""
    if server.repo_url:
        repo_path = (
            server.repo_url.lower().rstrip("/").rsplit("/", 1)[-1]
            if "/" in server.repo_url
            else ""
        )

    # (text, vote weight) — exact-name hits count the most.
    search_fields = [
        (name_lower, 3),
        (desc_lower, 1),
        (namespace, 2),
        (repo_path, 2),
    ]

    for pattern, platform_label, category in _PLATFORM_MATCHERS:
        for text, weight in search_fields:
            # Bug fix: match only the delimited pattern. The previous
            # `keyword in text or pattern.search(text)` substring test was a
            # strict superset of the regex, making the word-boundary check
            # dead code and producing false positives ("exa" matched inside
            # "example", "fetch" inside "prefetch").
            if text and pattern.search(text):
                targets_found.add(platform_label)
                category_votes[category] = category_votes.get(category, 0) + weight
                break  # count each platform at most once

    if category_votes:
        best_category = max(category_votes, key=category_votes.get)
    else:
        best_category = "other"

    return best_category, sorted(targets_found)
|