pcp_mcp-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ pcp_mcp/utils/builders.py
@@ -0,0 +1,290 @@
+ """Metric transformation and builder functions.
+
+ Consolidated metric builders extracted from tools/system.py to follow DRY principles.
+ """
+
+ from __future__ import annotations
+
+ from pcp_mcp.models import (
+     CPUMetrics,
+     DiskMetrics,
+     LoadMetrics,
+     MemoryMetrics,
+     NetworkMetrics,
+     ProcessInfo,
+ )
+ from pcp_mcp.utils.extractors import get_first_value, sum_instances
+
+
+ def build_cpu_metrics(data: dict) -> CPUMetrics:
+     """Build CPU metrics from fetched data."""
+     user = get_first_value(data, "kernel.all.cpu.user")
+     sys = get_first_value(data, "kernel.all.cpu.sys")
+     idle = get_first_value(data, "kernel.all.cpu.idle")
+     iowait = get_first_value(data, "kernel.all.cpu.wait.total")
+     ncpu = int(get_first_value(data, "hinv.ncpu", 1))
+
+     total = user + sys + idle + iowait
+     if total > 0:
+         user_pct = (user / total) * 100
+         sys_pct = (sys / total) * 100
+         idle_pct = (idle / total) * 100
+         iowait_pct = (iowait / total) * 100
+     else:
+         user_pct = sys_pct = idle_pct = iowait_pct = 0.0
+
+     if iowait_pct > 20:
+         assessment = "High I/O wait - system is disk bound"
+     elif idle_pct < 10:
+         assessment = "CPU is saturated"
+     elif user_pct > 70:
+         assessment = "CPU bound on user processes"
+     elif sys_pct > 30:
+         assessment = "High system/kernel CPU usage"
+     else:
+         assessment = "CPU utilization is normal"
+
+     return CPUMetrics(
+         user_percent=round(user_pct, 1),
+         system_percent=round(sys_pct, 1),
+         idle_percent=round(idle_pct, 1),
+         iowait_percent=round(iowait_pct, 1),
+         ncpu=ncpu,
+         assessment=assessment,
+     )
+
+
+ def build_memory_metrics(data: dict) -> MemoryMetrics:
+     """Build memory metrics from fetched data."""
+     total = int(get_first_value(data, "mem.physmem")) * 1024
+     available = int(get_first_value(data, "mem.util.available")) * 1024
+     free = int(get_first_value(data, "mem.util.free")) * 1024
+     cached = int(get_first_value(data, "mem.util.cached")) * 1024
+     buffers = int(get_first_value(data, "mem.util.bufmem")) * 1024
+     swap_total = int(get_first_value(data, "mem.util.swapTotal")) * 1024
+     swap_free = int(get_first_value(data, "mem.util.swapFree")) * 1024
+     swap_used = swap_total - swap_free
+
+     used = total - available
+     used_pct = (used / total * 100) if total > 0 else 0.0
+
+     if swap_used > swap_total * 0.5:
+         assessment = "Heavy swap usage - memory pressure"
+     elif used_pct > 90:
+         assessment = "Memory usage is critical"
+     elif used_pct > 75:
+         assessment = "Memory usage is elevated"
+     else:
+         assessment = "Memory utilization is normal"
+
+     return MemoryMetrics(
+         total_bytes=total,
+         used_bytes=used,
+         free_bytes=free,
+         available_bytes=available,
+         cached_bytes=cached,
+         buffers_bytes=buffers,
+         swap_used_bytes=swap_used,
+         swap_total_bytes=swap_total,
+         used_percent=round(used_pct, 1),
+         assessment=assessment,
+     )
+
+
+ def build_load_metrics(data: dict) -> LoadMetrics:
+     """Build load metrics from fetched data."""
+     load_data = data.get("kernel.all.load", {}).get("instances", {})
+
+     load_1m = float(load_data.get(1, 0.0))
+     load_5m = float(load_data.get(5, 0.0))
+     load_15m = float(load_data.get(15, 0.0))
+
+     runnable = int(get_first_value(data, "kernel.all.runnable"))
+     nprocs = int(get_first_value(data, "kernel.all.nprocs"))
+     ncpu = int(get_first_value(data, "hinv.ncpu", 1))
+
+     if load_1m > ncpu * 2:
+         assessment = f"Load is very high ({load_1m:.1f} vs {ncpu} CPUs)"
+     elif load_1m > ncpu:
+         assessment = f"Load is elevated ({load_1m:.1f} > {ncpu} CPUs)"
+     else:
+         assessment = "Load is normal"
+
+     return LoadMetrics(
+         load_1m=round(load_1m, 2),
+         load_5m=round(load_5m, 2),
+         load_15m=round(load_15m, 2),
+         runnable=runnable,
+         nprocs=nprocs,
+         assessment=assessment,
+     )
+
+
+ def build_disk_metrics(data: dict) -> DiskMetrics:
+     """Build disk I/O metrics from fetched data."""
+     read_bytes = get_first_value(data, "disk.all.read_bytes")
+     write_bytes = get_first_value(data, "disk.all.write_bytes")
+     reads = get_first_value(data, "disk.all.read")
+     writes = get_first_value(data, "disk.all.write")
+
+     if read_bytes > 100_000_000 or write_bytes > 100_000_000:
+         assessment = (
+             f"Heavy disk I/O ({read_bytes / 1e6:.0f} MB/s read, {write_bytes / 1e6:.0f} MB/s write)"
+         )
+     elif read_bytes > 10_000_000 or write_bytes > 10_000_000:
+         assessment = "Moderate disk activity"
+     else:
+         assessment = "Disk I/O is low"
+
+     return DiskMetrics(
+         read_bytes_per_sec=round(read_bytes, 1),
+         write_bytes_per_sec=round(write_bytes, 1),
+         reads_per_sec=round(reads, 1),
+         writes_per_sec=round(writes, 1),
+         assessment=assessment,
+     )
+
+
+ def build_network_metrics(data: dict) -> NetworkMetrics:
+     """Build network I/O metrics from fetched data."""
+     in_bytes = sum_instances(data, "network.interface.in.bytes")
+     out_bytes = sum_instances(data, "network.interface.out.bytes")
+     in_packets = sum_instances(data, "network.interface.in.packets")
+     out_packets = sum_instances(data, "network.interface.out.packets")
+
+     total_throughput = in_bytes + out_bytes
+     if total_throughput > 100_000_000:
+         assessment = f"High network throughput ({total_throughput / 1e6:.0f} MB/s)"
+     elif total_throughput > 10_000_000:
+         assessment = "Moderate network activity"
+     else:
+         assessment = "Network I/O is low"
+
+     return NetworkMetrics(
+         in_bytes_per_sec=round(in_bytes, 1),
+         out_bytes_per_sec=round(out_bytes, 1),
+         in_packets_per_sec=round(in_packets, 1),
+         out_packets_per_sec=round(out_packets, 1),
+         assessment=assessment,
+     )
+
+
+ def _extract_process_data_sources(data: dict) -> dict[str, dict]:
+     """Extract all process data sources from raw PCP data."""
+     return {
+         "pid": data.get("proc.psinfo.pid", {}).get("instances", {}),
+         "cmd": data.get("proc.psinfo.cmd", {}).get("instances", {}),
+         "args": data.get("proc.psinfo.psargs", {}).get("instances", {}),
+         "rss": data.get("proc.memory.rss", {}).get("instances", {}),
+         "utime": data.get("proc.psinfo.utime", {}).get("instances", {}),
+         "stime": data.get("proc.psinfo.stime", {}).get("instances", {}),
+         "io_read": data.get("proc.io.read_bytes", {}).get("instances", {}),
+         "io_write": data.get("proc.io.write_bytes", {}).get("instances", {}),
+     }
+
+
+ def _calculate_cpu_percent(
+     inst_id: str, utime_data: dict, stime_data: dict, include_cpu: bool
+ ) -> float | None:
+     """Calculate CPU percentage for a process instance."""
+     if not include_cpu and not utime_data:
+         return None
+     utime = float(utime_data.get(inst_id, 0))
+     stime = float(stime_data.get(inst_id, 0))
+     return (utime + stime) / 10.0
+
+
+ def _calculate_io_metrics(
+     inst_id: str, io_read_data: dict, io_write_data: dict, include_io: bool
+ ) -> tuple[float | None, float | None]:
+     """Calculate I/O read/write metrics for a process instance."""
+     if not include_io and not io_read_data:
+         return None, None
+     io_read = float(io_read_data.get(inst_id, 0))
+     io_write = float(io_write_data.get(inst_id, 0))
+     return io_read, io_write
+
+
+ def _build_process_info(
+     inst_id: str, sources: dict[str, dict], sort_by: str, total_mem: float
+ ) -> ProcessInfo | None:
+     """Build a single ProcessInfo from instance data."""
+     pid = int(sources["pid"].get(inst_id, 0))
+     if pid <= 0:
+         return None
+
+     cmd = str(sources["cmd"].get(inst_id, "unknown"))
+     cmdline = str(sources["args"].get(inst_id, cmd))[:200]
+     rss = int(sources["rss"].get(inst_id, 0)) * 1024
+     rss_pct = (rss / total_mem * 100) if total_mem > 0 else 0.0
+
+     cpu_pct = _calculate_cpu_percent(inst_id, sources["utime"], sources["stime"], sort_by == "cpu")
+
+     io_read, io_write = _calculate_io_metrics(
+         inst_id, sources["io_read"], sources["io_write"], sort_by == "io"
+     )
+
+     return ProcessInfo(
+         pid=pid,
+         command=cmd,
+         cmdline=cmdline,
+         cpu_percent=round(cpu_pct, 1) if cpu_pct is not None else None,
+         rss_bytes=rss,
+         rss_percent=round(rss_pct, 1),
+         io_read_bytes_per_sec=round(io_read, 1) if io_read is not None else None,
+         io_write_bytes_per_sec=round(io_write, 1) if io_write is not None else None,
+     )
+
+
+ def build_process_list(data: dict, sort_by: str, total_mem: float, ncpu: int) -> list[ProcessInfo]:
+     """Build list of ProcessInfo from fetched data."""
+     sources = _extract_process_data_sources(data)
+     processes: list[ProcessInfo] = []
+
+     for inst_id in sources["pid"]:
+         process = _build_process_info(inst_id, sources, sort_by, total_mem)
+         if process is not None:
+             processes.append(process)
+
+     return processes
+
+
+ def get_sort_key(proc: ProcessInfo, sort_by: str) -> float:
+     """Get sort key value for a process."""
+     if sort_by == "cpu":
+         return proc.cpu_percent or 0.0
+     elif sort_by == "memory":
+         return float(proc.rss_bytes)
+     elif sort_by == "io":
+         return (proc.io_read_bytes_per_sec or 0.0) + (proc.io_write_bytes_per_sec or 0.0)
+     return 0.0
+
+
+ def assess_processes(processes: list[ProcessInfo], sort_by: str, ncpu: int) -> str:
+     """Generate assessment string for top processes."""
+     if not processes:
+         return "No processes found"
+
+     top = processes[0]
+     if sort_by == "cpu":
+         if top.cpu_percent and top.cpu_percent > ncpu * 100 * 0.5:
+             return f"{top.command} is CPU-bound ({top.cpu_percent:.0f}%)"
+ return f"Top CPU: {top.command} ({top.cpu_percent:.0f}%)"
273
+ elif sort_by == "memory":
274
+ return f"Top memory: {top.command} ({top.rss_percent:.1f}%)"
275
+ elif sort_by == "io":
276
+ total_io = (top.io_read_bytes_per_sec or 0) + (top.io_write_bytes_per_sec or 0)
277
+ return f"Top I/O: {top.command} ({total_io / 1e6:.1f} MB/s)"
278
+ return f"Top process: {top.command}"
279
+
280
+
281
+ __all__ = [
282
+ "build_cpu_metrics",
283
+ "build_memory_metrics",
284
+ "build_load_metrics",
285
+ "build_disk_metrics",
286
+ "build_network_metrics",
287
+ "build_process_list",
288
+ "get_sort_key",
289
+ "assess_processes",
290
+ ]
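All of these builders consume the same nested shape that `get_first_value` and `sum_instances` expect: `{metric_name: {"instances": {instance_id: value}}}`. A minimal sketch of feeding one builder by hand; the instance ID and values are illustrative only:

```python
from pcp_mcp.utils.builders import build_cpu_metrics

# Fetched-data dict in the shape the extractors parse; values are made up.
data = {
    "kernel.all.cpu.user": {"instances": {-1: 420.0}},
    "kernel.all.cpu.sys": {"instances": {-1: 80.0}},
    "kernel.all.cpu.idle": {"instances": {-1: 1400.0}},
    "kernel.all.cpu.wait.total": {"instances": {-1: 100.0}},
    "hinv.ncpu": {"instances": {-1: 4}},
}

cpu = build_cpu_metrics(data)
print(cpu.user_percent)  # 21.0 -- 420 of 2000 total ticks
print(cpu.assessment)    # "CPU utilization is normal"
```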
--- /dev/null
+++ pcp_mcp/utils/decorators.py
@@ -0,0 +1,38 @@
+ """Error handling decorators for MCP tools."""
+
+ from __future__ import annotations
+
+ from functools import wraps
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from collections.abc import Callable
+     from typing import Any
+
+
+ def handle_pcp_errors(operation: str) -> Callable:
+     """Decorator to convert PCP exceptions to ToolError.
+
+     Args:
+         operation: Description of the operation (e.g., "fetching metrics").
+
+     Returns:
+         Decorated async function that handles PCP errors.
+     """
+
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         async def wrapper(*args: Any, **kwargs: Any) -> Any:
+             from pcp_mcp.errors import handle_pcp_error
+
+             try:
+                 return await func(*args, **kwargs)
+             except Exception as e:
+                 raise handle_pcp_error(e, operation) from e
+
+         return wrapper
+
+     return decorator
+
+
+ __all__ = ["handle_pcp_errors"]
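A sketch of how a tool would use this decorator; `fetch_metrics` here is a hypothetical coroutine standing in for whatever actually talks to pmproxy:

```python
from pcp_mcp.utils.decorators import handle_pcp_errors

@handle_pcp_errors("fetching CPU metrics")
async def cpu_snapshot() -> dict:
    # fetch_metrics is hypothetical; any exception it raises is converted
    # to a ToolError by handle_pcp_error, tagged with the operation string.
    return await fetch_metrics(["kernel.all.cpu.user", "kernel.all.cpu.sys"])
```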
--- /dev/null
+++ pcp_mcp/utils/extractors.py
@@ -0,0 +1,60 @@
+ """Metric data extraction utilities.
+
+ Consolidated helpers for extracting values from PCP metric responses.
+ """
+
+ from __future__ import annotations
+
+
+ def get_first_value(data: dict, metric: str, default: float = 0.0) -> float:
+     """Get first instance value from fetched data."""
+     metric_data = data.get(metric, {})
+     instances = metric_data.get("instances", {})
+     if instances:
+         return float(next(iter(instances.values()), default))
+     return default
+
+
+ def get_scalar_value(response: dict, metric: str, default: int = 0) -> int:
+     """Get scalar value from raw fetch response."""
+     for v in response.get("values", []):
+         if v.get("name") == metric:
+             instances = v.get("instances", [])
+             if instances:
+                 return int(instances[0].get("value", default))
+     return default
+
+
+ def sum_instances(data: dict, metric: str) -> float:
+     """Sum all instance values for a metric."""
+     metric_data = data.get(metric, {})
+     instances = metric_data.get("instances", {})
+     return sum(float(v) for v in instances.values())
+
+
+ def extract_help_text(metric_dict: dict, default: str = "") -> str:
+     """Extract help text from metric info dictionary.
+
+     Tries text-help first, then falls back to text-oneline.
+     """
+     return metric_dict.get("text-help") or metric_dict.get("text-oneline") or default
+
+
+ def extract_timestamp(response: dict) -> float:
+     """Extract timestamp from pmproxy response.
+
+     Handles both float timestamps and dict format {s: ..., us: ...}.
+     """
+     ts = response.get("timestamp", 0.0)
+     if isinstance(ts, dict):
+         return ts.get("s", 0) + ts.get("us", 0) / 1e6
+     return float(ts)
+
+
+ __all__ = [
+     "get_first_value",
+     "get_scalar_value",
+     "sum_instances",
+     "extract_help_text",
+     "extract_timestamp",
+ ]
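These helpers encode the two response shapes the server handles; a quick check against a hand-built pmproxy-style response (field values are illustrative):

```python
from pcp_mcp.utils.extractors import extract_timestamp, get_scalar_value

# Raw fetch response in the shape get_scalar_value() walks; values made up.
response = {
    "timestamp": {"s": 1700000000, "us": 250000},
    "values": [
        {"name": "hinv.ncpu", "instances": [{"instance": -1, "value": 8}]},
    ],
}

assert get_scalar_value(response, "hinv.ncpu") == 8
assert extract_timestamp(response) == 1700000000.25
```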
--- /dev/null
+++ pcp_mcp-0.1.0.dist-info/METADATA
@@ -0,0 +1,227 @@
+ Metadata-Version: 2.4
+ Name: pcp-mcp
+ Version: 0.1.0
+ Summary: MCP server for Performance Co-Pilot
+ Keywords: mcp,pcp,performance-co-pilot,monitoring,model-context-protocol
+ Author: Major Hayden
+ Author-email: Major Hayden <major@mhtx.net>
+ License-Expression: MIT
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: System Administrators
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Classifier: Topic :: System :: Monitoring
+ Classifier: Typing :: Typed
+ Requires-Dist: fastmcp>=2.0.0
+ Requires-Dist: httpx>=0.27
+ Requires-Dist: pydantic-settings>=2.0.0
+ Requires-Dist: typing-extensions>=4.0 ; python_full_version < '3.11'
+ Requires-Python: >=3.10
+ Project-URL: Homepage, https://github.com/major/pcp-mcp
+ Project-URL: Repository, https://github.com/major/pcp-mcp
+ Description-Content-Type: text/markdown
+
+ # pcp-mcp
+
+ MCP server for [Performance Co-Pilot (PCP)](https://pcp.io/) metrics.
+
+ Query system performance metrics via the Model Context Protocol: CPU, memory, disk I/O, network, processes, and more.
+
+ ## 🚀 Installation
+
+ ```bash
+ pip install pcp-mcp
+ ```
+
+ Or with [uv](https://docs.astral.sh/uv/):
+
+ ```bash
+ uv add pcp-mcp
+ ```
+
+ ## 📋 Requirements
+
+ - **Python**: 3.10+
+ - **PCP**: Performance Co-Pilot with `pmproxy` running
+   ```bash
+   # Fedora/RHEL/CentOS
+   sudo dnf install pcp
+   sudo systemctl enable --now pmproxy
+
+   # Ubuntu/Debian
+   sudo apt install pcp
+   sudo systemctl enable --now pmproxy
+   ```
+
+ ## ⚙️ Configuration
+
+ Configure via environment variables:
+
+ | Variable | Description | Default |
+ |----------|-------------|---------|
+ | `PCP_HOST` | pmproxy host | `localhost` |
+ | `PCP_PORT` | pmproxy port | `44322` |
+ | `PCP_TARGET_HOST` | Target pmcd host to monitor | `localhost` |
+ | `PCP_USE_TLS` | Use HTTPS for pmproxy | `false` |
+ | `PCP_TIMEOUT` | Request timeout (seconds) | `30` |
+ | `PCP_USERNAME` | HTTP basic auth user | (optional) |
+ | `PCP_PASSWORD` | HTTP basic auth password | (optional) |
+
+ ## 🎯 Usage
+
+ ### Monitor localhost (default)
+
+ ```bash
+ pcp-mcp
+ ```
+
+ ### Monitor a remote host
+
+ ```bash
+ PCP_TARGET_HOST=webserver1.example.com pcp-mcp
+ ```
+
+ Or use the CLI flag:
+
+ ```bash
+ pcp-mcp --target-host webserver1.example.com
+ ```
+
+ ### Connect to remote pmproxy
+
+ ```bash
+ PCP_HOST=metrics.example.com pcp-mcp
+ ```
+
+ ### Use SSE transport
+
+ ```bash
+ pcp-mcp --transport sse
+ ```
+
+ ## 🔌 MCP Client Configuration
+
+ ### Claude Desktop
+
+ Add to `~/.config/claude/claude_desktop_config.json`:
+
+ ```json
+ {
+   "mcpServers": {
+     "pcp": {
+       "command": "pcp-mcp"
+     }
+   }
+ }
+ ```
+
+ For remote monitoring:
+
+ ```json
+ {
+   "mcpServers": {
+     "pcp": {
+       "command": "pcp-mcp",
+       "args": ["--target-host", "webserver1.example.com"]
+     }
+   }
+ }
+ ```
+
+ ## 🛠️ Available Tools
+
+ ### System Monitoring
+
+ - **`get_system_snapshot`** - Point-in-time system overview (CPU, memory, disk, network, load)
+ - **`get_process_top`** - Top processes by CPU, memory, or I/O usage
+ - **`query_metrics`** - Fetch current values for specific PCP metrics
+ - **`search_metrics`** - Discover available metrics by name pattern
+ - **`describe_metric`** - Get detailed metadata about a metric
+
+ ### Example Queries
+
+ ```
+ "What's the current CPU usage?"
+ → Uses get_system_snapshot
+
+ "Show me the top 10 processes by memory usage"
+ → Uses get_process_top(sort_by="memory", limit=10)
+
+ "What metrics are available for network traffic?"
+ → Uses search_metrics(pattern="network")
+
+ "Get detailed info about kernel.all.load"
+ → Uses describe_metric(name="kernel.all.load")
+ ```
+
+ ## 📚 Resources
+
+ Browse metrics via MCP resources:
+
+ - `pcp://metrics` - List all available metrics (grouped by prefix)
+ - `pcp://system/snapshot` - Latest system snapshot
+ - `pcp://processes/top` - Top processes
+
+ ## 💡 Use Cases
+
+ ### Performance Troubleshooting
+
+ Ask Claude to:
+ - "Analyze current system performance and identify bottlenecks"
+ - "Why is my disk I/O so high?"
+ - "Which processes are consuming the most CPU?"
+
+ ### System Monitoring
+
+ - "Give me a health check of the production server"
+ - "Compare CPU usage over the last minute"
+ - "Monitor network traffic on eth0"
+
+ ### Capacity Planning
+
+ - "What's the memory utilization trend?"
+ - "Show me disk usage across all filesystems"
+ - "Analyze process resource consumption patterns"
+
+ ## 🏗️ Architecture
+
+ ```
+ ┌─────────┐         ┌─────────┐          ┌─────────┐         ┌─────────┐
+ │   LLM   │ ◄─MCP─► │ pcp-mcp │ ◄─HTTP─► │ pmproxy │ ◄─────► │  pmcd   │
+ └─────────┘         └─────────┘          └─────────┘         └─────────┘
+                                          (REST API)          (metrics)
+ ```
+
+ - **pcp-mcp**: FastMCP server exposing PCP metrics via MCP tools
+ - **pmproxy**: PCP's REST API server (runs on port 44322 by default)
+ - **pmcd**: PCP metrics collector daemon
+ - **Remote monitoring**: Set `PCP_TARGET_HOST` to query a different pmcd instance via pmproxy
+
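For context on the pmproxy hop in this diagram, the request below sketches the kind of call pcp-mcp makes over HTTP. It assumes pmproxy's PMWEBAPI `/pmapi/fetch` endpoint and the default port; treat the endpoint and parameters as illustrative rather than the server's exact call:

```python
import httpx

# Query pmproxy's REST API directly; hostspec routes the fetch to a pmcd,
# mirroring what PCP_TARGET_HOST does for the MCP server.
resp = httpx.get(
    "http://localhost:44322/pmapi/fetch",
    params={"hostspec": "localhost", "names": "kernel.all.load"},
    timeout=30,
)
print(resp.json())  # timestamp plus per-instance values for the metric
```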
+ ## 🔧 Development
+
+ ```bash
+ # Install dependencies
+ uv sync --dev
+
+ # Run all checks
+ make check
+
+ # Individual commands
+ make lint      # ruff check
+ make format    # ruff format
+ make typecheck # ty check
+ make test      # pytest with coverage
+ ```
+
+ ## 📖 Documentation
+
+ Full documentation at [https://major.github.io/pcp-mcp](https://major.github.io/pcp-mcp)
+
+ ## 📄 License
+
+ MIT
--- /dev/null
+++ pcp_mcp-0.1.0.dist-info/RECORD
@@ -0,0 +1,22 @@
+ pcp_mcp/__init__.py,sha256=u-jLAyQZEv6jKWvAfBOAkuNkh7AVQIMgrq3hX9KqvpM,1658
+ pcp_mcp/client.py,sha256=CaEP8uVkMsfqLG-HDDhf4qnl3g4CgJBt0xpDcScgzzg,8151
+ pcp_mcp/config.py,sha256=EvcHnVHMM9tHSklYmksA2zMmtUJnOLAc-LSCWz4HW68,1771
+ pcp_mcp/context.py,sha256=Q7R8nB3UHxNxHVm2bKRnM7J2jXLIUv3eC3a1KfQWiQ4,1298
+ pcp_mcp/errors.py,sha256=sIes9OSNdYQeOmwjFknhfXXjBjOOzXmc94bbB-4b_tg,1598
+ pcp_mcp/models.py,sha256=GUILMSh9XGBt0Ayjt0-N8_ELl9K2jq78MD9ltdL-FBA,6390
+ pcp_mcp/prompts/__init__.py,sha256=0XnHZQVYFjCOEPFI54CuJsRdfl4g_PhocHDepe6_OV4,12208
+ pcp_mcp/resources/__init__.py,sha256=55Mm2ZCuyyQWWql9N8cLk6ld1pNj3Ezc416jgFX35dU,541
+ pcp_mcp/resources/catalog.py,sha256=1TIZgZ-o5dec47ftSpSRSl1xMtnKJZZOCKjrs9xIpaE,10374
+ pcp_mcp/resources/health.py,sha256=ekirrQdEnSI0VjFtXepeFlBjhshG0sDRrJpI2Ngh2Qs,2194
+ pcp_mcp/server.py,sha256=1VIei1UdeXgIaiO00nPFfuXRJti48_bLKwkS8aCW-LM,4209
+ pcp_mcp/tools/__init__.py,sha256=sXhOqqnUwzSf16QU6eS79LMvXJcv7jqSXQlrpQG4UV0,505
+ pcp_mcp/tools/metrics.py,sha256=suu9X6JAYENdZjlQXqaGtFAqxkIGSoG5NNHCuZdlae0,4479
+ pcp_mcp/tools/system.py,sha256=hrOu-xoCmQbmsGhGU2w7vmG1f-Sv7BMqLEnKSwFxD78,6942
+ pcp_mcp/utils/__init__.py,sha256=HfZ2KM23RlO85sOLcrJG0Oii2YhOqq4SkeVKS0TLBmA,1031
+ pcp_mcp/utils/builders.py,sha256=n13Ou6cb1-YToG-M31J8_jWajq8ioJx6tJTKnqaQiio,10293
+ pcp_mcp/utils/decorators.py,sha256=FutKJgYjP6hgg9N388mycyzUTp-hTqwU_jYt_i39O5o,939
+ pcp_mcp/utils/extractors.py,sha256=F977szuqUNBkhNa9Tx1nQio9YiiwKAkbWD9aROZ7sgo,1807
+ pcp_mcp-0.1.0.dist-info/WHEEL,sha256=XV0cjMrO7zXhVAIyyc8aFf1VjZ33Fen4IiJk5zFlC3g,80
+ pcp_mcp-0.1.0.dist-info/entry_points.txt,sha256=PhVo92EGoS05yEpHVRyKEsxKya_bWlPLodp-g4tr2Rg,42
+ pcp_mcp-0.1.0.dist-info/METADATA,sha256=gCYEaj2adv_Qw7gIgIX8_pbDJMA04MW0aUTg6A4QfNU,5708
+ pcp_mcp-0.1.0.dist-info/RECORD,,
--- /dev/null
+++ pcp_mcp-0.1.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: uv 0.9.26
+ Root-Is-Purelib: true
+ Tag: py3-none-any
--- /dev/null
+++ pcp_mcp-0.1.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+ [console_scripts]
+ pcp-mcp = pcp_mcp:main
+