mcpower-proxy 0.0.73__py3-none-any.whl → 0.0.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,4 +3,3 @@ Common IDE Tools
 
 Shared logic for all IDE integrations.
 """
-
@@ -3,4 +3,3 @@ Common Hook Handlers
 
 Shared hook logic for all IDEs
 """
-
@@ -33,13 +33,14 @@ def output_init_result(success: bool, message: str):
 
 
 async def handle_init(
-    logger: MCPLogger,
-    audit_logger: AuditTrailLogger,
-    event_id: str,
-    cwd: Optional[str],
-    server_name: str,
-    client_name: str,
-    hooks: Dict[str, Dict[str, str]]
+    logger: MCPLogger,
+    audit_logger: AuditTrailLogger,
+    event_id: str,
+    prompt_id: str,
+    cwd: Optional[str],
+    server_name: str,
+    client_name: str,
+    hooks: Dict[str, Dict[str, str]]
 ) -> None:
     """
     Generic init handler - registers hooks with security API
@@ -48,6 +49,7 @@ async def handle_init(
         logger: Logger instance
         audit_logger: Audit logger instance
         event_id: Event identifier
+        prompt_id: Prompt identifier
         cwd: Current working directory
         server_name: IDE-specific server name (e.g. "cursor_tools_mcp")
         client_name: IDE-specific client name (e.g. "cursor", "claude-code")
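
With the extra parameter, each IDE-specific init entry point now has to thread a prompt_id through to the shared handler. A rough sketch of the updated call shape, reusing the example names from the docstring above; the hook-table contents and the surrounding wiring are assumptions, and the import of handle_init is omitted because its module path is not shown in this diff:

# Sketch only: handle_init's signature comes from this diff; everything else
# (hook metadata keys, caller structure) is illustrative.
async def run_cursor_init(logger, audit_logger, event_id, prompt_id, cwd):
    hooks = {
        # hook name -> metadata dict used to build ToolRef entries
        # (the real keys are defined elsewhere in the package)
        "prompt_submit": {"name": "prompt_submit"},
        "read_file": {"name": "read_file"},
    }
    await handle_init(
        logger=logger,
        audit_logger=audit_logger,
        event_id=event_id,
        prompt_id=prompt_id,   # new in 0.0.77
        cwd=cwd,
        server_name="cursor_tools_mcp",
        client_name="cursor",
        hooks=hooks,
    )
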
@@ -56,19 +58,22 @@ async def handle_init(
     Outputs result and exits with appropriate code.
     """
     session_id = get_session_id()
-
-    logger.info(f"Init handler started (client={client_name}, event_id={event_id}, cwd={cwd})")
-
+
+    logger.info(f"Init handler started (client={client_name}, event_id={event_id}, prompt_id={prompt_id}, cwd={cwd})")
+
     try:
         app_uid = read_app_uid(logger, get_project_mcpower_dir(cwd))
         audit_logger.set_app_uid(app_uid)
-
-        audit_logger.log_event("mcpower_start", {
-            "wrapper_version": __version__,
-            "wrapped_server_name": server_name,
-            "client": client_name
-        })
-
+
+        audit_logger.log_event(
+            "mcpower_start",
+            {
+                "wrapper_version": __version__,
+                "wrapped_server_name": server_name,
+                "client": client_name
+            }
+        )
+
         try:
             tools = [
                 ToolRef(
@@ -78,7 +83,7 @@ async def handle_init(
                 )
                 for hook_info in hooks.values()
             ]
-
+
             init_request = InitRequest(
                 environment=EnvironmentContext(
                     session_id=session_id,
@@ -98,7 +103,7 @@ async def handle_init(
                 ),
                 tools=tools
             )
-
+
             async with SecurityPolicyClient(
                 session_id=session_id,
                 logger=logger,
@@ -106,20 +111,19 @@ async def handle_init(
                 app_id=app_uid
             ) as client:
                 await client.init_tools(init_request, event_id=event_id)
-
+
                 logger.info(f"Hooks registered successfully for {client_name}")
-
+
                 # Success - output result and exit
                 output_init_result(True, f"{client_name.title()} hooks registered successfully")
                 sys.exit(0)
-
+
         except Exception as e:
             logger.error(f"API initialization failed: {e}")
             output_init_result(False, f"Error: {str(e)}")
             sys.exit(1)
-
+
     except Exception as e:
         logger.error(f"Unexpected error in init handler: {e}", exc_info=True)
         output_init_result(False, f"Initialization failed: {str(e)}")
         sys.exit(1)
-
@@ -10,12 +10,12 @@ from .types import OutputFormat
 
 
 def output_result(
-    logger: MCPLogger,
-    output_format: OutputFormat,
-    hook_type: str,
-    allowed: bool,
-    user_message: Optional[str] = None,
-    agent_message: Optional[str] = None
+    logger: MCPLogger,
+    output_format: OutputFormat,
+    hook_type: str,
+    allowed: bool,
+    user_message: Optional[str] = None,
+    agent_message: Optional[str] = None
 ) -> None:
     """
     Output hook result in IDE-specific format and exit with appropriate code
@@ -30,20 +30,20 @@ def output_result(
     """
     # Format output using IDE-specific formatter
     formatted_output = output_format.formatter(hook_type, allowed, user_message, agent_message)
-
+
     logger.info(f"Hook output ({hook_type}, allowed={allowed}): {formatted_output}")
     print(formatted_output, flush=True)
-
+
     # Exit with appropriate code
     exit_code = output_format.allow_exit_code if allowed else output_format.deny_exit_code
     sys.exit(exit_code)
 
 
 def output_error(
-    logger: MCPLogger,
-    output_format: OutputFormat,
-    hook_type: str,
-    error_message: str
+    logger: MCPLogger,
+    output_format: OutputFormat,
+    hook_type: str,
+    error_message: str
 ) -> None:
     """
     Output error and exit with error code
@@ -55,10 +55,9 @@ def output_error(
         error_message: Error message
     """
     logger.error(f"Hook error: {error_message}")
-
+
     # Output as deny/block with error message
     formatted_output = output_format.formatter(hook_type, False, error_message, error_message)
     print(formatted_output, flush=True)
-
-    sys.exit(output_format.error_exit_code)
 
+    sys.exit(output_format.error_exit_code)
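
Both helpers above drive their behaviour entirely off the OutputFormat value imported from .types: a formatter callable plus three exit codes. The type's definition is not part of this diff; inferred purely from how output_result and output_error use it, it plausibly looks like the sketch below (the dataclass form and any defaults are assumptions):

# Inferred sketch only - the real OutputFormat lives in .types and is not
# shown in this diff. Field names come from how output_result/output_error
# use the object; everything else is an assumption.
from dataclasses import dataclass
from typing import Callable, Optional

@dataclass(frozen=True)
class OutputFormat:
    # Renders (hook_type, allowed, user_message, agent_message) into the
    # IDE-specific payload that gets printed to stdout.
    formatter: Callable[[str, bool, Optional[str], Optional[str]], str]
    allow_exit_code: int   # exit code when the hook allows the action
    deny_exit_code: int    # exit code when the hook denies the action
    error_exit_code: int   # exit code when the hook itself fails
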
@@ -8,10 +8,10 @@ from modules.logs.audit_trail import AuditTrailLogger
 from modules.logs.logger import MCPLogger
 from modules.redaction import redact
 from modules.utils.ids import get_session_id, read_app_uid, get_project_mcpower_dir
+from modules.utils.string import truncate_at
 from .output import output_result, output_error
 from .types import HookConfig
-from .utils import create_validator, extract_redaction_patterns, build_sensitive_data_types, \
-    process_attachments_for_redaction, inspect_and_enforce
+from .utils import create_validator, extract_redaction_patterns, process_attachments_for_redaction, inspect_and_enforce
 
 
 async def handle_prompt_submit(
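
The new truncate_at import replaces the inline redacted_prompt[:20] slice used for the prompt_submission audit event further down in this file (the logged excerpt grows from 20 to 100 characters). Its implementation is not included in the diff; a minimal sketch of what such a helper usually does, where the exact ellipsis behaviour is an assumption:

# Hypothetical stand-in for modules.utils.string.truncate_at; the real
# implementation is not part of this diff.
def truncate_at(text: str, limit: int) -> str:
    """Return text unchanged if it is short enough, else cut it at `limit` characters."""
    if len(text) <= limit:
        return text
    return text[:limit] + "..."  # assumption: marks the truncation with an ellipsis
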
@@ -46,7 +46,6 @@ async def handle_prompt_submit(
     audit_logger.set_app_uid(app_uid)
 
     try:
-        # Validate input
         try:
             validator = create_validator(
                 required_fields={"prompt": str},
@@ -60,17 +59,17 @@ async def handle_prompt_submit(
             output_error(logger, config.output_format, "continue", str(e))
             return
 
-        # Check for redactions in prompt
         redacted_prompt = redact(prompt)
-        # Log audit event
+
         audit_logger.log_event(
             "prompt_submission",
             {
                 "server": config.server_name,
                 "tool": tool_name,
-                "params": {"prompt": f"{redacted_prompt[:20]}...", "attachments_count": len(attachments)}
+                "params": {"prompt": truncate_at(redacted_prompt, 100), "attachments_count": len(attachments)}
             },
-            event_id=event_id
+            event_id=event_id,
+            prompt_id=prompt_id
         )
 
         prompt_patterns = extract_redaction_patterns(redacted_prompt)
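
Every audit_logger.log_event(...) call touched in this release gains a prompt_id=prompt_id keyword next to event_id, both here and in the read-file handler below. The AuditTrailLogger class itself is defined in modules.logs.audit_trail and is not part of this diff; the call sites only rely on a signature along these lines, where the positional parameter names are guesses:

# Signature sketch inferred from the call sites in this diff; the real
# AuditTrailLogger is not shown here, and the positional parameter names
# (event_type, payload) are guesses.
from typing import Any, Dict, Optional


class AuditTrailLogger:
    def log_event(
        self,
        event_type: str,
        payload: Dict[str, Any],
        event_id: Optional[str] = None,
        prompt_id: Optional[str] = None,  # keyword now supplied by every hook handler
    ) -> None:
        ...
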
@@ -83,60 +82,12 @@ async def handle_prompt_submit(
 
         has_any_redactions = bool(prompt_patterns) or len(files_with_redactions) > 0
 
-        # If no redactions found, allow immediately without API call
-        if not has_any_redactions:
-            logger.info("No sensitive data found in prompt or attachments - allowing without API call")
-
-            audit_logger.log_event(
-                "prompt_submission_forwarded",
-                {
-                    "server": config.server_name,
-                    "tool": tool_name,
-                    "params": {"redactions_found": has_any_redactions}
-                },
-                event_id=event_id
-            )
-
-            output_result(logger, config.output_format, "continue", True)
-            return
-
-        logger.info(f"Found redactions in prompt or {len(files_with_redactions)} file(s) - calling API for inspection")
-
-        # Build explicit content_data structure showing security risk
         content_data: Dict[str, Any] = {
-            "security_alert": "Sensitive data detected in user prompt submission"
+            "prompt": redacted_prompt,
+            "is_redacted": has_any_redactions,
+            "redacted_files": files_with_redactions,
         }
 
-        # Add prompt analysis if sensitive data found in prompt text
-        if prompt_patterns:
-            sensitive_data_types = build_sensitive_data_types(prompt_patterns, "prompt text")
-
-            total_prompt_items = sum(prompt_patterns.values())
-            content_data["user_prompt_analysis"] = {
-                "contains_sensitive_data": True,
-                "sensitive_data_types": sensitive_data_types,
-                "risk_summary": f"Prompt contains {total_prompt_items} sensitive data item(s) across {len(prompt_patterns)} type(s)"
-            }
-
-        # Add file analysis if sensitive data found in attachments
-        if files_with_redactions:
-            total_file_items = sum(
-                sum(f["sensitive_data_types"][dt]["occurrences"] for dt in f["sensitive_data_types"])
-                for f in files_with_redactions
-            )
-            content_data["attached_files_with_secrets_or_pii"] = files_with_redactions
-            content_data["files_summary"] = \
-                f"{len(files_with_redactions)} file(s) contain {total_file_items} sensitive data item(s)"
-
-        # Calculate overall risk level
-        total_sensitive_items = sum(prompt_patterns.values()) if prompt_patterns else 0
-        if files_with_redactions:
-            total_sensitive_items += sum(
-                sum(f["sensitive_data_types"][dt]["occurrences"] for dt in f["sensitive_data_types"])
-                for f in files_with_redactions
-            )
-        content_data["overall_summary"] = f"Total: {total_sensitive_items} sensitive data item(s) detected"
-
         # Call security API and enforce decision
         try:
             decision = await inspect_and_enforce(
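
This is the main behavioural change in the prompt-submit handler: 0.0.73 skipped the security API entirely when no redactions were found and otherwise sent a hand-built risk summary, whereas 0.0.77 calls inspect_and_enforce on every prompt with a much flatter payload. Roughly, the structure sent to the API changes as follows (illustrative values, not actual captured traffic; key names come from the hunk above):

# Illustrative shapes only - all values are invented.

# 0.0.73: only sent when redactions were found, padded out with derived summaries
content_data_old = {
    "security_alert": "Sensitive data detected in user prompt submission",
    "user_prompt_analysis": {},                  # added only if the prompt text had hits
    "attached_files_with_secrets_or_pii": [],    # added only if attachments had hits
    "files_summary": "1 file(s) contain 2 sensitive data item(s)",
    "overall_summary": "Total: 3 sensitive data item(s) detected",
}

# 0.0.77: sent on every prompt submission, whether or not anything was redacted
content_data_new = {
    "prompt": "please deploy using the key I pasted earlier",   # redacted prompt text
    "is_redacted": False,
    "redacted_files": [],                        # attachments that contained sensitive data
}
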
@@ -154,7 +105,6 @@ async def handle_prompt_submit(
                 client_name=config.client_name
             )
 
-            # Log audit event for forwarding
             audit_logger.log_event(
                 "prompt_submission_forwarded",
                 {
@@ -162,13 +112,13 @@ async def handle_prompt_submit(
                     "tool": tool_name,
                     "params": {"redactions_found": has_any_redactions}
                 },
-                event_id=event_id
+                event_id=event_id,
+                prompt_id=prompt_id
             )
 
-            # Output success
             reasons = decision.get("reasons", [])
-            agent_message = "Prompt submission approved: " + "; ".join(
-                reasons) if reasons else "Prompt submission approved by security policy"
+            agent_message = "Prompt submission approved: {0}".format("; ".join(
+                reasons)) if reasons else "Prompt submission approved by security policy"
             output_result(logger, config.output_format, "continue", True,
                           "Prompt approved", agent_message)
 
@@ -37,13 +37,13 @@ async def handle_read_file(
         tool_name: IDE-specific tool name (e.g., "beforeReadFile", "PreToolUse")
     """
     session_id = get_session_id()
-    logger.info(f"Read file handler started (client={config.client_name}, prompt_id={prompt_id}, event_id={event_id}, cwd={cwd})")
+    logger.info(
+        f"Read file handler started (client={config.client_name}, prompt_id={prompt_id}, event_id={event_id}, cwd={cwd})")
 
     app_uid = read_app_uid(logger, get_project_mcpower_dir(cwd))
     audit_logger.set_app_uid(app_uid)
 
     try:
-        # Validate input
         try:
             validator = create_validator(
                 required_fields={"file_path": str, "content": str},
@@ -58,7 +58,6 @@ async def handle_read_file(
             output_error(logger, config.output_format, "permission", str(e))
             return
 
-        # Log audit event
         audit_logger.log_event(
             "agent_request",
             {
@@ -66,7 +65,8 @@ async def handle_read_file(
                 "tool": tool_name,
                 "params": {"file_path": file_path, "attachments_count": len(attachments)}
             },
-            event_id=event_id
+            event_id=event_id,
+            prompt_id=prompt_id
         )
 
         # Check content length - skip API if too large
@@ -85,7 +85,8 @@ async def handle_read_file(
                         "content_too_large": True
                     }
                },
-                event_id=event_id
+                event_id=event_id,
+                prompt_id=prompt_id
            )
 
            output_result(logger, config.output_format, "permission", True)
@@ -93,11 +94,11 @@ async def handle_read_file(
 
         # Redact the main content
         redacted_content = redact(provided_content)
-
+
         # Process attachments for redaction status
         files_with_redactions = process_attachments_for_redaction(attachments, logger)
         files_with_redactions_paths = {f["file_path"] for f in files_with_redactions}
-
+
         # Build attachments info with redaction status
         attachments_info = []
         for attachment in attachments:
@@ -107,10 +108,10 @@ async def handle_read_file(
                 "file_path": att_path,
                 "has_redactions": att_path in files_with_redactions_paths
             })
-
+
         logger.info(f"Processed file and {len(attachments)} attachment(s), found redactions in "
                     f"{len(files_with_redactions)} attachment(s)")
-
+
         # Build content_data with file_path, redacted content, and attachments
         content_data = {
             "file_path": file_path,
@@ -136,7 +137,6 @@ async def handle_read_file(
                 client_name=config.client_name
             )
 
-            # Log audit event for forwarding
             audit_logger.log_event(
                 "agent_request_forwarded",
                 {
@@ -147,12 +147,13 @@ async def handle_read_file(
                      "content_length": len(provided_content),
                      "attachments_with_redactions": len(files_with_redactions)}
                 },
-                event_id=event_id
+                event_id=event_id,
+                prompt_id=prompt_id
             )
 
-            # Output success
             reasons = decision.get("reasons", [])
-            agent_message = "File read approved: " + "; ".join(reasons) if reasons else "File read approved by security policy"
+            agent_message = "File read approved: " + "; ".join(
+                reasons) if reasons else "File read approved by security policy"
             output_result(logger, config.output_format, "permission", True, "File read approved", agent_message)
 
         except Exception as e:
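
The agent_message rewrites here and in the prompt-submit handler above only re-wrap long lines; neither changes the string that gets produced, because a conditional expression binds more loosely than +, .join(...), or .format(...). A standalone check (not package code):

# Both wrappings evaluate the whole left-hand expression before applying
# "if reasons else ...", so they are equivalent for any reasons list.
reasons = ["policy A matched", "policy B matched"]

wrapped_concat = "File read approved: " + "; ".join(
    reasons) if reasons else "File read approved by security policy"
wrapped_format = "File read approved: {0}".format("; ".join(
    reasons)) if reasons else "File read approved by security policy"

assert wrapped_concat == wrapped_format == (
    "File read approved: policy A matched; policy B matched"
)
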
@@ -167,4 +168,3 @@ async def handle_read_file(
     except Exception as e:
         logger.error(f"Unexpected error in read file handler: {e}", exc_info=True)
         output_error(logger, config.output_format, "permission", f"Unexpected error: {str(e)}")
-