tweek 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. tweek/__init__.py +16 -0
  2. tweek/cli.py +3390 -0
  3. tweek/cli_helpers.py +193 -0
  4. tweek/config/__init__.py +13 -0
  5. tweek/config/allowed_dirs.yaml +23 -0
  6. tweek/config/manager.py +1064 -0
  7. tweek/config/patterns.yaml +751 -0
  8. tweek/config/tiers.yaml +129 -0
  9. tweek/diagnostics.py +589 -0
  10. tweek/hooks/__init__.py +1 -0
  11. tweek/hooks/pre_tool_use.py +861 -0
  12. tweek/integrations/__init__.py +3 -0
  13. tweek/integrations/moltbot.py +243 -0
  14. tweek/licensing.py +398 -0
  15. tweek/logging/__init__.py +9 -0
  16. tweek/logging/bundle.py +350 -0
  17. tweek/logging/json_logger.py +150 -0
  18. tweek/logging/security_log.py +745 -0
  19. tweek/mcp/__init__.py +24 -0
  20. tweek/mcp/approval.py +456 -0
  21. tweek/mcp/approval_cli.py +356 -0
  22. tweek/mcp/clients/__init__.py +37 -0
  23. tweek/mcp/clients/chatgpt.py +112 -0
  24. tweek/mcp/clients/claude_desktop.py +203 -0
  25. tweek/mcp/clients/gemini.py +178 -0
  26. tweek/mcp/proxy.py +667 -0
  27. tweek/mcp/screening.py +175 -0
  28. tweek/mcp/server.py +317 -0
  29. tweek/platform/__init__.py +131 -0
  30. tweek/plugins/__init__.py +835 -0
  31. tweek/plugins/base.py +1080 -0
  32. tweek/plugins/compliance/__init__.py +30 -0
  33. tweek/plugins/compliance/gdpr.py +333 -0
  34. tweek/plugins/compliance/gov.py +324 -0
  35. tweek/plugins/compliance/hipaa.py +285 -0
  36. tweek/plugins/compliance/legal.py +322 -0
  37. tweek/plugins/compliance/pci.py +361 -0
  38. tweek/plugins/compliance/soc2.py +275 -0
  39. tweek/plugins/detectors/__init__.py +30 -0
  40. tweek/plugins/detectors/continue_dev.py +206 -0
  41. tweek/plugins/detectors/copilot.py +254 -0
  42. tweek/plugins/detectors/cursor.py +192 -0
  43. tweek/plugins/detectors/moltbot.py +205 -0
  44. tweek/plugins/detectors/windsurf.py +214 -0
  45. tweek/plugins/git_discovery.py +395 -0
  46. tweek/plugins/git_installer.py +491 -0
  47. tweek/plugins/git_lockfile.py +338 -0
  48. tweek/plugins/git_registry.py +503 -0
  49. tweek/plugins/git_security.py +482 -0
  50. tweek/plugins/providers/__init__.py +30 -0
  51. tweek/plugins/providers/anthropic.py +181 -0
  52. tweek/plugins/providers/azure_openai.py +289 -0
  53. tweek/plugins/providers/bedrock.py +248 -0
  54. tweek/plugins/providers/google.py +197 -0
  55. tweek/plugins/providers/openai.py +230 -0
  56. tweek/plugins/scope.py +130 -0
  57. tweek/plugins/screening/__init__.py +26 -0
  58. tweek/plugins/screening/llm_reviewer.py +149 -0
  59. tweek/plugins/screening/pattern_matcher.py +273 -0
  60. tweek/plugins/screening/rate_limiter.py +174 -0
  61. tweek/plugins/screening/session_analyzer.py +159 -0
  62. tweek/proxy/__init__.py +302 -0
  63. tweek/proxy/addon.py +223 -0
  64. tweek/proxy/interceptor.py +313 -0
  65. tweek/proxy/server.py +315 -0
  66. tweek/sandbox/__init__.py +71 -0
  67. tweek/sandbox/executor.py +382 -0
  68. tweek/sandbox/linux.py +278 -0
  69. tweek/sandbox/profile_generator.py +323 -0
  70. tweek/screening/__init__.py +13 -0
  71. tweek/screening/context.py +81 -0
  72. tweek/security/__init__.py +22 -0
  73. tweek/security/llm_reviewer.py +348 -0
  74. tweek/security/rate_limiter.py +682 -0
  75. tweek/security/secret_scanner.py +506 -0
  76. tweek/security/session_analyzer.py +600 -0
  77. tweek/vault/__init__.py +40 -0
  78. tweek/vault/cross_platform.py +251 -0
  79. tweek/vault/keychain.py +288 -0
  80. tweek-0.1.0.dist-info/METADATA +335 -0
  81. tweek-0.1.0.dist-info/RECORD +85 -0
  82. tweek-0.1.0.dist-info/WHEEL +5 -0
  83. tweek-0.1.0.dist-info/entry_points.txt +25 -0
  84. tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
  85. tweek-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,197 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek Google Gemini Provider Plugin
4
+
5
+ Handles Google Gemini API format:
6
+ - Endpoint: generativelanguage.googleapis.com
7
+ - Tool calls in functionCall parts
8
+ - GenerateContent API format
9
+ """
10
+
11
+ from typing import Optional, List, Dict, Any
12
+ from tweek.plugins.base import LLMProviderPlugin, ToolCall
13
+
14
+
15
class GoogleProvider(LLMProviderPlugin):
    """
    Google Gemini API provider plugin.

    Supports:
    - GenerateContent API
    - Function calling
    - Multi-turn conversations

    All extractors are defensive: responses/requests may come from proxied
    traffic, so unexpected shapes are skipped rather than raising.
    """

    VERSION = "1.0.0"
    DESCRIPTION = "Google Gemini API provider"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["provider", "google", "gemini"]

    @property
    def name(self) -> str:
        """Unique provider identifier (also stamped onto ToolCall.provider)."""
        return "google"

    @property
    def api_hosts(self) -> List[str]:
        """API hostnames whose traffic this provider handles."""
        return [
            "generativelanguage.googleapis.com",
            "aiplatform.googleapis.com",  # Vertex AI
        ]

    def extract_tool_calls(self, response: Dict[str, Any]) -> List[ToolCall]:
        """
        Extract tool calls from a Gemini API response.

        Gemini format:
            {
                "candidates": [
                    {
                        "content": {
                            "parts": [
                                {
                                    "functionCall": {
                                        "name": "tool_name",
                                        "args": {...}
                                    }
                                }
                            ]
                        }
                    }
                ]
            }

        Returns:
            One ToolCall per functionCall part, in document order. Malformed
            entries are skipped.
        """
        tool_calls: List[ToolCall] = []

        candidates = response.get("candidates", [])
        if not isinstance(candidates, list):
            return tool_calls

        for idx, candidate in enumerate(candidates):
            if not isinstance(candidate, dict):
                continue

            content = candidate.get("content", {})
            if not isinstance(content, dict):
                continue

            parts = content.get("parts", [])
            if not isinstance(parts, list):
                continue

            for part_idx, part in enumerate(parts):
                if not isinstance(part, dict):
                    continue

                function_call = part.get("functionCall")
                if isinstance(function_call, dict):
                    # Gemini does not assign call IDs; synthesize a stable
                    # one from the candidate/part position.
                    tool_calls.append(ToolCall(
                        id=f"gemini_{idx}_{part_idx}",
                        name=function_call.get("name", ""),
                        input=function_call.get("args", {}),
                        provider=self.name,
                        raw=function_call,
                    ))

        return tool_calls

    def extract_content(self, response: Dict[str, Any]) -> str:
        """
        Extract text content from a Gemini API response.

        Returns:
            All text parts across all candidates, joined with newlines
            ("" when none are present).
        """
        candidates = response.get("candidates", [])
        if not isinstance(candidates, list):
            return ""

        text_parts: List[str] = []
        for candidate in candidates:
            if not isinstance(candidate, dict):
                continue

            content = candidate.get("content", {})
            if not isinstance(content, dict):
                continue

            parts = content.get("parts", [])
            if not isinstance(parts, list):
                continue

            for part in parts:
                if isinstance(part, dict) and "text" in part:
                    text_parts.append(part["text"])

        return "\n".join(text_parts)

    def extract_messages(self, request: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Extract messages from a Gemini API request.

        Gemini uses 'contents' instead of 'messages'; each entry is converted
        to a standard {"role", "content"} message with its text parts joined
        by newlines. Entries with no text are dropped.
        """
        contents = request.get("contents", [])
        if not isinstance(contents, list):
            return []

        # Convert Gemini format to standard format
        messages: List[Dict[str, Any]] = []
        for content in contents:
            if not isinstance(content, dict):
                continue

            role = content.get("role", "user")
            parts = content.get("parts", [])
            # Fix: guard against non-list 'parts' (e.g. null) — the other
            # extractors in this class already do this.
            if not isinstance(parts, list):
                continue

            text_parts: List[str] = []
            for part in parts:
                if isinstance(part, dict) and "text" in part:
                    text_parts.append(part["text"])
                elif isinstance(part, str):
                    text_parts.append(part)

            if text_parts:
                messages.append({
                    "role": role,
                    "content": "\n".join(text_parts)
                })

        return messages

    def get_system_prompt(self, request: Dict[str, Any]) -> Optional[str]:
        """Extract the system instruction from a request, if any.

        Gemini uses 'systemInstruction', either as a string or as a
        content object with 'parts'. Returns None when absent or empty.
        """
        system = request.get("systemInstruction")
        if isinstance(system, dict):
            parts = system.get("parts", [])
            # Fix: guard against non-list 'parts' before iterating.
            if not isinstance(parts, list):
                return None
            text_parts = [
                part["text"]
                for part in parts
                if isinstance(part, dict) and "text" in part
            ]
            return "\n".join(text_parts) if text_parts else None
        elif isinstance(system, str):
            return system
        return None

    def is_streaming_response(self, response: Dict[str, Any]) -> bool:
        """Check if a response is a streaming chunk."""
        # Gemini streaming sends candidates with partial content and
        # includes 'usageMetadata' only in the final chunk.
        # NOTE(review): assumes non-final chunks never carry usageMetadata —
        # confirm against the GenerateContent streaming docs.
        return "candidates" in response and "usageMetadata" not in response

    def extract_function_declarations(
        self,
        request: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Extract function declarations (tool definitions) from a request.

        Returns:
            Flattened list of all 'functionDeclarations' entries across the
            request's 'tools'; [] when missing or malformed.
        """
        tools = request.get("tools", [])
        # Fix: the original iterated 'tools' unguarded — a null value raised
        # TypeError, unlike every other extractor in this class.
        if not isinstance(tools, list):
            return []

        declarations: List[Dict[str, Any]] = []
        for tool in tools:
            if isinstance(tool, dict):
                func_decls = tool.get("functionDeclarations", [])
                # Fix: only extend from an actual list (extending from a
                # dict would silently add its keys).
                if isinstance(func_decls, list):
                    declarations.extend(func_decls)

        return declarations
@@ -0,0 +1,230 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek OpenAI Provider Plugin
4
+
5
+ Handles OpenAI GPT API format:
6
+ - Endpoint: api.openai.com
7
+ - Tool calls in message.tool_calls array
8
+ - Chat completions API format
9
+ """
10
+
11
+ import json
12
+ from typing import Optional, List, Dict, Any
13
+ from tweek.plugins.base import LLMProviderPlugin, ToolCall
14
+
15
+
16
class OpenAIProvider(LLMProviderPlugin):
    """
    OpenAI GPT API provider plugin.

    Supports:
    - Chat Completions API
    - Function calling (legacy)
    - Tool use (current)
    - Streaming responses

    All extractors are defensive: payloads may come from proxied traffic,
    so unexpected shapes are skipped rather than raising.
    """

    VERSION = "1.0.0"
    DESCRIPTION = "OpenAI GPT API provider"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["provider", "openai", "gpt"]

    @property
    def name(self) -> str:
        """Unique provider identifier (also stamped onto ToolCall.provider)."""
        return "openai"

    @property
    def api_hosts(self) -> List[str]:
        """API hostnames whose traffic this provider handles."""
        return [
            "api.openai.com",
        ]

    @staticmethod
    def _parse_arguments(args_str: Any) -> Dict[str, Any]:
        """Parse an OpenAI 'arguments' value (a JSON string) into a dict.

        Malformed JSON is preserved under '_raw' and non-dict results under
        '_value' so nothing is silently dropped.
        """
        try:
            args = json.loads(args_str) if isinstance(args_str, str) else args_str
        except json.JSONDecodeError:
            return {"_raw": args_str}
        return args if isinstance(args, dict) else {"_value": args}

    def extract_tool_calls(self, response: Dict[str, Any]) -> List[ToolCall]:
        """
        Extract tool calls from an OpenAI API response.

        OpenAI format:
            {
                "choices": [
                    {
                        "message": {
                            "tool_calls": [
                                {
                                    "id": "call_xxx",
                                    "type": "function",
                                    "function": {
                                        "name": "tool_name",
                                        "arguments": "{...}"  # JSON string
                                    }
                                }
                            ]
                        }
                    }
                ]
            }

        Handles both the current 'tool_calls' array and the legacy
        'function_call' object.
        """
        tool_calls: List[ToolCall] = []

        choices = response.get("choices", [])
        if not isinstance(choices, list):
            return tool_calls

        for choice in choices:
            if not isinstance(choice, dict):
                continue

            message = choice.get("message", {})
            if not isinstance(message, dict):
                continue

            # Handle tool_calls (current format).
            # Fix: the API can send "tool_calls": null — the original
            # get(..., []) only covers a *missing* key and raised TypeError
            # on an explicit null.
            raw_calls = message.get("tool_calls") or []
            if not isinstance(raw_calls, list):
                raw_calls = []
            for tc in raw_calls:
                if not isinstance(tc, dict):
                    continue

                func = tc.get("function", {})
                if not isinstance(func, dict):
                    continue

                tool_calls.append(ToolCall(
                    id=tc.get("id", ""),
                    name=func.get("name", ""),
                    input=self._parse_arguments(func.get("arguments", "{}")),
                    provider=self.name,
                    raw=tc,
                ))

            # Handle function_call (legacy format); it carries no id.
            function_call = message.get("function_call")
            if isinstance(function_call, dict):
                tool_calls.append(ToolCall(
                    id="function_call",
                    name=function_call.get("name", ""),
                    input=self._parse_arguments(function_call.get("arguments", "{}")),
                    provider=self.name,
                    raw=function_call,
                ))

        return tool_calls

    def extract_content(self, response: Dict[str, Any]) -> str:
        """
        Extract text content from an OpenAI API response.

        Returns:
            String message contents across all choices, joined with
            newlines ("" when none are present).
        """
        choices = response.get("choices", [])
        if not isinstance(choices, list) or not choices:
            return ""

        content_parts: List[str] = []
        for choice in choices:
            if not isinstance(choice, dict):
                continue

            message = choice.get("message", {})
            if isinstance(message, dict):
                content = message.get("content")
                if isinstance(content, str):
                    content_parts.append(content)

        return "\n".join(content_parts)

    def extract_messages(self, request: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Extract the messages list from an OpenAI API request.

        Returns:
            The 'messages' list, or [] when missing or not a list.
        """
        messages = request.get("messages", [])
        # Fix: the original returned the raw value unchecked, so a malformed
        # request could leak a non-list through the declared return type.
        return messages if isinstance(messages, list) else []

    def get_system_prompt(self, request: Dict[str, Any]) -> Optional[str]:
        """Extract the system prompt from a request, if any.

        Handles both plain-string content and the content-parts array form.
        Returns None when no system message is present.
        """
        messages = request.get("messages", [])
        # Fix: guard against a non-list 'messages' value before iterating.
        if not isinstance(messages, list):
            return None
        for msg in messages:
            if isinstance(msg, dict) and msg.get("role") == "system":
                content = msg.get("content")
                if isinstance(content, str):
                    return content
                elif isinstance(content, list):
                    # Handle content array format
                    parts = []
                    for part in content:
                        if isinstance(part, dict) and part.get("type") == "text":
                            parts.append(part.get("text", ""))
                    return "\n".join(parts)
        return None

    def is_streaming_response(self, response: Dict[str, Any]) -> bool:
        """Check if a response is a streaming chunk."""
        # Streaming responses have 'object': 'chat.completion.chunk'
        return response.get("object") == "chat.completion.chunk"

    def extract_streaming_tool_calls(
        self,
        chunks: List[Dict[str, Any]]
    ) -> List[ToolCall]:
        """
        Extract tool calls from streaming chunks.

        Reassembles tool calls from delta chunks: 'arguments' fragments are
        concatenated per tool-call index, and the id/name are taken from
        whichever chunk carries them.
        """
        tool_calls: Dict[int, Dict[str, Any]] = {}

        for chunk in chunks:
            choices = chunk.get("choices", [])
            # Fix: guard against a non-list 'choices' value.
            if not isinstance(choices, list):
                continue
            for choice in choices:
                if not isinstance(choice, dict):
                    continue

                delta = choice.get("delta", {})
                if not isinstance(delta, dict):
                    continue

                raw_calls = delta.get("tool_calls") or []
                if not isinstance(raw_calls, list):
                    continue
                for tc in raw_calls:
                    if not isinstance(tc, dict):
                        continue

                    index = tc.get("index", 0)
                    entry = tool_calls.setdefault(
                        index, {"id": "", "name": "", "arguments": ""}
                    )

                    # Fix: the original captured 'id' only when creating the
                    # entry, losing it if it arrived in a later chunk.
                    if tc.get("id"):
                        entry["id"] = tc["id"]

                    func = tc.get("function", {})
                    if isinstance(func, dict):
                        if func.get("name"):
                            entry["name"] = func["name"]
                        if func.get("arguments"):
                            entry["arguments"] += func["arguments"]

        # Convert accumulated fragments to ToolCall objects, in index order.
        result: List[ToolCall] = []
        for index in sorted(tool_calls.keys()):
            tc_data = tool_calls[index]
            try:
                args = json.loads(tc_data["arguments"]) if tc_data["arguments"] else {}
            except json.JSONDecodeError:
                args = {"_raw": tc_data["arguments"]}

            result.append(ToolCall(
                id=tc_data["id"],
                name=tc_data["name"],
                input=args,
                provider=self.name,
            ))

        return result
tweek/plugins/scope.py ADDED
@@ -0,0 +1,130 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Plugin Scoping System
4
+
5
+ Allows plugins to be scoped to specific tools, skills, projects, tiers,
6
+ and scan directions. Plugins without a scope run globally (default behavior).
7
+
8
+ Example scope config:
9
+ scope:
10
+ tools: [Bash, WebFetch, Write]
11
+ skills: [email-search, patient-records]
12
+ projects: ["/Users/me/healthcare-app"]
13
+ tiers: [risky, dangerous]
14
+ directions: [input, output]
15
+ """
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Optional, List, Dict, Any
19
+ import logging
20
+
21
+ from tweek.screening.context import ScreeningContext
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
@dataclass
class PluginScope:
    """
    Describes the conditions under which a plugin is active.

    Every field is an independent filter; leaving a field as None means
    "no restriction on that dimension". A set field matches when the
    context matches at least one of its values (OR within a field), and
    the scope as a whole matches only when every set field matches
    (AND across fields).
    """
    tools: Optional[List[str]] = None
    skills: Optional[List[str]] = None
    projects: Optional[List[str]] = None
    tiers: Optional[List[str]] = None
    # NOTE(review): 'directions' is serialized and described but not
    # consulted by matches() — presumably enforced by the plugin runner;
    # confirm.
    directions: Optional[List[str]] = None

    def matches(self, context: ScreeningContext) -> bool:
        """
        Decide whether a plugin scoped by this object should run.

        Args:
            context: The current screening context.

        Returns:
            True when every set filter accepts the context; unset
            filters (None) always accept.
        """
        # Tool filter: the context's tool must be listed.
        if self.tools is not None and context.tool_name not in self.tools:
            return False

        # Skill filter: only applied when the context actually carries a
        # skill name — a missing skill never blocks, so plugins keep
        # running when skill info is unavailable.
        skill = context.skill_name
        if self.skills is not None and skill is not None and skill not in self.skills:
            return False

        # Project filter: the working directory must live under one of
        # the configured project roots.
        if self.projects is not None:
            cwd = context.working_dir
            for root in self.projects:
                if cwd.startswith(root):
                    break
            else:
                return False

        # Tier filter: the effective tier must be listed.
        if self.tiers is not None and context.tier not in self.tiers:
            return False

        return True

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict containing only the filters that are set."""
        out: Dict[str, Any] = {}
        for key in ("tools", "skills", "projects", "tiers", "directions"):
            value = getattr(self, key)
            if value is not None:
                out[key] = value
        return out

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "PluginScope":
        """Build a PluginScope from a dict (e.g. parsed config YAML)."""
        keys = ("tools", "skills", "projects", "tiers", "directions")
        return cls(**{key: data.get(key) for key in keys})

    @property
    def is_global(self) -> bool:
        """True when no filter is set, i.e. the scope matches everything."""
        fields_ = (self.tools, self.skills, self.projects, self.tiers, self.directions)
        return all(value is None for value in fields_)

    def describe(self) -> str:
        """Return a human-readable one-line summary of this scope."""
        labeled = (
            ("Tools", self.tools),
            ("Skills", self.skills),
            ("Projects", self.projects),
            ("Tiers", self.tiers),
            ("Directions", self.directions),
        )
        parts = [
            f"{label}: {', '.join(values)}"
            for label, values in labeled
            if values is not None
        ]
        return " | ".join(parts) if parts else "Global (no restrictions)"
@@ -0,0 +1,26 @@
1
#!/usr/bin/env python3
"""
Tweek Screening Plugins

Screening plugins provide security analysis methods:
- RateLimiter: Detect burst patterns and abuse
- PatternMatcher: Regex-based pattern matching
- LLMReviewer: Semantic analysis using LLM
- SessionAnalyzer: Cross-turn anomaly detection

License tiers:
- FREE: PatternMatcher (basic patterns)
- PRO: RateLimiter, LLMReviewer, SessionAnalyzer
"""

# Re-export the plugin classes so callers can import them directly from
# tweek.plugins.screening instead of the individual submodules.
from tweek.plugins.screening.rate_limiter import RateLimiterPlugin
from tweek.plugins.screening.pattern_matcher import PatternMatcherPlugin
from tweek.plugins.screening.llm_reviewer import LLMReviewerPlugin
from tweek.plugins.screening.session_analyzer import SessionAnalyzerPlugin

# Public API of this subpackage.
__all__ = [
    "RateLimiterPlugin",
    "PatternMatcherPlugin",
    "LLMReviewerPlugin",
    "SessionAnalyzerPlugin",
]