snipara-mcp 1.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snipara_mcp-1.1.1/.gitignore +44 -0
- snipara_mcp-1.1.1/LICENSE +21 -0
- snipara_mcp-1.1.1/PKG-INFO +189 -0
- snipara_mcp-1.1.1/README.md +166 -0
- snipara_mcp-1.1.1/pyproject.toml +45 -0
- snipara_mcp-1.1.1/src/snipara_mcp/__init__.py +6 -0
- snipara_mcp-1.1.1/src/snipara_mcp/py.typed +0 -0
- snipara_mcp-1.1.1/src/snipara_mcp/server.py +438 -0
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# Dependencies
|
|
2
|
+
node_modules
|
|
3
|
+
.pnpm-store
|
|
4
|
+
|
|
5
|
+
# Build outputs
|
|
6
|
+
.next
|
|
7
|
+
.turbo
|
|
8
|
+
dist
|
|
9
|
+
out
|
|
10
|
+
|
|
11
|
+
# Environment
|
|
12
|
+
.env
|
|
13
|
+
.env.local
|
|
14
|
+
.env.*.local
|
|
15
|
+
.env.docker
|
|
16
|
+
|
|
17
|
+
# IDE
|
|
18
|
+
.idea
|
|
19
|
+
.vscode
|
|
20
|
+
*.swp
|
|
21
|
+
*.swo
|
|
22
|
+
|
|
23
|
+
# OS
|
|
24
|
+
.DS_Store
|
|
25
|
+
Thumbs.db
|
|
26
|
+
|
|
27
|
+
# Logs
|
|
28
|
+
*.log
|
|
29
|
+
npm-debug.log*
|
|
30
|
+
pnpm-debug.log*
|
|
31
|
+
|
|
32
|
+
# Testing
|
|
33
|
+
coverage
|
|
34
|
+
|
|
35
|
+
# Database
|
|
36
|
+
*.db
|
|
37
|
+
*.sqlite
|
|
38
|
+
|
|
39
|
+
# Python
|
|
40
|
+
__pycache__
|
|
41
|
+
*.pyc
|
|
42
|
+
.venv
|
|
43
|
+
.vercel
|
|
44
|
+
.env*.local
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Snipara
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: snipara-mcp
|
|
3
|
+
Version: 1.1.1
|
|
4
|
+
Summary: MCP server for Snipara - Context optimization for LLMs
|
|
5
|
+
Project-URL: Homepage, https://snipara.com
|
|
6
|
+
Project-URL: Documentation, https://docs.snipara.com
|
|
7
|
+
Project-URL: Repository, https://github.com/alopez3006/snipara-mcp-server
|
|
8
|
+
Author-email: Snipara <support@snipara.com>
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: claude,context,cursor,documentation,llm,mcp,snipara
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Requires-Python: >=3.10
|
|
20
|
+
Requires-Dist: httpx>=0.26.0
|
|
21
|
+
Requires-Dist: mcp>=1.0.0
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
|
|
24
|
+
# Snipara MCP Server
|
|
25
|
+
|
|
26
|
+
[](https://pypi.org/project/snipara-mcp/)
|
|
27
|
+
[](https://www.python.org/downloads/)
|
|
28
|
+
[](https://opensource.org/licenses/MIT)
|
|
29
|
+
|
|
30
|
+
MCP server for [Snipara](https://snipara.com) - Context optimization for any LLM.
|
|
31
|
+
|
|
32
|
+
Query your documentation efficiently with **90% token reduction**. Works with any MCP-compatible client including Claude Desktop, Cursor, Windsurf, Gemini, GPT, and more.
|
|
33
|
+
|
|
34
|
+
**LLM-agnostic**: Snipara optimizes context delivery - you use your own LLM (Claude, GPT, Gemini, Llama, etc.).
|
|
35
|
+
|
|
36
|
+
## Installation
|
|
37
|
+
|
|
38
|
+
### Option 1: uvx (Recommended - No Install)
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
uvx snipara-mcp
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### Option 2: pip
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install snipara-mcp
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Configuration
|
|
51
|
+
|
|
52
|
+
### Claude Desktop
|
|
53
|
+
|
|
54
|
+
Add to `~/Library/Application Support/Claude/claude_desktop_config.json` (macOS) or `%APPDATA%\Claude\claude_desktop_config.json` (Windows):
|
|
55
|
+
|
|
56
|
+
```json
|
|
57
|
+
{
|
|
58
|
+
"mcpServers": {
|
|
59
|
+
"snipara": {
|
|
60
|
+
"command": "uvx",
|
|
61
|
+
"args": ["snipara-mcp"],
|
|
62
|
+
"env": {
|
|
63
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
64
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
### Cursor
|
|
72
|
+
|
|
73
|
+
Add to `~/.cursor/mcp.json`:
|
|
74
|
+
|
|
75
|
+
```json
|
|
76
|
+
{
|
|
77
|
+
"mcpServers": {
|
|
78
|
+
"snipara": {
|
|
79
|
+
"command": "uvx",
|
|
80
|
+
"args": ["snipara-mcp"],
|
|
81
|
+
"env": {
|
|
82
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
83
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
### Claude Code
|
|
91
|
+
|
|
92
|
+
```bash
|
|
93
|
+
claude mcp add snipara -- uvx snipara-mcp
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
Then set environment variables in your shell or `.env` file.
|
|
97
|
+
|
|
98
|
+
### Windsurf
|
|
99
|
+
|
|
100
|
+
Add to `~/.codeium/windsurf/mcp_config.json`:
|
|
101
|
+
|
|
102
|
+
```json
|
|
103
|
+
{
|
|
104
|
+
"mcpServers": {
|
|
105
|
+
"snipara": {
|
|
106
|
+
"command": "uvx",
|
|
107
|
+
"args": ["snipara-mcp"],
|
|
108
|
+
"env": {
|
|
109
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
110
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
## Environment Variables
|
|
118
|
+
|
|
119
|
+
| Variable | Required | Description |
|
|
120
|
+
|----------|----------|-------------|
|
|
121
|
+
| `SNIPARA_API_KEY` | Yes | Your Snipara API key |
|
|
122
|
+
| `SNIPARA_PROJECT_ID` | Yes | Your project ID |
|
|
123
|
+
| `SNIPARA_API_URL` | No | API URL (default: https://api.snipara.com) |
|
|
124
|
+
|
|
125
|
+
Get your API key and project ID from [snipara.com/dashboard](https://snipara.com/dashboard).
|
|
126
|
+
|
|
127
|
+
## Available Tools
|
|
128
|
+
|
|
129
|
+
### Primary Tool
|
|
130
|
+
|
|
131
|
+
- **`rlm_context_query`** - Query optimized context from your documentation
|
|
132
|
+
- `query`: Your question (required)
|
|
133
|
+
- `max_tokens`: Token budget (default: 4000)
|
|
134
|
+
- `search_mode`: `keyword`, `semantic`, or `hybrid` (default: hybrid)
|
|
135
|
+
|
|
136
|
+
### Search & Navigation
|
|
137
|
+
|
|
138
|
+
- **`rlm_search`** - Regex pattern search
|
|
139
|
+
- **`rlm_sections`** - List all document sections
|
|
140
|
+
- **`rlm_read`** - Read specific line ranges
|
|
141
|
+
- **`rlm_stats`** - Documentation statistics
|
|
142
|
+
|
|
143
|
+
### Advanced (Pro+)
|
|
144
|
+
|
|
145
|
+
- **`rlm_decompose`** - Break complex queries into sub-queries
|
|
146
|
+
- **`rlm_multi_query`** - Execute multiple queries with shared token budget
|
|
147
|
+
|
|
148
|
+
### Session Context
|
|
149
|
+
|
|
150
|
+
- **`rlm_inject`** - Set context for subsequent queries
|
|
151
|
+
- **`rlm_context`** - Show current context
|
|
152
|
+
- **`rlm_clear_context`** - Clear context
|
|
153
|
+
|
|
154
|
+
## Example Usage
|
|
155
|
+
|
|
156
|
+
Once configured, ask your LLM:
|
|
157
|
+
|
|
158
|
+
> "Use snipara to find how authentication works in my codebase"
|
|
159
|
+
|
|
160
|
+
The LLM will call `rlm_context_query` and return relevant documentation sections.
|
|
161
|
+
|
|
162
|
+
## Alternative: Direct HTTP (No Local Install)
|
|
163
|
+
|
|
164
|
+
For clients that support HTTP transport (Claude Code, Cursor v0.48+), you can connect directly without installing anything:
|
|
165
|
+
|
|
166
|
+
**Claude Code:**
|
|
167
|
+
```json
|
|
168
|
+
{
|
|
169
|
+
"mcpServers": {
|
|
170
|
+
"snipara": {
|
|
171
|
+
"type": "http",
|
|
172
|
+
"url": "https://api.snipara.com/mcp/YOUR_PROJECT_ID",
|
|
173
|
+
"headers": {
|
|
174
|
+
"Authorization": "Bearer sk-your-api-key"
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
}
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
## Support
|
|
182
|
+
|
|
183
|
+
- Website: [snipara.com](https://snipara.com)
|
|
184
|
+
- Issues: [github.com/alopez3006/snipara-mcp-server/issues](https://github.com/alopez3006/snipara-mcp-server/issues)
|
|
185
|
+
- Email: support@snipara.com
|
|
186
|
+
|
|
187
|
+
## License
|
|
188
|
+
|
|
189
|
+
MIT
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
# Snipara MCP Server
|
|
2
|
+
|
|
3
|
+
[](https://pypi.org/project/snipara-mcp/)
|
|
4
|
+
[](https://www.python.org/downloads/)
|
|
5
|
+
[](https://opensource.org/licenses/MIT)
|
|
6
|
+
|
|
7
|
+
MCP server for [Snipara](https://snipara.com) - Context optimization for any LLM.
|
|
8
|
+
|
|
9
|
+
Query your documentation efficiently with **90% token reduction**. Works with any MCP-compatible client including Claude Desktop, Cursor, Windsurf, Gemini, GPT, and more.
|
|
10
|
+
|
|
11
|
+
**LLM-agnostic**: Snipara optimizes context delivery - you use your own LLM (Claude, GPT, Gemini, Llama, etc.).
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
### Option 1: uvx (Recommended - No Install)
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
uvx snipara-mcp
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
### Option 2: pip
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
pip install snipara-mcp
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Configuration
|
|
28
|
+
|
|
29
|
+
### Claude Desktop
|
|
30
|
+
|
|
31
|
+
Add to `~/Library/Application Support/Claude/claude_desktop_config.json` (macOS) or `%APPDATA%\Claude\claude_desktop_config.json` (Windows):
|
|
32
|
+
|
|
33
|
+
```json
|
|
34
|
+
{
|
|
35
|
+
"mcpServers": {
|
|
36
|
+
"snipara": {
|
|
37
|
+
"command": "uvx",
|
|
38
|
+
"args": ["snipara-mcp"],
|
|
39
|
+
"env": {
|
|
40
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
41
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
}
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### Cursor
|
|
49
|
+
|
|
50
|
+
Add to `~/.cursor/mcp.json`:
|
|
51
|
+
|
|
52
|
+
```json
|
|
53
|
+
{
|
|
54
|
+
"mcpServers": {
|
|
55
|
+
"snipara": {
|
|
56
|
+
"command": "uvx",
|
|
57
|
+
"args": ["snipara-mcp"],
|
|
58
|
+
"env": {
|
|
59
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
60
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Claude Code
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
claude mcp add snipara -- uvx snipara-mcp
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
Then set environment variables in your shell or `.env` file.
|
|
74
|
+
|
|
75
|
+
### Windsurf
|
|
76
|
+
|
|
77
|
+
Add to `~/.codeium/windsurf/mcp_config.json`:
|
|
78
|
+
|
|
79
|
+
```json
|
|
80
|
+
{
|
|
81
|
+
"mcpServers": {
|
|
82
|
+
"snipara": {
|
|
83
|
+
"command": "uvx",
|
|
84
|
+
"args": ["snipara-mcp"],
|
|
85
|
+
"env": {
|
|
86
|
+
"SNIPARA_API_KEY": "sk-your-api-key",
|
|
87
|
+
"SNIPARA_PROJECT_ID": "your-project-id"
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
## Environment Variables
|
|
95
|
+
|
|
96
|
+
| Variable | Required | Description |
|
|
97
|
+
|----------|----------|-------------|
|
|
98
|
+
| `SNIPARA_API_KEY` | Yes | Your Snipara API key |
|
|
99
|
+
| `SNIPARA_PROJECT_ID` | Yes | Your project ID |
|
|
100
|
+
| `SNIPARA_API_URL` | No | API URL (default: https://api.snipara.com) |
|
|
101
|
+
|
|
102
|
+
Get your API key and project ID from [snipara.com/dashboard](https://snipara.com/dashboard).
|
|
103
|
+
|
|
104
|
+
## Available Tools
|
|
105
|
+
|
|
106
|
+
### Primary Tool
|
|
107
|
+
|
|
108
|
+
- **`rlm_context_query`** - Query optimized context from your documentation
|
|
109
|
+
- `query`: Your question (required)
|
|
110
|
+
- `max_tokens`: Token budget (default: 4000)
|
|
111
|
+
- `search_mode`: `keyword`, `semantic`, or `hybrid` (default: hybrid)
|
|
112
|
+
|
|
113
|
+
### Search & Navigation
|
|
114
|
+
|
|
115
|
+
- **`rlm_search`** - Regex pattern search
|
|
116
|
+
- **`rlm_sections`** - List all document sections
|
|
117
|
+
- **`rlm_read`** - Read specific line ranges
|
|
118
|
+
- **`rlm_stats`** - Documentation statistics
|
|
119
|
+
|
|
120
|
+
### Advanced (Pro+)
|
|
121
|
+
|
|
122
|
+
- **`rlm_decompose`** - Break complex queries into sub-queries
|
|
123
|
+
- **`rlm_multi_query`** - Execute multiple queries with shared token budget
|
|
124
|
+
|
|
125
|
+
### Session Context
|
|
126
|
+
|
|
127
|
+
- **`rlm_inject`** - Set context for subsequent queries
|
|
128
|
+
- **`rlm_context`** - Show current context
|
|
129
|
+
- **`rlm_clear_context`** - Clear context
|
|
130
|
+
|
|
131
|
+
## Example Usage
|
|
132
|
+
|
|
133
|
+
Once configured, ask your LLM:
|
|
134
|
+
|
|
135
|
+
> "Use snipara to find how authentication works in my codebase"
|
|
136
|
+
|
|
137
|
+
The LLM will call `rlm_context_query` and return relevant documentation sections.
|
|
138
|
+
|
|
139
|
+
## Alternative: Direct HTTP (No Local Install)
|
|
140
|
+
|
|
141
|
+
For clients that support HTTP transport (Claude Code, Cursor v0.48+), you can connect directly without installing anything:
|
|
142
|
+
|
|
143
|
+
**Claude Code:**
|
|
144
|
+
```json
|
|
145
|
+
{
|
|
146
|
+
"mcpServers": {
|
|
147
|
+
"snipara": {
|
|
148
|
+
"type": "http",
|
|
149
|
+
"url": "https://api.snipara.com/mcp/YOUR_PROJECT_ID",
|
|
150
|
+
"headers": {
|
|
151
|
+
"Authorization": "Bearer sk-your-api-key"
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
## Support
|
|
159
|
+
|
|
160
|
+
- Website: [snipara.com](https://snipara.com)
|
|
161
|
+
- Issues: [github.com/alopez3006/snipara-mcp-server/issues](https://github.com/alopez3006/snipara-mcp-server/issues)
|
|
162
|
+
- Email: support@snipara.com
|
|
163
|
+
|
|
164
|
+
## License
|
|
165
|
+
|
|
166
|
+
MIT
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "snipara-mcp"
|
|
3
|
+
version = "1.1.1"
|
|
4
|
+
description = "MCP server for Snipara - Context optimization for LLMs"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.10"
|
|
7
|
+
license = { text = "MIT" }
|
|
8
|
+
authors = [{ name = "Snipara", email = "support@snipara.com" }]
|
|
9
|
+
keywords = ["mcp", "llm", "context", "documentation", "snipara", "claude", "cursor"]
|
|
10
|
+
classifiers = [
|
|
11
|
+
"Development Status :: 4 - Beta",
|
|
12
|
+
"Intended Audience :: Developers",
|
|
13
|
+
"License :: OSI Approved :: MIT License",
|
|
14
|
+
"Programming Language :: Python :: 3",
|
|
15
|
+
"Programming Language :: Python :: 3.10",
|
|
16
|
+
"Programming Language :: Python :: 3.11",
|
|
17
|
+
"Programming Language :: Python :: 3.12",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
dependencies = [
|
|
21
|
+
"mcp>=1.0.0",
|
|
22
|
+
"httpx>=0.26.0",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[project.scripts]
|
|
26
|
+
snipara-mcp = "snipara_mcp:main"
|
|
27
|
+
|
|
28
|
+
[project.urls]
|
|
29
|
+
Homepage = "https://snipara.com"
|
|
30
|
+
Documentation = "https://docs.snipara.com"
|
|
31
|
+
Repository = "https://github.com/alopez3006/snipara-mcp-server"
|
|
32
|
+
|
|
33
|
+
[build-system]
|
|
34
|
+
requires = ["hatchling<1.26"]
|
|
35
|
+
build-backend = "hatchling.build"
|
|
36
|
+
|
|
37
|
+
[tool.hatch.build.targets.wheel]
|
|
38
|
+
packages = ["src/snipara_mcp"]
|
|
39
|
+
|
|
40
|
+
[tool.hatch.metadata]
|
|
41
|
+
license-files = []
|
|
42
|
+
|
|
43
|
+
[tool.ruff]
|
|
44
|
+
line-length = 100
|
|
45
|
+
target-version = "py310"
|
|
File without changes
|
|
@@ -0,0 +1,438 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Snipara MCP Server - stdio transport to Snipara REST API.
|
|
4
|
+
|
|
5
|
+
This MCP server connects your LLM client (Claude Desktop, Cursor, etc.)
|
|
6
|
+
to your Snipara project for context-optimized documentation queries.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
snipara-mcp
|
|
10
|
+
|
|
11
|
+
Environment variables:
|
|
12
|
+
SNIPARA_API_KEY: Your Snipara API key (required)
|
|
13
|
+
SNIPARA_PROJECT_ID: Your project ID (required)
|
|
14
|
+
SNIPARA_API_URL: API URL (default: https://api.snipara.com)
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import asyncio
|
|
18
|
+
import os
|
|
19
|
+
import sys
|
|
20
|
+
import time
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
import httpx
|
|
24
|
+
from mcp.server import Server
|
|
25
|
+
from mcp.server.stdio import stdio_server
|
|
26
|
+
from mcp.types import TextContent, Tool
|
|
27
|
+
|
|
28
|
+
# Configuration (read once at import time from the environment).
API_URL = os.environ.get("SNIPARA_API_URL", "https://api.snipara.com")  # REST API base URL
API_KEY = os.environ.get("SNIPARA_API_KEY", "")  # sent as the X-API-Key header on every request
PROJECT_ID = os.environ.get("SNIPARA_PROJECT_ID", "")  # interpolated into the /v1/{id}/... paths

# Session context cache: free-form text set via rlm_inject and prepended to
# subsequent queries; lives only for this process.
_session_context: str = ""

# Settings cache (5 minute TTL): project automation settings fetched from the API.
_settings_cache: dict[str, Any] = {}
_settings_cache_time: float = 0  # time.time() of the last successful fetch; 0 = never fetched
SETTINGS_CACHE_TTL = 300  # 5 minutes
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
async def get_project_settings() -> dict[str, Any]:
    """Return the project's automation settings, using a short-lived cache.

    A previously fetched copy is reused while it is younger than
    SETTINGS_CACHE_TTL seconds. On any network error or a non-200 response
    the hard-coded defaults below are returned and the cache is left as-is.
    """
    global _settings_cache, _settings_cache_time

    # Serve from cache while it is still fresh.
    cache_age = time.time() - _settings_cache_time
    if _settings_cache and cache_age < SETTINGS_CACHE_TTL:
        return _settings_cache

    # Otherwise try a fresh fetch from the automation endpoint.
    try:
        async with httpx.AsyncClient(timeout=10.0) as http:
            resp = await http.get(
                f"{API_URL}/v1/{PROJECT_ID}/automation",
                headers=get_headers(),
            )
            if resp.status_code == 200:
                payload = resp.json()
                _settings_cache = payload.get("settings", {})
                _settings_cache_time = time.time()
                return _settings_cache
    except Exception:
        # Best-effort: any failure falls through to the defaults below.
        pass

    # Defaults used when the API is unreachable or returns an error.
    return {
        "maxTokensPerQuery": 4000,
        "searchMode": "hybrid",
        "includeSummaries": True,
        "autoInjectContext": False,
        "enrichPrompts": False,
    }
|
|
73
|
+
|
|
74
|
+
# The MCP server instance all tool handlers are registered against.
server = Server("snipara")


def get_headers() -> dict[str, str]:
    """HTTP headers sent on every request to the Snipara REST API."""
    header_pairs = [
        ("X-API-Key", API_KEY),
        ("Content-Type", "application/json"),
    ]
    return dict(header_pairs)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
async def call_api(tool: str, params: dict[str, Any]) -> dict[str, Any]:
    """POST a tool invocation to the Snipara MCP endpoint and return its JSON.

    Raises an httpx.HTTPStatusError for non-2xx responses
    (via raise_for_status).
    """
    endpoint = f"{API_URL}/v1/{PROJECT_ID}/mcp"
    body = {"tool": tool, "params": params}
    async with httpx.AsyncClient(timeout=60.0) as http:
        resp = await http.post(endpoint, headers=get_headers(), json=body)
        resp.raise_for_status()
        return resp.json()
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@server.list_tools()
async def list_tools() -> list[Tool]:
    """List available Snipara tools.

    The schemas mirror the tool descriptions in the README: one primary
    query tool, search/navigation helpers, advanced multi-query tools,
    session-context management, and a settings viewer.
    """
    return [
        # --- Primary query tool ---------------------------------------
        Tool(
            name="rlm_context_query",
            description="""Query optimized context from your documentation.

Returns ranked, relevant sections that fit within your token budget.
This is the PRIMARY tool - use it for any documentation questions.

Examples:
- "How does authentication work?"
- "What are the API endpoints?"
- "Where is the database schema?"

Returns sections with relevance scores, file paths, and line numbers.""",
            inputSchema={
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Your question"},
                    "max_tokens": {"type": "integer", "default": 4000, "description": "Token budget"},
                    "search_mode": {"type": "string", "enum": ["keyword", "semantic", "hybrid"], "default": "hybrid"},
                },
                "required": ["query"],
            },
        ),
        # Legacy/basic question tool; handler routes it through
        # rlm_context_query on the backend.
        Tool(
            name="rlm_ask",
            description="Query documentation (basic). Use rlm_context_query for better results.",
            inputSchema={
                "type": "object",
                "properties": {"question": {"type": "string", "description": "The question to ask"}},
                "required": ["question"],
            },
        ),
        # --- Search & navigation --------------------------------------
        Tool(
            name="rlm_search",
            description="Search documentation for a pattern (regex supported).",
            inputSchema={
                "type": "object",
                "properties": {
                    "pattern": {"type": "string", "description": "Regex pattern"},
                    "max_results": {"type": "integer", "default": 20},
                },
                "required": ["pattern"],
            },
        ),
        # --- Advanced (Pro+) ------------------------------------------
        Tool(
            name="rlm_decompose",
            description="Break complex query into sub-queries with execution order.",
            inputSchema={
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Complex question to decompose"},
                    "max_depth": {"type": "integer", "default": 2},
                },
                "required": ["query"],
            },
        ),
        Tool(
            name="rlm_multi_query",
            description="Execute multiple queries in one call with shared token budget.",
            inputSchema={
                "type": "object",
                "properties": {
                    "queries": {
                        "type": "array",
                        "items": {"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
                    },
                    "max_tokens": {"type": "integer", "default": 8000},
                },
                "required": ["queries"],
            },
        ),
        # --- Session context management -------------------------------
        Tool(
            name="rlm_inject",
            description="Set session context for subsequent queries.",
            inputSchema={
                "type": "object",
                "properties": {
                    "context": {"type": "string", "description": "Context to inject"},
                    "append": {"type": "boolean", "default": False},
                },
                "required": ["context"],
            },
        ),
        Tool(
            name="rlm_context",
            description="Show current session context.",
            inputSchema={"type": "object", "properties": {}, "required": []},
        ),
        Tool(
            name="rlm_clear_context",
            description="Clear session context.",
            inputSchema={"type": "object", "properties": {}, "required": []},
        ),
        # --- Introspection --------------------------------------------
        Tool(
            name="rlm_stats",
            description="Show documentation statistics.",
            inputSchema={"type": "object", "properties": {}, "required": []},
        ),
        Tool(
            name="rlm_sections",
            description="List all document sections.",
            inputSchema={"type": "object", "properties": {}, "required": []},
        ),
        Tool(
            name="rlm_read",
            description="Read specific lines from documentation.",
            inputSchema={
                "type": "object",
                "properties": {"start_line": {"type": "integer"}, "end_line": {"type": "integer"}},
                "required": ["start_line", "end_line"],
            },
        ),
        Tool(
            name="rlm_settings",
            description="Show current project settings from dashboard (max_tokens, search_mode, etc.).",
            inputSchema={"type": "object", "properties": {"refresh": {"type": "boolean", "default": False, "description": "Force refresh from API"}}, "required": []},
        ),
    ]
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
@server.call_tool()
|
|
218
|
+
async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
|
|
219
|
+
"""Handle tool calls."""
|
|
220
|
+
global _session_context
|
|
221
|
+
|
|
222
|
+
try:
|
|
223
|
+
if name == "rlm_context_query":
|
|
224
|
+
# Get project settings from dashboard (cached)
|
|
225
|
+
settings = await get_project_settings()
|
|
226
|
+
|
|
227
|
+
query = arguments["query"]
|
|
228
|
+
if _session_context:
|
|
229
|
+
query = f"Context: {_session_context}\n\nQuestion: {query}"
|
|
230
|
+
|
|
231
|
+
# Use settings from dashboard, allow override from arguments
|
|
232
|
+
max_tokens = arguments.get("max_tokens") or settings.get("maxTokensPerQuery", 4000)
|
|
233
|
+
search_mode = arguments.get("search_mode") or settings.get("searchMode", "hybrid")
|
|
234
|
+
include_summaries = settings.get("includeSummaries", True)
|
|
235
|
+
|
|
236
|
+
result = await call_api("rlm_context_query", {
|
|
237
|
+
"query": query,
|
|
238
|
+
"max_tokens": max_tokens,
|
|
239
|
+
"search_mode": search_mode,
|
|
240
|
+
"include_metadata": True,
|
|
241
|
+
"prefer_summaries": include_summaries,
|
|
242
|
+
})
|
|
243
|
+
|
|
244
|
+
if result.get("success"):
|
|
245
|
+
data = result.get("result", {})
|
|
246
|
+
sections = data.get("sections", [])
|
|
247
|
+
if sections:
|
|
248
|
+
parts = ["## Relevant Documentation\n"]
|
|
249
|
+
for s in sections:
|
|
250
|
+
parts.append(f"### {s.get('title', 'Untitled')}")
|
|
251
|
+
parts.append(f"*{s.get('file', '')} | Score: {s.get('relevance_score', 0):.2f}*\n")
|
|
252
|
+
parts.append(s.get("content", ""))
|
|
253
|
+
parts.append("")
|
|
254
|
+
parts.append(f"---\n*{len(sections)} sections, {data.get('total_tokens', 0)} tokens*")
|
|
255
|
+
return [TextContent(type="text", text="\n".join(parts))]
|
|
256
|
+
return [TextContent(type="text", text="No relevant documentation found.")]
|
|
257
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
258
|
+
|
|
259
|
+
elif name == "rlm_ask":
|
|
260
|
+
question = arguments["question"]
|
|
261
|
+
if _session_context:
|
|
262
|
+
question = f"Context: {_session_context}\n\nQuestion: {question}"
|
|
263
|
+
|
|
264
|
+
result = await call_api("rlm_context_query", {
|
|
265
|
+
"query": question, "max_tokens": 4000, "search_mode": "hybrid", "include_metadata": True,
|
|
266
|
+
})
|
|
267
|
+
|
|
268
|
+
if result.get("success"):
|
|
269
|
+
data = result.get("result", {})
|
|
270
|
+
sections = data.get("sections", [])
|
|
271
|
+
if sections:
|
|
272
|
+
parts = ["## Relevant Documentation\n"]
|
|
273
|
+
for s in sections:
|
|
274
|
+
parts.append(f"### {s.get('title', 'Untitled')}")
|
|
275
|
+
parts.append(f"*{s.get('file', '')}*\n")
|
|
276
|
+
parts.append(s.get("content", ""))
|
|
277
|
+
parts.append("")
|
|
278
|
+
return [TextContent(type="text", text="\n".join(parts))]
|
|
279
|
+
return [TextContent(type="text", text="No relevant documentation found.")]
|
|
280
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
281
|
+
|
|
282
|
+
elif name == "rlm_search":
|
|
283
|
+
result = await call_api("rlm_search", {"pattern": arguments["pattern"]})
|
|
284
|
+
if result.get("success"):
|
|
285
|
+
matches = result.get("result", {}).get("matches", [])
|
|
286
|
+
max_results = arguments.get("max_results", 20)
|
|
287
|
+
if matches:
|
|
288
|
+
lines = [f"Found {len(matches)} matches:\n"]
|
|
289
|
+
for m in matches[:max_results]:
|
|
290
|
+
lines.append(f" {m.get('file', '')}:{m.get('line_number', 0)}: {m.get('content', '')[:100]}")
|
|
291
|
+
if len(matches) > max_results:
|
|
292
|
+
lines.append(f"\n... and {len(matches) - max_results} more")
|
|
293
|
+
return [TextContent(type="text", text="\n".join(lines))]
|
|
294
|
+
return [TextContent(type="text", text=f"No matches for '{arguments['pattern']}'")]
|
|
295
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
296
|
+
|
|
297
|
+
elif name == "rlm_decompose":
|
|
298
|
+
result = await call_api("rlm_decompose", {"query": arguments["query"], "max_depth": arguments.get("max_depth", 2)})
|
|
299
|
+
if result.get("success"):
|
|
300
|
+
data = result.get("result", {})
|
|
301
|
+
sub = data.get("sub_queries", [])
|
|
302
|
+
lines = [f"**Decomposed into {len(sub)} sub-queries:**\n"]
|
|
303
|
+
for q in sub:
|
|
304
|
+
lines.append(f"{q.get('id', 0)}. {q.get('query', '')} (priority: {q.get('priority', 1)})")
|
|
305
|
+
lines.append(f"\n**Suggested order:** {data.get('suggested_sequence', [])}")
|
|
306
|
+
return [TextContent(type="text", text="\n".join(lines))]
|
|
307
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
308
|
+
|
|
309
|
+
elif name == "rlm_multi_query":
|
|
310
|
+
result = await call_api("rlm_multi_query", {
|
|
311
|
+
"queries": arguments["queries"], "max_tokens": arguments.get("max_tokens", 8000),
|
|
312
|
+
})
|
|
313
|
+
if result.get("success"):
|
|
314
|
+
data = result.get("result", {})
|
|
315
|
+
parts = [f"**Executed {data.get('queries_executed', 0)} queries:**\n"]
|
|
316
|
+
for r in data.get("results", []):
|
|
317
|
+
parts.append(f"### {r.get('query', '')}")
|
|
318
|
+
for s in r.get("sections", [])[:2]:
|
|
319
|
+
parts.append(f"- {s.get('title', '')} ({s.get('file', '')})")
|
|
320
|
+
parts.append("")
|
|
321
|
+
parts.append(f"*Total: {data.get('total_tokens', 0)} tokens*")
|
|
322
|
+
return [TextContent(type="text", text="\n".join(parts))]
|
|
323
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
324
|
+
|
|
325
|
+
elif name == "rlm_inject":
|
|
326
|
+
ctx = arguments["context"]
|
|
327
|
+
if arguments.get("append") and _session_context:
|
|
328
|
+
_session_context = _session_context + "\n\n" + ctx
|
|
329
|
+
else:
|
|
330
|
+
_session_context = ctx
|
|
331
|
+
try:
|
|
332
|
+
await call_api("rlm_inject", {"context": _session_context})
|
|
333
|
+
except Exception:
|
|
334
|
+
pass
|
|
335
|
+
return [TextContent(type="text", text=f"Session context {'appended' if arguments.get('append') else 'set'} ({len(_session_context)} chars)")]
|
|
336
|
+
|
|
337
|
+
elif name == "rlm_context":
|
|
338
|
+
if _session_context:
|
|
339
|
+
return [TextContent(type="text", text=f"**Session Context:**\n\n{_session_context}")]
|
|
340
|
+
return [TextContent(type="text", text="No session context. Use rlm_inject to set.")]
|
|
341
|
+
|
|
342
|
+
elif name == "rlm_clear_context":
|
|
343
|
+
if _session_context:
|
|
344
|
+
_session_context = ""
|
|
345
|
+
try:
|
|
346
|
+
await call_api("rlm_clear_context", {})
|
|
347
|
+
except Exception:
|
|
348
|
+
pass
|
|
349
|
+
return [TextContent(type="text", text="Session context cleared.")]
|
|
350
|
+
return [TextContent(type="text", text="No context to clear.")]
|
|
351
|
+
|
|
352
|
+
elif name == "rlm_stats":
|
|
353
|
+
result = await call_api("rlm_stats", {})
|
|
354
|
+
if result.get("success"):
|
|
355
|
+
# Handle both "result" and "data" keys from different API responses
|
|
356
|
+
d = result.get("result", result.get("data", {}))
|
|
357
|
+
# Backend returns total_characters, not total_tokens
|
|
358
|
+
files_loaded = d.get('files_loaded', d.get('document_count', 0))
|
|
359
|
+
total_lines = d.get('total_lines', 0)
|
|
360
|
+
total_chars = d.get('total_characters', 0)
|
|
361
|
+
sections = d.get('sections', d.get('chunk_count', 0))
|
|
362
|
+
# Safe formatting - convert to int if numeric string, else show as-is
|
|
363
|
+
def fmt(v):
    """Format a count with thousands separators.

    Accepts ints, floats, and numeric strings; anything else falls back
    to ``str(v)``, with falsy values rendered as ``"0"``.
    """
    if isinstance(v, (int, float)):
        return f"{int(v):,}"
    if isinstance(v, str):
        # int() (unlike str.isdigit) also accepts negatives and
        # surrounding whitespace, e.g. "-1234" -> "-1,234".
        try:
            return f"{int(v):,}"
        except ValueError:
            pass
    return str(v) if v else "0"
|
|
369
|
+
return [TextContent(type="text", text=f"**Stats:**\n- Files: {fmt(files_loaded)}\n- Lines: {fmt(total_lines)}\n- Characters: {fmt(total_chars)}\n- Sections: {fmt(sections)}")]
|
|
370
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
371
|
+
|
|
372
|
+
elif name == "rlm_sections":
|
|
373
|
+
result = await call_api("rlm_sections", {})
|
|
374
|
+
if result.get("success"):
|
|
375
|
+
sections = result.get("result", {}).get("sections", [])
|
|
376
|
+
lines = ["**Documents:**\n"]
|
|
377
|
+
for s in sections:
|
|
378
|
+
lines.append(f"- {s.get('path', '')} ({s.get('chunk_count', 0)} chunks)")
|
|
379
|
+
return [TextContent(type="text", text="\n".join(lines))]
|
|
380
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
381
|
+
|
|
382
|
+
elif name == "rlm_read":
|
|
383
|
+
result = await call_api("rlm_read", {"start_line": arguments["start_line"], "end_line": arguments["end_line"]})
|
|
384
|
+
if result.get("success"):
|
|
385
|
+
content = result.get("result", {}).get("content", "")
|
|
386
|
+
return [TextContent(type="text", text=f"**Lines {arguments['start_line']}-{arguments['end_line']}:**\n```\n{content}\n```")]
|
|
387
|
+
return [TextContent(type="text", text=f"**Error:** {result.get('error', 'Unknown error')}")]
|
|
388
|
+
|
|
389
|
+
elif name == "rlm_settings":
|
|
390
|
+
global _settings_cache, _settings_cache_time
|
|
391
|
+
# Force refresh if requested
|
|
392
|
+
if arguments.get("refresh"):
|
|
393
|
+
_settings_cache = {}
|
|
394
|
+
_settings_cache_time = 0
|
|
395
|
+
settings = await get_project_settings()
|
|
396
|
+
cache_age = int(time.time() - _settings_cache_time) if _settings_cache_time else 0
|
|
397
|
+
lines = [
|
|
398
|
+
"**Project Settings** (from dashboard)\n",
|
|
399
|
+
f"- Max Tokens: {settings.get('maxTokensPerQuery', 4000)}",
|
|
400
|
+
f"- Search Mode: {settings.get('searchMode', 'hybrid')}",
|
|
401
|
+
f"- Include Summaries: {settings.get('includeSummaries', True)}",
|
|
402
|
+
f"- Auto-Inject Context: {settings.get('autoInjectContext', False)}",
|
|
403
|
+
f"- Enrich Prompts: {settings.get('enrichPrompts', False)}",
|
|
404
|
+
f"\n*Cache age: {cache_age}s (TTL: {SETTINGS_CACHE_TTL}s)*",
|
|
405
|
+
]
|
|
406
|
+
return [TextContent(type="text", text="\n".join(lines))]
|
|
407
|
+
|
|
408
|
+
else:
|
|
409
|
+
return [TextContent(type="text", text=f"Unknown tool: {name}")]
|
|
410
|
+
|
|
411
|
+
except httpx.HTTPStatusError as e:
|
|
412
|
+
return [TextContent(type="text", text=f"**API Error:** {e.response.status_code} - {e.response.text}")]
|
|
413
|
+
except httpx.ConnectError:
|
|
414
|
+
return [TextContent(type="text", text=f"**Connection Error:** Cannot reach {API_URL}")]
|
|
415
|
+
except Exception as e:
|
|
416
|
+
return [TextContent(type="text", text=f"**Error:** {type(e).__name__}: {str(e)}")]
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
async def run_server():
    """Validate required configuration, then serve MCP requests over stdio.

    Exits with status 1 (after printing to stderr) if either required
    environment variable is missing or empty.
    """
    required = (
        ("SNIPARA_API_KEY", API_KEY),
        ("SNIPARA_PROJECT_ID", PROJECT_ID),
    )
    for var_name, var_value in required:
        if not var_value:
            print(f"Error: {var_name} environment variable required", file=sys.stderr)
            sys.exit(1)

    # stdio_server yields the (read, write) stream pair used as the MCP transport.
    async with stdio_server() as (read_stream, write_stream):
        init_options = server.create_initialization_options()
        await server.run(read_stream, write_stream, init_options)
|
+
def main():
    """Synchronous console entry point for the ``snipara-mcp`` command."""
    # asyncio.run creates a fresh event loop, drives run_server() to
    # completion, and tears the loop down on exit.
    asyncio.run(run_server())
|
+
# Allow direct execution (`python server.py`) to behave like the installed
# console script.
if __name__ == "__main__":
    main()