everyrow_mcp-0.1.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,219 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[codz]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ # Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ # poetry.lock
109
+ # poetry.toml
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
+ # pdm.lock
116
+ # pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # pixi
121
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
+ # pixi.lock
123
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
+ # in the .venv directory. It is recommended not to include this directory in version control.
125
+ .pixi
126
+
127
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
+ __pypackages__/
129
+
130
+ # Celery stuff
131
+ celerybeat-schedule
132
+ celerybeat.pid
133
+
134
+ # Redis
135
+ *.rdb
136
+ *.aof
137
+ *.pid
138
+
139
+ # RabbitMQ
140
+ mnesia/
141
+ rabbitmq/
142
+ rabbitmq-data/
143
+
144
+ # ActiveMQ
145
+ activemq-data/
146
+
147
+ # SageMath parsed files
148
+ *.sage.py
149
+
150
+ # Environments
151
+ .env
152
+ .envrc
153
+ .venv
154
+ env/
155
+ venv/
156
+ ENV/
157
+ env.bak/
158
+ venv.bak/
159
+
160
+ # Spyder project settings
161
+ .spyderproject
162
+ .spyproject
163
+
164
+ # Rope project settings
165
+ .ropeproject
166
+
167
+ # mkdocs documentation
168
+ /site
169
+
170
+ # mypy
171
+ .mypy_cache/
172
+ .dmypy.json
173
+ dmypy.json
174
+
175
+ # Pyre type checker
176
+ .pyre/
177
+
178
+ # pytype static type analyzer
179
+ .pytype/
180
+
181
+ # Cython debug symbols
182
+ cython_debug/
183
+
184
+ # PyCharm
185
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
186
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
187
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
188
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
189
+ # .idea/
190
+
191
+ # Abstra
192
+ # Abstra is an AI-powered process automation framework.
193
+ # Ignore directories containing user credentials, local state, and settings.
194
+ # Learn more at https://abstra.io/docs
195
+ .abstra/
196
+
197
+ # Visual Studio Code
198
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
199
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
200
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
201
+ # you could uncomment the following to ignore the entire vscode folder
202
+ # .vscode/
203
+
204
+ # Ruff stuff:
205
+ .ruff_cache/
206
+
207
+ # PyPI configuration file
208
+ .pypirc
209
+
210
+ # Marimo
211
+ marimo/_static/
212
+ marimo/_lsp/
213
+ __marimo__/
214
+
215
+ # Streamlit
216
+ .streamlit/secrets.toml
217
+
218
+ .history/
219
+ .python-version
@@ -0,0 +1,146 @@
1
+ Metadata-Version: 2.4
2
+ Name: everyrow-mcp
3
+ Version: 0.1.7
4
+ Summary: MCP server for everyrow: agent ops at spreadsheet scale
5
+ Requires-Python: >=3.12
6
+ Requires-Dist: everyrow>=0.1.5
7
+ Requires-Dist: mcp[cli]>=1.0.0
8
+ Requires-Dist: pandas>=2.0.0
9
+ Requires-Dist: pydantic<3.0.0,>=2.0.0
10
+ Description-Content-Type: text/markdown
11
+
12
+ # everyrow MCP Server
13
+
14
+ MCP (Model Context Protocol) server for [everyrow](https://everyrow.io): agent ops at spreadsheet scale.
15
+
16
+ This server exposes everyrow's 5 core operations as MCP tools, allowing LLM applications to screen, rank, dedupe, merge, and run agents on CSV files.
17
+
18
+ **All tools operate on local CSV files.** Provide absolute file paths as input, and transformed results are written to new CSV files at your specified output path.
19
+
20
+ ## Setup
21
+
22
+ The server requires an everyrow API key. Get one at [everyrow.io/api-key](https://everyrow.io/api-key) ($20 free credit).
23
+
24
+ Either set the API key in your shell environment, or hardcode it directly in the config below.
25
+
26
+ ```bash
27
+ export EVERYROW_API_KEY=your_key_here
28
+ ```
29
+
30
+ Add this to your MCP config. If you have [uv](https://docs.astral.sh/uv/) installed:
31
+
32
+ ```json
33
+ {
34
+ "mcpServers": {
35
+ "everyrow": {
36
+ "command": "uvx",
37
+ "args": ["everyrow-mcp"],
38
+ "env": {
39
+ "EVERYROW_API_KEY": "${EVERYROW_API_KEY}"
40
+ }
41
+ }
42
+ }
43
+ }
44
+ ```
45
+
46
+ Alternatively, install with pip (ideally in a venv) and use `"command": "everyrow-mcp"` instead of `uvx`.
47
+
48
+ ## Available Tools
49
+
50
+ ### everyrow_screen
51
+
52
+ Filter CSV rows based on criteria that require judgment.
53
+
54
+ ```
55
+ Parameters:
56
+ - task: Natural language description of screening criteria
57
+ - input_csv: Absolute path to input CSV
58
+ - output_path: Directory or full .csv path for output
59
+ ```
60
+
61
+ Example: Filter job postings for "remote-friendly AND senior-level AND salary disclosed"
62
+
63
+ ### everyrow_rank
64
+
65
+ Score and sort CSV rows based on qualitative criteria.
66
+
67
+ ```
68
+ Parameters:
69
+ - task: Natural language description of ranking criteria
70
+ - input_csv: Absolute path to input CSV
71
+ - output_path: Directory or full .csv path for output
72
+ - field_name: Name of the score field to add
73
+ - field_type: Type of field (float, int, str, bool)
74
+ - ascending_order: Sort direction (default: true)
75
+ ```
76
+
77
+ Example: Rank leads by "likelihood to need data integration solutions"
78
+
79
+ ### everyrow_dedupe
80
+
81
+ Remove duplicate rows using semantic equivalence.
82
+
83
+ ```
84
+ Parameters:
85
+ - equivalence_relation: Natural language description of what makes rows duplicates
86
+ - input_csv: Absolute path to input CSV
87
+ - output_path: Directory or full .csv path for output
88
+ - select_representative: Keep one row per duplicate group (default: true)
89
+ ```
90
+
91
+ Example: Dedupe contacts where "same person even with name abbreviations or career changes"
92
+
93
+ ### everyrow_merge
94
+
95
+ Join two CSV files using intelligent entity matching.
96
+
97
+ ```
98
+ Parameters:
99
+ - task: Natural language description of how to match rows
100
+ - left_csv: Absolute path to primary CSV
101
+ - right_csv: Absolute path to secondary CSV
102
+ - output_path: Directory or full .csv path for output
103
+ - merge_on_left: (optional) Column name in left table
104
+ - merge_on_right: (optional) Column name in right table
105
+ ```
106
+
107
+ Example: Match software products to parent companies (Photoshop -> Adobe)
108
+
109
+ ### everyrow_agent
110
+
111
+ Run web research agents on each row of a CSV.
112
+
113
+ ```
114
+ Parameters:
115
+ - task: Natural language description of research task
116
+ - input_csv: Absolute path to input CSV
117
+ - output_path: Directory or full .csv path for output
118
+ ```
119
+
120
+ Example: "Find this company's latest funding round and lead investors"
121
+
122
+ ## Output Path Handling
123
+
124
+ The `output_path` parameter accepts two formats:
125
+
126
+ 1. **Directory**: Output file is named `{operation}_{input_name}.csv`, where `{operation}` is the past-tense operation name (e.g. `screened` for the screen tool)
127
+ - Input: `/data/companies.csv`, Output path: `/output/`
128
+ - Result: `/output/screened_companies.csv`
129
+
130
+ 2. **Full file path**: Use the exact path specified
131
+ - Output path: `/output/my_results.csv`
132
+ - Result: `/output/my_results.csv`
133
+
134
+ The server validates output paths before making API requests to avoid wasted costs.
135
+
136
+ ## Development
137
+
138
+ ```bash
139
+ cd everyrow-mcp
140
+ uv sync
141
+ uv run pytest
142
+ ```
143
+
144
+ ## License
145
+
146
+ MIT - See [LICENSE.txt](../LICENSE.txt)
@@ -0,0 +1,135 @@
1
+ # everyrow MCP Server
2
+
3
+ MCP (Model Context Protocol) server for [everyrow](https://everyrow.io): agent ops at spreadsheet scale.
4
+
5
+ This server exposes everyrow's 5 core operations as MCP tools, allowing LLM applications to screen, rank, dedupe, merge, and run agents on CSV files.
6
+
7
+ **All tools operate on local CSV files.** Provide absolute file paths as input, and transformed results are written to new CSV files at your specified output path.
8
+
9
+ ## Setup
10
+
11
+ The server requires an everyrow API key. Get one at [everyrow.io/api-key](https://everyrow.io/api-key) ($20 free credit).
12
+
13
+ Either set the API key in your shell environment, or hardcode it directly in the config below.
14
+
15
+ ```bash
16
+ export EVERYROW_API_KEY=your_key_here
17
+ ```
18
+
19
+ Add this to your MCP config. If you have [uv](https://docs.astral.sh/uv/) installed:
20
+
21
+ ```json
22
+ {
23
+ "mcpServers": {
24
+ "everyrow": {
25
+ "command": "uvx",
26
+ "args": ["everyrow-mcp"],
27
+ "env": {
28
+ "EVERYROW_API_KEY": "${EVERYROW_API_KEY}"
29
+ }
30
+ }
31
+ }
32
+ }
33
+ ```
34
+
35
+ Alternatively, install with pip (ideally in a venv) and use `"command": "everyrow-mcp"` instead of `uvx`.
36
+
37
+ ## Available Tools
38
+
39
+ ### everyrow_screen
40
+
41
+ Filter CSV rows based on criteria that require judgment.
42
+
43
+ ```
44
+ Parameters:
45
+ - task: Natural language description of screening criteria
46
+ - input_csv: Absolute path to input CSV
47
+ - output_path: Directory or full .csv path for output
48
+ ```
49
+
50
+ Example: Filter job postings for "remote-friendly AND senior-level AND salary disclosed"
51
+
52
+ ### everyrow_rank
53
+
54
+ Score and sort CSV rows based on qualitative criteria.
55
+
56
+ ```
57
+ Parameters:
58
+ - task: Natural language description of ranking criteria
59
+ - input_csv: Absolute path to input CSV
60
+ - output_path: Directory or full .csv path for output
61
+ - field_name: Name of the score field to add
62
+ - field_type: Type of field (float, int, str, bool)
63
+ - ascending_order: Sort direction (default: true)
64
+ ```
65
+
66
+ Example: Rank leads by "likelihood to need data integration solutions"
67
+
68
+ ### everyrow_dedupe
69
+
70
+ Remove duplicate rows using semantic equivalence.
71
+
72
+ ```
73
+ Parameters:
74
+ - equivalence_relation: Natural language description of what makes rows duplicates
75
+ - input_csv: Absolute path to input CSV
76
+ - output_path: Directory or full .csv path for output
77
+ - select_representative: Keep one row per duplicate group (default: true)
78
+ ```
79
+
80
+ Example: Dedupe contacts where "same person even with name abbreviations or career changes"
81
+
82
+ ### everyrow_merge
83
+
84
+ Join two CSV files using intelligent entity matching.
85
+
86
+ ```
87
+ Parameters:
88
+ - task: Natural language description of how to match rows
89
+ - left_csv: Absolute path to primary CSV
90
+ - right_csv: Absolute path to secondary CSV
91
+ - output_path: Directory or full .csv path for output
92
+ - merge_on_left: (optional) Column name in left table
93
+ - merge_on_right: (optional) Column name in right table
94
+ ```
95
+
96
+ Example: Match software products to parent companies (Photoshop -> Adobe)
97
+
98
+ ### everyrow_agent
99
+
100
+ Run web research agents on each row of a CSV.
101
+
102
+ ```
103
+ Parameters:
104
+ - task: Natural language description of research task
105
+ - input_csv: Absolute path to input CSV
106
+ - output_path: Directory or full .csv path for output
107
+ ```
108
+
109
+ Example: "Find this company's latest funding round and lead investors"
110
+
111
+ ## Output Path Handling
112
+
113
+ The `output_path` parameter accepts two formats:
114
+
115
+ 1. **Directory**: Output file is named `{operation}_{input_name}.csv`, where `{operation}` is the past-tense operation name (e.g. `screened` for the screen tool)
116
+ - Input: `/data/companies.csv`, Output path: `/output/`
117
+ - Result: `/output/screened_companies.csv`
118
+
119
+ 2. **Full file path**: Use the exact path specified
120
+ - Output path: `/output/my_results.csv`
121
+ - Result: `/output/my_results.csv`
122
+
123
+ The server validates output paths before making API requests to avoid wasted costs.
124
+
125
+ ## Development
126
+
127
+ ```bash
128
+ cd everyrow-mcp
129
+ uv sync
130
+ uv run pytest
131
+ ```
132
+
133
+ ## License
134
+
135
+ MIT - See [LICENSE.txt](../LICENSE.txt)
@@ -0,0 +1,61 @@
1
+ [project]
2
+ name = "everyrow-mcp"
3
+ version = "0.1.7"
4
+ description = "MCP server for everyrow: agent ops at spreadsheet scale"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "everyrow>=0.1.5",
9
+ "mcp[cli]>=1.0.0",
10
+ "pandas>=2.0.0",
11
+ "pydantic>=2.0.0,<3.0.0",
12
+ ]
13
+
14
+ [project.scripts]
15
+ everyrow-mcp = "everyrow_mcp.server:main"
16
+
17
+ [tool.hatch.build.targets.wheel]
18
+ packages = ["src/everyrow_mcp"]
19
+
20
+ [build-system]
21
+ requires = ["hatchling"]
22
+ build-backend = "hatchling.build"
23
+
24
+ [dependency-groups]
25
+ dev = [
26
+ "pytest>=9.0.2",
27
+ "pytest-asyncio>=1.3.0",
28
+ "basedpyright>=1.22.0",
29
+ "ruff>=0.9.9",
30
+ ]
31
+
32
+ [tool.basedpyright]
33
+ venvPath = "."
34
+ venv = ".venv"
35
+ include = ["src", "tests"]
36
+ typeCheckingMode = "standard"
37
+
38
+ [tool.ruff]
39
+ include = ["src/**/*.py", "tests/**/*.py"]
40
+ exclude = [".venv"]
41
+ target-version = "py312"
42
+
43
+ [tool.ruff.lint]
44
+ select = ["F", "E", "W", "PL", "RUF", "ARG", "F401", "UP"]
45
+ ignore = [
46
+ "E501", # line too long
47
+ "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar`
48
+ "PLR2004", # Magic value used in comparison
49
+ "PLR0913", # Too many arguments
50
+ ]
51
+ fixable = ["ALL"]
52
+ unfixable = []
53
+ extend-select = ["I"]
54
+
55
+ [tool.ruff.lint.flake8-tidy-imports]
56
+ ban-relative-imports = "all"
57
+
58
+ [tool.pytest.ini_options]
59
+ addopts = ["--import-mode=importlib"]
60
+ pythonpath = ["src"]
61
+ asyncio_mode = "auto"
@@ -0,0 +1 @@
1
+ """MCP server for everyrow: agent ops at spreadsheet scale."""