applybot 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- applybot-1.0.0/PKG-INFO +110 -0
- applybot-1.0.0/README.md +73 -0
- applybot-1.0.0/applybot/__init__.py +1 -0
- applybot-1.0.0/applybot/apply.py +215 -0
- applybot-1.0.0/applybot/cli.py +131 -0
- applybot-1.0.0/applybot/config.py +69 -0
- applybot-1.0.0/applybot/contextual.py +102 -0
- applybot-1.0.0/applybot/dashboard.py +39 -0
- applybot-1.0.0/applybot/generator.py +125 -0
- applybot-1.0.0/applybot/pipeline.py +121 -0
- applybot-1.0.0/applybot/scorer.py +67 -0
- applybot-1.0.0/applybot/scraper.py +147 -0
- applybot-1.0.0/applybot/tracker.py +63 -0
- applybot-1.0.0/applybot/wizard.py +122 -0
- applybot-1.0.0/applybot.egg-info/PKG-INFO +110 -0
- applybot-1.0.0/applybot.egg-info/SOURCES.txt +32 -0
- applybot-1.0.0/applybot.egg-info/dependency_links.txt +1 -0
- applybot-1.0.0/applybot.egg-info/entry_points.txt +2 -0
- applybot-1.0.0/applybot.egg-info/requires.txt +18 -0
- applybot-1.0.0/applybot.egg-info/top_level.txt +1 -0
- applybot-1.0.0/pyproject.toml +51 -0
- applybot-1.0.0/setup.cfg +4 -0
- applybot-1.0.0/tests/test_apply.py +254 -0
- applybot-1.0.0/tests/test_cli.py +37 -0
- applybot-1.0.0/tests/test_config.py +55 -0
- applybot-1.0.0/tests/test_contextual.py +114 -0
- applybot-1.0.0/tests/test_dashboard.py +54 -0
- applybot-1.0.0/tests/test_generator.py +55 -0
- applybot-1.0.0/tests/test_login.py +65 -0
- applybot-1.0.0/tests/test_pipeline.py +74 -0
- applybot-1.0.0/tests/test_scorer.py +63 -0
- applybot-1.0.0/tests/test_scraper.py +53 -0
- applybot-1.0.0/tests/test_tracker.py +88 -0
- applybot-1.0.0/tests/test_wizard.py +56 -0
applybot-1.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: applybot
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Automate your job search pipeline — scrape, score, generate tailored resumes, and auto-apply.
|
|
5
|
+
License: MIT
|
|
6
|
+
Project-URL: Homepage, https://github.com/Ramidoz/applybot
|
|
7
|
+
Project-URL: Repository, https://github.com/Ramidoz/applybot
|
|
8
|
+
Project-URL: Bug Tracker, https://github.com/Ramidoz/applybot/issues
|
|
9
|
+
Keywords: job-search,automation,resume,linkedin,playwright
|
|
10
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: End Users/Desktop
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Office/Business
|
|
20
|
+
Requires-Python: >=3.9
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
Requires-Dist: click>=8.0
|
|
23
|
+
Requires-Dist: prompt_toolkit>=3.0
|
|
24
|
+
Requires-Dist: python-docx>=1.1
|
|
25
|
+
Requires-Dist: requests>=2.31
|
|
26
|
+
Requires-Dist: beautifulsoup4>=4.12
|
|
27
|
+
Requires-Dist: rich>=13.0
|
|
28
|
+
Provides-Extra: browser
|
|
29
|
+
Requires-Dist: playwright>=1.40; extra == "browser"
|
|
30
|
+
Provides-Extra: llm
|
|
31
|
+
Requires-Dist: anthropic>=0.20; extra == "llm"
|
|
32
|
+
Requires-Dist: openai>=1.10; extra == "llm"
|
|
33
|
+
Provides-Extra: dev
|
|
34
|
+
Requires-Dist: pytest>=8.0; extra == "dev"
|
|
35
|
+
Requires-Dist: pytest-cov; extra == "dev"
|
|
36
|
+
Requires-Dist: responses>=0.25; extra == "dev"
|
|
37
|
+
|
|
38
|
+
# ApplyBot
|
|
39
|
+
|
|
40
|
+
Automate your job search pipeline — for any profession.
|
|
41
|
+
|
|
42
|
+
## Quick start
|
|
43
|
+
|
|
44
|
+
### Technical users
|
|
45
|
+
```bash
|
|
46
|
+
pip install applybot
|
|
47
|
+
applybot init # 2-min guided wizard
|
|
48
|
+
applybot run --dry-run # test without submitting
|
|
49
|
+
applybot run # go live
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### Non-technical users
|
|
53
|
+
Double-click `installers/install.bat` (Windows) or `installers/install.sh` (Mac/Linux).
|
|
54
|
+
|
|
55
|
+
## Commands
|
|
56
|
+
|
|
57
|
+
| Command | What it does |
|
|
58
|
+
|---|---|
|
|
59
|
+
| `applybot init` | Guided setup wizard |
|
|
60
|
+
| `applybot login linkedin` | Save LinkedIn session (for auto-apply) |
|
|
61
|
+
| `applybot run` | Full pipeline |
|
|
62
|
+
| `applybot run --dry-run` | Test without submitting |
|
|
63
|
+
| `applybot run --no-apply` | Scrape + generate docs only |
|
|
64
|
+
| `applybot status` | Print status table in terminal |
|
|
65
|
+
| `applybot dashboard` | Rebuild + open dashboard |
|
|
66
|
+
|
|
67
|
+
## Configuration
|
|
68
|
+
|
|
69
|
+
All settings live in `applybot.json` (created by `applybot init`).
|
|
70
|
+
Never commit this file — it contains your API key.
|
|
71
|
+
|
|
72
|
+
## Browser automation
|
|
73
|
+
|
|
74
|
+
Auto-apply to LinkedIn Easy Apply, Greenhouse, and Lever.
|
|
75
|
+
|
|
76
|
+
**Setup (one time):**
|
|
77
|
+
```bash
|
|
78
|
+
pip install "applybot[browser]"
|
|
79
|
+
playwright install chrome
|
|
80
|
+
applybot login linkedin # opens Chrome — log in, then close the window
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
Cookies are saved to `sessions/linkedin_cookies.json`. Keep this file private.
|
|
84
|
+
|
|
85
|
+
**Supported platforms:**
|
|
86
|
+
- LinkedIn Easy Apply
|
|
87
|
+
- Greenhouse
|
|
88
|
+
- Lever
|
|
89
|
+
|
|
90
|
+
## LLM setup (optional)
|
|
91
|
+
|
|
92
|
+
ApplyBot can use an LLM to answer open-text custom questions on application forms.
|
|
93
|
+
|
|
94
|
+
Set `llm_provider` in `applybot.json` to one of:
|
|
95
|
+
|
|
96
|
+
| Provider | Description |
|
|
97
|
+
|---|---|
|
|
98
|
+
| `none` (default) | Skip custom questions — fill manually |
|
|
99
|
+
| `claude` | Anthropic Claude (requires `llm_api_key`) |
|
|
100
|
+
| `openai` | OpenAI GPT (requires `llm_api_key`) |
|
|
101
|
+
| `custom` | Any OpenAI-compatible endpoint (set `llm_custom_url`) |
|
|
102
|
+
|
|
103
|
+
Example config for Claude:
|
|
104
|
+
```json
|
|
105
|
+
{
|
|
106
|
+
"llm_provider": "claude",
|
|
107
|
+
"llm_api_key": "sk-ant-...",
|
|
108
|
+
"llm_model": "claude-haiku-4-5-20251001"
|
|
109
|
+
}
|
|
110
|
+
```
|
applybot-1.0.0/README.md
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# ApplyBot
|
|
2
|
+
|
|
3
|
+
Automate your job search pipeline — for any profession.
|
|
4
|
+
|
|
5
|
+
## Quick start
|
|
6
|
+
|
|
7
|
+
### Technical users
|
|
8
|
+
```bash
|
|
9
|
+
pip install applybot
|
|
10
|
+
applybot init # 2-min guided wizard
|
|
11
|
+
applybot run --dry-run # test without submitting
|
|
12
|
+
applybot run # go live
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
### Non-technical users
|
|
16
|
+
Double-click `installers/install.bat` (Windows) or `installers/install.sh` (Mac/Linux).
|
|
17
|
+
|
|
18
|
+
## Commands
|
|
19
|
+
|
|
20
|
+
| Command | What it does |
|
|
21
|
+
|---|---|
|
|
22
|
+
| `applybot init` | Guided setup wizard |
|
|
23
|
+
| `applybot login linkedin` | Save LinkedIn session (for auto-apply) |
|
|
24
|
+
| `applybot run` | Full pipeline |
|
|
25
|
+
| `applybot run --dry-run` | Test without submitting |
|
|
26
|
+
| `applybot run --no-apply` | Scrape + generate docs only |
|
|
27
|
+
| `applybot status` | Print status table in terminal |
|
|
28
|
+
| `applybot dashboard` | Rebuild + open dashboard |
|
|
29
|
+
|
|
30
|
+
## Configuration
|
|
31
|
+
|
|
32
|
+
All settings live in `applybot.json` (created by `applybot init`).
|
|
33
|
+
Never commit this file — it contains your API key.
|
|
34
|
+
|
|
35
|
+
## Browser automation
|
|
36
|
+
|
|
37
|
+
Auto-apply to LinkedIn Easy Apply, Greenhouse, and Lever.
|
|
38
|
+
|
|
39
|
+
**Setup (one time):**
|
|
40
|
+
```bash
|
|
41
|
+
pip install "applybot[browser]"
|
|
42
|
+
playwright install chrome
|
|
43
|
+
applybot login linkedin # opens Chrome — log in, then close the window
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Cookies are saved to `sessions/linkedin_cookies.json`. Keep this file private.
|
|
47
|
+
|
|
48
|
+
**Supported platforms:**
|
|
49
|
+
- LinkedIn Easy Apply
|
|
50
|
+
- Greenhouse
|
|
51
|
+
- Lever
|
|
52
|
+
|
|
53
|
+
## LLM setup (optional)
|
|
54
|
+
|
|
55
|
+
ApplyBot can use an LLM to answer open-text custom questions on application forms.
|
|
56
|
+
|
|
57
|
+
Set `llm_provider` in `applybot.json` to one of:
|
|
58
|
+
|
|
59
|
+
| Provider | Description |
|
|
60
|
+
|---|---|
|
|
61
|
+
| `none` (default) | Skip custom questions — fill manually |
|
|
62
|
+
| `claude` | Anthropic Claude (requires `llm_api_key`) |
|
|
63
|
+
| `openai` | OpenAI GPT (requires `llm_api_key`) |
|
|
64
|
+
| `custom` | Any OpenAI-compatible endpoint (set `llm_custom_url`) |
|
|
65
|
+
|
|
66
|
+
Example config for Claude:
|
|
67
|
+
```json
|
|
68
|
+
{
|
|
69
|
+
"llm_provider": "claude",
|
|
70
|
+
"llm_api_key": "sk-ant-...",
|
|
71
|
+
"llm_model": "claude-haiku-4-5-20251001"
|
|
72
|
+
}
|
|
73
|
+
```
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Package version string (NOTE(review): presumably mirrors the version in
# pyproject.toml — keep the two in sync when releasing).
__version__ = "1.0.0"
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
"""Browser automation for form submission."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from applybot.scraper import JobPost
|
|
7
|
+
from applybot.contextual import answer_question
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def detect_platform(url: str) -> str:
    """Return platform name from job URL: linkedin | greenhouse | lever | other."""
    # Ordered (domain fragment, platform) pairs — first substring match wins.
    known_hosts = (
        ("linkedin.com", "linkedin"),
        ("greenhouse.io", "greenhouse"),
        ("lever.co", "lever"),
    )
    for fragment, platform in known_hosts:
        if fragment in url:
            return platform
    return "other"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Maps config keys to common form field label patterns.
# Each value lists label variants that _fill_standard_fields matches
# (case-insensitively) against an input's placeholder / aria-label text.
_FIELD_MAP = {
    "email": ["email", "e-mail"],
    "phone": ["phone", "mobile", "telephone"],
    "name": ["full name", "your name", "first and last name"],
    "linkedin_url": ["linkedin", "linkedin url", "linkedin profile"],
}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _fill_standard_fields(page: Any, config: dict[str, Any]) -> None:
    """Fill common form fields (email, phone, name) from config."""
    for key, label_variants in _FIELD_MAP.items():
        text = config.get(key, "")
        if not text:
            continue  # nothing configured for this field — skip it
        for variant in label_variants:
            try:
                escaped = variant.replace("'", "\\'")
                selector = (
                    f"input[placeholder*='{escaped}' i], "
                    f"input[aria-label*='{escaped}' i]"
                )
                matches = page.locator(selector)
                if matches.count() > 0:
                    matches.first.fill(text)
                    break
            except Exception:
                # Best-effort: a bad selector or detached node must not
                # abort the whole application flow.
                pass
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _answer_custom_questions(page: Any, job: JobPost, config: dict[str, Any], resume_text: str) -> None:
|
|
48
|
+
"""Detect open-text textareas not in standard field map and fill via LLM."""
|
|
49
|
+
if config.get("llm_provider", "none") == "none":
|
|
50
|
+
return
|
|
51
|
+
try:
|
|
52
|
+
textareas = page.locator("textarea")
|
|
53
|
+
count = textareas.count()
|
|
54
|
+
for i in range(count):
|
|
55
|
+
ta = textareas.nth(i)
|
|
56
|
+
label_text = ""
|
|
57
|
+
try:
|
|
58
|
+
labelledby_id = ta.get_attribute("aria-labelledby") or ""
|
|
59
|
+
own_id = ta.get_attribute("id") or ""
|
|
60
|
+
if labelledby_id:
|
|
61
|
+
label_el = page.locator(f"#{labelledby_id}")
|
|
62
|
+
if label_el.count() > 0:
|
|
63
|
+
label_text = label_el.first.inner_text()
|
|
64
|
+
elif own_id:
|
|
65
|
+
label_el = page.locator(f"label[for='{own_id}']")
|
|
66
|
+
if label_el.count() > 0:
|
|
67
|
+
label_text = label_el.first.inner_text()
|
|
68
|
+
except Exception:
|
|
69
|
+
pass
|
|
70
|
+
if label_text:
|
|
71
|
+
answer = answer_question(label_text, job.description, resume_text, config)
|
|
72
|
+
if answer:
|
|
73
|
+
ta.fill(answer)
|
|
74
|
+
except Exception:
|
|
75
|
+
pass
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _handle_linkedin(page: Any, job: JobPost, config: dict[str, Any], resume_text: str) -> str:
    """Handle LinkedIn Easy Apply flow.

    Returns one of:
      "submitted"    — application form was submitted;
      "needs_action" — no Easy Apply button, form got stuck, or too many steps;
      "failed"       — any exception during navigation/interaction.
    """
    try:
        page.goto(job.url, wait_until="domcontentloaded", timeout=15000)

        # Only Easy Apply postings are automatable; external-apply jobs
        # are handed back to the user.
        easy_apply = page.locator("button:has-text('Easy Apply'), button[aria-label*='Easy Apply' i]")
        if easy_apply.count() == 0:
            return "needs_action"

        easy_apply.first.click()
        page.wait_for_timeout(1000)

        # Multi-step modal: loop through Next buttons until Submit appears.
        # Each step is (re)filled before looking for Submit/Next, since new
        # fields can appear per step.
        max_steps = 10
        for _ in range(max_steps):
            _fill_standard_fields(page, config)
            _answer_custom_questions(page, job, config, resume_text)

            submit_btn = page.locator(
                "button[aria-label*='Submit application' i], button:has-text('Submit application')"
            )
            if submit_btn.count() > 0:
                submit_btn.first.click()
                page.wait_for_timeout(2000)
                return "submitted"

            next_btn = page.locator(
                "button[aria-label*='Continue to next step' i], button:has-text('Next')"
            )
            if next_btn.count() > 0:
                next_btn.first.click()
                page.wait_for_timeout(800)
            else:
                # No next and no submit — stuck or form not supported
                return "needs_action"

        return "needs_action"  # exceeded max_steps
    except Exception:
        return "failed"
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _handle_greenhouse(page: Any, job: JobPost, config: dict[str, Any], resume_text: str) -> str:
    """Handle Greenhouse application form. Returns status string."""
    try:
        page.goto(job.url, wait_until="domcontentloaded", timeout=15000)
        page.wait_for_timeout(1000)

        # Greenhouse renders the whole form on a single page: fill every
        # field up front, then look for the submit control.
        _fill_standard_fields(page, config)
        _answer_custom_questions(page, job, config, resume_text)

        submit = page.locator(
            "button[type='submit'], input[type='submit'], button:has-text('Submit')"
        )
        if submit.count() == 0:
            return "needs_action"

        submit.first.click()
        page.wait_for_timeout(2000)
        return "submitted"
    except Exception:
        return "failed"
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _handle_lever(page: Any, job: JobPost, config: dict[str, Any], resume_text: str) -> str:
    """Handle Lever application form. Returns status string."""
    try:
        page.goto(job.url, wait_until="domcontentloaded", timeout=15000)

        # Lever job pages link to a separate application form — click
        # through to it before filling anything.
        apply_link = page.locator(
            "a:has-text('Apply'), button:has-text('Apply'), a[href*='apply']"
        )
        if apply_link.count() == 0:
            return "needs_action"

        apply_link.first.click()
        page.wait_for_timeout(1000)

        _fill_standard_fields(page, config)
        _answer_custom_questions(page, job, config, resume_text)

        submit = page.locator(
            "button:has-text('Submit application'), button[type='submit'], input[type='submit']"
        )
        if submit.count() == 0:
            return "needs_action"

        submit.first.click()
        page.wait_for_timeout(2000)
        return "submitted"
    except Exception:
        return "failed"
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def submit_application(job: JobPost, config: dict[str, Any], resume_text: str = "", _page=None) -> str:
    """Submit a job application. Returns status string.

    Returns one of the handler statuses: "submitted", "needs_action",
    or "failed" (see the _handle_* functions).

    _page: injectable Playwright page for testing (skips browser launch when provided).
    """
    platform = detect_platform(job.url)

    if _page is not None:
        # Testing path — use the provided page directly
        return _dispatch(platform, _page, job, config, resume_text)

    if platform == "other":
        # Unsupported platform: _dispatch would return "needs_action" without
        # ever touching the page, so skip the expensive (and visible,
        # headless=False) browser launch entirely.
        return "needs_action"

    # Production path — launch Playwright
    from playwright.sync_api import sync_playwright  # lazy import
    cookies_path = Path("sessions") / "linkedin_cookies.json"

    with sync_playwright() as pw:
        browser = pw.chromium.launch(channel="chrome", headless=False)
        context = browser.new_context()

        # Load saved cookies if available (written by `applybot login linkedin`)
        if cookies_path.exists():
            import json
            cookies = json.loads(cookies_path.read_text(encoding="utf-8"))
            context.add_cookies(cookies)

        page = context.new_page()
        try:
            result = _dispatch(platform, page, job, config, resume_text)
        finally:
            # Always tear the browser down, even when a handler raises.
            context.close()
            browser.close()

    return result
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def _dispatch(platform: str, page: Any, job: JobPost, config: dict[str, Any], resume_text: str = "") -> str:
|
|
207
|
+
"""Route to the correct platform handler."""
|
|
208
|
+
if platform == "linkedin":
|
|
209
|
+
return _handle_linkedin(page, job, config, resume_text)
|
|
210
|
+
elif platform == "greenhouse":
|
|
211
|
+
return _handle_greenhouse(page, job, config, resume_text)
|
|
212
|
+
elif platform == "lever":
|
|
213
|
+
return _handle_lever(page, job, config, resume_text)
|
|
214
|
+
else:
|
|
215
|
+
return "needs_action"
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
"""Click entry point for all applybot commands."""
|
|
2
|
+
import click
|
|
3
|
+
from rich.console import Console
|
|
4
|
+
|
|
5
|
+
# Shared Rich console for all command output.
console = Console()

# Playwright is an optional extra (installed via `pip install "applybot[browser]"`).
# Commands that need a browser check `sync_playwright is None` and print
# install instructions instead of crashing on import.
try:
    from playwright.sync_api import sync_playwright
except ImportError:
    sync_playwright = None  # type: ignore[assignment]
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Root command group: all subcommands (init, login, run, status, dashboard)
# hang off this entry point.
@click.group()
@click.version_option()
def cli():
    """ApplyBot - automate your job search pipeline."""
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@cli.command()
@click.option("--dir", "project_dir", default=".", show_default=True,
              help="Directory to write applybot.json into.")
def init(project_dir):
    """Interactive setup wizard — creates applybot.json."""
    # Imports are local so the wizard's cost stays off CLI startup.
    from pathlib import Path

    from applybot.wizard import run_wizard

    target_dir = Path(project_dir)
    run_wizard(output_dir=target_dir)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@cli.command()
@click.argument("platform", type=click.Choice(["linkedin"]))
def login(platform):
    """Save session cookies for a platform (one-time login).

    Opens a visible Chrome window on the LinkedIn login page, waits (up to
    5 minutes) for the user to log in and close the window, then persists
    the session cookies to sessions/linkedin_cookies.json — the same file
    submit_application loads before auto-applying.
    """
    import json
    from pathlib import Path

    if platform == "linkedin":
        if sync_playwright is None:
            # Playwright is an optional extra; guide the user instead of crashing.
            console.print("[red]Playwright is not installed. Run: pip install playwright\nAlso ensure Google Chrome is installed (channel='chrome').[/red]")
            return

        console.print("[yellow]Opening Chrome for LinkedIn login...[/yellow]")
        console.print("Log in at linkedin.com, then close the browser window.")
        console.print("[dim]Cookies will be saved automatically on close.[/dim]")

        sessions_dir = Path("sessions")
        sessions_dir.mkdir(exist_ok=True)
        cookies_path = sessions_dir / "linkedin_cookies.json"

        with sync_playwright() as pw:
            browser = pw.chromium.launch(channel="chrome", headless=False)
            context = browser.new_context()
            page = context.new_page()
            page.goto("https://www.linkedin.com/login")
            # Block until the user closes the page (5-minute timeout);
            # the context stays open so cookies can still be read after.
            page.wait_for_event("close", timeout=300_000)
            cookies = context.cookies()
            context.close()
            browser.close()

        cookies_path.write_text(json.dumps(cookies, ensure_ascii=False), encoding="utf-8")
        console.print(f"[green]Cookies saved to {cookies_path}[/green]")
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@cli.command()
@click.option("--dry-run", "dry_run", is_flag=True, default=False,
              help="Run pipeline without submitting any applications.")
@click.option("--no-apply", "no_apply", is_flag=True, default=False,
              help="Scraper-only mode: scrape, score, generate - no form submission.")
@click.option("--config", "config_path", default="applybot.json", show_default=True)
def run(dry_run, no_apply, config_path):
    """Full pipeline: scrape, score, generate, apply, dashboard."""
    from pathlib import Path

    from applybot.config import load_config
    from applybot.pipeline import run_pipeline

    # load_config validates applybot.json and raises ConfigError with a
    # readable message when it is missing or malformed.
    settings = load_config(Path(config_path))
    run_pipeline(settings, dry_run=dry_run, no_apply=no_apply)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
@cli.command()
@click.option("--config", "config_path", default="applybot.json", show_default=True)
def status(config_path):
    """Print ASCII status table of all applications."""
    from pathlib import Path

    from rich.table import Table

    from applybot.tracker import load_tracker

    tracker = load_tracker(Path("applications_tracker.json"))
    applications = tracker["applications"]

    # Human-friendly labels for the raw status codes stored in the tracker;
    # unknown codes fall through and are shown verbatim.
    labels = {
        "submitted": "✅ Applied!",
        "needs_action": "👋 Needs Your Help",
        "captcha": "🤖 CAPTCHA Blocked",
        "ai_answered": "🤖 AI Filled",
        "failed": "❌ Failed",
        "expired": "💨 Expired",
        "dry_run": "🧪 Test Run",
    }

    table = Table(title="ApplyBot Status", show_lines=True)
    table.add_column("Company", style="bold")
    for heading in ("Role", "Status", "Score", "Applied"):
        table.add_column(heading)

    # Newest applications first; entries with no date ("" key) sort last.
    newest_first = sorted(
        applications, key=lambda entry: entry.get("applied_date") or "", reverse=True
    )
    for entry in newest_first:
        raw_status = entry.get("status", "")
        table.add_row(
            entry.get("company", ""),
            entry.get("role", ""),
            labels.get(raw_status, raw_status),
            str(entry.get("score", "")),
            entry.get("applied_date") or "—",
        )

    if applications:
        console.print(table)
    else:
        console.print("[dim]No applications yet. Run 'applybot run' to get started.[/dim]")
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
@cli.command()
@click.option("--config", "config_path", default="applybot.json", show_default=True)
def dashboard(config_path):
    """Rebuild dashboard HTML and open in browser."""
    from pathlib import Path

    from applybot.config import load_config
    from applybot.dashboard import build_dashboard, open_dashboard
    from applybot.tracker import load_tracker

    # cfg is not used below; load_config still validates applybot.json so the
    # command fails fast on a broken config — presumably why the call is kept.
    cfg = load_config(Path(config_path))

    tracker_data = load_tracker(Path("applications_tracker.json"))
    html_path = build_dashboard(tracker_data, output_dir=Path("output"))
    open_dashboard(html_path)
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""Load, save, and validate applybot.json configuration."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Keys that must be present in the config after defaults are applied.
# Several of these (location, llm_api_key, llm_model, llm_custom_url,
# llm_custom_auth_header) also appear in DEFAULTS, so load_config fills
# them in before validate_config runs — they are effectively optional
# in the on-disk applybot.json.
REQUIRED_KEYS = [
    "name", "email", "phone", "linkedin_url", "target_roles",
    "remote_only", "location", "salary_min", "salary_max",
    "years_of_experience", "work_authorized", "require_sponsorship",
    "master_resume", "autofill", "score_threshold", "llm_provider",
    "llm_api_key", "llm_model", "llm_custom_url", "llm_custom_auth_header",
]
|
|
15
|
+
|
|
16
|
+
# Fallback values applied by load_config (via dict.setdefault) before
# validation, making these keys optional in applybot.json.
DEFAULTS = {
    "portfolio_url": "",
    "blocklist_companies": [],
    "location": "",
    "llm_api_key": "",
    "llm_model": "",
    "llm_custom_url": "",
    "llm_custom_auth_header": "",
}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ConfigError(Exception):
    """Raised when applybot.json is missing, contains invalid JSON, or fails validation."""
    pass
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def load_config(path: Path) -> dict[str, Any]:
    """Load and validate applybot.json. Raises ConfigError on failure."""
    config_file = Path(path)
    if not config_file.exists():
        raise ConfigError(f"Config file not found: {config_file}")

    raw_text = config_file.read_text(encoding="utf-8")
    try:
        data = json.loads(raw_text)
    except json.JSONDecodeError as e:
        raise ConfigError(f"Config file contains invalid JSON: {e}") from e

    # Fill in optional fields so validation (and downstream code) can rely
    # on every key existing.
    for key, fallback in DEFAULTS.items():
        data.setdefault(key, fallback)

    validate_config(data)
    return data
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def save_config(config: dict[str, Any], path: Path) -> None:
    """Write config dict to applybot.json (pretty-printed, UTF-8)."""
    serialized = json.dumps(config, indent=2, ensure_ascii=False)
    Path(path).write_text(serialized, encoding="utf-8")
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def validate_config(config: dict[str, Any]) -> None:
    """Raise ConfigError if config is missing required fields or has bad values.

    Checks, in order:
      1. every key in REQUIRED_KEYS is present;
      2. target_roles is a non-empty list;
      3. score_threshold is a non-bool number in [0, 100];
      4. llm_provider is one of the supported providers.
    """
    for key in REQUIRED_KEYS:
        if key not in config:
            raise ConfigError(f"Missing required config field: {key}")

    if not isinstance(config["target_roles"], list) or len(config["target_roles"]) == 0:
        raise ConfigError("target_roles must be a non-empty list of job title strings")

    threshold = config["score_threshold"]
    # bool is a subclass of int, so reject it explicitly: a JSON
    # `"score_threshold": true` would otherwise slip through as the number 1.
    if isinstance(threshold, bool) or not isinstance(threshold, (int, float)) or not (0 <= threshold <= 100):
        raise ConfigError(f"score_threshold must be a number between 0 and 100, got: {threshold}")

    valid_providers = {"claude", "openai", "custom", "none"}
    if config["llm_provider"] not in valid_providers:
        raise ConfigError(f"llm_provider must be one of {valid_providers}")