python-job-scraper 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jobscraper/__init__.py ADDED
@@ -0,0 +1,302 @@
1
+ """jobscraper — Multi-platform job scraping library.
2
+
3
+ Public API:
4
+ scrape_jobs() — scrape job listings from one or more platforms and
5
+ return results as a pandas DataFrame.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import re
11
+ from concurrent.futures import ThreadPoolExecutor, as_completed
12
+ from typing import Any
13
+
14
+ import pandas as pd
15
+
16
+ from jobscraper.model import (
17
+ Country,
18
+ JobResponse,
19
+ JobType,
20
+ ScraperInput,
21
+ Site,
22
+ )
23
+ from jobscraper.util import (
24
+ convert_to_annual,
25
+ desired_order,
26
+ extract_salary,
27
+ get_enum_from_value,
28
+ map_str_to_site,
29
+ set_logger_level,
30
+ )
31
+
32
+ # ---------------------------------------------------------------------------
33
+ # Scraper registry
34
+ # ---------------------------------------------------------------------------
35
+
36
+ from jobscraper.glassdoor import GlassdoorScraper # noqa: E402
37
+ from jobscraper.indeed import IndeedScraper # noqa: E402
38
+ from jobscraper.linkedin import LinkedInScraper # noqa: E402
39
+
40
# Maps each supported Site enum member to its scraper class.
# scrape_jobs() looks up this table and instantiates one scraper per
# requested site; add new platforms here after importing their scraper.
SCRAPER_MAPPING: dict[Site, type] = {
    Site.INDEED: IndeedScraper,
    Site.GLASSDOOR: GlassdoorScraper,
    Site.LINKEDIN: LinkedInScraper,
}
45
+
46
+
47
+ # ---------------------------------------------------------------------------
48
+ # Public entry point
49
+ # ---------------------------------------------------------------------------
50
+
51
+
52
def scrape_jobs(
    site_name: str | list[str] | Site | list[Site] | None = None,
    search_term: str | None = None,
    location: str | None = None,
    distance: int | None = 50,
    is_remote: bool = False,
    job_type: str | None = None,
    results_wanted: int = 20,
    country_indeed: str = "india",
    proxies: list[str] | str | None = None,
    ca_cert: str | None = None,
    description_format: str = "markdown",
    offset: int | None = 0,
    hours_old: int | None = None,
    enforce_annual_salary: bool = False,
    verbose: int = 0,
    user_agent: str | None = None,
    cookies: dict[str, str] | None = None,
) -> pd.DataFrame:
    """Scrape job listings from one or more platforms and return a DataFrame.

    Args:
        site_name: Platform(s) to scrape. Accepts a string, Site enum, or
            a mixed list of both. Supports "indeed", "glassdoor", "linkedin".
        search_term: Job title or keyword to search for.
        location: City or region to filter jobs by.
        distance: Search radius in km (default 50). Passed as-is to Indeed's
            ``radius`` parameter.
        is_remote: If True, filter for remote jobs only (not yet used by
            Indeed in Phase 1; reserved for future scrapers).
        job_type: Filter by job type string (e.g. "fulltime", "internship").
        results_wanted: Maximum number of job results to return (default 20).
        country_indeed: Country for the Indeed scraper (default "india").
        proxies: Optional proxy URL string or list of proxy URL strings.
            A single string may be comma-separated.
        ca_cert: Path to a CA certificate bundle for HTTPS verification.
        description_format: "markdown" (default) or "html" for job
            description text format.
        offset: Start from the Nth result (useful for pagination across
            calls). Defaults to 0.
        hours_old: Only return jobs posted within this many hours.
        enforce_annual_salary: If True, normalize all salary intervals to
            annual equivalents before returning.
        verbose: Logging verbosity. 0=ERROR, 1=WARNING, 2=INFO.
        user_agent: Override the default User-Agent header for Indeed.
        cookies: Optional cookies dict to pass to scrapers (e.g. for
            LinkedIn authentication).

    Returns:
        A pandas DataFrame with one row per job posting. Columns follow
        ``desired_order``; all-NA columns are dropped.

    Raises:
        ValueError: If site_name is missing or an unsupported site is
            requested.
    """
    set_logger_level(verbose)

    if not site_name:
        raise ValueError("site_name is required.")

    # Coerce site_name to list[Site]
    if isinstance(site_name, (str, Site)):
        site_name = [site_name]
    sites: list[Site] = [
        map_str_to_site(s) if isinstance(s, str) else s for s in site_name
    ]

    # Coerce country_indeed
    country = Country.from_string(country_indeed)

    # Coerce job_type
    job_type_enum: JobType | None = None
    if job_type:
        job_type_enum = get_enum_from_value(job_type)

    # Normalize proxies to list (a single string may be comma-separated)
    if isinstance(proxies, str):
        proxies = [p.strip() for p in proxies.split(",") if p.strip()]

    scraper_input = ScraperInput(
        site_name=sites,
        search_term=search_term or "",
        location=location,
        distance=distance,
        hours_old=hours_old,
        results_wanted=results_wanted,
        offset=offset or 0,
        job_type=job_type_enum,
        is_remote=is_remote,
        cookies=cookies,
        country_indeed=country,
        description_format=description_format,  # type: ignore[arg-type]
        fetch_full_description=True,
        proxies=proxies,
        ca_cert=ca_cert,
        enforce_annual_salary=enforce_annual_salary,
        user_agent=user_agent,
    )

    # Dispatch scrapers via ThreadPoolExecutor (one thread per site)
    responses: list[JobResponse] = []

    def _run(site: Site) -> JobResponse:
        # One fresh scraper instance per site; unknown sites are rejected.
        scraper_cls = SCRAPER_MAPPING.get(site)
        if scraper_cls is None:
            raise ValueError(f"No scraper implemented for site: {site}")
        return scraper_cls().scrape(scraper_input)

    # Hoisted out of the except handler: import once, build the logger once
    # instead of re-importing on every scraper failure.
    import logging

    _logger = logging.getLogger("jobscraper:main")

    with ThreadPoolExecutor(max_workers=len(sites)) as executor:
        futures = {executor.submit(_run, site): site for site in sites}
        for future in as_completed(futures):
            try:
                responses.append(future.result())
            except Exception as exc:
                # One failing site must not sink the others; warn and go on.
                _logger.warning(
                    "Scraper for %s raised: %s", futures[future], exc
                )

    if not responses or not any(r.jobs for r in responses):
        return pd.DataFrame()

    # Flatten all JobPost objects to dicts
    rows: list[dict[str, Any]] = [
        _flatten_job(job, enforce_annual_salary)
        for response in responses
        for job in response.jobs
    ]

    if not rows:
        return pd.DataFrame()

    # Build the frame in one shot. (Previously each row became its own
    # single-row DataFrame and all were pd.concat'ed — much slower and
    # equivalent in result.)
    df = pd.DataFrame(rows)

    # Drop all-NA columns
    df = df.dropna(axis=1, how="all")

    # Ensure all desired_order columns are present (add None for missing)
    for col in desired_order:
        if col not in df.columns:
            df[col] = None

    # Reorder columns: desired_order first, then any extras
    extra_cols = [c for c in df.columns if c not in desired_order]
    df = df[[c for c in desired_order if c in df.columns] + extra_cols]

    # Sort by site ascending, then date_posted descending.
    sort_cols = [c for c in ["site", "date_posted"] if c in df.columns]
    if sort_cols:
        # Bug fix: the ascending list must match len(sort_cols). The old
        # hard-coded [True, False] raised ValueError whenever only one of
        # the two columns survived the all-NA drop above.
        ascending = [c != "date_posted" for c in sort_cols]
        df = df.sort_values(sort_cols, ascending=ascending, na_position="last")

    return df.reset_index(drop=True)
208
+
209
+
210
+ # ---------------------------------------------------------------------------
211
+ # Private helpers
212
+ # ---------------------------------------------------------------------------
213
+
214
+
215
+ def _flatten_job(job: Any, enforce_annual_salary: bool) -> dict[str, Any]:
216
+ """Flatten a JobPost into a dict suitable for a DataFrame row.
217
+
218
+ - compensation → min_amount, max_amount, interval, currency columns
219
+ - location → formatted string via display_location()
220
+ - job_type list → comma-joined string
221
+ - emails list → comma-joined string
222
+ - Falls back to extract_salary() on description when no compensation object
223
+ """
224
+ data: dict[str, Any] = {
225
+ "id": job.id,
226
+ "site": job.site.value if job.site else None,
227
+ "job_url": job.job_url,
228
+ "job_url_direct": job.job_url_direct,
229
+ "title": job.title,
230
+ "company": job.company,
231
+ "location": job.location.display_location() if job.location else None,
232
+ "date_posted": job.date_posted,
233
+ "is_remote": job.is_remote,
234
+ "is_indeed_apply": job.is_indeed_apply,
235
+ "job_level": job.job_level,
236
+ "company_url": job.company_url,
237
+ "company_logo": job.company_logo,
238
+ "description": job.description,
239
+ "emails": ", ".join(job.emails) if job.emails else None,
240
+ "job_type": (
241
+ ", ".join(jt.value for jt in job.job_type) if job.job_type else None
242
+ ),
243
+ }
244
+
245
+ # Compensation columns
246
+ salary_source: str | None = None
247
+ if job.compensation:
248
+ data["min_amount"] = job.compensation.min_amount
249
+ data["max_amount"] = job.compensation.max_amount
250
+ data["interval"] = (
251
+ job.compensation.interval.value if job.compensation.interval else None
252
+ )
253
+ data["currency"] = job.compensation.currency
254
+ salary_source = "DIRECT_DATA"
255
+ elif job.description:
256
+ # Fallback: extract salary from description text only when salary
257
+ # keywords or a currency symbol are present, to avoid false positives
258
+ # like "0–2 years of experience" being parsed as a salary range.
259
+ _salary_ctx = re.compile(
260
+ r"\b(salary|pay|stipend|ctc|lpa|lakh|lac|package|compensation"
261
+ r"|(?:per|a|an)\s+(?:hour|month|year|annum))\b"
262
+ r"|[$£€₹]",
263
+ re.IGNORECASE,
264
+ )
265
+ if _salary_ctx.search(job.description):
266
+ try:
267
+ interval_str, min_val, max_val, currency = extract_salary(
268
+ job.description
269
+ )
270
+ data["min_amount"] = min_val
271
+ data["max_amount"] = max_val
272
+ data["interval"] = interval_str
273
+ data["currency"] = currency
274
+ if min_val is not None:
275
+ salary_source = "DESCRIPTION"
276
+ except Exception:
277
+ data["min_amount"] = None
278
+ data["max_amount"] = None
279
+ data["interval"] = None
280
+ data["currency"] = None
281
+ else:
282
+ data["min_amount"] = None
283
+ data["max_amount"] = None
284
+ data["interval"] = None
285
+ data["currency"] = None
286
+ else:
287
+ data["min_amount"] = None
288
+ data["max_amount"] = None
289
+ data["interval"] = None
290
+ data["currency"] = None
291
+
292
+ data["salary_source"] = salary_source
293
+
294
+ # Enforce annual salary normalization
295
+ if (
296
+ enforce_annual_salary
297
+ and data.get("interval")
298
+ and data.get("min_amount") is not None
299
+ ):
300
+ convert_to_annual(data)
301
+
302
+ return data
@@ -0,0 +1,32 @@
1
+ """Custom exceptions for jobscraper.
2
+
3
+ This module defines platform-specific exception classes raised by scrapers
4
+ when they encounter unrecoverable errors. Future platform exceptions are
5
+ stubbed out as comments and will be uncommented as scrapers are added.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+
11
+ class IndeedException(Exception):
12
+ """Raised when the Indeed scraper encounters an unrecoverable error."""
13
+
14
+ def __init__(self, message: str | None = None):
15
+ super().__init__(message or "An error occurred with Indeed")
16
+
17
+
18
+ class GlassdoorException(Exception):
19
+ """Raised when the Glassdoor scraper encounters an unrecoverable error."""
20
+
21
+ def __init__(self, message: str | None = None):
22
+ super().__init__(message or "An error occurred with Glassdoor")
23
+
24
+
25
+ class LinkedInException(Exception):
26
+ """Raised when the LinkedIn scraper encounters an unrecoverable error."""
27
+
28
+ def __init__(self, message: str | None = None):
29
+ super().__init__(message or "An error occurred with LinkedIn")
30
+
31
+
32
+ # class NaukriException(Exception): pass # planned
@@ -0,0 +1,309 @@
1
+ """Glassdoor scraper implementation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import random
6
+ import time
7
+ from datetime import datetime
8
+ from typing import Any
9
+
10
+ from jobscraper.exception import GlassdoorException
11
+ from jobscraper.glassdoor.constant import GLASSDOOR_HEADERS, JOB_TYPE_MAP
12
+ from jobscraper.glassdoor.util import (
13
+ build_search_url,
14
+ extract_emails,
15
+ get_job_detail_url,
16
+ get_location_id,
17
+ parse_compensation,
18
+ parse_html_jobs,
19
+ parse_location,
20
+ )
21
+ from jobscraper.model import JobPost, JobResponse, Scraper, ScraperInput, Site
22
+ from jobscraper.util import create_logger, create_session, markdown_converter
23
+
24
+ logger = create_logger("glassdoor")
25
+
26
+ _PAGE_SIZE = 27 # Glassdoor returns ~27 results per HTML page
27
+
28
+
29
class GlassdoorScraper(Scraper):
    """Scraper for glassdoor.co.in job listings.

    Fetches search result pages as HTML and extracts job data from the
    embedded RSC (React Server Components) JSON chunks. No GraphQL needed.
    """

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """Fetch job listings from Glassdoor and return as a JobResponse.

        Args:
            scraper_input: Validated scraper configuration.

        Returns:
            JobResponse containing all collected JobPost objects.

        Raises:
            GlassdoorException: On unrecoverable HTTP errors.
        """
        session = create_session(
            proxies=scraper_input.proxies,
            ca_cert=scraper_input.ca_cert,
            is_tls=True,
        )

        headers = dict(GLASSDOOR_HEADERS)
        if scraper_input.user_agent:
            headers["User-Agent"] = scraper_input.user_agent

        # Warm up session to acquire cookies. Best-effort: any failure here
        # is ignored and the search request proceeds without warm cookies.
        try:
            session.get(f"https://www.glassdoor.co.in", headers=headers)
            time.sleep(random.uniform(1.0, 2.0))
        except Exception:
            pass

        # Resolve location → slug + numeric ID.
        # Fallback is the country-wide slug/id pair ("india", 115).
        loc_slug, loc_id = "india", 115
        if scraper_input.location:
            result = get_location_id(session, headers, scraper_input.location)
            if result:
                loc_slug, loc_id = result
            else:
                logger.warning(
                    "Could not resolve location '%s'; using fallback.",
                    scraper_input.location,
                )

        jobs: list[JobPost] = []
        # offset is translated to a starting page number; offsets within a
        # page are rounded down to the page boundary (_PAGE_SIZE results).
        start_page = (scraper_input.offset // _PAGE_SIZE) + 1
        page = start_page

        while len(jobs) < scraper_input.results_wanted:
            url = build_search_url(
                keyword=scraper_input.search_term,
                location_slug=loc_slug,
                location_id=loc_id,
                page=page,
            )
            logger.info("Fetching Glassdoor page %d: %s", page, url)

            try:
                response = session.get(url, headers=headers)
            except Exception as exc:
                raise GlassdoorException(f"Failed to fetch Glassdoor page: {exc}") from exc

            # status_code is read defensively: the TLS session's response
            # object may not expose it — TODO confirm against create_session.
            status = getattr(response, "status_code", None)
            if isinstance(status, int) and status >= 400:
                raise GlassdoorException(
                    f"Glassdoor returned HTTP {status}. Bot detection may be active."
                )

            # Same defensive pattern for the body: prefer .text, else decode
            # raw bytes.
            html = (
                response.text
                if hasattr(response, "text")
                else response.content.decode()
            )

            raw_jobs = parse_html_jobs(html)
            if not raw_jobs:
                logger.info("No jobs parsed on page %d; stopping.", page)
                break

            for raw in raw_jobs:
                if len(jobs) >= scraper_input.results_wanted:
                    break
                job = self._build_job_post(raw, scraper_input, session, headers)
                if job:
                    jobs.append(job)

            # A short page means this was the last page of results.
            if len(raw_jobs) < _PAGE_SIZE:
                break

            page += 1
            # Jittered delay between pages to reduce bot-detection risk.
            time.sleep(random.uniform(0.5, 2.5))

        return JobResponse(jobs=jobs)

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------

    def _build_job_post(
        self,
        raw: dict[str, Any],
        scraper_input: ScraperInput,
        session: Any,
        headers: dict[str, str],
    ) -> JobPost | None:
        """Convert a raw Glassdoor jobview dict to a JobPost.

        Uses field-level try/except so partial data never crashes the scraper.

        Args:
            raw: One parsed jobview dict; expected to carry "header", "job"
                and "overview" sub-dicts (assumed shape of parse_html_jobs
                output — TODO confirm).
            scraper_input: Scraper configuration (description format, whether
                to fetch the full detail page).
            session: HTTP session reused for optional detail-page fetches.
            headers: Request headers reused for detail-page fetches.

        Returns:
            A populated JobPost, or None when required fields (listingId,
            title) are missing or an unexpected error occurs.
        """
        try:
            header = raw.get("header", {})
            job = raw.get("job", {})
            overview = raw.get("overview", {})

            # ID — listingId lives in job sub-dict; required since it forms
            # the canonical job URL.
            try:
                listing_id = str(job.get("listingId") or "")
                if not listing_id:
                    logger.warning("Glassdoor job missing listingId, skipping")
                    return None
            except Exception:
                return None

            job_url = get_job_detail_url(listing_id)

            # Title — in both header and job; prefer header
            try:
                title = header.get("jobTitleText") or job.get("jobTitleText")
                if not title:
                    logger.warning("Glassdoor job %s missing title", listing_id)
                    return None
            except KeyError:
                return None

            # Company
            try:
                company = (
                    header.get("employerNameFromSearch")
                    or header.get("employer", {}).get("name")
                )
            except Exception:
                company = None

            # Location
            try:
                location = parse_location(header.get("locationName") or "")
            except Exception:
                location = None

            # Date posted — Glassdoor gives ageInDays, not a timestamp, so
            # the posting date is reconstructed relative to today.
            try:
                age = header.get("ageInDays")
                if age is not None:
                    from datetime import date, timedelta
                    date_posted = date.today() - timedelta(days=int(age))
                else:
                    date_posted = None
            except Exception:
                date_posted = None

            # Job type — map raw strings through JOB_TYPE_MAP, dropping
            # unknown values; empty result collapses to None.
            try:
                raw_types = job.get("jobTypes") or []
                job_type = (
                    [
                        JOB_TYPE_MAP[t.lower()]
                        for t in raw_types
                        if t.lower() in JOB_TYPE_MAP
                    ] or None
                ) if raw_types else None
            except Exception:
                job_type = None

            # Compensation
            try:
                compensation = parse_compensation(header)
            except Exception:
                compensation = None

            # Remote
            try:
                is_remote = bool(job.get("isRemoteOrHybrid"))
            except Exception:
                is_remote = None

            # Easy apply = Glassdoor's own apply flow.
            # NOTE(review): stored in the shared is_indeed_apply column even
            # though this is Glassdoor's easyApply flag — confirm intended.
            try:
                is_indeed_apply: bool | None = bool(header.get("easyApply"))
            except Exception:
                is_indeed_apply = None

            # Description (fragments available inline; full fetch optional)
            description: str | None = None
            emails: list[str] | None = None
            job_url_direct: str | None = None

            fragments = job.get("descriptionFragmentsText") or []
            if fragments:
                inline = " ".join(fragments)
                description = (
                    markdown_converter(f"<p>{inline}</p>")
                    if scraper_input.description_format == "markdown"
                    else inline
                )

            # Optionally replace the inline fragments with the full detail
            # page description (one extra HTTP request per job).
            if scraper_input.fetch_full_description:
                try:
                    from bs4 import BeautifulSoup

                    detail_resp = session.get(job_url, headers=headers)
                    detail_html = (
                        detail_resp.text
                        if hasattr(detail_resp, "text")
                        else detail_resp.content.decode()
                    )
                    soup = BeautifulSoup(detail_html, "lxml")
                    # Several known container variants, newest first.
                    desc_tag = (
                        soup.find("div", {"class": "jobDescriptionContent"})
                        or soup.find("div", {"id": "JobDescriptionContainer"})
                        or soup.find("div", {"data-test": "jobDescriptionText"})
                    )
                    if desc_tag:
                        raw_html = str(desc_tag)
                        description = (
                            markdown_converter(raw_html)
                            if scraper_input.description_format == "markdown"
                            else raw_html
                        )
                    emails = extract_emails(detail_html) or None
                    # Jittered delay between detail-page fetches.
                    time.sleep(random.uniform(0.5, 2.5))
                except Exception as exc:
                    # Detail fetch is best-effort; inline fragments remain.
                    logger.warning(
                        "Job %s: failed to fetch detail page: %s", listing_id, exc
                    )

            # Company URL from employer ID.
            # _GD_BASE is defined at module level after this class; it is
            # resolved at call time, so the forward reference is safe.
            try:
                employer_id = header.get("employer", {}).get("id") or None
                company_url = (
                    f"{_GD_BASE}/Overview/W-EI_IE{employer_id}.htm"
                    if employer_id
                    else None
                )
            except Exception:
                company_url = None

            # Company logo
            try:
                company_logo = overview.get("squareLogoUrl") or None
            except Exception:
                company_logo = None

            return JobPost(
                id=listing_id,
                site=Site.GLASSDOOR,
                job_url=job_url,
                job_url_direct=job_url_direct,
                title=title,
                company=company,
                location=location,
                date_posted=date_posted,
                job_type=job_type,
                compensation=compensation,
                is_remote=is_remote,
                is_indeed_apply=is_indeed_apply,
                description=description,
                emails=emails,
                company_url=company_url,
                company_logo=company_logo,
            )

        except Exception as exc:
            # Catch-all so one malformed jobview never aborts the whole page.
            logger.warning("Unexpected error building Glassdoor JobPost: %s", exc)
            return None
307
+
308
+
309
# Base URL for Glassdoor India. Defined after GlassdoorScraper but only
# referenced inside method bodies, so the name resolves at call time.
_GD_BASE = "https://www.glassdoor.co.in"
@@ -0,0 +1,33 @@
1
+ """Constants for the Glassdoor scraper."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from jobscraper.model import JobType
6
+
7
# Root of the Glassdoor India site; all scraper URLs derive from this.
BASE_URL = "https://www.glassdoor.co.in"
# NOTE(review): GRAPHQL_URL is defined but the HTML-based scraper in this
# package does not appear to call it — possibly reserved for a future
# GraphQL code path; confirm before removing.
GRAPHQL_URL = BASE_URL + "/graph"

# Default request headers sent with every Glassdoor request. The
# User-Agent may be overridden per-run via ScraperInput.user_agent.
GLASSDOOR_HEADERS: dict[str, str] = {
    "User-Agent": (
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/120.0.0.0 Safari/537.36"
    ),
    "Accept": "*/*",
    "Accept-Language": "en-US,en;q=0.9",
    "Content-Type": "application/json",
    "Apollo-Requires-Preflight": "true",
    "Referer": BASE_URL + "/",
    "Origin": BASE_URL,
}

# Maps Glassdoor's raw job-type strings (lowercased) to JobType enum
# members; both underscore and collapsed spellings are accepted.
JOB_TYPE_MAP: dict[str, JobType] = {
    "fulltime": JobType.FULL_TIME,
    "full_time": JobType.FULL_TIME,
    "parttime": JobType.PART_TIME,
    "part_time": JobType.PART_TIME,
    "contract": JobType.CONTRACT,
    "temporary": JobType.TEMPORARY,
    "internship": JobType.INTERNSHIP,
    "intern": JobType.INTERNSHIP,
}
+ }