ohmyscrapper 0.7.1__tar.gz → 0.7.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/PKG-INFO +6 -3
  2. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/README.md +1 -1
  3. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/pyproject.toml +6 -1
  4. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/__init__.py +11 -2
  5. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/config.py +6 -0
  6. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/config_files.py +29 -2
  7. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/default_files/config.yaml +3 -0
  8. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/default_files/url_sniffing.yaml +4 -0
  9. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/models/urls_manager.py +16 -2
  10. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/load_txt.py +7 -3
  11. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/scrap_urls.py +6 -0
  12. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/seed.py +4 -1
  13. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/sniff_url.py +13 -3
  14. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/__main__.py +0 -0
  15. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/default_files/url_types.yaml +0 -0
  16. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/classify_urls.py +0 -0
  17. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/merge_dbs.py +0 -0
  18. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/process_with_ai.py +0 -0
  19. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/show.py +0 -0
  20. {ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/untouch_all.py +0 -0
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/PKG-INFO
@@ -1,9 +1,10 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.4
 Name: ohmyscrapper
-Version: 0.7.1
+Version: 0.7.4
 Summary: OhMyScrapper scrapes texts and urls looking for links and jobs-data to create a final report with general information about job positions.
 Author: Cesar Cardoso
 Author-email: Cesar Cardoso <hello@cesarcardoso.cc>
+License-Expression: MIT
 Requires-Dist: beautifulsoup4>=4.14.3
 Requires-Dist: google-genai>=1.55.0
 Requires-Dist: markdown>=3.10
@@ -14,9 +15,11 @@ Requires-Dist: requests>=2.32.5
 Requires-Dist: rich>=14.2.0
 Requires-Dist: urlextract>=1.9.0
 Requires-Python: >=3.11
+Project-URL: Changelog, https://github.com/bouli/ohmyscrapper/releases/latest
+Project-URL: Repository, https://github.com/bouli/ohmyscrapper
 Description-Content-Type: text/markdown
 
-# 🐶 OhMyScrapper - v0.7.1
+# 🐶 OhMyScrapper - v0.7.4
 
 OhMyScrapper scrapes texts and urls looking for links and jobs-data to create a
 final report with general information about job positions.
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/README.md
@@ -1,4 +1,4 @@
-# 🐶 OhMyScrapper - v0.7.1
+# 🐶 OhMyScrapper - v0.7.4
 
 OhMyScrapper scrapes texts and urls looking for links and jobs-data to create a
 final report with general information about job positions.
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/pyproject.toml
@@ -1,6 +1,7 @@
 [project]
 name = "ohmyscrapper"
-version = "0.7.1"
+version = "0.7.4"
+license = "MIT"
 description = "OhMyScrapper scrapes texts and urls looking for links and jobs-data to create a final report with general information about job positions."
 readme = "README.md"
 authors = [
@@ -19,6 +20,10 @@ dependencies = [
     "urlextract>=1.9.0",
 ]
 
+[project.urls]
+Repository = "https://github.com/bouli/ohmyscrapper"
+Changelog = "https://github.com/bouli/ohmyscrapper/releases/latest"
+
 [project.scripts]
 ohmyscrapper = "ohmyscrapper:main"
 
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/__init__.py
@@ -20,7 +20,7 @@ from ohmyscrapper.core.config import update
 
 def main():
     parser = argparse.ArgumentParser(prog="ohmyscrapper")
-    parser.add_argument("--version", action="version", version="%(prog)s v0.7.1")
+    parser.add_argument("--version", action="version", version="%(prog)s v0.7.4")
 
     update()
     subparsers = parser.add_subparsers(dest="command", help="Available commands")
@@ -53,6 +53,14 @@ def main():
        help="Add all `url_types` from the bank to the `/ohmyscrapper/url_types.yaml` file.",
        action="store_true",
    )
+
+    seed_parser.add_argument(
+        "--reset",
+        default=False,
+        help="Reset all `url_types`.",
+        action="store_true",
+    )
+
     untouch_parser = subparsers.add_parser(
         "untouch-all", help="Untouch all urls. That resets classification"
     )
@@ -137,7 +145,7 @@ def main():
         if args.export:
             export_url_types_to_file()
         else:
-            seed()
+            seed(args.reset)
         return
 
     if args.command == "untouch-all":
@@ -191,6 +199,7 @@ def main():
         return
 
     if args.command == "start":
+        seed()
         if args.input != None:
             load_txt(file_name=args.input)
         else:
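
The `--reset` flag exists only on the `seed` subcommand, which is why `main()` reads `args.reset` only inside the `seed` branch. A minimal, standalone sketch of how the flag parses, with the parser pared down to that one subcommand (everything else in `main()` is omitted here):

    import argparse

    # Pared-down sketch of the `seed` subcommand wiring shown in the hunk above.
    parser = argparse.ArgumentParser(prog="ohmyscrapper")
    subparsers = parser.add_subparsers(dest="command")
    seed_parser = subparsers.add_parser("seed")
    seed_parser.add_argument("--reset", default=False, action="store_true")

    args = parser.parse_args(["seed", "--reset"])
    print(args.command, args.reset)  # "seed True" -> main() then calls seed(args.reset)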
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/config.py
@@ -39,6 +39,12 @@ def get_ai(param):
     )
 
 
+def get_sniffing(param):
+    return config_files.get_param(
+        parent_param="sniffing", param=param, default_app_dir=default_app_dir
+    )
+
+
 def load_config(force_default=False):
     config_file_name = "config.yaml"
     config_params = config_files.create_and_read_config_file(
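
`get_sniffing` mirrors the existing `get_ai` accessor: both delegate to `config_files.get_param` with a fixed `parent_param`. A sketch of how the new key resolves, assuming ohmyscrapper 0.7.4 is installed and the default config is in place:

    # Sketch; requires ohmyscrapper >= 0.7.4 importable.
    from ohmyscrapper.core import config

    # Reads the new `sniffing:` block from config.yaml; the shipped default
    # is timeout: 10, which sniff_url.py passes to requests.get (see below).
    timeout = config.get_sniffing("timeout")
    print(timeout)  # 10 unless overridden in the user's config.yaml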
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/config_files.py
@@ -4,14 +4,29 @@ import yaml
 
 def create_and_read_config_file(file_name, default_app_dir, force_default=False):
     config_file = config_file_path(file_name, default_app_dir)
+    default_config_params = _get_default_file(default_file=file_name)
     if force_default or not os.path.exists(config_file):
-        config_params = _get_default_file(default_file=file_name)
         overwrite_config_file(
-            data=config_params, file_name=file_name, default_app_dir=default_app_dir
+            data=default_config_params,
+            file_name=file_name,
+            default_app_dir=default_app_dir,
         )
+        config_params = default_config_params
     else:
         with open(config_file, "r") as f:
             config_params = yaml.safe_load(f.read())
+        if complete_config_file(
+            config_params=config_params,
+            default_config_params=default_config_params,
+            file_name=file_name,
+            default_app_dir=default_app_dir,
+        ):
+            config_params = create_and_read_config_file(
+                file_name=file_name,
+                default_app_dir=default_app_dir,
+                force_default=force_default,
+            )
+
     if config_params is None:
         config_params = create_and_read_config_file(
             file_name=file_name, default_app_dir=default_app_dir, force_default=True
@@ -19,6 +34,18 @@ def create_and_read_config_file(file_name, default_app_dir, force_default=False)
     return config_params
 
 
+def complete_config_file(
+    config_params, default_config_params, file_name, default_app_dir
+):
+    has_updated = False
+    for key, values in default_config_params.items():
+        if key not in config_params.keys():
+            has_updated = True
+            data = {key: values}
+            append_config_file(data, file_name, default_app_dir)
+    return has_updated
+
+
 def overwrite_config_file(data, file_name, default_app_dir):
     config_file = config_file_path(file_name, default_app_dir)
     with open(config_file, "+w") as f:
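
The net effect of `complete_config_file` is that a config file written by an older release is backfilled with any top-level keys added to the shipped defaults since, and then re-read. A self-contained sketch of just that merge rule, with plain dicts standing in for the YAML files:

    # Standalone sketch of the backfill rule in complete_config_file:
    # top-level default keys missing from the user's config get appended.
    defaults = {"ai": {"default_prompt_file": "prompt.md"},
                "sniffing": {"timeout": 10}}                    # 0.7.4 defaults
    user_config = {"ai": {"default_prompt_file": "prompt.md"}}  # written by 0.7.1

    missing = {k: v for k, v in defaults.items() if k not in user_config}
    print(missing)  # {'sniffing': {'timeout': 10}} -> appended, then file re-read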
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/default_files/config.yaml
@@ -14,3 +14,6 @@ default_files:
 
 ai:
   default_prompt_file: prompt.md
+
+sniffing:
+  timeout: 10
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/core/default_files/url_sniffing.yaml
@@ -23,3 +23,7 @@ linkedin_redirect:
     og:url: url_destiny
   atags:
     first-tag-as-url_destiny: 5
+
+read_all_a_tags:
+  atags:
+    load_atags: True
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/models/urls_manager.py
@@ -69,6 +69,14 @@ def seeds(seeds={}):
     return True
 
 
+@use_connection
+def reset_seeds():
+    sql = "DELETE FROM urls_valid_prefix"
+    c = conn.cursor()
+    c.execute(sql)
+    conn.commit()
+
+
 @use_connection
 def add_urls_valid_prefix(url_prefix, url_type):
 
@@ -198,6 +206,8 @@ def get_url_like_unclassified(like_condition):
 
 @use_connection
 def add_url(url, title=None, parent_url=None):
+    if url[:1] == "/":
+        return
     url = clean_url(url)
     c = conn.cursor()
 
@@ -340,7 +350,9 @@ def set_url_error(url, value):
 @use_connection
 def set_url_type_by_id(url_id, url_type):
     c = conn.cursor()
-    c.execute(f"UPDATE urls SET url_type = '{url_type}' WHERE id = {url_id}")
+    c.execute(
+        f"UPDATE urls SET url_type = '{url_type}', last_touch = NULL WHERE id = {url_id}"
+    )
     conn.commit()
 
 
@@ -392,8 +404,10 @@ def touch_url(url):
 @use_connection
 def untouch_url(url):
     url = clean_url(url)
+    url = str(url.strip())
+
     c = conn.cursor()
-    c.execute("UPDATE urls SET last_touch = NULL WHERE url = ?", (url))
+    c.execute(f"UPDATE urls SET last_touch = NULL, url_type = NULL WHERE url = '{url}'")
     conn.commit()
 
 
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/load_txt.py
@@ -19,14 +19,16 @@ def load_txt(file_name="input", verbose=False):
 
     text_file_content = ""
     if file_name is not None and not os.path.isdir(file_name):
-        print(f"📖 reading file `{file_name}`... ")
         if not os.path.exists(file_name):
             if file_name.startswith("https://") or file_name.startswith("http://"):
+                print(f"📖 reading url `{file_name}`... ")
                 text_file_content = " " + file_name + " "
+                urls_manager.untouch_url(url=file_name)
             else:
                 print(f"\n file `{file_name}` not found.")
                 return
         else:
+            print(f"📖 reading file `{file_name}`... ")
             text_file_content = _increment_file_name(
                 text_file_content=text_file_content, file_name=file_name
             )
@@ -51,13 +53,15 @@ def load_txt(file_name="input", verbose=False):
                 file_name=os.path.join(dir_files, text_files[0]),
             )
         else:
-            print("\nChoose a text file. Use `*` for process all and `q` to quit:")
+            print("\nFiles list:")
             for index, file in enumerate(text_files):
                 print(f"[{index}]:", os.path.join(dir_files, file))
 
             text_file_option = -1
             while text_file_option < 0 or text_file_option >= len(text_files):
-                text_file_option = input("Enter the file number: ")
+                text_file_option = input(
+                    "Choose a text file. Use `*` for process all and `q` to quit. Enter the file number: "
+                )
                 if text_file_option == "*":
                     for file in text_files:
                         text_file_content = _increment_file_name(
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/scrap_urls.py
@@ -104,6 +104,12 @@ def process_sniffed_url(url_report, url, sniffing_config, verbose=False):
     ):
         if "first-a-link" in url_report.keys():
             db_fields["url_destiny"] = url_report["first-a-link"]
+        if (
+            "atags" in sniffing_config.keys()
+            and "load_links" in sniffing_config["atags"].keys()
+        ):
+            for a_link in url_report["a_links"]:
+                urls_manager.add_url(url=a_link["href"], parent_url=url["url"])
 
     if db_fields["title"] is not None:
         urls_manager.set_url_title(url=url["url"], value=db_fields["title"])
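
This branch assumes `url_report["a_links"]` is a list of dicts carrying an `href` key, and that the active sniffing profile enables the feature via `atags.load_links`; note the shipped `read_all_a_tags` profile above sets `load_atags`, so check the key name against your version when writing a profile. A minimal sketch of the gate and loop with stand-in data:

    # Stand-in data; the real url_report comes from sniff_url.
    sniffing_config = {"atags": {"load_links": True}}
    url_report = {"a_links": [{"href": "https://example.com/jobs/1"},
                              {"href": "/relative/skipped-by-add_url"}]}

    if "atags" in sniffing_config and "load_links" in sniffing_config["atags"]:
        for a_link in url_report["a_links"]:
            print("add_url:", a_link["href"])  # module calls urls_manager.add_url(...)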
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/seed.py
@@ -2,7 +2,10 @@ import ohmyscrapper.models.urls_manager as urls_manager
 from ohmyscrapper.core import config
 
 
-def seed():
+def seed(reset=False):
+    if reset:
+        urls_manager.reset_seeds()
+
     if not config.url_types_file_exists():
         db_url_types = urls_manager.get_urls_valid_prefix()
         if len(db_url_types) > 0:
{ohmyscrapper-0.7.1 → ohmyscrapper-0.7.4}/src/ohmyscrapper/modules/sniff_url.py
@@ -1,6 +1,7 @@
 import requests
 from bs4 import BeautifulSoup
 import json
+from ohmyscrapper.core import config
 
 
 def sniff_url(
@@ -8,6 +9,8 @@ def sniff_url(
     silent=False,
     sniffing_config={},
 ):
+    final_report = {}
+    final_report["error"] = None
     if "metatags" in sniffing_config:
         metatags_to_search = sniffing_config["metatags"]
     else:
@@ -41,10 +44,17 @@ def sniff_url(
     if not silent:
         print("checking url:", url)
 
-    r = requests.get(url=url)
-    soup = BeautifulSoup(r.text, "html.parser")
+    try:
+        r = requests.get(url=url, timeout=config.get_sniffing("timeout"))
+        soup = BeautifulSoup(r.text, "html.parser")
+    except requests.exceptions.ReadTimeout:
+        url_domain = url.split("/")[2]
+        final_report["error"] = (
+            f"!!! timeout (10 seconds) while checking the url with domain: `{url_domain}` !!!"
+        )
+        print(f"\n\n{final_report['error']}\n\n")
+        soup = BeautifulSoup("", "html.parser")
 
-    final_report = {}
     final_report["scrapped-url"] = url
     if len(metatags_to_search) > 0:
         final_report.update(
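
The timeout threaded in here comes from the new `sniffing: timeout` config key, although the logged message hard-codes "10 seconds" regardless of the configured value. A standalone sketch of the failure path, using httpbin.org purely as an illustrative slow endpoint:

    import requests

    url = "https://httpbin.org/delay/5"  # illustrative; responds after 5 s
    try:
        r = requests.get(url=url, timeout=1)  # sniff_url uses get_sniffing("timeout")
    except requests.exceptions.ReadTimeout:
        url_domain = url.split("/")[2]  # the host part, e.g. "httpbin.org"
        print(f"timeout while checking `{url_domain}`")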