scwrap 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scwrap/__init__.py ADDED
@@ -0,0 +1,15 @@
1
from .scwrap import (
    Page,
    wrap_node,
    wrap_node_group,
    wrap_page,
    wrap_parser,
)

# Public package API: the factory functions plus the Page type alias
# (re-exported from scwrap.scwrap).
__all__ = [
    "Page",
    "wrap_node",
    "wrap_node_group",
    "wrap_page",
    "wrap_parser",
]
scwrap/browser.py ADDED
@@ -0,0 +1,33 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Iterator
4
+ from contextlib import contextmanager
5
+
6
+ from camoufox.sync_api import Camoufox
7
+ from patchright.sync_api import Page as PatchrightPage, sync_playwright
8
+ from playwright.sync_api import Page as PlaywrightPage
9
+
10
+ Page = PatchrightPage | PlaywrightPage
11
+
12
+
13
@contextmanager
def patchright_page() -> Iterator[Page]:
    """Yield a fresh Patchright page in a headed Google Chrome.

    Launches via ``channel='chrome'`` (requires Google Chrome to be
    installed), uses a clean non-persistent context with no fixed
    viewport, and tears everything down on exit.
    """
    with (
        sync_playwright() as pw,
        pw.chromium.launch(channel='chrome', headless=False) as browser,
        browser.new_context(no_viewport=True) as context,
    ):
        yield context.new_page()
23
+
24
+
25
@contextmanager
def camoufox_page(locale: str | list[str] = 'ja-JP,ja') -> Iterator[Page]:
    """Yield a page from a headed, humanized Camoufox (Firefox) browser.

    *locale* is forwarded to Camoufox unchanged; the default requests
    Japanese. The browser is closed when the context exits.
    """
    fox = Camoufox(headless=False, humanize=True, locale=locale)
    with fox as browser:
        yield browser.new_page()
scwrap/scwrap.py ADDED
@@ -0,0 +1,232 @@
1
+ from __future__ import annotations
2
+
3
+ import random
4
+ import re
5
+ import time
6
+ import unicodedata as ud
7
+ from urllib.parse import urljoin
8
+
9
+ from loguru import logger
10
+ from patchright.sync_api import Page as PatchrightPage, ElementHandle as PatchrightElementHandle
11
+ from playwright.sync_api import Page as PlaywrightPage, ElementHandle as PlaywrightElementHandle
12
+ from selectolax.lexbor import LexborHTMLParser, LexborNode
13
+
14
+
15
+ Page = PatchrightPage | PlaywrightPage
16
+ ElementHandle = PatchrightElementHandle | PlaywrightElementHandle
17
+
18
+
19
def wrap_page(page: Page) -> _WrappedPage:
    """Wrap a live Patchright/Playwright page in the chainable scwrap API."""
    return _WrappedPage(page)
21
+
22
class _PageScoped:
    """Mixin for wrappers that carry their owning page.

    Provides factories that bind new element wrappers to the same page,
    so chained lookups keep resolving URLs against the right document.
    """

    # The page this wrapper (and everything it produces) belongs to.
    _page: Page

    def wrap_element(self, elem: ElementHandle | None) -> _WrappedElement:
        """Wrap a single (possibly missing) element handle."""
        return _WrappedElement(self._page, elem)

    def wrap_element_group(self, elems: list[_WrappedElement]) -> _WrappedElementGroup:
        """Wrap an already-wrapped list of elements as a group."""
        return _WrappedElementGroup(self._page, elems)
30
+
31
+
32
def wrap_parser(parser: LexborHTMLParser) -> _WrappedParser:
    """Wrap a selectolax parser in the chainable scwrap API."""
    return _WrappedParser(parser)
34
+
35
def wrap_node(node: LexborNode | None) -> _WrappedNode:
    """Wrap a single (possibly missing) selectolax node."""
    return _WrappedNode(node)
37
+
38
def wrap_node_group(nodes: list[_WrappedNode]) -> _WrappedNodeGroup:
    """Wrap a list of already-wrapped nodes as a group."""
    return _WrappedNodeGroup(nodes)
40
+
41
+
42
class _WrappedPage(_PageScoped):
    """Chainable wrapper around a live browser page."""

    def __init__(self, page: Page) -> None:
        self._page = page

    def css(self, selector: str) -> _WrappedElementGroup:
        """Return all elements matching *selector* as a wrapped group."""
        elems = self._page.query_selector_all(selector)
        return self.wrap_element_group([self.wrap_element(e) for e in elems])

    def goto(self, url: str | None, try_cnt: int = 3, wait_range: tuple[float, float] = (3, 5)) -> bool:
        """Navigate to *url* with retries.

        Makes up to *try_cnt* attempts, sleeping a uniformly random
        interval from *wait_range* (seconds) between them. Returns True
        on the first attempt that yields a response object; False for a
        falsy *url* or once all attempts are exhausted. Failures are
        logged, never raised.
        """
        if not url:
            return False
        for i in range(try_cnt):
            try:
                # page.goto may return None (per Playwright docs, e.g. for
                # same-page anchor navigation); that is treated as retryable.
                if self._page.goto(url) is not None:
                    return True
                else:
                    reason = "response is None"
            except Exception as e:
                reason = f"{type(e).__name__}: {e}"
            logger.warning(f"[goto] {url} ({i+1}/{try_cnt}) {reason}")
            if i + 1 < try_cnt:
                time.sleep(random.uniform(*wait_range))
        logger.error(f"[goto] giving up: {url}")
        return False

    def wait(self, selector: str, timeout: int = 15000) -> _WrappedElement:
        """Wait up to *timeout* ms for *selector*.

        Returns a wrapper around the found element, or a None-wrapper
        (logged) on timeout or any other error.
        """
        try:
            elem = self._page.wait_for_selector(selector, timeout=timeout)
            return self.wrap_element(elem)
        except Exception as e:
            logger.warning(f"[wait] {type(e).__name__}: {e} | selector={selector!r} | url={self._page.url}")
            return self.wrap_element(None)
74
+
75
+
76
class _WrappedElement(_PageScoped):
    """Chainable wrapper around a single element handle.

    A missing element is represented by wrapping None, so chained calls
    degrade to None/empty results instead of raising.
    """

    def __init__(self, page: Page, elem: ElementHandle | None) -> None:
        self._page = page
        self._elem = elem

    @property
    def raw(self) -> ElementHandle | None:
        """The underlying element handle, or None if absent."""
        return self._elem

    def css(self, selector: str) -> _WrappedElementGroup:
        """Return descendants matching *selector*; empty group when absent."""
        elems = self._elem.query_selector_all(selector) if self._elem else []
        return self.wrap_element_group([self.wrap_element(e) for e in elems])

    def next(self, selector: str) -> _WrappedElement:
        """Return the first following sibling element matching *selector*.

        Evaluated in the browser via nextElementSibling traversal.
        Wraps None when this element is absent, nothing matches, or the
        evaluation fails (logged).
        """
        if self._elem is None:
            return self.wrap_element(None)
        try:
            elem = self._elem.evaluate_handle(
                """(el, sel) => {
                    let cur = el.nextElementSibling;
                    while (cur) {
                        if (cur.matches(sel)) return cur;
                        cur = cur.nextElementSibling;
                    }
                    return null;
                }""",
                selector,
            ).as_element()
            return self.wrap_element(elem)
        except Exception as e:
            logger.error(f"[next] {self._elem} {type(e).__name__}: {e}")
            return self.wrap_element(None)

    @property
    def text(self) -> str | None:
        """Stripped text content; None when absent, empty, or whitespace-only."""
        if self._elem is None:
            return None
        if not (text := self._elem.text_content()):
            return None
        if not (t := text.strip()):
            return None
        return t

    def attr(self, attr_name: str) -> str | None:
        """Stripped attribute value, or None when the element or value is missing/empty."""
        if self._elem is None:
            return None
        return a.strip() if (a := self._elem.get_attribute(attr_name)) else None

    @property
    def url(self) -> str | None:
        """Absolute URL built from the href attribute.

        Non-navigational hrefs (#fragment, javascript:, mailto:, tel:,
        data:) yield None; anything else is resolved against the page's
        current URL.
        """
        if not (href := self.attr('href')):
            return None
        if re.search(r'(?i)^(?:#|javascript:|mailto:|tel:|data:)', href):
            return None
        return urljoin(self._page.url, href)
131
+
132
class _WrappedElementGroup(_PageScoped):
    """A list of wrapped elements with bulk accessors."""

    def __init__(self, page: Page, elems: list[_WrappedElement]) -> None:
        self._page = page
        self._elems = elems

    @property
    def raw(self) -> list[_WrappedElement]:
        """The wrapped elements backing this group."""
        return self._elems

    @property
    def first(self) -> _WrappedElement:
        """First element, or a None-wrapper when the group is empty."""
        if self._elems:
            return self._elems[0]
        return self.wrap_element(None)

    def grep(self, pattern: str) -> _WrappedElementGroup:
        """Keep elements whose NFKC-normalized text matches *pattern* (re.search)."""
        matcher = re.compile(pattern)
        kept = []
        for wrapped in self._elems:
            content = wrapped.text
            if content and matcher.search(ud.normalize('NFKC', content)):
                kept.append(wrapped)
        return self.wrap_element_group(kept)

    @property
    def texts(self) -> list[str | None]:
        """Text of every element (None entries for empty/absent ones)."""
        return [e.text for e in self._elems]

    def attrs(self, attr_name: str) -> list[str | None]:
        """Attribute value of every element."""
        return [e.attr(attr_name) for e in self._elems]

    @property
    def urls(self) -> list[str | None]:
        """Resolved href of every element."""
        return [e.url for e in self._elems]
163
+
164
+
165
class _WrappedParser:
    """Chainable wrapper around a selectolax LexborHTMLParser."""

    def __init__(self, parser: LexborHTMLParser) -> None:
        self._parser = parser

    def css(self, selector: str) -> _WrappedNodeGroup:
        """Return all nodes matching *selector* as a wrapped group."""
        wrapped = [wrap_node(n) for n in self._parser.css(selector)]
        return wrap_node_group(wrapped)
172
+
173
class _WrappedNode:
    """Chainable wrapper around a single selectolax node.

    A missing node is represented by wrapping None, so chained calls
    degrade to None/empty results instead of raising.
    """

    def __init__(self, node: LexborNode | None) -> None:
        self._node = node

    @property
    def raw(self) -> LexborNode | None:
        """The underlying node, or None if absent."""
        return self._node

    def css(self, selector: str) -> _WrappedNodeGroup:
        """Return descendant nodes matching *selector*; empty group when absent."""
        found = self._node.css(selector) if self._node else []
        return wrap_node_group([wrap_node(n) for n in found])

    def next(self, selector: str) -> _WrappedNode:
        """Return the first following sibling element matching *selector*.

        Walks the sibling chain, skipping non-element nodes; wraps None
        when this node is absent or nothing matches.
        """
        if self._node is None:
            return wrap_node(None)
        sibling = self._node.next
        while sibling is not None:
            if sibling.is_element_node and sibling.css_matches(selector):
                return wrap_node(sibling)
            sibling = sibling.next
        return wrap_node(None)

    @property
    def text(self) -> str | None:
        """Stripped text content, or None when absent or empty."""
        if self._node is None:
            return None
        stripped = self._node.text(strip=True)
        if stripped:
            return stripped
        return None

    def attr(self, attr_name: str) -> str | None:
        """Stripped attribute value, or None when the node or value is missing/empty."""
        if self._node is None:
            return None
        value = self._node.attributes.get(attr_name)
        if value:
            return value.strip()
        return None
205
+
206
class _WrappedNodeGroup:
    """A list of wrapped nodes with bulk accessors."""

    def __init__(self, nodes: list[_WrappedNode]) -> None:
        self._nodes = nodes

    @property
    def raw(self) -> list[_WrappedNode]:
        """The wrapped nodes backing this group."""
        return self._nodes

    @property
    def first(self) -> _WrappedNode:
        """First node, or a None-wrapper when the group is empty."""
        if self._nodes:
            return self._nodes[0]
        return wrap_node(None)

    def grep(self, pattern: str) -> _WrappedNodeGroup:
        """Keep nodes whose NFKC-normalized text matches *pattern* (re.search)."""
        matcher = re.compile(pattern)
        kept = []
        for wrapped in self._nodes:
            content = wrapped.text
            if content and matcher.search(ud.normalize('NFKC', content)):
                kept.append(wrapped)
        return wrap_node_group(kept)

    @property
    def texts(self) -> list[str | None]:
        """Text of every node (None entries for empty/absent ones)."""
        return [n.text for n in self._nodes]

    def attrs(self, attr_name: str) -> list[str | None]:
        """Attribute value of every node."""
        return [n.attr(attr_name) for n in self._nodes]
232
+
scwrap/utils.py ADDED
@@ -0,0 +1,70 @@
1
+ from __future__ import annotations
2
+
3
+ import hashlib
4
+ import random
5
+ import time
6
+ from pathlib import Path
7
+ from typing import Callable
8
+
9
+ import pandas as pd
10
+ from loguru import logger
11
+ from selectolax.lexbor import LexborHTMLParser
12
+
13
+
14
def parse_html(path: Path | str) -> LexborHTMLParser | None:
    """Read and parse the UTF-8 HTML file at *path*.

    Returns the parser, or None (after logging) if reading or parsing fails.
    """
    try:
        source = Path(path).read_text(encoding='utf-8')
        return LexborHTMLParser(source)
    except Exception as e:
        logger.error(f"[parse_html] {path} {type(e).__name__}: {e}")
        return None
20
+
21
+
22
def from_here(file: str) -> Callable[[str], Path]:
    """Return a resolver mapping relative paths onto *file*'s directory.

    Typical use: ``fh = from_here(__file__); fh('data/out.csv')``.
    """
    anchor = Path(file).resolve().parent

    def resolve(rel: str) -> Path:
        return anchor / rel

    return resolve
25
+
26
+
27
def random_sleep(a: float, b: float) -> None:
    """Sleep for a uniformly random duration drawn from [a, b] seconds."""
    delay = random.uniform(a, b)
    time.sleep(delay)
29
+
30
+
31
def append_csv(path: Path | str, row: dict) -> None:
    """Append *row* as one CSV line to *path* (utf-8-sig).

    A header line is written only when the file does not exist yet or is
    zero bytes, so repeated appends never interleave headers with data.
    Errors are logged and swallowed so one bad write does not abort a
    long scraping run.
    """
    p = Path(path)
    try:
        # Short-circuit boolean instead of the redundant
        # `True if not p.exists() else ...` conditional; stat() is only
        # reached when the file exists.
        needs_header = not p.exists() or p.stat().st_size == 0
        pd.DataFrame([row]).to_csv(
            p,
            mode='a',
            index=False,
            header=needs_header,
            encoding='utf-8-sig',
        )
    except Exception as e:
        logger.error(f"[append_csv] {path} {row} {type(e).__name__}: {e}")
43
+
44
+
45
def write_parquet(path: Path | str, rows: list[dict]) -> None:
    """Write *rows* (list of dicts) to *path* as a Parquet file.

    Errors are logged and swallowed (best-effort persistence).
    """
    try:
        frame = pd.DataFrame(rows)
        frame.to_parquet(Path(path), index=False)
    except Exception as e:
        logger.error(f"[write_parquet] {path} {type(e).__name__}: {e}")
53
+
54
+
55
def hash_name(key: str) -> str:
    """Return the hex MD5 digest of *key*.

    Used as a stable, filesystem-safe file name — not for security.
    """
    digest = hashlib.md5(key.encode())
    return digest.hexdigest()
57
+
58
+
59
def save_html(filepath: Path, html: str) -> bool:
    """Write *html* to *filepath*, creating parent directories as needed.

    Un-encodable characters are replaced rather than raising. Returns
    True on success; on failure logs the error and returns False.
    """
    try:
        filepath.parent.mkdir(parents=True, exist_ok=True)
        filepath.write_text(html, encoding="utf-8", errors="replace")
    except Exception as e:
        logger.error(f"[save_html] {filepath} {type(e).__name__}: {e}")
        return False
    return True
67
+
68
+
69
def log_to_file(path: Path | str) -> None:
    """Attach a WARNING-level loguru sink writing UTF-8 to *path*."""
    sink = Path(path)
    logger.add(sink, level="WARNING", encoding="utf-8")
@@ -0,0 +1,202 @@
1
+ Metadata-Version: 2.4
2
+ Name: scwrap
3
+ Version: 0.1.0
4
+ Summary: Lightweight scraping helpers: wrapped Page/parser APIs (Patchright, Playwright, selectolax), browser presets, CSV/Parquet and logging utilities.
5
+ Requires-Python: >=3.12
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: patchright>=1.40
9
+ Requires-Dist: playwright>=1.40
10
+ Requires-Dist: selectolax>=0.3
11
+ Requires-Dist: pandas>=2.0
12
+ Requires-Dist: pyarrow>=14.0
13
+ Requires-Dist: camoufox>=0.4
14
+ Requires-Dist: loguru>=0.7
15
+
16
+ # scwrap
17
+
18
+ ## Overview - 概要
19
+
20
+ scwrap is a scraping utility library built on Patchright, Playwright, and selectolax.
21
+ scwrap は Patchright / Playwright(`Page` API)と selectolax をベースにしたスクレイピングユーティリティライブラリです。**細かい挙動はプリミティブの組み合わせで組み立てる**前提の薄いラッパーです(「よしなに」な自動修復は置かない方針)。
22
+
23
+ DOM・パーサのラッパーは **`scwrap`**(`wrap_page` / `wrap_parser` などのファクトリー)から、ブラウザ起動は **`scwrap.browser`**、CSV やログなどの周辺は **`scwrap.utils`** から import します。
24
+
25
+
26
+ ## Requirements - 必要条件
27
+
28
+ - Python 3.12 or higher(`requires-python` は `pyproject.toml` 参照)
29
+ - 主要依存: patchright, playwright, selectolax, pandas, pyarrow, camoufox, loguru(一覧・下限は `pyproject.toml` の `[project.dependencies]`)
30
+ - `write_parquet` は **pandas + pyarrow**(`pyarrow` は依存に含まれる)。別エンジンに切り替える場合のみ `fastparquet` などが必要になることがあります。
31
+ - ブラウザ: **Patchright / Playwright 用のブラウザバイナリの取得**と、下記のとおり **`patchright_page` は Google Chrome 前提**です。
32
+
33
+ ## Installation - インストール
34
+
35
+ ### pip
36
+
37
+ ```
38
+ pip install scwrap
39
+ ```
40
+
41
+ ### uv (推奨)
42
+
43
+ ```
44
+ uv add scwrap
45
+ ```
46
+
47
+ Playwright / Patchright が使うブラウザバイナリは別途取得してください。
48
+ 加えて **`patchright_page()` は `channel='chrome'` で起動するため、マシンに [Google Chrome](https://www.google.com/chrome/) がインストールされている必要があります**(Chromium のみの環境では起動に失敗することがあります)。
49
+
50
+ ### Patchright(Chromium 等)
51
+
52
+ #### pip
53
+
54
+ ```
55
+ python -m patchright install chromium
56
+ ```
57
+
58
+ #### uv (推奨)
59
+
60
+ ```
61
+ uv run patchright install chromium
62
+ ```
63
+
64
+ ### Camoufox(Firefox)
65
+
66
+ #### pip
67
+
68
+ ```
69
+ camoufox fetch
70
+ ```
71
+
72
+ #### uv (推奨)
73
+
74
+ ```
75
+ uv run camoufox fetch
76
+ ```
77
+
78
+ ## メソッド
79
+
80
+ ### `scwrap`(ラッパー)
81
+
82
+ ブラウザ側は `wrap_page(page)` が起点です。`goto`・`wait`・`css` などはこの戻り値に対して呼びます。要素が複数なら `css(...)` はグループを返し、先頭だけなら `.first`、正規表現で絞り込みは `.grep(pattern)`、相対 URL の解決には `.urls`(単一は `.url`)を使います。テキストや生の要素は `.text` / `.raw` プロパティです。
83
+
84
+ 静的 HTML(selectolax)側は `wrap_parser(parser)` から `css` / `grep` / `text` など(ノードは `wrap_node` 系)。クラス実装は非公開で、**コンストラクトは常にこれらのファクトリー経由**にしてください。
85
+
86
+ ### `scwrap.browser`
87
+
88
+ - **`patchright_page()`** … コンテキストマネージャ。Patchright で **Google Chrome**(`channel='chrome'`)を起動し、**毎回クリーンな `BrowserContext`** の `Page` を `with` に渡す(永続プロファイルは使わない)。`headless=False`・`no_viewport=True` などは固定。
89
+
90
+ - **`camoufox_page(locale=...)`** … Camoufox(Firefox)で `Page` を開く。
91
+ _例:_ `with camoufox_page(locale='en-US,en') as page:`
92
+ デフォルトの `locale` は `'ja-JP,ja'`。`headless=False`・`humanize=True` は固定。
93
+
94
+ ウィンドウ最大化が必要なら、コードではなく **ブラウザ上で手動で行って**ください(起動引数に依存させない)。
95
+
96
+ ### `scwrap.utils`
97
+
98
+ `log_to_file`・`from_here`・`parse_html`・`append_csv`・`write_parquet`・`save_html`・`hash_name`・`random_sleep` など(各関数は `scwrap/utils.py` を参照)。`log_to_file` はログファイルの **親ディレクトリが無いと失敗**するので、必要なら先に `Path.mkdir` するか、`save_html` のように親を作る処理を挟んでください。
99
+
100
+
101
+ ## Basic Usage - 基本的な使い方
102
+
103
+ ```python
104
+ from scwrap import wrap_page
105
+ from scwrap.browser import patchright_page
106
+ from scwrap.utils import log_to_file, append_csv, from_here, random_sleep
107
+
108
+ fh = from_here(__file__)
109
+ log_to_file(fh('log/scraping.log'))
110
+
111
+ with patchright_page() as page:
112
+ p = wrap_page(page)
113
+ p.goto('https://www.foobarbaz1.jp')
114
+
115
+ pref_urls = p.css('li.item > ul > li > a').urls
116
+
117
+ classroom_urls = []
118
+ for i, url in enumerate(pref_urls, 1):
119
+ print(f'pref_urls {i}/{len(pref_urls)}')
120
+ if not url or not p.goto(url):
121
+ continue
122
+ random_sleep(1, 2)
123
+ classroom_urls.extend(p.css('.school-area h4 a').urls)
124
+
125
+ for i, url in enumerate(classroom_urls, 1):
126
+ print(f'classroom_urls {i}/{len(classroom_urls)}')
127
+ if not p.goto(url):
128
+ continue
129
+ random_sleep(1, 2)
130
+ append_csv(fh('csv/out.csv'), {
131
+ 'URL': page.url,
132
+ '教室名': p.css('h1 .text01').first.text,
133
+ '住所': p.css('.item .mapText').first.text,
134
+ '電話番号': p.css('.item .phoneNumber').first.text,
135
+ 'HP': p.css('th').grep('ホームページ').first.next('td').css('a').first.url,
136
+ })
137
+ ```
138
+
139
+ ## Save HTML while scraping - スクレイピングしながらHTMLを保存する
140
+
141
+ ```python
142
+ from scwrap import wrap_page
143
+ from scwrap.browser import camoufox_page
144
+ from scwrap.utils import log_to_file, append_csv, from_here, hash_name, random_sleep, save_html
145
+
146
+ fh = from_here(__file__)
147
+ log_to_file(fh('log/scraping.log'))
148
+
149
+ with camoufox_page() as page:
150
+ ctx = {}
151
+ p = wrap_page(page)
152
+ p.goto('https://www.foobarbaz1.jp')
153
+
154
+ ctx['アイテムURLs'] = p.css('ul.items > li > a').urls
155
+
156
+ for i, url in enumerate(ctx['アイテムURLs'], 1):
157
+ print(f"アイテムURLs {i}/{len(ctx['アイテムURLs'])}")
158
+ if not p.goto(url):
159
+ continue
160
+ random_sleep(1, 2)
161
+ if p.wait('#logo', timeout=10000).raw is None:
162
+ continue
163
+ file_name = f'{hash_name(url)}.html'
164
+ if not save_html(fh('html') / file_name, page.content()):
165
+ continue
166
+ append_csv(fh('outurlhtml.csv'), {
167
+ 'URL': url,
168
+ 'HTML': file_name,
169
+ })
170
+ ```
171
+
172
+ ## Scrape from local HTML files - 保存済みHTMLからスクレイピングしてParquetに出力する
173
+
174
+ ```python
175
+ import pandas as pd
176
+
177
+ from scwrap import wrap_parser
178
+ from scwrap.utils import log_to_file, from_here, parse_html, write_parquet
179
+
180
+ fh = from_here(__file__)
181
+ log_to_file(fh('log/scraping.log'))
182
+
183
+ df = pd.read_csv(fh('outurlhtml.csv'))
184
+ results = []
185
+ for i, (url, path) in enumerate(zip(df['URL'], df['HTML']), 1):
186
+ print(f'outhtml {i}/{len(df)}')
187
+ if not (parser := parse_html(fh('html') / path)):
188
+ continue
189
+ p = wrap_parser(parser)
190
+ results.append({
191
+ 'URL': url,
192
+ '教室名': p.css('h1 .text02').first.text,
193
+ '住所': p.css('.item .mapText').first.text,
194
+ '所在地': p.css('dt').grep(r'所在地').first.next('dd').text,
195
+ })
196
+ write_parquet(fh('outhtml.parquet'), results)
197
+ ```
198
+
199
+ ## License - ライセンス
200
+
201
+ [MIT](./LICENSE)
202
+
@@ -0,0 +1,8 @@
1
+ scwrap/__init__.py,sha256=GTlVZ5bqedRNxXhzDM-laifkYnBvjAhwgwZVKT7LlMI,220
2
+ scwrap/browser.py,sha256=3dRr0egksptuC5yfZqbJv3m3bKKC_hT9pKqDaiFCJ_c,946
3
+ scwrap/scwrap.py,sha256=SGh0YeqRI-43f2FM61JIwFVByGmMM2Iqbr7M5d2U2-w,7815
4
+ scwrap/utils.py,sha256=LzCusWPTOEXQ_3anjKnxWVjbkqyGGL-GAqh7LQwITu4,1955
5
+ scwrap-0.1.0.dist-info/licenses/LICENSE,sha256=q8ED812OTMMCwQSdHvtx6PSnmtRIotcIjKPHMmVseQI,1096
6
+ scwrap-0.1.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
7
+ scwrap-0.1.0.dist-info/METADATA,sha256=3lLujlmGR_DgRRmeD9p3hieOxgaCcbfEmtGSwnaNWSM,7534
8
+ scwrap-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.12.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Nishizawa Takamasa
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.