repo-digest 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,5 @@
1
# Public API of the repo_digest package: a single entry point.
__all__ = [
    "export_repo_as_text",
]

# Re-export the core function so callers can do `from repo_digest import export_repo_as_text`.
from .core import export_repo_as_text
repo_digest/cli.py ADDED
@@ -0,0 +1,43 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ from .core import export_repo_as_text
5
+
6
def main() -> None:
    """Command-line entry point for repo-digest.

    Parses arguments, validates the target path, and delegates to
    export_repo_as_text, exiting with its return code.

    Exit codes: 0 success, 1 bad path, 2 safety violation (secrets
    blocked), 3 size limit exceeded (2 and 3 come from core).
    """
    parser = argparse.ArgumentParser(
        prog="repo-digest",
        description="Turn any repository into an AI-ready text bundle with safe defaults and rich analytics.",
        epilog=(
            "Safety: By default, files matching sensitive patterns (e.g., .env, *secret*, *.key) are blocked. "
            "Use --allow-secrets only if you understand the risk."
        ),
    )
    parser.add_argument("path", nargs="?", default=".", help="Path to repository (default: current directory)")
    parser.add_argument("-o", "--output", default="repo_export.txt", help="Output file path (default: repo_export.txt)")
    parser.add_argument("--preview", action="store_true", help="Preview counts only; do not write output")
    parser.add_argument("--max-bytes", type=int, default=None, help="Fail if estimated total bytes exceed this limit")
    parser.add_argument("--allow-secrets", action="store_true", help="Allow files that match sensitive patterns (NOT recommended)")
    parser.add_argument("--no-gitignore", action="store_true", help="Do not respect .gitignore (default is to respect it)")

    args = parser.parse_args()

    path = os.path.abspath(args.path)
    # Fail fast on an unusable path. Diagnostics go to stderr (bug fix:
    # they previously went to stdout, polluting piped/captured output).
    if not os.path.exists(path):
        print(f"[error] Path does not exist: {path}", file=sys.stderr)
        sys.exit(1)
    if not os.path.isdir(path):
        print(f"[error] Not a directory: {path}", file=sys.stderr)
        sys.exit(1)

    code = export_repo_as_text(
        path,
        args.output,
        allow_secrets=args.allow_secrets,
        respect_gitignore=(not args.no_gitignore),
        max_bytes=args.max_bytes,
        preview=args.preview,
    )
    sys.exit(code)


if __name__ == "__main__":
    main()
repo_digest/core.py ADDED
@@ -0,0 +1,340 @@
1
+ import os
2
+ import fnmatch
3
+ from collections import defaultdict, Counter
4
+ from datetime import datetime
5
+ from typing import Iterable, Dict, Any, List, Tuple, Optional
6
+
7
+ try:
8
+ import tiktoken # type: ignore
9
+ except ImportError: # pragma: no cover - optional dependency
10
+ tiktoken = None
11
+
12
# Comprehensive list of directories and files to exclude.
# Entries are fnmatch-style patterns; is_ignored matches them against each
# individual path component, and iter_files matches them against directory
# and file names while walking.
EXCLUDES = [
    # Version control
    '.git', '.svn', '.hg', '.bzr',

    # OS generated files
    '.DS_Store', 'Thumbs.db', 'desktop.ini',

    # Python
    '__pycache__', '*.pyc', '*.pyo', '*.pyd', '.Python',
    'pip-log.txt', 'pip-delete-this-directory.txt',
    '.venv', 'venv', 'ENV', 'env',
    '.pytest_cache', '.mypy_cache', '.tox',
    'htmlcov', '.coverage', '.coverage.*',
    '*.egg-info', 'dist', 'build', 'wheels',
    '.eggs', '*.egg',

    # Node.js / JavaScript
    'node_modules', 'npm-debug.log*', 'yarn-debug.log*', 'yarn-error.log*',
    '.npm', '.yarn', '.pnp', '.pnp.js',
    'bower_components', 'jspm_packages',

    # IDE and editors
    '.idea', '.vscode', '*.swp', '*.swo', '*~',
    '.project', '.classpath', '.settings',
    '*.sublime-project', '*.sublime-workspace',

    # Build outputs
    'target', 'out', 'bin', 'obj',
    '*.class', '*.jar', '*.war', '*.ear',
    '*.dll', '*.exe', '*.o', '*.so', '*.dylib',

    # Logs and databases
    '*.log', '*.sql', '*.sqlite', '*.db',
    'logs', 'log',

    # Temporary files
    '*.tmp', '*.temp', '*.bak', '*.backup', '*.cache',
    '.cache', 'tmp', 'temp',

    # Security sensitive files
    '*.key', '*.pem', '*.p12', '*.pfx',
    'secrets', 'credentials',

    # Documentation builds
    # NOTE(review): 'docs/_build' contains a separator but patterns are
    # matched per path component, so it can never match ('_build' already
    # covers it) — confirm before removing.
    '_build', 'site', 'docs/_build',

    # Package manager locks (usually not needed for understanding code)
    'package-lock.json', 'yarn.lock', 'pnpm-lock.yaml',
    'Pipfile.lock', 'poetry.lock', 'composer.lock',

    # Other
    '.sass-cache', '.next', '.nuxt', '.turbo',
    '.docusaurus', '.cache-loader',
    'vendor', 'vendors',
]

# File extensions to exclude. Compared (lowercased) against the suffix
# returned by os.path.splitext in is_ignored.
EXCLUDE_EXTENSIONS = [
    # Binary files
    '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.ico', '.svg',
    '.mp3', '.mp4', '.avi', '.mov', '.wmv', '.flv',
    '.zip', '.tar', '.gz', '.rar', '.7z',
    '.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx',

    # Compiled files
    '.pyc', '.pyo', '.class', '.o', '.so', '.dll', '.exe',

    # Lock files
    '.lock',

    # Large data files
    '.csv', '.tsv', '.parquet', '.feather', '.h5', '.hdf5',

    # Font files
    '.ttf', '.otf', '.woff', '.woff2', '.eot',

    # Map files
    # NOTE(review): os.path.splitext yields only the final suffix, so the
    # multi-part entries '.min.js.map' and '.css.map' can never match here;
    # '.map' already covers them — confirm before removing.
    '.map', '.min.js.map', '.css.map',
]

# Patterns for files that might contain sensitive information; matched
# case-insensitively against the whole relative path.
SENSITIVE_PATTERNS = [
    '*secret*', '*password*', '*token*', '*key*',
    '*.pem', '*.key', '*.cert', '*.crt',
    '.env*', '*.env',
]

# Name of the ignore file read by load_gitignore.
GITIGNORE = '.gitignore'
101
+
102
def load_gitignore(root_dir: str) -> List[str]:
    """Return the active patterns from *root_dir*'s .gitignore file.

    Blank lines and '#' comment lines are dropped. An empty list is
    returned when no .gitignore exists.
    """
    gitignore_path = os.path.join(root_dir, GITIGNORE)
    if not os.path.exists(gitignore_path):
        return []
    with open(gitignore_path, 'r', encoding='utf-8', errors='ignore') as fh:
        stripped = (raw.strip() for raw in fh)
        return [entry for entry in stripped if entry and not entry.startswith('#')]
112
+
113
def is_ignored(path: str, patterns: Iterable[str], check_sensitive: bool = True) -> bool:
    """Decide whether the relative *path* should be excluded from export.

    Checks, in order: the caller-supplied .gitignore patterns, the
    extension blocklist, optionally the sensitive-name patterns, and
    finally the EXCLUDES rules applied to each path component.

    NOTE(review): gitignore matching is fnmatch-based, so gitignore
    specifics such as '!' negation or trailing-slash directory patterns
    are not honored — confirm this limitation is acceptable.
    """
    # Caller-supplied (.gitignore) patterns against the full relative path.
    if any(fnmatch.fnmatch(path, pat) for pat in patterns):
        return True

    # Hard-coded extension blocklist (lowercased final suffix).
    extension = os.path.splitext(path)[1]
    if extension.lower() in EXCLUDE_EXTENSIONS:
        return True

    # Sensitive-looking names, case-insensitive (optional).
    if check_sensitive:
        lowered = path.lower()
        if any(fnmatch.fnmatch(lowered, pat.lower()) for pat in SENSITIVE_PATTERNS):
            return True

    # EXCLUDES rules applied to every individual path component.
    return any(
        fnmatch.fnmatch(component, rule)
        for component in path.split(os.sep)
        for rule in EXCLUDES
    )
138
+
139
def count_tokens(text: str, encoder=None) -> int:
    """Count tokens in *text*.

    Uses *encoder* (a tiktoken encoding) when provided; otherwise falls
    back to a whitespace-split word count as a rough approximation.
    """
    if not encoder:
        return len(text.split())
    return len(encoder.encode(text))
143
+
144
def iter_files(root_dir: str, patterns: Iterable[str]) -> Iterable[str]:
    """Yield root-relative paths of every candidate file under *root_dir*.

    Prunes excluded directories during the walk, then drops files that
    match EXCLUDES, the ignore rules, or look minified. Sensitive-name
    filtering is deliberately deferred to the caller.
    """
    for current_dir, subdirs, names in os.walk(root_dir):
        # Prune excluded directories in place so os.walk never descends.
        subdirs[:] = [
            d for d in subdirs
            if not any(fnmatch.fnmatch(d, rule) for rule in EXCLUDES)
        ]

        for name in sorted(names):
            # Drop files whose name matches an exclusion rule.
            if any(fnmatch.fnmatch(name, rule) for rule in EXCLUDES):
                continue

            relative = os.path.relpath(os.path.join(current_dir, name), root_dir)

            # Ignore rules (gitignore, extensions, EXCLUDES) — sensitive
            # patterns are checked later by the caller.
            if is_ignored(relative, patterns, check_sensitive=False):
                continue

            # Minified assets add bulk without insight.
            if '.min.' in name or name.endswith(('.min.js', '.min.css')):
                continue

            yield relative
165
+
166
def build_dir_aggregates(file_infos: Iterable[Dict[str, Any]]) -> Tuple[Dict[str, Dict[str, int]], Dict[str, List[str]]]:
    """Aggregate per-file stats up the directory hierarchy.

    Each element of *file_infos* must carry "path" (root-relative),
    "tokens", and "bytes". Returns a pair:
      - aggregates: dir path -> {"files", "tokens", "bytes"} totals for
        everything under that directory ("." is the root);
      - children: dir path -> sorted list of immediate subdirectories.
    """
    aggregates: Dict[str, Dict[str, int]] = defaultdict(lambda: {"files": 0, "tokens": 0, "bytes": 0})
    children_map: Dict[str, set] = defaultdict(set)
    for info in file_infos:
        rel_path = info["path"]
        tokens = int(info["tokens"])  # ensure ints
        size = int(info["bytes"])
        # Accumulate into the file's directory and every ancestor up to root.
        dir_path = os.path.dirname(rel_path) or "."
        parts = [] if dir_path == "." else dir_path.split(os.sep)
        for i in range(len(parts) + 1):
            d = "." if i == 0 else os.sep.join(parts[:i])
            aggregates[d]["files"] += 1
            aggregates[d]["tokens"] += tokens
            aggregates[d]["bytes"] += size
        # Link every ancestor directory to its parent. Bug fix: previously
        # only the file's immediate directory was linked to its parent, so
        # intermediate directories containing no direct files were missing
        # from the children map and the printed tree dropped whole subtrees.
        if parts:
            for i in range(1, len(parts) + 1):
                child = os.sep.join(parts[:i])
                parent = "." if i == 1 else os.sep.join(parts[:i - 1])
                children_map[parent].add(child)
        else:
            children_map["."].add(".")  # ensure root exists
    # Convert sets to sorted lists, dropping any self-reference.
    children = {k: sorted(v - {k}) for k, v in children_map.items()}
    return aggregates, children
191
+
192
def print_dir_tree(out, aggregates: Dict[str, Dict[str, int]], children: Dict[str, List[str]], current: str = ".", prefix: str = "") -> None:
    """Recursively write an ASCII tree of directories with per-dir totals.

    *out* is any object with a ``write`` method. *aggregates* maps a
    directory path to its {"files", "tokens", "bytes"} totals and
    *children* maps a directory path to its immediate subdirectories,
    both as produced by build_dir_aggregates.
    """
    # Print current directory line (skip printing for root at first call)
    if prefix == "":
        # root header printed separately by caller
        pass
    else:
        data = aggregates.get(current, {"files": 0, "tokens": 0, "bytes": 0})
        out.write(f"{prefix}{os.path.basename(current) or '.'}/ (files: {data['files']}, tokens: {data['tokens']}, bytes: {data['bytes']})\n")
    # children dirs
    dirs = sorted([d for d in children.get(current, []) if d != current])
    for idx, child in enumerate(dirs):
        is_last = idx == len(dirs) - 1
        branch = "└── " if is_last else "├── "
        # NOTE(review): the replacement strings below appear narrower than
        # the four-character branch markers they replace, so deeply nested
        # trees may misalign — confirm the intended continuation widths.
        next_prefix = (prefix.replace("└── ", " ").replace("├── ", "│ ") if prefix else "") + branch
        print_dir_tree(out, aggregates, children, child, next_prefix)
207
+
208
+
209
def export_repo_as_text(root_dir: str, output_file: str, *, allow_secrets: bool = False, respect_gitignore: bool = True, max_bytes: Optional[int] = None, preview: bool = False) -> int:
    """Export the repository at *root_dir* into a single annotated text file.

    Scans candidate files (see iter_files), counts tokens and bytes, and
    writes a bundle containing a summary, per-extension stats, a directory
    tree, every file's contents, and per-file rankings.

    Args:
        root_dir: root of the repository to scan.
        output_file: destination path for the generated bundle.
        allow_secrets: include files matching SENSITIVE_PATTERNS
            (default: block them and abort).
        respect_gitignore: honor patterns from the repo's .gitignore.
        max_bytes: abort if the total content size exceeds this limit.
        preview: print statistics to stdout only; write nothing.

    Returns an exit code:
        0  success
        2  safety violation (sensitive files detected and not allowed)
        3  exceeded the max_bytes limit
    """
    patterns = load_gitignore(root_dir) if respect_gitignore else []
    # tiktoken is optional; fall back to approximate word counting.
    encoder = tiktoken.get_encoding('cl100k_base') if tiktoken else None
    tokenizer_name = 'cl100k_base' if encoder else 'words_approx'

    file_infos: List[Dict[str, Any]] = []
    total_tokens = 0
    total_bytes = 0
    by_ext_tokens: Counter = Counter()
    by_ext_bytes: Counter = Counter()
    by_ext_files: Counter = Counter()

    # Pre-scan to detect secrets and size
    blocked_sensitive: List[str] = []

    for rel_path in iter_files(root_dir, patterns):
        # sensitive check by pattern (path-level); iter_files deliberately
        # skips this so it can be decided here based on allow_secrets.
        is_sensitive = any(fnmatch.fnmatch(rel_path.lower(), pat.lower()) for pat in SENSITIVE_PATTERNS)
        if is_sensitive and not allow_secrets:
            blocked_sensitive.append(rel_path)
            continue

        abs_path = os.path.join(root_dir, rel_path)
        try:
            # Full contents are held in memory until written out below.
            with open(abs_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            tokens = count_tokens(content, encoder)
            # Count the final line even when it lacks a trailing newline.
            lines = content.count('\n') + (1 if content and not content.endswith('\n') else 0)
            # On-disk size; may differ from len(content) after decoding.
            size = os.path.getsize(abs_path)
            file_infos.append({"path": rel_path, "tokens": tokens, "lines": lines, "bytes": size, "content": content})
            total_tokens += tokens
            total_bytes += size
            ext = os.path.splitext(rel_path)[1].lower() or "<no-ext>"
            by_ext_tokens[ext] += tokens
            by_ext_bytes[ext] += size
            by_ext_files[ext] += 1
        except Exception as e:
            # Best-effort: report unreadable files and keep going.
            print(f"[skip] {rel_path}: {e}")

    # Safety: secrets check — abort before writing anything.
    if blocked_sensitive and not allow_secrets:
        print("[SAFETY] Sensitive-looking files were blocked by default:")
        for p in blocked_sensitive[:20]:
            print(f" - {p}")
        if len(blocked_sensitive) > 20:
            print(f" ... and {len(blocked_sensitive) - 20} more")
        print("Re-run with --allow-secrets if you know what you're doing.")
        return 2

    # Warning when secrets are allowed
    if allow_secrets:
        sensitive_included = [info["path"] for info in file_infos if any(fnmatch.fnmatch(info["path"].lower(), pat.lower()) for pat in SENSITIVE_PATTERNS)]
        if sensitive_included:
            print(f"[WARNING] Including {len(sensitive_included)} sensitive files (--allow-secrets enabled)")
            for p in sensitive_included[:5]:
                print(f" - {p}")
            if len(sensitive_included) > 5:
                print(f" ... and {len(sensitive_included) - 5} more")

    # Preview mode: print estimates to stdout and return without writing.
    if preview:
        print("===== PREVIEW =====")
        print(f"Tokenizer: {tokenizer_name}")
        print(f"Total candidate files: {len(file_infos)}")
        print(f"Estimated total tokens: {total_tokens}")
        print(f"Estimated total bytes: {total_bytes}")
        print("Top extensions:")
        for ext in sorted(by_ext_files.keys())[:10]:
            print(f" {ext}: files={by_ext_files[ext]}, tokens={by_ext_tokens[ext]}, bytes={by_ext_bytes[ext]}")
        if max_bytes is not None and total_bytes > max_bytes:
            print(f"[LIMIT] Estimated bytes {total_bytes} exceed --max-bytes={max_bytes}")
            return 3
        return 0

    # Max bytes enforcement (write path)
    if max_bytes is not None and total_bytes > max_bytes:
        print(f"[LIMIT] Total bytes {total_bytes} exceed --max-bytes={max_bytes}. Use --preview first or raise the limit.")
        return 3

    # Build aggregates and tree
    aggregates, children = build_dir_aggregates(file_infos)

    with open(output_file, 'w', encoding='utf-8') as out:
        # Summary
        out.write('===== REPO SUMMARY =====\n')
        out.write(f"Generated: {datetime.now().isoformat()}\n")
        out.write(f"Tokenizer: {tokenizer_name}\n")
        out.write(f"Total files: {len(file_infos)}\n")
        out.write(f"Total tokens: {total_tokens}\n")
        out.write(f"Total bytes: {total_bytes}\n")

        # Extension breakdown
        out.write('\n===== SUMMARY BY EXTENSION =====\n')
        for ext in sorted(by_ext_files.keys()):
            out.write(f"{ext}: files={by_ext_files[ext]}, tokens={by_ext_tokens[ext]}, bytes={by_ext_bytes[ext]}\n")

        # Directory tree (root line printed here; recursion handles the rest)
        out.write('\n===== DIRECTORY TREE =====\n')
        root_data = aggregates.get('.', {"files": 0, "tokens": 0, "bytes": 0})
        out.write(f"./ (files: {root_data['files']}, tokens: {root_data['tokens']}, bytes: {root_data['bytes']})\n")
        print_dir_tree(out, aggregates, children, current='.', prefix='')

        # Files: full contents, sorted by path for deterministic output
        out.write('\n===== FILES =====\n')
        for info in sorted(file_infos, key=lambda x: x["path"]):
            out.write(f"\n===== FILE: {info['path']} =====\n")
            out.write(f"[TOKENS: {info['tokens']} | LINES: {info['lines']} | BYTES: {info['bytes']}]\n")
            out.write(info['content'])
            out.write('\n')

        # Detailed summary by file, largest token counts first
        out.write(f"\n===== SUMMARY BY FILE =====\n")
        for info in sorted(file_infos, key=lambda x: x['tokens'], reverse=True):
            out.write(f"{info['path']} : {info['tokens']} tokens, {info['lines']} lines, {info['bytes']} bytes\n")

        # Top files
        out.write(f"\n===== TOP 20 BY TOKENS =====\n")
        for info in sorted(file_infos, key=lambda x: x['tokens'], reverse=True)[:20]:
            out.write(f"{info['path']} : {info['tokens']} tokens\n")
        out.write(f"\n===== TOP 20 BY BYTES =====\n")
        for info in sorted(file_infos, key=lambda x: x['bytes'], reverse=True)[:20]:
            out.write(f"{info['path']} : {info['bytes']} bytes\n")

    return 0
@@ -0,0 +1,75 @@
1
+ Metadata-Version: 2.4
2
+ Name: repo-digest
3
+ Version: 0.1.0
4
+ Summary: Turn any repository into an AI-ready text bundle with safe defaults and rich analytics.
5
+ Author: Your Name
6
+ License: MIT
7
+ License-File: LICENSE
8
+ Keywords: ai,analysis,export,llm,repository,text,tiktoken,tokens
9
+ Classifier: Environment :: Console
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Topic :: Software Development :: Build Tools
14
+ Classifier: Topic :: Utilities
15
+ Requires-Python: >=3.8
16
+ Provides-Extra: dev
17
+ Requires-Dist: pytest>=7.0.0; extra == 'dev'
18
+ Provides-Extra: tiktoken
19
+ Requires-Dist: tiktoken>=0.4.0; extra == 'tiktoken'
20
+ Description-Content-Type: text/markdown
21
+
22
+ # repo-digest
23
+
24
+ Turn any repository into an AI-ready text bundle with safe defaults and rich analytics.
25
+
26
+ Who is this for?
27
+ - Anyone who wants to paste a project into ChatGPT/Claude or create a quick, comprehensive repo digest.
28
+ - Works out-of-the-box on macOS, Linux, Windows.
29
+
30
+ Quickstart (60 seconds)
31
+ 1) Install
32
+ - pip install repo-digest
33
+ - For precise token counts (optional): pip install "repo-digest[tiktoken]"
34
+
35
+ 2) Export your repo
36
+ - repo-digest . -o repo.txt
37
+
38
+ 3) Preview first (optional)
39
+ - repo-digest . -o repo.txt --preview
40
+
41
+ Safety first (defaults)
42
+ - Secrets are blocked by default (e.g., .env, *secret*, *.key, *.pem)
43
+ - Binary/large data files are excluded
44
+ - .gitignore respected by default
45
+ - To override secrets blocking (NOT recommended): --allow-secrets
46
+
47
+ Examples
48
+ - Export current repo: repo-digest . -o repo.txt
49
+ - Preview and check size: repo-digest . -o repo.txt --preview
50
+ - Enforce a size limit (bytes): repo-digest . -o repo.txt --max-bytes 5000000
51
+ - Ignore .gitignore: repo-digest . -o repo.txt --no-gitignore
52
+
53
+ Exit codes
54
+ - 0 success
55
+ - 1 runtime error (bad path, permission)
56
+ - 2 safety violation (secrets detected and not allowed)
57
+ - 3 exceeded size/limits
58
+
59
+ Troubleshooting
60
+ - Windows long paths: try running from a shorter path (e.g., C:\src)
61
+ - Encoding issues: files are read as UTF-8 with errors ignored
62
+ - Large repos: use --preview to estimate and --max-bytes to cap
63
+
64
+ FAQ
65
+ - Why are some files missing? They’re excluded by default to keep the export safe and useful. Use --no-gitignore or tweak locally if needed.
66
+ - Why token counts differ from my model? Install tiktoken for tokenizer parity; fallback uses an approximate word count.
67
+ - Can I include secrets? Not recommended. If you must: --allow-secrets (and understand the risk).
68
+
69
+ Roadmap (post-MVP)
70
+ - Markdown/JSON outputs, config file support
71
+ - GitHub URL input, chunking for huge repos
72
+ - Simple GUI if user demand is strong
73
+
74
+ License
75
+ - MIT
@@ -0,0 +1,8 @@
1
+ repo_digest/__init__.py,sha256=f0YYw7y8uxO-ZqTMSI0J0d2dhlHzOvvf7EazkTRO4uA,80
2
+ repo_digest/cli.py,sha256=dNP_ldwnN8Zw2tl_AH9KzAtrAWMJD-9R_Ooivex6HB4,1770
3
+ repo_digest/core.py,sha256=MBBZ6MgDNmt4HLK0dW3fR2u3Z3tF1lQgSwqHpRFmlP8,13605
4
+ repo_digest-0.1.0.dist-info/METADATA,sha256=YoWPUhfS-8b9bdqGPCAonEb_xsUd3_2Y6WSduH6LQZ8,2667
5
+ repo_digest-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
6
+ repo_digest-0.1.0.dist-info/entry_points.txt,sha256=0DbXPeQq15cx9Vn6miNs0w3ijVBoksyPi90QCIqKhEg,53
7
+ repo_digest-0.1.0.dist-info/licenses/LICENSE,sha256=OphKV48tcMv6ep-7j-8T6nycykPT0g8ZlMJ9zbGvdPs,1066
8
+ repo_digest-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ repo-digest = repo_digest.cli:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Your Name
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.