dataact 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,222 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ import dataclasses
5
+ from typing import Callable
6
+
7
+ from dataact.cache import SessionCache
8
+ from dataact.providers.base import ProviderAdapter
9
+ from dataact.tools.interpreter import PythonInterpreter
10
+ from dataact.tools.variables import make_list_variables_spec
11
+ from dataact.types import ToolSpec
12
+
13
# Tool name used both for the spec below and for the recursion guard that
# strips this tool out of a subagent's tool list.
_SUBAGENT_TOOL_NAME = "subagent"

# System prompt for the spawned worker. Formatted with the task text and the
# names of handles pre-loaded into the subagent's cache.
_WORKER_SYSTEM_TEMPLATE = """\
You are a clean-context worker invoked by another agent.

Your task: {task}

Available input handles (already loaded into your cache): {input_handles}

Use `python_interpreter` to inspect cached handles. Call `save(name, value)` for any
computed artifact worth returning. You must produce final text summarizing your
findings. If you save artifacts, mention what they contain and why they matter."""
25
+
26
+
27
def make_subagent_spec(
    adapter_factory: Callable[[], ProviderAdapter],
    parent_tools: list[ToolSpec],
    parent_cache: SessionCache,
    run_dir: str = "./runs",
    get_sub_cache: Callable[[], SessionCache] | None = None,
    make_sub_tools: Callable[[SessionCache], list[ToolSpec]] | None = None,
) -> ToolSpec:
    """Create a subagent tool with an explicit cache boundary.

    The returned tool spawns a fresh Harness (new adapter, new cache, new
    message history). State crosses the boundary only via ``input_handles``
    (copied in) and, under ``output_policy="publish_created"``, newly
    created handles (copied back out).

    If parent_tools include cache-bound wrappers such as ConnectorRegistry
    wrapped specs, pass make_sub_tools so those handlers can be rebuilt against
    the subagent cache. The fallback path only copies cache-independent tools
    and the built-in cache tools it knows how to rebind.

    Args:
        adapter_factory: Zero-arg callable returning a fresh provider adapter.
        parent_tools: Tool specs available to the parent agent.
        parent_cache: The parent agent's session cache.
        run_dir: Directory for the subagent's run logs.
        get_sub_cache: Optional factory for the subagent cache; default is a
            new SessionCache mirroring the parent's sample_size.
        make_sub_tools: Optional factory that rebuilds cache-bound tool specs
            against the subagent cache.

    Returns:
        The ``subagent`` ToolSpec whose handler runs the nested loop.
    """

    def subagent(
        task: str,
        input_handles: list[str] | None = None,
        output_policy: str = "text_only",
    ) -> str:
        # Imported here to avoid a circular import with the harness module.
        from dataact.loop import Harness

        # Fix: previously any value other than "text_only" silently took the
        # publish path. Reject unknown policies up front instead.
        if output_policy not in ("text_only", "publish_created"):
            return (
                "Error: unknown output_policy "
                f"{output_policy!r}; expected 'text_only' or 'publish_created'"
            )

        # Validate input_handles against parent cache before doing any work.
        if input_handles:
            missing = [h for h in input_handles if not parent_cache.has_handle(h)]
            if missing:
                return f"Error: input handles not found in parent cache: {missing}"

        # Build sub-cache
        if get_sub_cache is not None:
            sub_cache = get_sub_cache()
        else:
            sub_cache = SessionCache(sample_size=parent_cache.sample_size)

        # Copy requested handles into sub-cache. Copies (not references)
        # preserve the isolation boundary against in-place mutation.
        if input_handles:
            for handle in input_handles:
                try:
                    sub_cache.put(handle, _copy_cache_value(parent_cache.get(handle)))
                except Exception as exc:
                    return (
                        "Error: failed to copy input handle "
                        f"{handle!r}: {type(exc).__name__}: {exc}"
                    )

        # Track pre-run handles to detect newly created ones after the run.
        pre_run_handles = set(sub_cache.handle_names())

        # Build sub-tools — exclude the subagent tool itself to prevent
        # unbounded recursion.
        try:
            if make_sub_tools is not None:
                sub_tools = [
                    dataclasses.replace(t)
                    for t in make_sub_tools(sub_cache)
                    if t.name != _SUBAGENT_TOOL_NAME
                ]
            else:
                sub_tools = [
                    _copy_tool_for_subcache(t, sub_cache)
                    for t in parent_tools
                    if t.name != _SUBAGENT_TOOL_NAME
                ]
        except ValueError as exc:
            return f"Error: subagent tools are not isolated: {exc}"

        # Build the worker system prompt.
        handles_str = str(input_handles) if input_handles else "none"
        system = _WORKER_SYSTEM_TEMPLATE.format(task=task, input_handles=handles_str)

        # Spawn a fresh adapter so no message history is shared.
        sub_adapter = adapter_factory()

        sub_harness = Harness(
            adapter=sub_adapter,
            system=system,
            tools=sub_tools,
            run_dir=run_dir,
            cache=sub_cache,
        )

        try:
            final_text = sub_harness.run(task)
        except Exception as exc:
            # Surface failures as tool output instead of crashing the parent.
            return f"Error: subagent failed: {type(exc).__name__}: {exc}"

        if output_policy == "text_only":
            return f"Subagent final output:\n{final_text}"

        # publish_created: find newly created handles
        new_handles = {}
        for name in sub_cache.handle_names():
            if name in pre_run_handles:
                continue
            new_handles[name] = sub_cache.get(name)

        if not new_handles:
            return f"Subagent final output:\n{final_text}\n\nPublished outputs: none"

        published_lines = []
        for sub_name, value in new_handles.items():
            # Fix: copy on the way out as well. If get_sub_cache returns a
            # reused cache, publishing a shared reference would couple it to
            # the parent cache and break the isolation boundary.
            parent_name = parent_cache.put(sub_name, _copy_cache_value(value))
            snap = parent_cache.snapshot(parent_name)
            published_lines.append(f"- {sub_name} -> {parent_name}\n Snapshot: {snap}")

        published_str = "\n".join(published_lines)
        return (
            f"Subagent final output:\n{final_text}\n\n"
            f"Published outputs:\n{published_str}"
        )

    return ToolSpec(
        name=_SUBAGENT_TOOL_NAME,
        description=(
            "Spawn a clean-context subagent to handle a subtask. "
            "The subagent has fresh message history and session cache. "
            "Use input_handles to pass data from this cache to the subagent."
        ),
        input_schema={
            "type": "object",
            "properties": {
                "task": {
                    "type": "string",
                    "description": "Natural-language instruction for the subagent.",
                },
                "input_handles": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": (
                        "Parent cache handle names to copy into the subagent's cache."
                    ),
                },
                "output_policy": {
                    "type": "string",
                    "enum": ["text_only", "publish_created"],
                    "description": (
                        "'text_only': return only the subagent's final text. "
                        "'publish_created': also copy newly-created handles back to"
                        " parent cache."
                    ),
                },
            },
            "required": ["task"],
        },
        handler=subagent,
    )
173
+
174
+
175
def _copy_tool_for_subcache(tool: ToolSpec, sub_cache: SessionCache) -> ToolSpec:
    """Return a copy of *tool* that is safe to hand to a subagent.

    Built-in cache tools are rebuilt against *sub_cache*; any other tool whose
    handler closes over a SessionCache is rejected, since it would leak the
    parent cache into the subagent.
    """
    rebuilders = {
        "python_interpreter": lambda: PythonInterpreter.make_tool_spec(sub_cache),
        "list_variables": lambda: make_list_variables_spec(sub_cache),
    }
    rebuild = rebuilders.get(tool.name)
    if rebuild is not None:
        return rebuild()
    if _handler_closes_over_cache(tool.handler):
        raise ValueError(
            f"{tool.name!r} has a handler closed over a SessionCache. "
            "Pass make_sub_tools to rebuild cache-bound tool specs for the "
            "subagent cache."
        )
    return dataclasses.replace(tool)
187
+
188
+
189
+ def _handler_closes_over_cache(handler) -> bool:
190
+ closure = getattr(handler, "__closure__", None)
191
+ if not closure:
192
+ return False
193
+ for cell in closure:
194
+ try:
195
+ if isinstance(cell.cell_contents, SessionCache):
196
+ return True
197
+ except ValueError:
198
+ continue
199
+ return False
200
+
201
+
202
+ def _copy_cache_value(value):
203
+ try:
204
+ import pandas as pd
205
+
206
+ if isinstance(value, pd.DataFrame):
207
+ # Deep copy is deliberate: shallow DataFrame copies can still share
208
+ # underlying blocks, which would break the parent/subagent boundary
209
+ # for representative in-place mutations.
210
+ return value.copy(deep=True)
211
+ except ImportError:
212
+ pass
213
+
214
+ try:
215
+ import numpy as np
216
+
217
+ if isinstance(value, np.ndarray):
218
+ return value.copy()
219
+ except ImportError:
220
+ pass
221
+
222
+ return copy.deepcopy(value)
@@ -0,0 +1,25 @@
1
+ from __future__ import annotations
2
+
3
+ from dataact.cache import SessionCache
4
+ from dataact.types import ToolSpec
5
+
6
+
7
def make_list_variables_spec(cache: SessionCache) -> ToolSpec:
    """Build the ``list_variables`` tool bound to *cache*.

    The tool reports handle names with compact snapshots rather than raw
    payloads, keeping large values out of the model's context.
    """

    def list_variables() -> str:
        handles = cache.list_handles()
        if not handles:
            return "No variables in session cache."
        entries = [f"\n {name}:\n {snapshot}" for name, snapshot in handles.items()]
        header = f"Session cache ({len(handles)} handle(s)):"
        return "\n".join([header, *entries])

    return ToolSpec(
        name="list_variables",
        description=(
            "List all variables currently stored in the session cache"
            " with their snapshots."
        ),
        input_schema={"type": "object", "properties": {}},
        handler=list_variables,
    )
dataact/types.py ADDED
@@ -0,0 +1,54 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Any, Callable, Literal
5
+
6
+
7
@dataclass
class TextBlock:
    """A plain-text content block within a message."""

    text: str
10
+
11
+
12
@dataclass
class ToolUseBlock:
    """An assistant request to invoke a tool.

    tool_use_id pairs this request with its ToolResultBlock; tool_input is
    the JSON-decoded arguments dict.
    """

    tool_use_id: str
    tool_name: str
    tool_input: dict
17
+
18
+
19
@dataclass
class ToolResultBlock:
    """The outcome of a tool invocation, matched by tool_use_id.

    content is the tool's textual output; is_error marks a failed call.
    """

    tool_use_id: str
    content: str
    is_error: bool = False
24
+
25
+
26
# Union of every block type that may appear in Message.content.
ContentBlock = TextBlock | ToolUseBlock | ToolResultBlock
27
+
28
+
29
@dataclass
class Message:
    """A single conversation turn: a role plus its ordered content blocks."""

    role: Literal["user", "assistant"]
    content: list[ContentBlock]

    def __post_init__(self) -> None:
        # Guard clause: only the two provider-visible roles are accepted.
        if self.role in ("user", "assistant"):
            return
        raise ValueError(
            f"Invalid role: {self.role!r}. Must be 'user' or 'assistant'."
        )
39
+
40
+
41
@dataclass
class ToolSpec:
    """Declarative description of a tool plus its Python handler.

    handler and visible are harness-side only; to_provider_dict exposes the
    provider-facing subset.
    """

    name: str
    description: str
    input_schema: dict
    handler: Callable[..., Any] | None = None
    visible: bool = True

    def to_provider_dict(self) -> dict:
        # Only the fields the model provider needs to see.
        provider_fields = ("name", "description", "input_schema")
        return {field: getattr(self, field) for field in provider_fields}
@@ -0,0 +1,212 @@
1
+ Metadata-Version: 2.4
2
+ Name: dataact
3
+ Version: 0.1.0
4
+ Summary: Data agent with Python-native tools (no bash)
5
+ Project-URL: Homepage, https://github.com/maxkskhor/dataact
6
+ Project-URL: Repository, https://github.com/maxkskhor/dataact
7
+ Project-URL: Issues, https://github.com/maxkskhor/dataact/issues
8
+ Author: Max Khor
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Requires-Python: >=3.10
12
+ Requires-Dist: anthropic
13
+ Requires-Dist: loguru
14
+ Requires-Dist: numpy
15
+ Requires-Dist: pandas
16
+ Provides-Extra: dev
17
+ Requires-Dist: openai; extra == 'dev'
18
+ Requires-Dist: pytest; extra == 'dev'
19
+ Requires-Dist: pytest-mock; extra == 'dev'
20
+ Requires-Dist: python-dotenv; extra == 'dev'
21
+ Provides-Extra: openai
22
+ Requires-Dist: openai; extra == 'openai'
23
+ Description-Content-Type: text/markdown
24
+
25
+ # dataact
26
+
27
+ *(data + ReAct — a ReAct agent harness built for data workflows)*
28
+
29
+ A minimal, transparent, data-native agent harness for Python — built without bash.
30
+
31
+ Most agent frameworks hand the model a shell and call it a day. `dataact` takes a different approach: the model operates entirely through a sandboxed Python interpreter, with data stored in a session cache and exposed as named handles. No bash. No framework magic. Just a loop you can read in an afternoon.
32
+
33
+ Built as an installable reference implementation for engineers who want to understand how a production-style harness actually works. It is not a polished SDK surface; the convenience API exists to remove setup noise while keeping the harness boundaries visible.
34
+
35
+ The design is covered in a three-part series:
36
+
37
+ - [Designing a ReAct Harness for Data Workflows Without Bash](https://maxkskhor.substack.com/p/designing-a-react-harness-for-data)
38
+ - [How a Bash-Free Data Agent Remembers Its Work](https://maxkskhor.substack.com/p/how-a-bash-free-data-agent-remembers)
39
+ - [The Bugs Hidden Inside a Data Agent Harness](https://maxkskhor.substack.com/p/the-engineering-invariants-behind)
40
+
41
+ ---
42
+
43
+ ## Why no bash?
44
+
45
+ Giving an agent shell access is the path of least resistance, but it creates real problems in production: unpredictable side effects, security exposure, and behaviour that's hard to reproduce. `dataact` deliberately constrains the model to Python only — which turns out to be enough for most data workloads and forces cleaner tool design.
46
+
47
+ ---
48
+
49
+ ## Core design decisions
50
+
51
+ Each decision here is intentional. Understanding them is the point.
52
+
53
+ **Handle/snapshot pattern**
54
+ Large objects (DataFrames, arrays, query results) live in a `SessionCache`, not in message history. The model only sees a compact snapshot — shape, columns, a few sample rows. It accesses the data by writing Python against the handle name. This keeps context lean without hiding data from the model.
55
+
56
+ **Prefix-stable system prompt**
57
+ The system prompt never changes between turns. Reminders, state, and nags are appended to the conversation suffix. This is a KV-cache discipline: a stable prefix means the provider can cache it, which reduces latency and cost on long runs.
58
+
59
+ **Progressive connector disclosure**
60
+ Data connectors (databases, APIs, warehouses) are registered but hidden from the tool list until explicitly loaded. A shorter tool list means the model makes better routing decisions. Connectors are only visible when relevant.
61
+
62
+ **Subagent isolation**
63
+ Spawned subagents get a fresh adapter and a fresh cache. State is transferred explicitly via `input_handles`. No implicit shared state. This makes subagent behaviour reproducible and debuggable.
64
+
65
+ **Suffix-only nag reminders**
66
+ The planner escalates reminders at 4 / 8 / 12 turns without progress. These are always appended to the suffix, never inserted into the prefix, so the KV cache is never invalidated by reminder text.
67
+
68
+ **JSONL turn logging**
69
+ Every turn is logged to a `.jsonl` file from the start. Not bolted on later. Each line is a complete turn record including latency, token counts, and cache hit/miss. Reproducibility is a first-class concern.
70
+
71
+ ---
72
+
73
+ ## Install
74
+
75
+ ```bash
76
+ # requires Python 3.10+ and uv
77
+ uv sync
78
+ ```
79
+
80
+ ## Quick start
81
+
82
+ `Agent` needs a provider adapter. The adapter is the boundary between the
83
+ provider SDK and the harness: it turns Anthropic/OpenAI responses into
84
+ `dataact`'s normalised `Message`, `ToolUseBlock`, and token-count types. It is
85
+ explicit on purpose so the harness is not tied to one model provider, and tests
86
+ can swap in `FakeAdapter` without touching the loop.
87
+
88
+ For Anthropic:
89
+
90
+ ```python
91
+ from dataact import Agent
92
+ from dataact.providers.anthropic import AnthropicAdapter
93
+
94
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
95
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
96
+
97
+ result = agent.run("Compute the mean of [1, 2, 3, 4, 5] and print it.")
98
+ print(result)
99
+ ```
100
+
101
+ For OpenAI, install the optional extra and change only the adapter:
102
+
103
+ ```bash
104
+ pip install "dataact[openai]"
105
+ ```
106
+
107
+ ```python
108
+ from dataact.providers.openai import OpenAIAdapter
109
+
110
+ adapter = OpenAIAdapter(model="gpt-4o-mini")
111
+ ```
112
+
113
+ Run the minimal Anthropic example:
114
+
115
+ ```bash
116
+ uv run python examples/quickstart.py
117
+ ```
118
+
119
+ `examples/quickstart.py` requires `ANTHROPIC_API_KEY` when run as a script. Tests import `build_agent()` and drive it with `FakeAdapter`, so the example stays covered without token spend.
120
+
121
+ ## Connector example
122
+
123
+ Connector helpers keep the quick path small while preserving progressive disclosure. Connector tools start hidden; the model must call `load_connectors` before it can use them.
124
+
125
+ ```python
126
+ from dataact import Agent
127
+ from dataact.providers.anthropic import AnthropicAdapter
128
+
129
+ adapter = AnthropicAdapter(model="claude-sonnet-4-6")
130
+ agent = Agent(adapter=adapter, system="You are a data analyst.")
131
+
132
+ market_data = agent.connector(
133
+ "market_data",
134
+ description="Market data tools.",
135
+ )
136
+
137
+
138
+ def fetch_ohlcv(symbol: str) -> list[dict]:
139
+ return [{"symbol": symbol, "close": 101.2}]
140
+
141
+
142
+ market_data.tool(
143
+ fetch_ohlcv,
144
+ description="Fetch OHLCV data for a ticker.",
145
+ )
146
+
147
+ result = agent.run("Load market_data and inspect AAPL.")
148
+ print(result)
149
+ ```
150
+
151
+ ## What `Agent` composes
152
+
153
+ `Agent` is a thin composition layer over the lower-level primitives:
154
+
155
+ - A provider adapter translates model-provider SDK objects into the harness's normalised response types.
156
+ - `Harness` owns the ReAct loop, messages, dispatch, reminders, and JSONL logging.
157
+ - `SessionCache` stores large values as handles plus compact snapshots.
158
+ - `python_interpreter` is the controlled execution surface; there is no bash tool.
159
+ - `list_variables` exposes cache handles without dumping raw payloads.
160
+ - `ConnectorRegistry` keeps connector tools hidden until loaded.
161
+ - `Planner` reminders and subagents are opt-in helpers, not a second runtime.
162
+
163
+ For the explicit wiring, read [examples/advanced_wiring.py](examples/advanced_wiring.py). It deliberately shows the moving parts that `Agent` composes.
164
+
165
+ Run the advanced example — it loads a checked-in FRED unemployment-rate sample, runs the analysis, and exercises subagents and the planner (requires `ANTHROPIC_API_KEY`):
166
+
167
+ ```bash
168
+ uv run python examples/advanced_wiring.py
169
+ ```
170
+
171
+ Run tests:
172
+
173
+ ```bash
174
+ uv run pytest tests/ -v
175
+ uv run pytest tests/ -m live -v # requires provider API keys
176
+ ```
177
+
178
+ ---
179
+
180
+ ## Project structure
181
+
182
+ ```
183
+ dataact/
184
+ loop.py # Harness: the core ReAct loop
185
+ cache.py # SessionCache: handle/snapshot storage
186
+ providers/ # Normalised adapter interface (Anthropic and OpenAI)
187
+ tools/
188
+ interpreter.py # Sandboxed Python executor
189
+ connectors.py # Progressive connector registry
190
+ planner.py # Plan/nag tool
191
+ subagent.py # Isolated subagent spawning
192
+ variables.py # list_variables tool
193
+ types.py # Shared types: Message, ToolSpec, ContentBlock
194
+ logger.py # JSONL turn logging
195
+ observe.py # Latency measurement
196
+ examples/
197
+ quickstart.py # Minimal Agent path
198
+ advanced_wiring.py # Explicit Harness wiring
199
+ data/ # Small public sample data for the advanced demo
200
+ ```
201
+
202
+ ---
203
+
204
+ ## Sandbox disclaimer
205
+
206
+ The Python interpreter uses AST checks and restricted globals to reduce accidental misuse. It is not a container sandbox and should not be treated as safe for untrusted input.
207
+
208
+ ---
209
+
210
+ ## License
211
+
212
+ MIT
@@ -0,0 +1,26 @@
1
+ dataact/__init__.py,sha256=D9fE6RPw5_QvcWm0LuGYMFt7gNwM5Q-I8zPlLOxEikY,637
2
+ dataact/agent.py,sha256=ozA96jxOZRuKCMTXTbsvXAhuN0YiiSj-pzAOYFaATdI,7914
3
+ dataact/cache.py,sha256=ompG180d4M-7IScsCNT4TYsXuUrWJfl7vsSg1qAkYos,10617
4
+ dataact/exceptions.py,sha256=EP1by_xnQ7R02iNv7555qgS01faUK0yukEk7TVaL2ac,501
5
+ dataact/format.py,sha256=vdXxU59U6mSl8V2Ez2f9KW8BZyvhfbf0A5pHxJm5ztg,3038
6
+ dataact/logger.py,sha256=5b2emK8Z1M4TmirVl1gM-jStQLHygNqzjD-hbMAc3bU,2092
7
+ dataact/loop.py,sha256=gLAiFmtDhq2YqLKvr-PNHE5_A23OuOSHiaFz0t-Zohs,5458
8
+ dataact/observe.py,sha256=COUTkw4gYzyIz0wNSMGsM99fHu3QXm5TY0zWS0awLjA,642
9
+ dataact/schema.py,sha256=Hc-NSPR5zS79M3TY6QBgj4yMJrdIMgVvRUmWF8FXm5A,2833
10
+ dataact/serialize.py,sha256=31JUDwmTagBsrmdp8TD44Xlemh0fF8huZbMDsE4e7Lo,3284
11
+ dataact/testing.py,sha256=EzLqAJrlbXcZzvyjJnJEwiHNH6CIdczgw4a0EWS2jUA,2077
12
+ dataact/types.py,sha256=_jQEf4HoOiVFRpKANelRw-z5K5hh4E_bAW5DSg7rQLc,1071
13
+ dataact/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
+ dataact/providers/anthropic.py,sha256=ReOpc25r6uZRdwOc6qxX2ab8txmi2uvTp_KBtJ3Ftvs,3976
15
+ dataact/providers/base.py,sha256=NvQcx_-5436u8HbzJpyve2ULZlq3ZdfYyxZlpQGOwjk,770
16
+ dataact/providers/openai.py,sha256=wznDOOoM1rwcbWU8AV1t-NQUChwkxpU4QpDGcoNccQg,4336
17
+ dataact/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
18
+ dataact/tools/connectors.py,sha256=q7teEleaZa1abXG2ZW8XL_Wt5if7OtHLI0LfH8K70Rk,4672
19
+ dataact/tools/interpreter.py,sha256=p1fOa4VdexXbOHDuqQk16_6hk1Ggp4Ds1bwYHZgi0Cg,5683
20
+ dataact/tools/planner.py,sha256=YIJsrNlCpbf0Pc7jAeLKaMuS1miOgCgVffYV8FZuIQc,3611
21
+ dataact/tools/subagent.py,sha256=wHZOwLwDaD9yLkNLvn20DxFrEREc2b40I_3Sb_iIrgI,7745
22
+ dataact/tools/variables.py,sha256=o46VupCROipKE9SSJK9ECEzJYZqEBa-qCK8WLfSD8ik,813
23
+ dataact-0.1.0.dist-info/METADATA,sha256=wbrOkRmnxaEcDAFztwTCGpwgjfExNEwJL-7iSXVO3rk,8076
24
+ dataact-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
25
+ dataact-0.1.0.dist-info/licenses/LICENSE,sha256=PC5ladx3ylrBqW0euBuh1mBL9D5xXdw03MpFWq0KjmM,1065
26
+ dataact-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.29.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Max Khor
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.