empathy-framework 5.0.3__py3-none-any.whl → 5.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/METADATA +259 -142
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/RECORD +56 -26
- empathy_framework-5.1.0.dist-info/licenses/LICENSE +201 -0
- empathy_framework-5.1.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- empathy_os/__init__.py +1 -1
- empathy_os/cli/commands/batch.py +5 -5
- empathy_os/cli/commands/routing.py +1 -1
- empathy_os/cli/commands/workflow.py +2 -1
- empathy_os/cli/parsers/cache 2.py +65 -0
- empathy_os/cli_minimal.py +3 -3
- empathy_os/cli_router 2.py +416 -0
- empathy_os/dashboard/__init__.py +1 -2
- empathy_os/dashboard/app 2.py +512 -0
- empathy_os/dashboard/app.py +1 -1
- empathy_os/dashboard/simple_server 2.py +403 -0
- empathy_os/dashboard/standalone_server 2.py +536 -0
- empathy_os/memory/types 2.py +441 -0
- empathy_os/models/__init__.py +19 -0
- empathy_os/models/adaptive_routing 2.py +437 -0
- empathy_os/models/auth_cli.py +444 -0
- empathy_os/models/auth_strategy.py +450 -0
- empathy_os/project_index/scanner_parallel 2.py +291 -0
- empathy_os/telemetry/agent_coordination 2.py +478 -0
- empathy_os/telemetry/agent_coordination.py +3 -3
- empathy_os/telemetry/agent_tracking 2.py +350 -0
- empathy_os/telemetry/agent_tracking.py +1 -2
- empathy_os/telemetry/approval_gates 2.py +563 -0
- empathy_os/telemetry/event_streaming 2.py +405 -0
- empathy_os/telemetry/event_streaming.py +3 -3
- empathy_os/telemetry/feedback_loop 2.py +557 -0
- empathy_os/telemetry/feedback_loop.py +1 -1
- empathy_os/vscode_bridge 2.py +173 -0
- empathy_os/workflows/__init__.py +8 -0
- empathy_os/workflows/autonomous_test_gen.py +569 -0
- empathy_os/workflows/bug_predict.py +45 -0
- empathy_os/workflows/code_review.py +92 -22
- empathy_os/workflows/document_gen.py +594 -62
- empathy_os/workflows/llm_base.py +363 -0
- empathy_os/workflows/perf_audit.py +69 -0
- empathy_os/workflows/progressive/README 2.md +454 -0
- empathy_os/workflows/progressive/__init__ 2.py +92 -0
- empathy_os/workflows/progressive/cli 2.py +242 -0
- empathy_os/workflows/progressive/core 2.py +488 -0
- empathy_os/workflows/progressive/orchestrator 2.py +701 -0
- empathy_os/workflows/progressive/reports 2.py +528 -0
- empathy_os/workflows/progressive/telemetry 2.py +280 -0
- empathy_os/workflows/progressive/test_gen 2.py +514 -0
- empathy_os/workflows/progressive/workflow 2.py +628 -0
- empathy_os/workflows/release_prep.py +54 -0
- empathy_os/workflows/security_audit.py +154 -79
- empathy_os/workflows/test_gen.py +60 -0
- empathy_os/workflows/test_gen_behavioral.py +477 -0
- empathy_os/workflows/test_gen_parallel.py +341 -0
- empathy_framework-5.0.3.dist-info/licenses/LICENSE +0 -139
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
"""Parallel Behavioral Test Generation & Completion Workflow.
|
|
2
|
+
|
|
3
|
+
Uses multi-tier LLM orchestration to generate AND complete tests in parallel.
|
|
4
|
+
This dramatically accelerates achieving 99.9% test coverage.
|
|
5
|
+
|
|
6
|
+
Key Features:
|
|
7
|
+
- Parallel template generation (cheap tier - fast)
|
|
8
|
+
- Parallel test completion (capable tier - quality)
|
|
9
|
+
- Batch processing of 10-50 modules simultaneously
|
|
10
|
+
- Automatic validation and fixing
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
empathy workflow run test-gen-parallel --top 200 --parallel 10
|
|
14
|
+
|
|
15
|
+
Copyright 2026 Smart-AI-Memory
|
|
16
|
+
Licensed under Apache 2.0
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
import ast
|
|
20
|
+
import asyncio
|
|
21
|
+
import json
|
|
22
|
+
from dataclasses import dataclass
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
from ..workflows.base import BaseWorkflow, ModelTier, WorkflowResult, WorkflowStage
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class TestGenerationTask:
    """A test generation task."""

    # Source module the generated tests will target.
    module_path: str
    # Current coverage percentage for the module at discovery time.
    coverage: float
    # Destination file path for the generated test module.
    output_path: str
    # Lifecycle marker. NOTE(review): process_module_batch only ever sets
    # "completed" or "error"; "generated"/"validated" look planned but unused.
    status: str = "pending"  # pending, generated, completed, validated
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class ParallelTestGenerationWorkflow(BaseWorkflow):
|
|
40
|
+
"""Generate and complete behavioral tests in parallel using multi-tier LLMs."""
|
|
41
|
+
|
|
42
|
+
def __init__(self):
    """Configure the workflow's four stages and their model-tier hints.

    Discovery, template generation, and validation are hinted to the
    cheap tier; only the test-completion stage uses the capable tier,
    which is where the quality of the output matters most.
    """
    super().__init__(
        name="parallel-test-generation",
        description="Generate behavioral tests in parallel with AI completion",
        stages={
            # Stage 1: rank modules by coverage to decide what to test first.
            "discover": WorkflowStage(
                name="discover",
                description="Find modules needing tests",
                tier_hint=ModelTier.CHEAP,
                system_prompt="Analyze coverage and prioritize modules for testing",
                task_type="analysis",
            ),
            # Stage 2: cheap tier emits skeletons with TODO markers only.
            "generate_templates": WorkflowStage(
                name="generate_templates",
                description="Generate test templates in parallel",
                tier_hint=ModelTier.CHEAP,
                system_prompt="Generate behavioral test template structure",
                task_type="code_generation",
            ),
            # Stage 3: capable tier fills in the TODOs — the expensive step.
            "complete_tests": WorkflowStage(
                name="complete_tests",
                description="Complete test implementation with AI",
                tier_hint=ModelTier.CAPABLE,
                system_prompt="""Complete the behavioral test implementation.

You are given a test template with TODO markers. Your task:

1. Analyze the module being tested
2. Create realistic test data
3. Add proper assertions
4. Test both success AND error paths
5. Use mocks/patches where needed
6. Follow pytest best practices

Generate complete, runnable tests that will increase coverage.""",
                task_type="code_generation",
            ),
            # Stage 4: sanity-check generated files.
            # NOTE(review): declared here but execute() never runs it —
            # its stages_executed list stops at "complete_tests".
            "validate": WorkflowStage(
                name="validate",
                description="Validate generated tests run correctly",
                tier_hint=ModelTier.CHEAP,
                system_prompt="Check if tests are valid Python and follow pytest conventions",
                task_type="validation",
            ),
        },
    )
|
|
88
|
+
|
|
89
|
+
def discover_low_coverage_modules(self, top_n: int = 200) -> list[tuple[str, float]]:
    """Find the ``top_n`` modules under ``src/`` with the lowest coverage.

    Exports the current coverage database via ``coverage json`` and ranks
    files by ascending coverage percentage, breaking ties in favor of
    larger files, so big poorly-covered modules are processed first.

    Args:
        top_n: Maximum number of modules to return.

    Returns:
        List of ``(file_path, coverage_percent)`` tuples; an empty list
        when coverage data is unavailable (deliberate best-effort).
    """
    try:
        import subprocess
        import tempfile

        # Write the report into a private temporary directory rather than
        # a fixed, world-writable /tmp path: avoids collisions between
        # concurrent runs and symlink attacks on shared machines.
        with tempfile.TemporaryDirectory() as tmp_dir:
            report_path = Path(tmp_dir) / "coverage_batch.json"
            subprocess.run(
                ["coverage", "json", "-o", str(report_path)],
                capture_output=True,
                check=True,
            )

            # coverage's JSON report is UTF-8; be explicit so the parse
            # does not depend on the locale's default encoding.
            with open(report_path, encoding="utf-8") as f:
                data = json.load(f)

        coverage_by_file = []
        for file_path, info in data.get("files", {}).items():
            if not file_path.startswith("src/"):
                continue
            summary = info["summary"]
            coverage_pct = summary["percent_covered"]
            total_lines = summary["num_statements"]

            # Skip very small files — not worth an LLM call.
            if total_lines > 30:
                coverage_by_file.append((file_path, coverage_pct, total_lines))

        # Sort by coverage (lowest first), then by size (largest first).
        sorted_modules = sorted(coverage_by_file, key=lambda x: (x[1], -x[2]))
        return [(path, cov) for path, cov, _ in sorted_modules[:top_n]]

    except Exception as e:
        # Best-effort by design: callers treat [] as "no coverage data".
        self.logger.warning(f"Could not get coverage data: {e}")
        return []
|
|
122
|
+
|
|
123
|
+
def analyze_module_structure(self, file_path: str) -> dict[str, Any]:
    """Statically extract a module's classes and top-level functions.

    Fast and synchronous: the file is parsed with :mod:`ast`, never
    imported, so it is safe to run on modules with import-time side
    effects.

    Args:
        file_path: Path to the Python source file to analyze.

    Returns:
        ``{"file", "classes", "functions"}`` on success — each class
        entry carries its method names and line number, each function
        entry its name and line number; on any failure (missing file,
        syntax error, bad encoding) a ``{"file", "error"}`` dict instead.
    """
    try:
        # Python source is overwhelmingly UTF-8; be explicit rather than
        # relying on the locale default.
        source = Path(file_path).read_text(encoding="utf-8")
        tree = ast.parse(source)

        classes: list[dict[str, Any]] = []
        functions: list[dict[str, Any]] = []

        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                methods = [
                    n.name
                    for n in node.body
                    if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
                ]
                classes.append(
                    {"name": node.name, "methods": methods, "line": node.lineno}
                )
            elif (
                isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
                and node.col_offset == 0
            ):
                # col_offset == 0 restricts this to module-level defs.
                # AsyncFunctionDef is checked explicitly: it is NOT a
                # subclass of FunctionDef, so async functions were
                # previously missing from the report.
                functions.append({"name": node.name, "line": node.lineno})

        return {"file": file_path, "classes": classes, "functions": functions}

    except Exception as e:
        # Best-effort contract: callers inspect the "error" key.
        return {"file": file_path, "error": str(e)}
|
|
149
|
+
|
|
150
|
+
async def generate_test_template_with_ai(
    self, module_path: str, structure: dict[str, Any]
) -> str:
    """Ask the cheap-tier model for a pytest template with TODO markers.

    Args:
        module_path: Path of the module the template will test.
        structure: AST summary from ``analyze_module_structure``.

    Returns:
        The raw model output (expected to be Python code), or ``""``
        when the response carries no content.
    """
    structure_json = json.dumps(structure, indent=2)
    prompt = f"""Generate a behavioral test template for this module:

File: {module_path}
Structure: {structure_json}

Generate a pytest test file with:
1. Proper imports
2. Test classes for each class in the module
3. Test methods for key functionality
4. TODO markers where test logic is needed
5. Proper async/await for async methods

Output ONLY the Python code, no explanations."""

    response = await self._call_llm(
        tier=ModelTier.CHEAP,
        user_prompt=prompt,
        context={"module": module_path, "structure": structure},
    )
    return response.get("content", "")
|
|
175
|
+
|
|
176
|
+
async def complete_test_with_ai(self, template: str, module_path: str) -> str:
    """Fill in a test template's TODOs using the capable-tier model.

    The prompt includes the (truncated) module source so the model can
    write assertions against real behavior rather than guessing.

    Args:
        template: Test skeleton produced by the template stage.
        module_path: Path of the module under test; its source is read
            and embedded in the prompt.

    Returns:
        The raw model output (expected to be a complete test file), or
        ``""`` when the response carries no content.

    Raises:
        OSError: If the module source cannot be read.
    """
    # Explicit UTF-8: Python source defaults to UTF-8, while read_text()
    # without an encoding uses the locale default and can fail or
    # mis-decode on non-UTF-8 locales.
    source_code = Path(module_path).read_text(encoding="utf-8")

    # Truncate the source to keep the prompt within context limits.
    prompt = f"""Complete this behavioral test implementation.

MODULE SOURCE CODE:
```python
{source_code[:5000]} # First 5000 chars
```

TEST TEMPLATE:
```python
{template}
```

Complete ALL TODOs with:
1. Realistic test data
2. Proper mocking where needed
3. Comprehensive assertions
4. Both success and error cases

Output the COMPLETE test file, no TODOs remaining."""

    result = await self._call_llm(
        tier=ModelTier.CAPABLE,
        user_prompt=prompt,
        context={"template": template, "module": module_path},
    )

    return result.get("content", "")
|
|
207
|
+
|
|
208
|
+
async def process_module_batch(
    self, modules: list[tuple[str, float]], output_dir: Path, batch_size: int = 10
) -> list[TestGenerationTask]:
    """Generate and complete tests for ``modules`` in parallel batches.

    For each batch: templates are generated concurrently on the cheap
    tier, then completed concurrently on the capable tier, and the
    resulting code is written to ``output_dir``.

    Args:
        modules: ``(module_path, coverage)`` pairs to process.
        output_dir: Directory receiving ``test_<stem>_behavioral.py`` files.
        batch_size: Number of modules processed concurrently per batch.

    Returns:
        All tasks, each with status "completed" or "error"
        (tasks whose template or completion raised are marked "error").
    """
    tasks = []

    # Create one task per module up front.
    for module_path, coverage in modules:
        test_filename = f"test_{Path(module_path).stem}_behavioral.py"
        output_path = output_dir / test_filename

        tasks.append(
            TestGenerationTask(
                module_path=module_path,
                coverage=coverage,
                output_path=str(output_path),
            )
        )

    # Process in batches to bound concurrent LLM calls.
    for i in range(0, len(tasks), batch_size):
        batch = tasks[i : i + batch_size]

        # Phase 1: generate templates in parallel (cheap tier).
        template_coros = []
        for task in batch:
            structure = self.analyze_module_structure(task.module_path)
            coro = self.generate_test_template_with_ai(task.module_path, structure)
            template_coros.append(coro)

        templates = await asyncio.gather(*template_coros, return_exceptions=True)

        # Phase 2: complete tests in parallel (capable tier).
        # BUG FIX: previously, when a template raised, its completion
        # coroutine was skipped but results were later re-zipped against
        # the FULL batch positionally — pairing completed tests with the
        # wrong tasks and writing them to the wrong output paths. Track
        # the surviving tasks explicitly so results stay aligned.
        pending_tasks: list[TestGenerationTask] = []
        completion_coros = []
        for task, template in zip(batch, templates, strict=False):
            if isinstance(template, Exception):
                task.status = "error"
                continue

            pending_tasks.append(task)
            completion_coros.append(
                self.complete_test_with_ai(template, task.module_path)
            )

        completed = await asyncio.gather(*completion_coros, return_exceptions=True)

        # Phase 3: save results; pending_tasks and completed are 1:1.
        for task, completed_test in zip(pending_tasks, completed, strict=True):
            if isinstance(completed_test, Exception):
                task.status = "error"
                continue

            # Strip markdown fences if the model wrapped the code.
            code = self._extract_code(completed_test)

            Path(task.output_path).write_text(code)
            task.status = "completed"

            self.logger.info(f"✅ Generated: {task.output_path}")

    return tasks
|
|
268
|
+
|
|
269
|
+
def _extract_code(self, content: str) -> str:
|
|
270
|
+
"""Extract Python code from markdown code blocks if present."""
|
|
271
|
+
if "```python" in content:
|
|
272
|
+
parts = content.split("```python")
|
|
273
|
+
if len(parts) > 1:
|
|
274
|
+
code = parts[1].split("```")[0]
|
|
275
|
+
return code.strip()
|
|
276
|
+
|
|
277
|
+
# If no markdown, assume it's already code
|
|
278
|
+
return content
|
|
279
|
+
|
|
280
|
+
async def execute(
    self,
    top: int = 200,
    batch_size: int = 10,
    output_dir: str = "tests/behavioral/generated",
    **kwargs,
) -> WorkflowResult:
    """Run the full parallel test-generation pipeline.

    Args:
        top: Number of modules to process.
        batch_size: Number of modules to process in parallel (10-50 recommended).
        output_dir: Where to save generated tests.

    Returns:
        WorkflowResult with generated file paths and statistics.
    """
    out_dir = Path(output_dir)
    out_dir.mkdir(exist_ok=True, parents=True)

    # Stage 1: rank candidate modules by coverage.
    self.logger.info(f"🔍 Discovering top {top} modules with lowest coverage...")
    modules = self.discover_low_coverage_modules(top_n=top)

    if not modules:
        # Nothing to prioritize without coverage data — bail out early.
        return WorkflowResult(
            workflow_name=self.name,
            stages_executed=["discover"],
            final_output={"error": "No coverage data found. Run pytest with coverage first."},
            cost_report=self._generate_cost_report(),
        )

    self.logger.info(f"📋 Found {len(modules)} modules to process")

    # Stages 2 & 3: template generation + AI completion, batched.
    self.logger.info(f"⚡ Processing in batches of {batch_size}...")
    tasks = await self.process_module_batch(modules, out_dir, batch_size=batch_size)

    # Partition the outcomes for reporting.
    succeeded = [t for t in tasks if t.status == "completed"]
    failed = [t for t in tasks if t.status == "error"]

    result_data = {
        "total_modules": len(modules),
        "completed": len(succeeded),
        "errors": len(failed),
        "output_dir": str(out_dir),
        "generated_files": [t.output_path for t in succeeded],
    }

    banner = "=" * 80
    self.logger.info(f"\n{banner}")
    self.logger.info(f"✅ COMPLETED: {len(succeeded)} test files")
    self.logger.info(f"❌ ERRORS: {len(failed)} modules")
    self.logger.info(f"📁 Location: {out_dir}")
    self.logger.info(f"{banner}\n")

    return WorkflowResult(
        workflow_name=self.name,
        stages_executed=["discover", "generate_templates", "complete_tests"],
        final_output=result_data,
        cost_report=self._generate_cost_report(),
    )
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
# Export
|
|
341
|
+
__all__ = ["ParallelTestGenerationWorkflow"]
|
|
@@ -1,139 +0,0 @@
|
|
|
1
|
-
# Fair Source License, version 0.9
|
|
2
|
-
|
|
3
|
-
**Copyright © 2025 Deep Study AI, LLC**
|
|
4
|
-
|
|
5
|
-
## Grant of Rights
|
|
6
|
-
|
|
7
|
-
**Licensor:** Deep Study AI, LLC
|
|
8
|
-
**Licensed Work:** Empathy
|
|
9
|
-
**Change Date:** January 1, 2029 (4 years from first release)
|
|
10
|
-
**Change License:** Apache License 2.0
|
|
11
|
-
|
|
12
|
-
---
|
|
13
|
-
|
|
14
|
-
## Terms
|
|
15
|
-
|
|
16
|
-
### Grant of Use
|
|
17
|
-
|
|
18
|
-
Subject to the conditions below, Licensor grants you a non-exclusive, worldwide, royalty-free license to:
|
|
19
|
-
|
|
20
|
-
- Use the Licensed Work
|
|
21
|
-
- Modify the Licensed Work
|
|
22
|
-
- Create derivative works
|
|
23
|
-
- Distribute copies (subject to restrictions)
|
|
24
|
-
|
|
25
|
-
### Usage Limits - Free Tier
|
|
26
|
-
|
|
27
|
-
You may use the Licensed Work **free of charge** if you meet ANY of these conditions:
|
|
28
|
-
|
|
29
|
-
1. **Educational Use:** You are a student or educator using the Licensed Work for educational purposes
|
|
30
|
-
2. **Small Business:** Your organization has **5 or fewer total employees**
|
|
31
|
-
3. **Personal/Research:** You are using the Licensed Work for personal projects or academic research
|
|
32
|
-
4. **Evaluation:** You are evaluating the Licensed Work for up to 30 days
|
|
33
|
-
|
|
34
|
-
### Usage Limits - Commercial License Required
|
|
35
|
-
|
|
36
|
-
A **Commercial License is REQUIRED** if:
|
|
37
|
-
|
|
38
|
-
1. Your organization has **6 or more employees**, AND
|
|
39
|
-
2. You are using the Licensed Work in a production environment, OR
|
|
40
|
-
3. You are using the Licensed Work to provide services to third parties
|
|
41
|
-
|
|
42
|
-
**Commercial License:** $99 USD per developer per year
|
|
43
|
-
|
|
44
|
-
- "Developer" means any employee, contractor, or agent who uses, modifies, or deploys the Licensed Work
|
|
45
|
-
- One license covers all environments (development, staging, production, CI/CD)
|
|
46
|
-
- License includes updates and support
|
|
47
|
-
- Purchase at: https://smartaimemory.com/empathy-framework/pricing
|
|
48
|
-
|
|
49
|
-
### Restrictions
|
|
50
|
-
|
|
51
|
-
You may NOT:
|
|
52
|
-
|
|
53
|
-
1. **Remove or modify** licensing, copyright notices, or attribution
|
|
54
|
-
2. **Circumvent** the usage limits or commercial license requirements
|
|
55
|
-
3. **Offer as a managed service** without a separate reseller agreement
|
|
56
|
-
4. **Sublicense, sell, or rent** the Licensed Work to third parties
|
|
57
|
-
5. **Use the Licensed Work** in violation of applicable laws
|
|
58
|
-
|
|
59
|
-
### Source Code Availability
|
|
60
|
-
|
|
61
|
-
The source code for the Licensed Work is available at:
|
|
62
|
-
https://github.com/Smart-AI-Memory/empathy
|
|
63
|
-
|
|
64
|
-
You may view, inspect, and audit the source code for:
|
|
65
|
-
- Security review
|
|
66
|
-
- Compliance verification
|
|
67
|
-
- Understanding implementation
|
|
68
|
-
- Creating derivative works (subject to this license)
|
|
69
|
-
|
|
70
|
-
### Attribution
|
|
71
|
-
|
|
72
|
-
If you distribute the Licensed Work or derivative works, you must:
|
|
73
|
-
|
|
74
|
-
1. Include this license file
|
|
75
|
-
2. Provide attribution to "Deep Study AI, LLC - Empathy"
|
|
76
|
-
3. Include a link to https://github.com/Smart-AI-Memory/empathy
|
|
77
|
-
|
|
78
|
-
### Warranty Disclaimer
|
|
79
|
-
|
|
80
|
-
THE LICENSED WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT.
|
|
81
|
-
|
|
82
|
-
### Liability Limitation
|
|
83
|
-
|
|
84
|
-
IN NO EVENT SHALL LICENSOR BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE LICENSED WORK.
|
|
85
|
-
|
|
86
|
-
### Change Date Conversion
|
|
87
|
-
|
|
88
|
-
On the Change Date (January 1, 2029), this license automatically converts to the Change License (Apache License 2.0), and all restrictions in this Fair Source License no longer apply.
|
|
89
|
-
|
|
90
|
-
**Rationale:** After 4 years, the Licensed Work becomes fully open source, allowing maximum community benefit while protecting Licensor's commercial interests during the critical growth period.
|
|
91
|
-
|
|
92
|
-
### Verification Rights
|
|
93
|
-
|
|
94
|
-
Licensor reserves the right to:
|
|
95
|
-
|
|
96
|
-
1. Request verification of compliance with usage limits
|
|
97
|
-
2. Audit use of the Licensed Work with reasonable notice
|
|
98
|
-
3. Terminate licenses for violations after 30-day cure period
|
|
99
|
-
|
|
100
|
-
### Commercial License Purchase
|
|
101
|
-
|
|
102
|
-
To purchase a Commercial License:
|
|
103
|
-
|
|
104
|
-
1. Visit: https://smartaimemory.com/empathy-framework/pricing
|
|
105
|
-
2. Email: admin@smartaimemory.com
|
|
106
|
-
3. Complete order form and payment
|
|
107
|
-
4. Receive license key and invoice
|
|
108
|
-
|
|
109
|
-
Volume discounts available for teams of 20+ developers.
|
|
110
|
-
|
|
111
|
-
### Definitions
|
|
112
|
-
|
|
113
|
-
- **Employee:** Any W-2 employee, 1099 contractor working >20 hours/week, or intern
|
|
114
|
-
- **Production Environment:** Any environment serving end users or customers
|
|
115
|
-
- **Developer:** Any person who uses, modifies, or deploys the Licensed Work
|
|
116
|
-
- **Organization:** The legal entity employing you, or yourself if self-employed
|
|
117
|
-
|
|
118
|
-
### Questions?
|
|
119
|
-
|
|
120
|
-
For licensing questions, contact: licensing@smartaimemory.com
|
|
121
|
-
|
|
122
|
-
---
|
|
123
|
-
|
|
124
|
-
## Why Fair Source?
|
|
125
|
-
|
|
126
|
-
This license balances:
|
|
127
|
-
|
|
128
|
-
✅ **Free for small teams** - Students, educators, and small businesses (≤5 employees) use free forever
|
|
129
|
-
✅ **Source code visibility** - Review code for security, compliance, learning
|
|
130
|
-
✅ **Commercial sustainability** - Larger organizations pay to fund development
|
|
131
|
-
✅ **Future open source** - Automatically becomes Apache 2.0 in 4 years
|
|
132
|
-
|
|
133
|
-
We believe software should be inspectable and accessible while ensuring sustainable development.
|
|
134
|
-
|
|
135
|
-
---
|
|
136
|
-
|
|
137
|
-
**Version:** 0.9
|
|
138
|
-
**Last Updated:** November 7, 2025
|
|
139
|
-
**Effective Date:** January 1, 2025
|
|
File without changes
|
|
File without changes
|
|
File without changes
|