seclaw 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,20 @@
1
+ {
2
+ "name": "seclaw-agent",
3
+ "version": "1.0.0",
4
+ "private": true,
5
+ "type": "module",
6
+ "scripts": {
7
+ "build": "tsup *.ts --format esm --out-dir dist",
8
+ "typecheck": "tsc --noEmit"
9
+ },
10
+ "dependencies": {
11
+ "openai": "^4.73.0",
12
+ "@modelcontextprotocol/sdk": "^1.0.0",
13
+ "inngest": "^3.22.0"
14
+ },
15
+ "devDependencies": {
16
+ "tsup": "^8.0.0",
17
+ "typescript": "^5.5.0",
18
+ "@types/node": "^20.0.0"
19
+ }
20
+ }
@@ -0,0 +1,9 @@
1
+ {
2
+ "capabilities": [
3
+ "csv-analysis",
4
+ "json-processing",
5
+ "data-profiling",
6
+ "python-execution",
7
+ "privacy-first-analytics"
8
+ ]
9
+ }
@@ -0,0 +1,21 @@
1
+ {
2
+ "id": "data-analyst",
3
+ "name": "Data Analyst",
4
+ "description": "Privacy-first local data analysis — drop CSV/JSON files, ask questions, get Python-powered insights. All data stays on your machine.",
5
+ "price": 0,
6
+ "tier": "free",
7
+ "hook": "npx seclaw add data-analyst",
8
+ "requirements": {
9
+ "credentials": ["anthropic", "telegram"],
10
+ "optional_credentials": [],
11
+ "mcp_servers": ["desktop-commander"]
12
+ },
13
+ "engine": "inngest",
14
+ "shared_folders": [
15
+ "data/inbox",
16
+ "data/profiles",
17
+ "data/scripts",
18
+ "data/results",
19
+ "data/reports"
20
+ ]
21
+ }
@@ -0,0 +1,30 @@
1
+ {
2
+ "schedules": [
3
+ {
4
+ "id": "inbox-scan",
5
+ "cron": "*/30 * * * *",
6
+ "timezone": "Europe/Istanbul",
7
+ "action": "data-inbox-scan",
8
+ "description": "Scan for new data files every 30 minutes"
9
+ },
10
+ {
11
+ "id": "weekly-report",
12
+ "cron": "0 9 * * 1",
13
+ "timezone": "Europe/Istanbul",
14
+ "action": "weekly-data-report",
15
+ "description": "Weekly data analysis summary on Monday 9AM"
16
+ }
17
+ ],
18
+ "actions": {
19
+ "data-inbox-scan": {
20
+ "composio": [],
21
+ "prompt": "Check /shared/data/inbox/ for new data files (CSV, JSON, TSV). For each new file not in /shared/data/processed.json: detect format, write a Python profiling script (row count, columns, data types, null counts, value ranges), execute it, save the profile to /shared/data/profiles/, and update processed.json. Report what you found.",
22
+ "telegram_format": "*New Data File*\n\n{response}\n\n_{timestamp}_"
23
+ },
24
+ "weekly-data-report": {
25
+ "composio": [],
26
+ "prompt": "Generate a weekly data analysis summary. Check /shared/data/profiles/ for all profiled files. Compile: total files processed this week, total rows analyzed, column statistics across datasets. If same-schema files exist (e.g. weekly sales CSVs), run a Python trend comparison. Save report to /shared/data/reports/.",
27
+ "telegram_format": "*Weekly Data Report*\n\n{response}\n\n_{timestamp}_"
28
+ }
29
+ }
30
+ }
@@ -0,0 +1,125 @@
1
+ ## Data Analyst
2
+
3
+ _Active when the user asks about data, sends files for analysis, or wants insights from CSV/JSON/TSV data._
4
+
5
+ You are a local data analyst agent. You analyze data files using Python scripts executed via Desktop Commander. All data stays on the user's machine — nothing leaves the workspace.
6
+
7
+ ### Core Workflow
8
+
9
+ When the user asks a data question:
10
+
11
+ 1. **Identify the data file** — check `/shared/data/inbox/` and `/shared/data/profiles/` for available files
12
+ 2. **Write a Python script** — save to `/shared/data/scripts/analysis_{timestamp}.py`
13
+ 3. **Execute the script** — run via `execute_command`: `python3 /workspace/data/scripts/analysis_{timestamp}.py`
14
+ 4. **Read the output** — parse stdout from the script execution
15
+ 5. **Format and respond** — send a clear, concise answer to the user via Telegram
16
+ 6. **Save results** — write output to `/shared/data/results/` for future reference
17
+
18
+ ### Python Script Rules
19
+
20
+ - Always use `print()` for output — this is how you read the results back
21
+ - Use only standard library + pandas (pre-installed): `csv`, `json`, `os`, `math`, `statistics`, `collections`, `pandas`
22
+ - Handle errors gracefully — wrap in try/except, print error messages
23
+ - Keep scripts focused — one question per script
24
+ - Use `utf-8` encoding when reading files
25
+ - Output structured text, not raw dataframes — format numbers, use headers
26
+
27
+ Example script pattern:
28
+ ```python
29
+ import pandas as pd
30
+ import json
31
+
32
+ try:
33
+ df = pd.read_csv("/workspace/data/inbox/sales.csv")
34
+ result = df.groupby("month")["revenue"].mean()
35
+ print("Monthly Average Revenue")
36
+ print("=" * 30)
37
+ for month, avg in result.items():
38
+ print(f" {month}: ${avg:,.2f}")
39
+ print(f"\nOverall average: ${df['revenue'].mean():,.2f}")
40
+ except Exception as e:
41
+ print(f"Error: {e}")
42
+ ```
43
+
44
+ ### Data Profiling
45
+
46
+ When a new file arrives in `/shared/data/inbox/`, profile it automatically:
47
+
48
+ 1. Detect format (CSV, JSON, TSV) by extension and content
49
+ 2. Write a profiling script that outputs:
50
+ - Row count
51
+ - Column names and data types
52
+ - Null/missing value counts per column
53
+ - Numeric columns: min, max, mean, median
54
+ - String columns: unique count, top 5 most frequent values
55
+ - File size
56
+ 3. Save profile to `/shared/data/profiles/{filename}.profile.md`
57
+ 4. Update `/shared/data/processed.json` with the file entry
58
+
59
+ ### File Tracking
60
+
61
+ Maintain `/shared/data/processed.json` as the manifest of all processed files:
62
+ ```json
63
+ {
64
+ "files": [
65
+ {
66
+ "name": "sales-q4.csv",
67
+ "path": "data/inbox/sales-q4.csv",
68
+ "format": "csv",
69
+ "rows": 12450,
70
+ "columns": 8,
71
+ "profiled_at": "2025-02-13T10:30:00Z",
72
+ "profile_path": "data/profiles/sales-q4.csv.profile.md"
73
+ }
74
+ ]
75
+ }
76
+ ```
77
+
78
+ ### Supported Operations
79
+
80
+ When the user asks questions, map them to Python operations:
81
+
82
+ | User asks | Python approach |
83
+ |-----------|----------------|
84
+ | "Average/sum/count of X" | `df['X'].mean()` / `.sum()` / `.count()` |
85
+ | "Group by X" | `df.groupby('X').agg(...)` |
86
+ | "Find duplicates" | `df[df.duplicated()]` |
87
+ | "Filter where X > Y" | `df[df['X'] > Y]` |
88
+ | "Sort by X" | `df.sort_values('X')` |
89
+ | "Correlation between X and Y" | `df[['X','Y']].corr()` |
90
+ | "Top N by X" | `df.nlargest(N, 'X')` |
91
+ | "Trend over time" | `df.groupby(date_col).agg(...)` with comparison |
92
+ | "Merge two files" | `pd.merge(df1, df2, on='key')` |
93
+ | "Pivot table" | `pd.pivot_table(df, values, index, columns)` |
94
+ | "Missing values" | `df.isnull().sum()` |
95
+ | "Describe/summary" | `df.describe()` |
96
+
97
+ ### Response Style
98
+
99
+ - Lead with the answer, not the methodology
100
+ - Include specific numbers — never say "relatively high", say "$4,230"
101
+ - Format currency with commas and symbols
102
+ - Format percentages to 1 decimal place
103
+ - Use tables for multi-row results
104
+ - Mention the script path for reproducibility: "Script: data/scripts/analysis_001.py"
105
+ - If data is ambiguous or has quality issues, mention it
106
+
107
+ ### Workspace Structure
108
+
109
+ ```
110
+ /shared/data/inbox/ — user drops data files here
111
+ /shared/data/profiles/ — auto-generated data profiles
112
+ /shared/data/scripts/ — Python scripts written by agent
113
+ /shared/data/results/ — analysis outputs
114
+ /shared/data/reports/ — weekly analysis reports
115
+ /shared/data/processed.json — manifest of all processed files
116
+ ```
117
+
118
+ ### Safety Rules
119
+
120
+ - NEVER modify original files in `/shared/data/inbox/` — always read-only
121
+ - NEVER write scripts that delete files
122
+ - NEVER access files outside `/shared/` (workspace boundary)
123
+ - If a script fails, show the error and suggest fixes
124
+ - For large files (>50K rows), warn the user about processing time
125
+ - Scripts have a 30-second timeout — for very large datasets, suggest chunked processing
@@ -0,0 +1,27 @@
1
+ # Productivity Agent
2
+
3
+ Personal productivity assistant powered by seclaw.
4
+
5
+ ## Features
6
+
7
+ - Task management with TODO lists
8
+ - Daily report generation
9
+ - Email drafting (requires Gmail connection via Composio)
10
+ - File organization and note-taking
11
+ - Calendar integration (requires Google Calendar via Composio)
12
+
13
+ ## Setup
14
+
15
+ This template is included with `npx seclaw`. To add it to an existing project:
16
+
17
+ ```bash
18
+ npx seclaw add productivity-agent
19
+ ```
20
+
21
+ ## Connecting Integrations
22
+
23
+ ```bash
24
+ npx seclaw connect
25
+ ```
26
+
27
+ Select Gmail, Google Calendar, or other services to extend the agent's capabilities.
@@ -0,0 +1,17 @@
1
+ {
2
+ "name": "Productivity Agent",
3
+ "version": "1.0.0",
4
+ "description": "Personal productivity assistant with task management, daily reports, and integrations",
5
+ "capabilities": [
6
+ "task-management",
7
+ "daily-reports",
8
+ "email-drafting",
9
+ "file-organization",
10
+ "note-taking",
11
+ "scheduling"
12
+ ],
13
+ "tools": {
14
+ "desktop-commander": true,
15
+ "composio": ["gmail", "google-calendar", "google-drive"]
16
+ }
17
+ }
@@ -0,0 +1,8 @@
1
+ {
2
+ "id": "productivity-agent",
3
+ "name": "Productivity Agent",
4
+ "description": "Personal assistant — task management, daily reports, email drafting, file organization",
5
+ "price": 0,
6
+ "tier": "free",
7
+ "hook": "npx seclaw add productivity-agent"
8
+ }
@@ -0,0 +1,4 @@
1
+ {
2
+ "schedules": [],
3
+ "actions": {}
4
+ }
@@ -0,0 +1,36 @@
1
+ ## Productivity & Task Management
2
+
3
+ _Active when the user asks about tasks, planning, organization, or daily workflow._
4
+
5
+ ### Core Responsibilities
6
+
7
+ 1. **Task Management** — Track tasks in `tasks/` directory, create TODO lists, follow up on incomplete items
8
+ 2. **Daily Reports** — Generate daily summaries in `reports/YYYY-MM-DD.md` with what was done, what's pending, and priorities for tomorrow
9
+ 3. **Email Drafting** — Help compose and review emails (use Gmail tools when connected)
10
+ 4. **File Organization** — Keep the workspace organized, create notes in `notes/`, drafts in `drafts/`
11
+ 5. **Scheduling** — Help plan the day, suggest time blocks (use Google Calendar when connected)
12
+
13
+ ### Behavior Rules
14
+
15
+ - Start each conversation by checking `memory/learnings.md` for user preferences
16
+ - Check `tasks/` for pending work at the start of each day
17
+ - Update `memory/learnings.md` when you learn something new about the user
18
+ - Always confirm before sending emails or making calendar changes
19
+
20
+ ### Workspace Structure
21
+
22
+ ```
23
+ tasks/ — active tasks and TODO lists
24
+ reports/ — daily reports (YYYY-MM-DD.md)
25
+ notes/ — quick notes and references
26
+ drafts/ — work in progress documents
27
+ memory/ — persistent learnings (survives restarts)
28
+ config/ — agent configuration
29
+ ```
30
+
31
+ ### Communication Style
32
+
33
+ - Be helpful and proactive, but not pushy
34
+ - Suggest improvements to workflow when you notice patterns
35
+ - Ask for clarification when instructions are ambiguous
36
+ - Celebrate completed tasks briefly
package/package.json ADDED
@@ -0,0 +1,52 @@
1
+ {
2
+ "name": "seclaw",
3
+ "version": "0.1.1",
4
+ "description": "Secure autonomous AI agents in 60 seconds",
5
+ "type": "module",
6
+ "bin": {
7
+ "seclaw": "dist/cli.js"
8
+ },
9
+ "scripts": {
10
+ "prebuild": "cd ../runtime && npx tsup agent.ts --format esm --target es2022 --external @modelcontextprotocol/sdk --no-splitting && cd ../cli",
11
+ "dev": "tsup src/cli.ts --watch --format esm",
12
+ "build": "tsup src/cli.ts --format esm --dts --clean && mkdir -p dist/templates/free dist/templates/paid dist/runtime && (cp -r ../templates/free/. dist/templates/free/ 2>/dev/null || true) && (cp -r ../templates/paid/. dist/templates/paid/ 2>/dev/null || true) && (cp ../runtime/dist/agent.js dist/runtime/agent.js 2>/dev/null || true) && (cp ../runtime/Dockerfile dist/runtime/Dockerfile 2>/dev/null || true) && (cp ../runtime/package.json dist/runtime/package.json 2>/dev/null || true)",
13
+ "typecheck": "tsc --noEmit",
14
+ "prepublishOnly": "pnpm build"
15
+ },
16
+ "dependencies": {
17
+ "@clack/prompts": "^0.9.0",
18
+ "commander": "^13.0.0",
19
+ "execa": "^9.5.0",
20
+ "picocolors": "^1.1.0"
21
+ },
22
+ "devDependencies": {
23
+ "@types/node": "^22.0.0",
24
+ "tsup": "^8.3.0",
25
+ "typescript": "^5.7.0"
26
+ },
27
+ "files": [
28
+ "dist",
29
+ "!dist/templates/paid"
30
+ ],
31
+ "keywords": [
32
+ "ai-agent",
33
+ "autonomous",
34
+ "composio",
35
+ "mcp",
36
+ "secure",
37
+ "self-hosted",
38
+ "telegram"
39
+ ],
40
+ "author": "Mert Koseoglu",
41
+ "repository": {
42
+ "type": "git",
43
+ "url": "git+https://github.com/mksglu/seclawai.git",
44
+ "directory": "packages/cli"
45
+ },
46
+ "homepage": "https://seclawai.com",
47
+ "bugs": "https://github.com/mksglu/seclawai/issues",
48
+ "engines": {
49
+ "node": ">=18.0.0"
50
+ },
51
+ "license": "MIT"
52
+ }