watchd-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
watchd-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 level09
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
watchd-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,303 @@
1
+ Metadata-Version: 2.4
2
+ Name: watchd
3
+ Version: 0.1.0
4
+ Summary: Schedule, run, and track AI agents with zero infra.
5
+ Keywords: ai,agents,scheduler,cron,monitoring,automation
6
+ Author-email: level09 <level09@gmail.com>
7
+ Requires-Python: >=3.11
8
+ Description-Content-Type: text/markdown
9
+ License-Expression: MIT
10
+ Classifier: Development Status :: 3 - Alpha
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Classifier: Topic :: Software Development :: Libraries
17
+ Classifier: Topic :: System :: Monitoring
18
+ Classifier: Typing :: Typed
19
+ License-File: LICENSE
20
+ Requires-Dist: apscheduler>=3.10,<4
21
+ Requires-Dist: structlog
22
+ Requires-Dist: cyclopts
23
+ Requires-Dist: anthropic>=0.40 ; extra == "ai"
24
+ Requires-Dist: openai>=1.50 ; extra == "ai"
25
+ Requires-Dist: anthropic>=0.40 ; extra == "anthropic"
26
+ Requires-Dist: openai>=1.50 ; extra == "openai"
27
+ Project-URL: Documentation, https://watchd.dev
28
+ Project-URL: Homepage, https://watchd.dev
29
+ Project-URL: Issues, https://github.com/level09/watchd/issues
30
+ Project-URL: Repository, https://github.com/level09/watchd
31
+ Provides-Extra: ai
32
+ Provides-Extra: anthropic
33
+ Provides-Extra: openai
34
+
35
+ # watchd
36
+
37
+ Autonomous AI agents that watch, understand, and act. On a schedule. With memory.
38
+
39
+ One SQLite file. No Redis, no Docker, no queue.
40
+
41
+ ## Install
42
+
43
+ ```bash
44
+ uv add "watchd[ai]"
45
+ ```
46
+
47
+ ## Quick start
48
+
49
+ ```bash
50
+ watchd init # creates watchd.toml + watchd_agents/
51
+ watchd new my_agent # scaffold a new agent
52
+ watchd run my_agent # run once
53
+ watchd up # start all agents on their schedules
54
+ ```
55
+
56
+ ## What makes this different
57
+
58
+ Before LLMs, scheduled tasks were dumb: check a threshold, send an alert. watchd agents **understand context**, **build memory across runs**, and **take intelligent action**. Things that required a team of analysts now fit in a single Python file.
59
+
60
+ ### Contract compliance watchdog
61
+
62
+ An agent that reads your vendor contracts, cross-references them against incoming invoices, and flags discrepancies. It remembers pricing terms across runs, so it catches slow drift that no human would notice.
63
+
64
+ ```python
65
+ from watchd import agent, every
66
+ from litellm import completion
67
+
68
+ @agent(schedule=every.day.at("07:00"))
69
+ def contract_compliance(ctx):
70
+ invoices = fetch_new_invoices(since=ctx.state.get("last_check"))
71
+ terms = ctx.state.get("contract_terms", {})
72
+
73
+ for inv in invoices:
74
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
75
+ Contract terms for {inv['vendor']}: {terms.get(inv['vendor'], 'unknown')}
76
+ Invoice: {inv['line_items']}
77
+ Flag any line item that exceeds contracted rates or introduces
78
+ charges not covered by the agreement."""}])
79
+
80
+ analysis = resp.choices[0].message.content
81
+ if "flag" in analysis.lower() or "exceeds" in analysis.lower():
82
+ ctx.log.error("invoice_discrepancy", vendor=inv["vendor"])
83
+ post_to_slack(f"Invoice issue: {inv['vendor']}\n{analysis}")
84
+
85
+ ctx.state["last_check"] = now_iso()
86
+ ```
87
+
88
+ ### Incident post-mortem writer
89
+
90
+ Watches your monitoring stack. When an incident resolves, it pulls logs, metrics, and the alert timeline, then drafts a post-mortem with root cause analysis, impact summary, and action items. By the time your team opens Slack on Monday, the write-up is already there.
91
+
92
+ ```python
93
+ @agent(schedule=every.minutes(15))
94
+ def postmortem_drafter(ctx):
95
+ incidents = fetch_resolved_incidents(since=ctx.state.get("last_seen"))
96
+ drafted = ctx.state.get("drafted", [])
97
+
98
+ for inc in incidents:
99
+ if inc["id"] in drafted:
100
+ continue
101
+
102
+ logs = fetch_logs(inc["service"], inc["start"], inc["end"])
103
+ metrics = fetch_metrics(inc["service"], inc["start"], inc["end"])
104
+ timeline = inc["alert_history"]
105
+
106
+ resp = completion(model="claude-sonnet-4-20250514", messages=[{"role": "user", "content": f"""
107
+ Write a post-mortem for this incident.
108
+
109
+ Service: {inc['service']}
110
+ Duration: {inc['start']} to {inc['end']}
111
+ Alert timeline: {timeline}
112
+ Logs (last 200 lines): {logs[-5000:]}
113
+ Metrics: {metrics}
114
+
115
+ Include: summary, root cause, impact, timeline, action items.
116
+ Be specific. Reference actual log lines and metric values."""}])
117
+
118
+ post_to_confluence(f"Post-mortem: {inc['service']} - {inc['id']}", resp.choices[0].message.content)
119
+ drafted.append(inc["id"])
120
+
121
+ ctx.state["drafted"] = drafted[-100:]
122
+ ctx.state["last_seen"] = now_iso()
123
+ ```
124
+
125
+ ### Customer churn predictor
126
+
127
+ Analyzes support tickets, usage metrics, and billing patterns to identify customers showing early signs of churn. It builds a profile per customer over time, so each week's analysis has more context than the last.
128
+
129
+ ```python
130
+ @agent(schedule=every.monday.at("06:00"))
131
+ def churn_radar(ctx):
132
+ profiles = ctx.state.get("customer_profiles", {})
133
+ customers = fetch_active_customers()
134
+
135
+ at_risk = []
136
+ for cust in customers:
137
+ tickets = fetch_tickets(cust["id"], days=30)
138
+ usage = fetch_usage_trend(cust["id"], days=90)
139
+ history = profiles.get(cust["id"], "New customer, no prior analysis.")
140
+
141
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
142
+ Customer: {cust['name']} ({cust['plan']}, ${cust['mrr']}/mo)
143
+ Previous analysis: {history}
144
+ Recent tickets: {tickets}
145
+ Usage trend (90d): {usage}
146
+
147
+ Assess churn risk (low/medium/high). Explain your reasoning.
148
+ Compare against your previous analysis: is the trend improving or worsening?"""}])
149
+
150
+ analysis = resp.choices[0].message.content
151
+ profiles[cust["id"]] = analysis
152
+
153
+ if "high" in analysis.lower()[:100]:
154
+ at_risk.append({"name": cust["name"], "mrr": cust["mrr"], "analysis": analysis})
155
+
156
+ ctx.state["customer_profiles"] = profiles
157
+ if at_risk:
158
+ total_mrr = sum(c["mrr"] for c in at_risk)
159
+ post_to_slack(f"{len(at_risk)} customers at risk (${total_mrr:,.0f} MRR)")
160
+ ```
161
+
162
+ ### Security log analyst
163
+
164
+ Reads authentication logs, network events, and access patterns. Learns what "normal" looks like for your environment over weeks of observation, then flags anomalies that rule-based systems miss: unusual access sequences, subtle privilege escalation patterns, logins that are technically valid but contextually suspicious.
165
+
166
+ ```python
167
+ @agent(schedule=every.minutes(10))
168
+ def security_analyst(ctx):
169
+ baseline = ctx.state.get("baseline", "No baseline established yet.")
170
+ alert_history = ctx.state.get("alerts", [])
171
+ run_count = ctx.state.get("runs", 0) + 1
172
+
173
+ auth_logs = fetch_auth_logs(minutes=10)
174
+ network = fetch_network_events(minutes=10)
175
+
176
+ resp = completion(model="claude-sonnet-4-20250514", messages=[{"role": "user", "content": f"""
177
+ You are a security analyst reviewing the last 10 minutes of activity.
178
+
179
+ Established baseline: {baseline}
180
+ Recent alerts you've raised: {alert_history[-10:]}
181
+
182
+ Auth logs: {auth_logs[-3000:]}
183
+ Network events: {network[-3000:]}
184
+
185
+ Identify anything suspicious. Consider:
186
+ - Access patterns that are technically allowed but contextually unusual
187
+ - Sequences of actions that suggest lateral movement
188
+ - Timing anomalies (off-hours access, rapid sequential logins)
189
+ - Anything that deviates from the established baseline
190
+
191
+ Respond with JSON: {{"suspicious": true/false, "findings": [...], "baseline_update": "..."}}"""}])
192
+
193
+ result = parse_json(resp.choices[0].message.content)
194
+
195
+ if run_count % 500 == 0:
196
+ ctx.state["baseline"] = result.get("baseline_update", baseline)
197
+
198
+ if result.get("suspicious"):
199
+ alert_history.extend(result["findings"])
200
+ ctx.log.error("security_alert", findings=result["findings"])
201
+ page_oncall(result["findings"])
202
+
203
+ ctx.state["alerts"] = alert_history[-200:]
204
+ ctx.state["runs"] = run_count
205
+ ```
206
+
207
+ ### Regulatory change tracker
208
+
209
+ Monitors government and regulatory websites for policy changes relevant to your industry. Compares new language against previous versions it has stored, identifies what changed, assesses business impact, and routes to the right compliance team.
210
+
211
+ ```python
212
+ @agent(schedule=every.day.at("08:00"))
213
+ def regulatory_watch(ctx):
214
+ sources = ctx.state.get("sources", REGULATORY_URLS)
215
+ previous = ctx.state.get("previous_content", {})
216
+
217
+ for name, url in sources.items():
218
+ current = fetch_page_text(url)
219
+ prev = previous.get(name, "")
220
+
221
+ if current == prev:
222
+ continue
223
+
224
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
225
+ A regulatory page has changed.
226
+ Source: {name}
227
+
228
+ Previous version (truncated): {prev[:3000]}
229
+ Current version (truncated): {current[:3000]}
230
+
231
+ 1. What specifically changed?
232
+ 2. Is this a material change or cosmetic (formatting, typos)?
233
+ 3. If material: what business functions are affected?
234
+ 4. Recommended action and urgency (low/medium/high)."""}])
235
+
236
+ analysis = resp.choices[0].message.content
237
+ previous[name] = current
238
+
239
+ if "cosmetic" not in analysis.lower()[:200]:
240
+ post_to_slack(f"Regulatory change detected: {name}\n{analysis}")
241
+
242
+ ctx.state["previous_content"] = previous
243
+ ```
244
+
245
+ ## Agent context
246
+
247
+ Every agent receives a `ctx` object:
248
+
249
+ - **`ctx.state`** - persistent key/value store across runs (dict-like, SQLite-backed)
250
+ - **`ctx.log`** - structured logger
251
+ - **`ctx.history`** - last 10 runs for this agent
252
+ - **`ctx.agent_name`** / **`ctx.run_id`** - identity
253
+
254
+ State is the key primitive. It's what turns a script into an agent: each run builds on what the previous run learned.
255
+
256
+ ## Scheduling
257
+
258
+ ```python
259
+ from watchd import every
260
+
261
+ every.minutes(5) # every 5 minutes
262
+ every.hour # every hour
263
+ every.day.at("09:00") # daily at 9 AM
264
+ every.monday.at("08:00") # weekly
265
+ every.cron("*/5 * * * *") # raw crontab
266
+ ```
267
+
268
+ ## CLI
269
+
270
+ ```bash
271
+ watchd init # scaffold project
272
+ watchd new <name> # create agent file
273
+ watchd list # show agents + schedules
274
+ watchd run <name> # run one now
275
+ watchd up # start scheduler
276
+ watchd logs <name> # view captured output
277
+ watchd history # run history
278
+ watchd state <name> # inspect persisted state
279
+ ```
280
+
281
+ ## How it works
282
+
283
+ 1. **Scheduler** wraps APScheduler 3.x. Your agents run on their defined schedules.
284
+ 2. **Run tracker** logs every execution with status, timing, captured stdout, and errors.
285
+ 3. **State store** gives each agent persistent memory across runs.
286
+
287
+ Everything lives in one SQLite file. No external services.
288
+
289
+ ## Local development
290
+
291
+ ```bash
292
+ git clone https://github.com/level09/watchd.git
293
+ cd watchd
294
+ uv sync
295
+ uv run python -m pytest
296
+ uv run watchd init
297
+ uv run watchd run example
298
+ ```
299
+
300
+ ## License
301
+
302
+ MIT
303
+
watchd-0.1.0/README.md ADDED
@@ -0,0 +1,268 @@
1
+ # watchd
2
+
3
+ Autonomous AI agents that watch, understand, and act. On a schedule. With memory.
4
+
5
+ One SQLite file. No Redis, no Docker, no queue.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ uv add "watchd[ai]"
11
+ ```
12
+
13
+ ## Quick start
14
+
15
+ ```bash
16
+ watchd init # creates watchd.toml + watchd_agents/
17
+ watchd new my_agent # scaffold a new agent
18
+ watchd run my_agent # run once
19
+ watchd up # start all agents on their schedules
20
+ ```
21
+
22
+ ## What makes this different
23
+
24
+ Before LLMs, scheduled tasks were dumb: check a threshold, send an alert. watchd agents **understand context**, **build memory across runs**, and **take intelligent action**. Things that required a team of analysts now fit in a single Python file.
25
+
26
+ ### Contract compliance watchdog
27
+
28
+ An agent that reads your vendor contracts, cross-references them against incoming invoices, and flags discrepancies. It remembers pricing terms across runs, so it catches slow drift that no human would notice.
29
+
30
+ ```python
31
+ from watchd import agent, every
32
+ from litellm import completion
33
+
34
+ @agent(schedule=every.day.at("07:00"))
35
+ def contract_compliance(ctx):
36
+ invoices = fetch_new_invoices(since=ctx.state.get("last_check"))
37
+ terms = ctx.state.get("contract_terms", {})
38
+
39
+ for inv in invoices:
40
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
41
+ Contract terms for {inv['vendor']}: {terms.get(inv['vendor'], 'unknown')}
42
+ Invoice: {inv['line_items']}
43
+ Flag any line item that exceeds contracted rates or introduces
44
+ charges not covered by the agreement."""}])
45
+
46
+ analysis = resp.choices[0].message.content
47
+ if "flag" in analysis.lower() or "exceeds" in analysis.lower():
48
+ ctx.log.error("invoice_discrepancy", vendor=inv["vendor"])
49
+ post_to_slack(f"Invoice issue: {inv['vendor']}\n{analysis}")
50
+
51
+ ctx.state["last_check"] = now_iso()
52
+ ```
53
+
54
+ ### Incident post-mortem writer
55
+
56
+ Watches your monitoring stack. When an incident resolves, it pulls logs, metrics, and the alert timeline, then drafts a post-mortem with root cause analysis, impact summary, and action items. By the time your team opens Slack on Monday, the write-up is already there.
57
+
58
+ ```python
59
+ @agent(schedule=every.minutes(15))
60
+ def postmortem_drafter(ctx):
61
+ incidents = fetch_resolved_incidents(since=ctx.state.get("last_seen"))
62
+ drafted = ctx.state.get("drafted", [])
63
+
64
+ for inc in incidents:
65
+ if inc["id"] in drafted:
66
+ continue
67
+
68
+ logs = fetch_logs(inc["service"], inc["start"], inc["end"])
69
+ metrics = fetch_metrics(inc["service"], inc["start"], inc["end"])
70
+ timeline = inc["alert_history"]
71
+
72
+ resp = completion(model="claude-sonnet-4-20250514", messages=[{"role": "user", "content": f"""
73
+ Write a post-mortem for this incident.
74
+
75
+ Service: {inc['service']}
76
+ Duration: {inc['start']} to {inc['end']}
77
+ Alert timeline: {timeline}
78
+ Logs (last 200 lines): {logs[-5000:]}
79
+ Metrics: {metrics}
80
+
81
+ Include: summary, root cause, impact, timeline, action items.
82
+ Be specific. Reference actual log lines and metric values."""}])
83
+
84
+ post_to_confluence(f"Post-mortem: {inc['service']} - {inc['id']}", resp.choices[0].message.content)
85
+ drafted.append(inc["id"])
86
+
87
+ ctx.state["drafted"] = drafted[-100:]
88
+ ctx.state["last_seen"] = now_iso()
89
+ ```
90
+
91
+ ### Customer churn predictor
92
+
93
+ Analyzes support tickets, usage metrics, and billing patterns to identify customers showing early signs of churn. It builds a profile per customer over time, so each week's analysis has more context than the last.
94
+
95
+ ```python
96
+ @agent(schedule=every.monday.at("06:00"))
97
+ def churn_radar(ctx):
98
+ profiles = ctx.state.get("customer_profiles", {})
99
+ customers = fetch_active_customers()
100
+
101
+ at_risk = []
102
+ for cust in customers:
103
+ tickets = fetch_tickets(cust["id"], days=30)
104
+ usage = fetch_usage_trend(cust["id"], days=90)
105
+ history = profiles.get(cust["id"], "New customer, no prior analysis.")
106
+
107
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
108
+ Customer: {cust['name']} ({cust['plan']}, ${cust['mrr']}/mo)
109
+ Previous analysis: {history}
110
+ Recent tickets: {tickets}
111
+ Usage trend (90d): {usage}
112
+
113
+ Assess churn risk (low/medium/high). Explain your reasoning.
114
+ Compare against your previous analysis: is the trend improving or worsening?"""}])
115
+
116
+ analysis = resp.choices[0].message.content
117
+ profiles[cust["id"]] = analysis
118
+
119
+ if "high" in analysis.lower()[:100]:
120
+ at_risk.append({"name": cust["name"], "mrr": cust["mrr"], "analysis": analysis})
121
+
122
+ ctx.state["customer_profiles"] = profiles
123
+ if at_risk:
124
+ total_mrr = sum(c["mrr"] for c in at_risk)
125
+ post_to_slack(f"{len(at_risk)} customers at risk (${total_mrr:,.0f} MRR)")
126
+ ```
127
+
128
+ ### Security log analyst
129
+
130
+ Reads authentication logs, network events, and access patterns. Learns what "normal" looks like for your environment over weeks of observation, then flags anomalies that rule-based systems miss: unusual access sequences, subtle privilege escalation patterns, logins that are technically valid but contextually suspicious.
131
+
132
+ ```python
133
+ @agent(schedule=every.minutes(10))
134
+ def security_analyst(ctx):
135
+ baseline = ctx.state.get("baseline", "No baseline established yet.")
136
+ alert_history = ctx.state.get("alerts", [])
137
+ run_count = ctx.state.get("runs", 0) + 1
138
+
139
+ auth_logs = fetch_auth_logs(minutes=10)
140
+ network = fetch_network_events(minutes=10)
141
+
142
+ resp = completion(model="claude-sonnet-4-20250514", messages=[{"role": "user", "content": f"""
143
+ You are a security analyst reviewing the last 10 minutes of activity.
144
+
145
+ Established baseline: {baseline}
146
+ Recent alerts you've raised: {alert_history[-10:]}
147
+
148
+ Auth logs: {auth_logs[-3000:]}
149
+ Network events: {network[-3000:]}
150
+
151
+ Identify anything suspicious. Consider:
152
+ - Access patterns that are technically allowed but contextually unusual
153
+ - Sequences of actions that suggest lateral movement
154
+ - Timing anomalies (off-hours access, rapid sequential logins)
155
+ - Anything that deviates from the established baseline
156
+
157
+ Respond with JSON: {{"suspicious": true/false, "findings": [...], "baseline_update": "..."}}"""}])
158
+
159
+ result = parse_json(resp.choices[0].message.content)
160
+
161
+ if run_count % 500 == 0:
162
+ ctx.state["baseline"] = result.get("baseline_update", baseline)
163
+
164
+ if result.get("suspicious"):
165
+ alert_history.extend(result["findings"])
166
+ ctx.log.error("security_alert", findings=result["findings"])
167
+ page_oncall(result["findings"])
168
+
169
+ ctx.state["alerts"] = alert_history[-200:]
170
+ ctx.state["runs"] = run_count
171
+ ```
172
+
173
+ ### Regulatory change tracker
174
+
175
+ Monitors government and regulatory websites for policy changes relevant to your industry. Compares new language against previous versions it has stored, identifies what changed, assesses business impact, and routes to the right compliance team.
176
+
177
+ ```python
178
+ @agent(schedule=every.day.at("08:00"))
179
+ def regulatory_watch(ctx):
180
+ sources = ctx.state.get("sources", REGULATORY_URLS)
181
+ previous = ctx.state.get("previous_content", {})
182
+
183
+ for name, url in sources.items():
184
+ current = fetch_page_text(url)
185
+ prev = previous.get(name, "")
186
+
187
+ if current == prev:
188
+ continue
189
+
190
+ resp = completion(model="gpt-4o", messages=[{"role": "user", "content": f"""
191
+ A regulatory page has changed.
192
+ Source: {name}
193
+
194
+ Previous version (truncated): {prev[:3000]}
195
+ Current version (truncated): {current[:3000]}
196
+
197
+ 1. What specifically changed?
198
+ 2. Is this a material change or cosmetic (formatting, typos)?
199
+ 3. If material: what business functions are affected?
200
+ 4. Recommended action and urgency (low/medium/high)."""}])
201
+
202
+ analysis = resp.choices[0].message.content
203
+ previous[name] = current
204
+
205
+ if "cosmetic" not in analysis.lower()[:200]:
206
+ post_to_slack(f"Regulatory change detected: {name}\n{analysis}")
207
+
208
+ ctx.state["previous_content"] = previous
209
+ ```
210
+
211
+ ## Agent context
212
+
213
+ Every agent receives a `ctx` object:
214
+
215
+ - **`ctx.state`** - persistent key/value store across runs (dict-like, SQLite-backed)
216
+ - **`ctx.log`** - structured logger
217
+ - **`ctx.history`** - last 10 runs for this agent
218
+ - **`ctx.agent_name`** / **`ctx.run_id`** - identity
219
+
220
+ State is the key primitive. It's what turns a script into an agent: each run builds on what the previous run learned.
221
+
222
+ ## Scheduling
223
+
224
+ ```python
225
+ from watchd import every
226
+
227
+ every.minutes(5) # every 5 minutes
228
+ every.hour # every hour
229
+ every.day.at("09:00") # daily at 9 AM
230
+ every.monday.at("08:00") # weekly
231
+ every.cron("*/5 * * * *") # raw crontab
232
+ ```
233
+
234
+ ## CLI
235
+
236
+ ```bash
237
+ watchd init # scaffold project
238
+ watchd new <name> # create agent file
239
+ watchd list # show agents + schedules
240
+ watchd run <name> # run one now
241
+ watchd up # start scheduler
242
+ watchd logs <name> # view captured output
243
+ watchd history # run history
244
+ watchd state <name> # inspect persisted state
245
+ ```
246
+
247
+ ## How it works
248
+
249
+ 1. **Scheduler** wraps APScheduler 3.x. Your agents run on their defined schedules.
250
+ 2. **Run tracker** logs every execution with status, timing, captured stdout, and errors.
251
+ 3. **State store** gives each agent persistent memory across runs.
252
+
253
+ Everything lives in one SQLite file. No external services.
254
+
255
+ ## Local development
256
+
257
+ ```bash
258
+ git clone https://github.com/level09/watchd.git
259
+ cd watchd
260
+ uv sync
261
+ uv run python -m pytest
262
+ uv run watchd init
263
+ uv run watchd run example
264
+ ```
265
+
266
+ ## License
267
+
268
+ MIT
@@ -0,0 +1,57 @@
1
+ [project]
2
+ name = "watchd"
3
+ dynamic = ["version"]
4
+ description = "Schedule, run, and track AI agents with zero infra."
5
+ readme = "README.md"
6
+ license = "MIT"
7
+ authors = [
8
+ { name = "level09", email = "level09@gmail.com" }
9
+ ]
10
+ requires-python = ">=3.11"
11
+ keywords = ["ai", "agents", "scheduler", "cron", "monitoring", "automation"]
12
+ classifiers = [
13
+ "Development Status :: 3 - Alpha",
14
+ "Intended Audience :: Developers",
15
+ "Programming Language :: Python :: 3",
16
+ "Programming Language :: Python :: 3.11",
17
+ "Programming Language :: Python :: 3.12",
18
+ "Programming Language :: Python :: 3.13",
19
+ "Topic :: Software Development :: Libraries",
20
+ "Topic :: System :: Monitoring",
21
+ "Typing :: Typed",
22
+ ]
23
+ dependencies = [
24
+ "apscheduler>=3.10,<4",
25
+ "structlog",
26
+ "cyclopts",
27
+ ]
28
+
29
+ [project.optional-dependencies]
30
+ anthropic = ["anthropic>=0.40"]
31
+ openai = ["openai>=1.50"]
32
+ ai = ["anthropic>=0.40", "openai>=1.50"]
33
+
34
+ [project.urls]
35
+ Homepage = "https://watchd.dev"
36
+ Documentation = "https://watchd.dev"
37
+ Repository = "https://github.com/level09/watchd"
38
+ Issues = "https://github.com/level09/watchd/issues"
39
+
40
+ [project.scripts]
41
+ watchd = "watchd.cli:main"
42
+
43
+ [build-system]
44
+ requires = ["flit_core>=3.9,<4"]
45
+ build-backend = "flit_core.buildapi"
46
+
47
+ [tool.ruff]
48
+ line-length = 100
49
+
50
+ [tool.pytest.ini_options]
51
+ testpaths = ["tests"]
52
+
53
+ [dependency-groups]
54
+ dev = [
55
+ "pytest>=9.0.2",
56
+ "ruff>=0.15.2",
57
+ ]
@@ -0,0 +1,9 @@
1
+ """watchd - Schedule, run, and track AI agents with zero infra."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from watchd.app import Watchd
6
+ from watchd.registry import agent
7
+ from watchd.schedule import every
8
+
9
+ __all__ = ["Watchd", "agent", "every"]
@@ -0,0 +1,5 @@
1
+ """Allow running as `python -m watchd`."""
2
+
3
+ from watchd.cli import main
4
+
5
+ main()