buildlog 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
buildlog/confidence.py ADDED
@@ -0,0 +1,311 @@
+ """Confidence scoring for rules and patterns.
+
+ Confidence represents structural inertia - how hard it would be for the system
+ to stop believing a rule. It reflects accumulated mass from reinforcement,
+ not objective correctness.
+
+ A rule gains mass when:
+ - It shows up again (frequency)
+ - It shows up recently (recency)
+ - It survives contradictions
+
+ A rule loses mass when:
+ - It's unused (time decay)
+ - It's contradicted
+ - It's contextually bypassed
+ """
+
+ from __future__ import annotations
+
+ import math
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from enum import Enum
+ from typing import TypedDict
+
+ __all__ = [
+     "ConfidenceTier",
+     "ConfidenceConfig",
+     "ConfidenceMetrics",
+     "ConfidenceMetricsDict",
+     "calculate_confidence",
+     "get_confidence_tier",
+     "merge_confidence_metrics",
+     "add_contradiction",
+ ]
+
+
+ class ConfidenceTier(str, Enum):
+     """Descriptive tiers for rule confidence.
+
+     These are purely descriptive labels for human interpretation.
+     No logic gates or hard thresholds are enforced by the system.
+     """
+
+     SPECULATIVE = "speculative"  # Low mass, recently introduced
+     PROVISIONAL = "provisional"  # Growing mass, some reinforcement
+     STABLE = "stable"  # Consistent reinforcement, moderate mass
+     ENTRENCHED = "entrenched"  # High mass, sustained over time
+
+
+ @dataclass(frozen=True)
+ class ConfidenceConfig:
+     """Configuration parameters for confidence calculation.
+
+     Attributes:
+         tau: Half-life for recency decay (in days). Smaller = twitchier system.
+         k: Saturation constant for frequency. Larger = slower saturation.
+         lambda_: Decay constant for contradiction penalty.
+         tier_thresholds: Confidence score thresholds for each tier.
+     """
+
+     tau: float = 30.0  # 30-day half-life by default
+     k: float = 5.0  # Frequency saturation constant
+     lambda_: float = 2.0  # Contradiction decay constant
+     tier_thresholds: tuple[float, float, float] = (0.2, 0.4, 0.7)
+
+     def __post_init__(self) -> None:
+         if self.tau <= 0:
+             raise ValueError("tau must be positive")
+         if self.k <= 0:
+             raise ValueError("k must be positive")
+         if self.lambda_ <= 0:
+             raise ValueError("lambda_ must be positive")
+         low, mid, high = self.tier_thresholds
+         if not (0 <= low <= mid <= high <= 1):
+             raise ValueError(
+                 "tier_thresholds must be monotonically increasing in [0, 1]"
+             )
+
+
+ class ConfidenceMetricsDict(TypedDict):
+     """Serializable form of confidence metrics."""
+
+     reinforcement_count: int
+     last_reinforced: str  # ISO format timestamp
+     contradiction_count: int
+     first_seen: str  # ISO format timestamp
+
+
+ @dataclass
+ class ConfidenceMetrics:
+     """Tracked metrics for confidence calculation.
+
+     These are the raw inputs that feed into the confidence formula.
+     """
+
+     reinforcement_count: int = 1
+     last_reinforced: datetime = field(
+         default_factory=lambda: datetime.now(timezone.utc)
+     )
+     contradiction_count: int = 0
+     first_seen: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+     def __post_init__(self) -> None:
+         if self.reinforcement_count < 0:
+             raise ValueError("reinforcement_count must be non-negative")
+         if self.contradiction_count < 0:
+             raise ValueError("contradiction_count must be non-negative")
+
+     def to_dict(self) -> ConfidenceMetricsDict:
+         """Convert to serializable dictionary."""
+         return {
+             "reinforcement_count": self.reinforcement_count,
+             "last_reinforced": self.last_reinforced.isoformat(),
+             "contradiction_count": self.contradiction_count,
+             "first_seen": self.first_seen.isoformat(),
+         }
+
+     @classmethod
+     def from_dict(cls, data: ConfidenceMetricsDict) -> ConfidenceMetrics:
+         """Reconstruct from serialized dictionary.
+
+         Note: Timezone-naive datetimes are assumed to be UTC.
+         """
+         last_reinforced = datetime.fromisoformat(data["last_reinforced"])
+         first_seen = datetime.fromisoformat(data["first_seen"])
+
+         # Ensure timezone awareness (assume UTC for naive datetimes)
+         if last_reinforced.tzinfo is None:
+             last_reinforced = last_reinforced.replace(tzinfo=timezone.utc)
+         if first_seen.tzinfo is None:
+             first_seen = first_seen.replace(tzinfo=timezone.utc)
+
+         return cls(
+             reinforcement_count=data["reinforcement_count"],
+             last_reinforced=last_reinforced,
+             contradiction_count=data["contradiction_count"],
+             first_seen=first_seen,
+         )
+
+
+ def calculate_frequency_weight(n: int, k: float) -> float:
+     """Calculate frequency weight with saturation.
+
+     Uses bounded exponential: 1 - exp(-n/k)
+     This makes early reinforcement matter more than late spam.
+
+     Args:
+         n: Reinforcement count
+         k: Saturation constant (larger = slower saturation)
+
+     Returns:
+         Weight in range (0, 1), approaching 1 as n grows
+     """
+     return 1.0 - math.exp(-n / k)
+
+
+ def calculate_recency_weight(
+     t_last: datetime,
+     t_now: datetime,
+     tau: float,
+ ) -> float:
+     """Calculate recency weight with exponential decay.
+
+     Uses: exp(-(t_now - t_last) / tau)
+
+     Args:
+         t_last: Timestamp of last reinforcement
+         t_now: Current timestamp
+         tau: Half-life in days
+
+     Returns:
+         Weight in range (0, 1], decaying over time.
+         If t_last is in the future, clamps to 1.0.
+     """
+     days_elapsed = (t_now - t_last).total_seconds() / (24 * 60 * 60)
+     if days_elapsed < 0:
+         return 1.0  # Future timestamps treated as "just now"
+     return math.exp(-days_elapsed / tau)
+
+
+ def calculate_contradiction_penalty(c: int, lambda_: float) -> float:
+     """Calculate contradiction penalty (drag).
+
+     Rules don't die from contradictions, they get heavy and sink.
+     Uses: exp(-c / lambda)
+
+     Args:
+         c: Contradiction count
+         lambda_: Decay constant
+
+     Returns:
+         Penalty multiplier in range (0, 1]
+     """
+     return math.exp(-c / lambda_)
+
+
+ def calculate_confidence(
+     metrics: ConfidenceMetrics,
+     config: ConfidenceConfig | None = None,
+     t_now: datetime | None = None,
+ ) -> float:
+     """Calculate confidence score for a rule.
+
+     Confidence = frequency_weight * recency_weight * contradiction_penalty
+
+     This gives a scalar that:
+     - Rises fast early
+     - Decays naturally over time
+     - Never quite hits zero
+     - Never explodes to infinity
+
+     Args:
+         metrics: Tracked metrics for the rule
+         config: Scoring configuration (uses defaults if None)
+         t_now: Current time (uses now if None)
+
+     Returns:
+         Confidence score in range (0, 1)
+     """
+     if config is None:
+         config = ConfidenceConfig()
+     if t_now is None:
+         t_now = datetime.now(timezone.utc)
+
+     freq = calculate_frequency_weight(metrics.reinforcement_count, config.k)
+     recency = calculate_recency_weight(metrics.last_reinforced, t_now, config.tau)
+     penalty = calculate_contradiction_penalty(
+         metrics.contradiction_count, config.lambda_
+     )
+
+     return freq * recency * penalty
+
+
+ def get_confidence_tier(
+     score: float,
+     config: ConfidenceConfig | None = None,
+ ) -> ConfidenceTier:
+     """Map confidence score to descriptive tier.
+
+     Args:
+         score: Confidence score in range [0, 1]
+         config: Configuration with tier thresholds
+
+     Returns:
+         Descriptive tier label
+
+     Raises:
+         ValueError: If score is outside [0, 1] range
+     """
+     if not (0.0 <= score <= 1.0):
+         raise ValueError(f"score must be in [0, 1], got {score}")
+
+     if config is None:
+         config = ConfidenceConfig()
+
+     low, mid, high = config.tier_thresholds
+
+     if score < low:
+         return ConfidenceTier.SPECULATIVE
+     elif score < mid:
+         return ConfidenceTier.PROVISIONAL
+     elif score < high:
+         return ConfidenceTier.STABLE
+     else:
+         return ConfidenceTier.ENTRENCHED
+
+
+ def merge_confidence_metrics(
+     existing: ConfidenceMetrics,
+     new_occurrence: datetime | None = None,
+ ) -> ConfidenceMetrics:
+     """Merge a new occurrence into existing metrics.
+
+     This is called when a rule is reinforced (seen again).
+
+     Args:
+         existing: Current metrics for the rule
+         new_occurrence: Timestamp of new occurrence (uses now if None)
+
+     Returns:
+         Updated metrics with incremented count and updated timestamp
+     """
+     if new_occurrence is None:
+         new_occurrence = datetime.now(timezone.utc)
+
+     return ConfidenceMetrics(
+         reinforcement_count=existing.reinforcement_count + 1,
+         last_reinforced=new_occurrence,
+         contradiction_count=existing.contradiction_count,
+         first_seen=existing.first_seen,
+     )
+
+
+ def add_contradiction(metrics: ConfidenceMetrics) -> ConfidenceMetrics:
+     """Record a contradiction against a rule.
+
+     Contradictions add drag but don't invalidate rules.
+
+     Args:
+         metrics: Current metrics for the rule
+
+     Returns:
+         Updated metrics with incremented contradiction count
+     """
+     return ConfidenceMetrics(
+         reinforcement_count=metrics.reinforcement_count,
+         last_reinforced=metrics.last_reinforced,
+         contradiction_count=metrics.contradiction_count + 1,
+         first_seen=metrics.first_seen,
+     )
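
For orientation, a minimal usage sketch of the module added above, using only the names it defines (ConfidenceConfig, ConfidenceMetrics, calculate_confidence, get_confidence_tier, merge_confidence_metrics, add_contradiction); the dates, counts, and printed values are illustrative, not taken from the package.

    from datetime import datetime, timedelta, timezone

    from buildlog.confidence import (
        ConfidenceConfig,
        ConfidenceMetrics,
        add_contradiction,
        calculate_confidence,
        get_confidence_tier,
        merge_confidence_metrics,
    )

    now = datetime.now(timezone.utc)

    # A rule first seen 40 days ago, reinforced 4 times, last seen 10 days ago.
    metrics = ConfidenceMetrics(
        reinforcement_count=4,
        last_reinforced=now - timedelta(days=10),
        contradiction_count=0,
        first_seen=now - timedelta(days=40),
    )
    config = ConfidenceConfig()  # defaults: tau=30.0, k=5.0, lambda_=2.0, tiers at (0.2, 0.4, 0.7)

    # score = (1 - e^(-4/5)) * e^(-10/30) * e^(-0/2), roughly 0.55 * 0.72 * 1.0 = 0.39
    score = calculate_confidence(metrics, config, t_now=now)
    tier = get_confidence_tier(score, config)
    print(round(score, 2), tier.value)  # roughly: 0.39 provisional

    # Reinforcement bumps the count and refreshes recency; a contradiction only adds drag.
    metrics = merge_confidence_metrics(metrics, new_occurrence=now)
    metrics = add_contradiction(metrics)
    print(round(calculate_confidence(metrics, config, t_now=now), 2))  # roughly 0.38: heavier, not dead

Both update helpers return new ConfidenceMetrics values rather than mutating their argument.
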
buildlog/skills.py CHANGED
@@ -5,11 +5,13 @@ from __future__ import annotations
  __all__ = [
      "Skill",
      "SkillSet",
+     "ConfidenceConfig",  # Re-exported for convenience
      "_deduplicate_insights",
      "_calculate_confidence",
      "_extract_tags",
      "_generate_skill_id",
      "_to_imperative",
+     "_build_confidence_metrics",
      "generate_skills",
      "format_skills",
  ]
@@ -23,6 +25,9 @@ from datetime import date, datetime, timezone
  from pathlib import Path
  from typing import Final, Literal, TypedDict

+ from buildlog.confidence import ConfidenceConfig, ConfidenceMetrics
+ from buildlog.confidence import calculate_confidence as calculate_continuous_confidence
+ from buildlog.confidence import get_confidence_tier
  from buildlog.distill import CATEGORIES, PatternDict, distill_all
  from buildlog.embeddings import EmbeddingBackend, get_backend, get_default_backend

@@ -39,8 +44,8 @@ OutputFormat = Literal["yaml", "json", "markdown", "rules", "settings"]
  ConfidenceLevel = Literal["high", "medium", "low"]


- class SkillDict(TypedDict):
-     """Type for skill dictionary representation."""
+ class _SkillDictRequired(TypedDict):
+     """Required fields for skill dictionary (base class)."""

      id: str
      category: str
@@ -51,6 +56,17 @@ class SkillDict(TypedDict):
      tags: list[str]


+ class SkillDict(_SkillDictRequired, total=False):
+     """Type for skill dictionary representation.
+
+     Inherits required fields from _SkillDictRequired.
+     Optional fields are only present when continuous confidence is enabled.
+     """
+
+     confidence_score: float
+     confidence_tier: str
+
+
  class SkillSetDict(TypedDict):
      """Type for full skill set dictionary."""

@@ -66,6 +82,17 @@ class Skill:

      Represents a single actionable rule derived from one or more
      similar insights across buildlog entries.
+
+     Attributes:
+         id: Stable identifier for the skill.
+         category: Category (architectural, workflow, etc.).
+         rule: The actionable rule text.
+         frequency: How many times this pattern was seen.
+         confidence: Discrete confidence level (high/medium/low).
+         sources: List of source files where this pattern appeared.
+         tags: Extracted technology/concept tags.
+         confidence_score: Continuous confidence score (0-1), if calculated.
+         confidence_tier: Descriptive tier (speculative/provisional/stable/entrenched).
      """

      id: str
@@ -75,10 +102,16 @@ class Skill:
      confidence: ConfidenceLevel
      sources: list[str] = field(default_factory=list)
      tags: list[str] = field(default_factory=list)
+     confidence_score: float | None = None
+     confidence_tier: str | None = None

      def to_dict(self) -> SkillDict:
-         """Convert to dictionary for serialization."""
-         return SkillDict(
+         """Convert to dictionary for serialization.
+
+         Only includes optional fields (confidence_score, confidence_tier)
+         when they are set.
+         """
+         result = SkillDict(
              id=self.id,
              category=self.category,
              rule=self.rule,
@@ -87,6 +120,11 @@ class Skill:
              sources=self.sources,
              tags=self.tags,
          )
+         if self.confidence_score is not None:
+             result["confidence_score"] = self.confidence_score
+         if self.confidence_tier is not None:
+             result["confidence_tier"] = self.confidence_tier
+         return result


  @dataclass
@@ -253,7 +291,7 @@ def _deduplicate_insights(
      patterns: list[PatternDict],
      threshold: float = MIN_SIMILARITY_THRESHOLD,
      backend: EmbeddingBackend | None = None,
- ) -> list[tuple[str, int, list[str], date | None]]:
+ ) -> list[tuple[str, int, list[str], date | None, date | None]]:
      """Deduplicate similar insights into merged rules.

      Args:
@@ -262,7 +300,8 @@ def _deduplicate_insights(
          backend: Embedding backend for similarity computation.

      Returns:
-         List of (rule, frequency, sources, most_recent_date) tuples.
+         List of (rule, frequency, sources, most_recent_date, earliest_date) tuples.
+         Both dates can be None if no valid dates are found in the patterns.
      """
      if not patterns:
          return []
@@ -289,7 +328,7 @@ def _deduplicate_insights(
              groups.append([pattern])

      # Convert groups to deduplicated rules
-     results: list[tuple[str, int, list[str], date | None]] = []
+     results: list[tuple[str, int, list[str], date | None, date | None]] = []

      for group in groups:
          # Use the shortest insight as the canonical rule (often cleaner)
@@ -298,7 +337,7 @@ def _deduplicate_insights(
          frequency = len(group)
          sources = sorted(set(p["source"] for p in group))

-         # Find most recent date
+         # Find most recent and earliest dates
          dates: list[date] = []
          for p in group:
              try:
@@ -307,16 +346,58 @@ def _deduplicate_insights(
                  pass

          most_recent = max(dates) if dates else None
-         results.append((rule, frequency, sources, most_recent))
+         earliest = min(dates) if dates else None
+         results.append((rule, frequency, sources, most_recent, earliest))

      return results


+ def _build_confidence_metrics(
+     frequency: int,
+     most_recent: date | None,
+     earliest: date | None,
+ ) -> ConfidenceMetrics:
+     """Build ConfidenceMetrics from deduplication results.
+
+     Args:
+         frequency: Number of times the pattern was seen.
+         most_recent: Most recent occurrence date.
+         earliest: Earliest occurrence date.
+
+     Returns:
+         ConfidenceMetrics for continuous confidence calculation.
+     """
+     # Use midnight UTC for date-based timestamps
+     now = datetime.now(timezone.utc)
+
+     if most_recent is not None:
+         last_reinforced = datetime(
+             most_recent.year, most_recent.month, most_recent.day, tzinfo=timezone.utc
+         )
+     else:
+         last_reinforced = now
+
+     if earliest is not None:
+         first_seen = datetime(
+             earliest.year, earliest.month, earliest.day, tzinfo=timezone.utc
+         )
+     else:
+         first_seen = last_reinforced
+
+     return ConfidenceMetrics(
+         reinforcement_count=frequency,
+         last_reinforced=last_reinforced,
+         contradiction_count=0,  # Deferred: no contradiction tracking yet
+         first_seen=first_seen,
+     )
+
+
  def generate_skills(
      buildlog_dir: Path,
      min_frequency: int = 1,
      since_date: date | None = None,
      embedding_backend: str | None = None,
+     confidence_config: ConfidenceConfig | None = None,
  ) -> SkillSet:
      """Generate skills from buildlog patterns.

@@ -326,6 +407,9 @@ def generate_skills(
          since_date: Only include patterns from this date onward.
          embedding_backend: Name of embedding backend for deduplication.
              Options: "token" (default), "sentence-transformers", "openai".
+         confidence_config: Configuration for continuous confidence scoring.
+             If provided, skills will include confidence_score and confidence_tier.
+             If None, only discrete confidence levels (high/medium/low) are computed.

      Returns:
          SkillSet with generated skills.
@@ -341,6 +425,9 @@ def generate_skills(
      )
      logger.info("Using embedding backend: %s", backend.name)

+     # Capture reference time for confidence calculations
+     t_now = datetime.now(timezone.utc) if confidence_config else None
+
      skills_by_category: dict[str, list[Skill]] = {}

      for category in CATEGORIES:
@@ -348,10 +435,22 @@ def generate_skills(
          deduplicated = _deduplicate_insights(patterns, backend=backend)

          skills: list[Skill] = []
-         for rule, frequency, sources, most_recent in deduplicated:
+         for rule, frequency, sources, most_recent, earliest in deduplicated:
              if frequency < min_frequency:
                  continue

+             # Calculate continuous confidence if config provided
+             confidence_score: float | None = None
+             confidence_tier: str | None = None
+             if confidence_config is not None and t_now is not None:
+                 metrics = _build_confidence_metrics(frequency, most_recent, earliest)
+                 confidence_score = calculate_continuous_confidence(
+                     metrics, confidence_config, t_now
+                 )
+                 confidence_tier = get_confidence_tier(
+                     confidence_score, confidence_config
+                 ).value
+
              skill = Skill(
                  id=_generate_skill_id(category, rule),
                  category=category,
@@ -360,6 +459,8 @@ def generate_skills(
                  confidence=_calculate_confidence(frequency, most_recent),
                  sources=sources,
                  tags=_extract_tags(rule),
+                 confidence_score=confidence_score,
+                 confidence_tier=confidence_tier,
              )
              skills.append(skill)

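
Taken together, the skills.py changes make continuous confidence strictly opt-in. Below is a small sketch against the classes and signatures shown in the hunks above; the Skill field values and the buildlog directory path are placeholders, and the SkillSet accessors are not part of this diff, so the example stops at the call itself.

    from pathlib import Path

    from buildlog.confidence import ConfidenceConfig
    from buildlog.skills import Skill, generate_skills

    # Without a score, to_dict() emits only the required keys.
    skill = Skill(
        id="workflow-0001",  # placeholder values
        category="workflow",
        rule="Define the API contract before implementing the client.",
        frequency=3,
        confidence="medium",
        sources=["2026-01-01-example.md"],
        tags=["api"],
    )
    assert "confidence_score" not in skill.to_dict()

    # With a config, generate_skills() also fills confidence_score and confidence_tier
    # on each Skill, and to_dict() then includes both optional keys.
    skillset = generate_skills(
        Path("buildlog"),  # placeholder directory of journal entries
        min_frequency=1,
        confidence_config=ConfidenceConfig(tau=14.0),  # twitchier two-week recency decay
    )
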
@@ -1,13 +1,13 @@
  #!/usr/bin/env python3
  """Post-generation script to update CLAUDE.md with buildlog instructions."""

- import os
  from pathlib import Path

- CLAUDE_MD_SECTION = '''
+ CLAUDE_MD_SECTION = """
  ## Build Journal

- After completing significant work (features, debugging sessions, deployments, 2+ hour focused sessions), write a build journal entry.
+ After completing significant work (features, debugging sessions, deployments,
+ 2+ hour focused sessions), write a build journal entry.

  **Location:** `buildlog/YYYY-MM-DD-{slug}.md`
  **Template:** `buildlog/_TEMPLATE.md`
@@ -15,18 +15,21 @@ After completing significant work (features, debugging sessions, deployments, 2+
  ### Required Sections
  1. **The Goal** - What we built and why
  2. **What We Built** - Architecture diagram, components table
- 3. **The Journey** - Chronological INCLUDING mistakes, wrong turns, actual error messages
+ 3. **The Journey** - Chronological INCLUDING mistakes, wrong turns, actual errors
  4. **Test Results** - Actual commands run, actual outputs received
  5. **Code Samples** - Key snippets with context (not full files)
  6. **AI Experience Reflection** - Meta-commentary on the collaboration
- 7. **Improvements** - Actionable learnings: architectural, workflow, tool usage, domain knowledge
+ 7. **Improvements** - Actionable learnings: architectural, workflow, tool usage

- The **Improvements** section is critical - capture concrete insights like "Should have defined the API contract before implementing the client" not vague observations like "Should have planned better."
+ The **Improvements** section is critical - capture concrete insights like
+ "Should have defined the API contract before implementing the client"
+ not vague observations like "Should have planned better."

  **Quality bar:** Publishable as a $500+ Envato Tuts+/Manning tutorial.

  After significant work, ask: "Should I write a build journal entry for this?"
- '''
+ """
+

  def main():
      claude_md = Path("CLAUDE.md")
@@ -47,5 +50,6 @@ def main():

      print("Added Build Journal section to CLAUDE.md")

+
  if __name__ == "__main__":
      main()
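
The post_gen.py hunks above only touch imports, string quoting, and line wrapping; the body of main() is not part of this diff. For context, the idempotent append such a script performs can be reduced to a containment check before writing — a generic sketch, not the package's implementation:

    from pathlib import Path

    MARKER = "## Build Journal"  # heading carried in CLAUDE_MD_SECTION above

    def append_once(claude_md: Path, section: str) -> bool:
        """Append the section to CLAUDE.md unless its heading is already there."""
        existing = claude_md.read_text() if claude_md.exists() else ""
        if MARKER in existing:
            return False  # already present; leave the file untouched
        claude_md.write_text(existing + section)
        return True
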
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: buildlog
- Version: 0.2.0
+ Version: 0.3.0
  Summary: Engineering notebook for AI-assisted development
  Project-URL: Homepage, https://github.com/Peleke/buildlog-template
  Project-URL: Repository, https://github.com/Peleke/buildlog-template
@@ -33,6 +33,7 @@ Requires-Dist: black>=24.0.0; extra == 'dev'
  Requires-Dist: flake8>=7.0.0; extra == 'dev'
  Requires-Dist: isort>=5.13.0; extra == 'dev'
  Requires-Dist: mypy>=1.8.0; extra == 'dev'
+ Requires-Dist: pre-commit>=3.6.0; extra == 'dev'
  Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
  Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
  Requires-Dist: pytest>=7.0.0; extra == 'dev'
@@ -59,7 +60,7 @@ Description-Content-Type: text/markdown

  **Capture your work as publishable content. Include the fuckups.**

- <img src="assets/hero3.png" alt="Chaos to Order - buildlog transforms messy development sessions into structured knowledge" width="800"/>
+ <img src="assets/hero-notebook.png" alt="buildlog - Engineering Notebook for AI-Assisted Development" width="800"/>

  [Quick Start](#-quick-start) · [The Pipeline](#-the-pipeline) · [Commands](#-commands) · [Philosophy](#-philosophy)

@@ -1,8 +1,9 @@
  buildlog/__init__.py,sha256=FrgjyZhC19YyB40rOXHJWTA4xKWx2Yn2heIhVraaZ7A,90
  buildlog/cli.py,sha256=cmg77_RVJx8mdtStApS1KXxBUUB8Id6psHZjtHo33iE,14350
+ buildlog/confidence.py,sha256=EOkPxIH1_y7k6B3Hl7Wn0iR2qK_lumvOyyyqUdafXVY,9382
  buildlog/distill.py,sha256=fqXW_YyBFIFhwIWhnR-TQ7U65gypqG-mcAzNBr-qaag,11262
  buildlog/embeddings.py,sha256=vPydWjJVkYp172zFou-lJ737qsu6vRMQAMs143RGIpA,12364
- buildlog/skills.py,sha256=gu26MuIBgo-Jm6GQcVvbXHmV6MNp-HX3h7WC5buiyJ8,20686
+ buildlog/skills.py,sha256=ZR3cTn19WCCB2DjDRN2jyJOhXs7FazDkulRXYAZnquU,24757
  buildlog/stats.py,sha256=2WdHdmzUNGobtWngmm9nA_UmqM7DQeAnZL8_rLQN8aw,13256
  buildlog/core/__init__.py,sha256=07N1gRiPQQTBtLp9rsEErh39sgXWZSlEKWBn708SoQk,412
  buildlog/core/operations.py,sha256=o01z2Sy0fgiBK6Z90Lkg6ACoqihH3-HC-hkPBSdj9mA,10656
@@ -15,15 +16,15 @@ buildlog/render/claude_md.py,sha256=Z_E6MbJyVM_hJSoB4KL2rvbt5UEQHekTpJijj106lsI,
  buildlog/render/settings_json.py,sha256=4DS5OWksPrFCa7MIgWIu0t4rxYmItpMdGfTqMX3aMNs,2495
  buildlog/render/skill.py,sha256=_7umIS1Ms1oQ2_PopYueFjX41nMq1p28yJp6DhXFdgU,5981
  buildlog/render/tracking.py,sha256=6O0RIU-1gjVG-_S5dmXLz6RCMsQoHOR2u5___UpqXEo,1294
- buildlog-0.2.0.data/data/share/buildlog/copier.yml,sha256=A-1JKV59kOe0BQosGUBgRCg7iQozP_qyA3zfoHwpBKY,927
- buildlog-0.2.0.data/data/share/buildlog/post_gen.py,sha256=ffVG-1MMXbffKT8OMNvaQmyVDcBjwD8qTYpCaoyyZAQ,1778
- buildlog-0.2.0.data/data/share/buildlog/template/buildlog/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- buildlog-0.2.0.data/data/share/buildlog/template/buildlog/2026-01-01-example.md,sha256=7x9sKmydfmfKyNz9hV7MtYnQJuBwbxNanbPOcpQDDZQ,7040
- buildlog-0.2.0.data/data/share/buildlog/template/buildlog/BUILDLOG_SYSTEM.md,sha256=osclytWwl5jUiTgSpuT4cT3h3oPvCkZ5GPCnFuJZNcY,3802
- buildlog-0.2.0.data/data/share/buildlog/template/buildlog/_TEMPLATE.md,sha256=CUvxgcx1-9XT_EdQ8e_vnuPq_h-u1uhXJgForJU2Pso,2932
- buildlog-0.2.0.data/data/share/buildlog/template/buildlog/assets/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- buildlog-0.2.0.dist-info/METADATA,sha256=pRmXAYYlZhVCwjRXdbTQdtmf8Bz2KijyBGHn65hPYKQ,24161
- buildlog-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- buildlog-0.2.0.dist-info/entry_points.txt,sha256=BMFclPOomp_sgaa0OqBg6LfqCMlqzjZV88ww5TrPPoo,87
- buildlog-0.2.0.dist-info/licenses/LICENSE,sha256=fAgt-akug9nAwIj6M-SIf8u3ck-T7pJTwfmy9vWYASk,1074
- buildlog-0.2.0.dist-info/RECORD,,
+ buildlog-0.3.0.data/data/share/buildlog/copier.yml,sha256=A-1JKV59kOe0BQosGUBgRCg7iQozP_qyA3zfoHwpBKY,927
+ buildlog-0.3.0.data/data/share/buildlog/post_gen.py,sha256=XFlo40LuPpAsBhIRRRtHqvU3_5POss4L401hp35ijhw,1744
+ buildlog-0.3.0.data/data/share/buildlog/template/buildlog/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ buildlog-0.3.0.data/data/share/buildlog/template/buildlog/2026-01-01-example.md,sha256=7x9sKmydfmfKyNz9hV7MtYnQJuBwbxNanbPOcpQDDZQ,7040
+ buildlog-0.3.0.data/data/share/buildlog/template/buildlog/BUILDLOG_SYSTEM.md,sha256=osclytWwl5jUiTgSpuT4cT3h3oPvCkZ5GPCnFuJZNcY,3802
+ buildlog-0.3.0.data/data/share/buildlog/template/buildlog/_TEMPLATE.md,sha256=CUvxgcx1-9XT_EdQ8e_vnuPq_h-u1uhXJgForJU2Pso,2932
+ buildlog-0.3.0.data/data/share/buildlog/template/buildlog/assets/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ buildlog-0.3.0.dist-info/METADATA,sha256=v2RWbkwAEgpZtshu6jiqr5X9cV713Ml_SGhhVywmLLA,24188
+ buildlog-0.3.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ buildlog-0.3.0.dist-info/entry_points.txt,sha256=BMFclPOomp_sgaa0OqBg6LfqCMlqzjZV88ww5TrPPoo,87
+ buildlog-0.3.0.dist-info/licenses/LICENSE,sha256=fAgt-akug9nAwIj6M-SIf8u3ck-T7pJTwfmy9vWYASk,1074
+ buildlog-0.3.0.dist-info/RECORD,,