agmem 0.1.1-py3-none-any.whl → 0.1.2-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (80)
  1. {agmem-0.1.1.dist-info → agmem-0.1.2.dist-info}/METADATA +20 -3
  2. agmem-0.1.2.dist-info/RECORD +86 -0
  3. memvcs/__init__.py +1 -1
  4. memvcs/cli.py +35 -31
  5. memvcs/commands/__init__.py +9 -9
  6. memvcs/commands/add.py +77 -76
  7. memvcs/commands/blame.py +46 -53
  8. memvcs/commands/branch.py +13 -33
  9. memvcs/commands/checkout.py +27 -32
  10. memvcs/commands/clean.py +18 -23
  11. memvcs/commands/clone.py +4 -1
  12. memvcs/commands/commit.py +40 -39
  13. memvcs/commands/daemon.py +81 -76
  14. memvcs/commands/decay.py +77 -0
  15. memvcs/commands/diff.py +56 -57
  16. memvcs/commands/distill.py +74 -0
  17. memvcs/commands/fsck.py +55 -61
  18. memvcs/commands/garden.py +28 -37
  19. memvcs/commands/graph.py +41 -48
  20. memvcs/commands/init.py +16 -24
  21. memvcs/commands/log.py +25 -40
  22. memvcs/commands/merge.py +16 -28
  23. memvcs/commands/pack.py +129 -0
  24. memvcs/commands/pull.py +4 -1
  25. memvcs/commands/push.py +4 -2
  26. memvcs/commands/recall.py +145 -0
  27. memvcs/commands/reflog.py +13 -22
  28. memvcs/commands/remote.py +1 -0
  29. memvcs/commands/repair.py +66 -0
  30. memvcs/commands/reset.py +23 -33
  31. memvcs/commands/resurrect.py +82 -0
  32. memvcs/commands/search.py +3 -4
  33. memvcs/commands/serve.py +2 -1
  34. memvcs/commands/show.py +66 -36
  35. memvcs/commands/stash.py +34 -34
  36. memvcs/commands/status.py +27 -35
  37. memvcs/commands/tag.py +23 -47
  38. memvcs/commands/test.py +30 -44
  39. memvcs/commands/timeline.py +111 -0
  40. memvcs/commands/tree.py +26 -27
  41. memvcs/commands/verify.py +59 -0
  42. memvcs/commands/when.py +115 -0
  43. memvcs/core/access_index.py +167 -0
  44. memvcs/core/config_loader.py +3 -1
  45. memvcs/core/consistency.py +214 -0
  46. memvcs/core/decay.py +185 -0
  47. memvcs/core/diff.py +158 -143
  48. memvcs/core/distiller.py +277 -0
  49. memvcs/core/gardener.py +164 -132
  50. memvcs/core/hooks.py +48 -14
  51. memvcs/core/knowledge_graph.py +134 -138
  52. memvcs/core/merge.py +248 -171
  53. memvcs/core/objects.py +95 -96
  54. memvcs/core/pii_scanner.py +147 -146
  55. memvcs/core/refs.py +132 -115
  56. memvcs/core/repository.py +174 -164
  57. memvcs/core/schema.py +155 -113
  58. memvcs/core/staging.py +60 -65
  59. memvcs/core/storage/__init__.py +20 -18
  60. memvcs/core/storage/base.py +74 -70
  61. memvcs/core/storage/gcs.py +70 -68
  62. memvcs/core/storage/local.py +42 -40
  63. memvcs/core/storage/s3.py +105 -110
  64. memvcs/core/temporal_index.py +112 -0
  65. memvcs/core/test_runner.py +101 -93
  66. memvcs/core/vector_store.py +41 -35
  67. memvcs/integrations/mcp_server.py +1 -3
  68. memvcs/integrations/web_ui/server.py +25 -26
  69. memvcs/retrieval/__init__.py +22 -0
  70. memvcs/retrieval/base.py +54 -0
  71. memvcs/retrieval/pack.py +128 -0
  72. memvcs/retrieval/recaller.py +105 -0
  73. memvcs/retrieval/strategies.py +314 -0
  74. memvcs/utils/__init__.py +3 -3
  75. memvcs/utils/helpers.py +52 -52
  76. agmem-0.1.1.dist-info/RECORD +0 -67
  77. {agmem-0.1.1.dist-info → agmem-0.1.2.dist-info}/WHEEL +0 -0
  78. {agmem-0.1.1.dist-info → agmem-0.1.2.dist-info}/entry_points.txt +0 -0
  79. {agmem-0.1.1.dist-info → agmem-0.1.2.dist-info}/licenses/LICENSE +0 -0
  80. {agmem-0.1.1.dist-info → agmem-0.1.2.dist-info}/top_level.txt +0 -0
memvcs/core/storage/s3.py CHANGED
@@ -13,6 +13,7 @@ from datetime import datetime
 try:
     import boto3
     from botocore.exceptions import ClientError
+
     BOTO3_AVAILABLE = True
 except ImportError:
     BOTO3_AVAILABLE = False
@@ -26,6 +27,7 @@ def _apply_s3_config(kwargs: Dict[str, Any], config: Optional[Dict[str, Any]]) -
         return
     try:
         from memvcs.core.config_loader import get_s3_options_from_config
+
         opts = get_s3_options_from_config(config)
         for key in ("region", "endpoint_url", "lock_table"):
             if opts.get(key) is not None:
@@ -39,7 +41,7 @@ def _apply_s3_config(kwargs: Dict[str, Any], config: Optional[Dict[str, Any]]) -
 
 class S3StorageAdapter(StorageAdapter):
     """Storage adapter for S3 and S3-compatible storage (MinIO, etc.)."""
-
+
     def __init__(
         self,
         bucket: str,
@@ -48,11 +50,11 @@ class S3StorageAdapter(StorageAdapter):
         endpoint_url: Optional[str] = None,
         access_key: Optional[str] = None,
         secret_key: Optional[str] = None,
-        lock_table: Optional[str] = None
+        lock_table: Optional[str] = None,
     ):
         """
         Initialize S3 storage adapter.
-
+
         Args:
             bucket: S3 bucket name
             prefix: Key prefix for all operations
@@ -63,54 +65,56 @@ class S3StorageAdapter(StorageAdapter):
             lock_table: DynamoDB table for distributed locks (optional)
         """
         if not BOTO3_AVAILABLE:
-            raise ImportError("boto3 is required for S3 storage. Install with: pip install agmem[cloud]")
-
+            raise ImportError(
+                "boto3 is required for S3 storage. Install with: pip install agmem[cloud]"
+            )
+
         self.bucket = bucket
-        self.prefix = prefix.strip('/')
+        self.prefix = prefix.strip("/")
         self.lock_table = lock_table
         self._lock_id = str(uuid.uuid4())  # Unique ID for this instance
-
+
         # Build S3 client
         client_kwargs = {}
         if region:
-            client_kwargs['region_name'] = region
+            client_kwargs["region_name"] = region
         if endpoint_url:
-            client_kwargs['endpoint_url'] = endpoint_url
+            client_kwargs["endpoint_url"] = endpoint_url
         if access_key and secret_key:
-            client_kwargs['aws_access_key_id'] = access_key
-            client_kwargs['aws_secret_access_key'] = secret_key
-
-        self.s3 = boto3.client('s3', **client_kwargs)
-
+            client_kwargs["aws_access_key_id"] = access_key
+            client_kwargs["aws_secret_access_key"] = secret_key
+
+        self.s3 = boto3.client("s3", **client_kwargs)
+
         # DynamoDB for locks (optional)
         if lock_table:
-            self.dynamodb = boto3.client('dynamodb', **client_kwargs)
+            self.dynamodb = boto3.client("dynamodb", **client_kwargs)
         else:
             self.dynamodb = None
-
+
     @classmethod
-    def from_url(cls, url: str, config: Optional[Dict[str, Any]] = None) -> 'S3StorageAdapter':
+    def from_url(cls, url: str, config: Optional[Dict[str, Any]] = None) -> "S3StorageAdapter":
         """
         Create adapter from S3 URL. Optional config supplies region, endpoint,
         and env var names for credentials; credentials are resolved from env only.
-
+
         Args:
             url: S3 URL (s3://bucket/prefix)
             config: Optional agmem config dict (cloud.s3); credentials from env vars
-
+
         Returns:
             S3StorageAdapter instance
         """
-        if not url.startswith('s3://'):
+        if not url.startswith("s3://"):
             raise ValueError(f"Invalid S3 URL: {url}")
         path = url[5:]  # Remove 's3://'
-        parts = path.split('/', 1)
+        parts = path.split("/", 1)
         bucket = parts[0]
         prefix = parts[1] if len(parts) > 1 else ""
         kwargs: Dict[str, Any] = {"bucket": bucket, "prefix": prefix}
         _apply_s3_config(kwargs, config)
         return cls(**kwargs)
-
+
     def _key(self, path: str) -> str:
         """Convert relative path to S3 key."""
         if not path:
@@ -118,24 +122,24 @@ class S3StorageAdapter(StorageAdapter):
         if self.prefix:
             return f"{self.prefix}/{path}"
         return path
-
+
     def _path(self, key: str) -> str:
         """Convert S3 key to relative path."""
-        if self.prefix and key.startswith(self.prefix + '/'):
-            return key[len(self.prefix) + 1:]
+        if self.prefix and key.startswith(self.prefix + "/"):
+            return key[len(self.prefix) + 1 :]
         return key
-
+
     def read_file(self, path: str) -> bytes:
         """Read a file's contents from S3."""
         key = self._key(path)
         try:
             response = self.s3.get_object(Bucket=self.bucket, Key=key)
-            return response['Body'].read()
+            return response["Body"].read()
         except ClientError as e:
-            if e.response['Error']['Code'] == 'NoSuchKey':
+            if e.response["Error"]["Code"] == "NoSuchKey":
                 raise StorageError(f"File not found: {path}")
             raise StorageError(f"Error reading {path}: {e}")
-
+
     def write_file(self, path: str, data: bytes) -> None:
         """Write data to S3."""
         key = self._key(path)
@@ -143,7 +147,7 @@ class S3StorageAdapter(StorageAdapter):
             self.s3.put_object(Bucket=self.bucket, Key=key, Body=data)
         except ClientError as e:
             raise StorageError(f"Error writing {path}: {e}")
-
+
     def exists(self, path: str) -> bool:
         """Check if a key exists in S3."""
         key = self._key(path)
@@ -152,13 +156,9 @@ class S3StorageAdapter(StorageAdapter):
             return True
         except ClientError:
             # Check if it's a "directory" (has keys with this prefix)
-            response = self.s3.list_objects_v2(
-                Bucket=self.bucket,
-                Prefix=key + '/',
-                MaxKeys=1
-            )
-            return response.get('KeyCount', 0) > 0
-
+            response = self.s3.list_objects_v2(Bucket=self.bucket, Prefix=key + "/", MaxKeys=1)
+            return response.get("KeyCount", 0) > 0
+
     def delete(self, path: str) -> bool:
         """Delete an object from S3."""
         key = self._key(path)
@@ -167,202 +167,197 @@ class S3StorageAdapter(StorageAdapter):
             return True
         except ClientError:
             return False
-
+
     def list_dir(self, path: str = "") -> List[FileInfo]:
         """List contents of a "directory" in S3."""
         prefix = self._key(path)
-        if prefix and not prefix.endswith('/'):
-            prefix += '/'
-
+        if prefix and not prefix.endswith("/"):
+            prefix += "/"
+
         result = []
         seen_dirs = set()
-
+
         try:
-            paginator = self.s3.get_paginator('list_objects_v2')
-
-            for page in paginator.paginate(Bucket=self.bucket, Prefix=prefix, Delimiter='/'):
+            paginator = self.s3.get_paginator("list_objects_v2")
+
+            for page in paginator.paginate(Bucket=self.bucket, Prefix=prefix, Delimiter="/"):
                 # Add "directories" (common prefixes)
-                for cp in page.get('CommonPrefixes', []):
-                    dir_prefix = cp['Prefix'].rstrip('/')
-                    dir_name = dir_prefix.split('/')[-1]
+                for cp in page.get("CommonPrefixes", []):
+                    dir_prefix = cp["Prefix"].rstrip("/")
+                    dir_name = dir_prefix.split("/")[-1]
                     if dir_name not in seen_dirs:
                         seen_dirs.add(dir_name)
-                        result.append(FileInfo(
-                            path=self._path(dir_prefix),
-                            size=0,
-                            is_dir=True
-                        ))
-
+                        result.append(FileInfo(path=self._path(dir_prefix), size=0, is_dir=True))
+
                 # Add files
-                for obj in page.get('Contents', []):
-                    key = obj['Key']
+                for obj in page.get("Contents", []):
+                    key = obj["Key"]
                     if key == prefix:
                         continue  # Skip the prefix itself
-
-                    result.append(FileInfo(
-                        path=self._path(key),
-                        size=obj['Size'],
-                        modified=obj['LastModified'].isoformat(),
-                        is_dir=False
-                    ))
-
+
+                    result.append(
+                        FileInfo(
+                            path=self._path(key),
+                            size=obj["Size"],
+                            modified=obj["LastModified"].isoformat(),
+                            is_dir=False,
+                        )
+                    )
+
         except ClientError as e:
             raise StorageError(f"Error listing {path}: {e}")
-
+
         return result
-
+
     def makedirs(self, path: str) -> None:
         """
         Create a "directory" in S3.
-
+
         S3 doesn't have real directories, so this is a no-op.
         Directories are created implicitly when objects are written.
         """
         pass
-
+
     def is_dir(self, path: str) -> bool:
         """Check if path is a "directory" in S3."""
         key = self._key(path)
         if not key:
             return True  # Root is always a directory
-
+
         # Check if there are any keys with this prefix
-        response = self.s3.list_objects_v2(
-            Bucket=self.bucket,
-            Prefix=key + '/',
-            MaxKeys=1
-        )
-        return response.get('KeyCount', 0) > 0
-
+        response = self.s3.list_objects_v2(Bucket=self.bucket, Prefix=key + "/", MaxKeys=1)
+        return response.get("KeyCount", 0) > 0
+
     def acquire_lock(self, lock_name: str, timeout: int = 30) -> bool:
         """
         Acquire a distributed lock.
-
+
         Uses DynamoDB if configured, otherwise uses S3 conditional writes.
         """
         if self.dynamodb and self.lock_table:
             return self._acquire_dynamodb_lock(lock_name, timeout)
         else:
             return self._acquire_s3_lock(lock_name, timeout)
-
+
     def _acquire_dynamodb_lock(self, lock_name: str, timeout: int) -> bool:
         """Acquire lock using DynamoDB."""
         start_time = time.time()
         lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
-
+
         while True:
             try:
                 # Try to create lock item with conditional write
                 self.dynamodb.put_item(
                     TableName=self.lock_table,
                     Item={
-                        'LockKey': {'S': lock_key},
-                        'LockId': {'S': self._lock_id},
-                        'Timestamp': {'N': str(int(time.time()))},
-                        'TTL': {'N': str(int(time.time()) + 300)}  # 5 min TTL
+                        "LockKey": {"S": lock_key},
+                        "LockId": {"S": self._lock_id},
+                        "Timestamp": {"N": str(int(time.time()))},
+                        "TTL": {"N": str(int(time.time()) + 300)},  # 5 min TTL
                     },
-                    ConditionExpression='attribute_not_exists(LockKey)'
+                    ConditionExpression="attribute_not_exists(LockKey)",
                 )
                 return True
-
+
             except ClientError as e:
-                if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
+                if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
                     # Lock exists, check if it's stale
                     if time.time() - start_time >= timeout:
                         raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
                     time.sleep(0.5)
                 else:
                     raise StorageError(f"Error acquiring lock: {e}")
-
+
     def _acquire_s3_lock(self, lock_name: str, timeout: int) -> bool:
         """Acquire lock using S3 conditional writes."""
         start_time = time.time()
         lock_key = self._key(f".locks/{lock_name}.lock")
-
+
         while True:
             try:
                 # Try to create lock file only if it doesn't exist
                 lock_data = f"{self._lock_id}:{int(time.time())}".encode()
-
+
                 # Check if lock exists and is not stale (> 5 minutes old)
                 try:
                     response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
-                    existing = response['Body'].read().decode()
-                    _, ts = existing.split(':')
+                    existing = response["Body"].read().decode()
+                    _, ts = existing.split(":")
                     if int(time.time()) - int(ts) < 300:  # Lock is fresh
                         if time.time() - start_time >= timeout:
-                            raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
+                            raise LockError(
+                                f"Could not acquire lock '{lock_name}' within {timeout}s"
+                            )
                         time.sleep(0.5)
                         continue
                 except ClientError:
                     pass  # Lock doesn't exist
-
+
                 # Create or overwrite stale lock
                 self.s3.put_object(Bucket=self.bucket, Key=lock_key, Body=lock_data)
-
+
                 # Verify we own the lock
                 time.sleep(0.1)
                 response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
-                if response['Body'].read().decode().startswith(self._lock_id):
+                if response["Body"].read().decode().startswith(self._lock_id):
                     return True
-
+
                 # Someone else got it
                 if time.time() - start_time >= timeout:
                     raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
                 time.sleep(0.5)
-
+
             except ClientError as e:
                 raise StorageError(f"Error acquiring lock: {e}")
-
+
     def release_lock(self, lock_name: str) -> None:
         """Release a distributed lock."""
         if self.dynamodb and self.lock_table:
             self._release_dynamodb_lock(lock_name)
         else:
             self._release_s3_lock(lock_name)
-
+
     def _release_dynamodb_lock(self, lock_name: str) -> None:
         """Release DynamoDB lock."""
         lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
         try:
             self.dynamodb.delete_item(
                 TableName=self.lock_table,
-                Key={'LockKey': {'S': lock_key}},
-                ConditionExpression='LockId = :id',
-                ExpressionAttributeValues={':id': {'S': self._lock_id}}
+                Key={"LockKey": {"S": lock_key}},
+                ConditionExpression="LockId = :id",
+                ExpressionAttributeValues={":id": {"S": self._lock_id}},
             )
         except ClientError:
             pass  # Lock may have expired or been released
-
+
     def _release_s3_lock(self, lock_name: str) -> None:
         """Release S3 lock."""
         lock_key = self._key(f".locks/{lock_name}.lock")
         try:
             # Only delete if we own the lock
             response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
-            if response['Body'].read().decode().startswith(self._lock_id):
+            if response["Body"].read().decode().startswith(self._lock_id):
                 self.s3.delete_object(Bucket=self.bucket, Key=lock_key)
         except ClientError:
             pass
-
+
     def is_locked(self, lock_name: str) -> bool:
         """Check if a lock is currently held."""
         if self.dynamodb and self.lock_table:
             lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
             try:
                 response = self.dynamodb.get_item(
-                    TableName=self.lock_table,
-                    Key={'LockKey': {'S': lock_key}}
+                    TableName=self.lock_table, Key={"LockKey": {"S": lock_key}}
                 )
-                return 'Item' in response
+                return "Item" in response
             except ClientError:
                 return False
         else:
             lock_key = self._key(f".locks/{lock_name}.lock")
             try:
                 response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
-                existing = response['Body'].read().decode()
-                _, ts = existing.split(':')
+                existing = response["Body"].read().decode()
+                _, ts = existing.split(":")
                 # Lock is valid if less than 5 minutes old
                 return int(time.time()) - int(ts) < 300
             except ClientError:
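
The s3.py changes above appear to be pure formatter output (double quotes, trailing commas, long lines wrapped, whitespace-only blank lines cleaned up); the adapter's behavior and lock API are unchanged. For orientation, a minimal usage sketch of that API, with a made-up bucket, prefix, and lock name:

# Hypothetical usage sketch, not part of the package diff. The bucket and
# lock name are placeholders; credentials come from the environment, per
# from_url's docstring above.
from memvcs.core.storage.s3 import S3StorageAdapter

# from_url() parses "s3://bucket/prefix" and applies optional cloud.s3 config.
store = S3StorageAdapter.from_url("s3://example-bucket/agent-memory")

# acquire_lock() retries for up to `timeout` seconds and raises LockError if
# the lock stays held; S3-backed locks go stale after 300s, so a crashed
# holder cannot block other writers forever.
if store.acquire_lock("refs", timeout=30):
    try:
        store.write_file("refs/heads/main", b"abc123\n")
        print(store.read_file("refs/heads/main").decode())
    finally:
        store.release_lock("refs")
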
memvcs/core/temporal_index.py ADDED
@@ -0,0 +1,112 @@
+"""
+Temporal index for agmem - maps timestamps to commits for time-travel queries.
+
+Builds index from reflog and commit objects; binary search for nearest commit at or before T.
+"""
+
+import bisect
+from datetime import datetime
+from pathlib import Path
+from typing import Optional, List, Tuple
+
+from .objects import Commit, ObjectStore
+
+
+def _parse_iso_timestamp(s: str) -> Optional[datetime]:
+    """Parse ISO 8601 timestamp string to datetime."""
+    s = s.strip()
+    if not s:
+        return None
+    try:
+        if s.endswith("Z"):
+            s = s[:-1] + "+00:00"
+        return datetime.fromisoformat(s)
+    except ValueError:
+        return None
+
+
+class TemporalIndex:
+    """Maps timestamps to commit hashes for temporal querying."""
+
+    def __init__(self, mem_dir: Path, object_store: ObjectStore):
+        self.mem_dir = Path(mem_dir)
+        self.object_store = object_store
+        self.refs = None  # Injected by caller
+
+    def _build_commit_timeline(self) -> List[Tuple[datetime, str]]:
+        """
+        Build sorted list of (timestamp, commit_hash) from reflog and all commits.
+
+        Walks the HEAD reflog and follows parent chains to collect all commits with timestamps.
+        """
+        from .refs import RefsManager
+
+        refs = RefsManager(self.mem_dir)
+        seen = set()
+        timeline: List[Tuple[datetime, str]] = []
+
+        # Collect from reflog first (recent history)
+        reflog = refs.get_reflog("HEAD", max_count=10000)
+        for entry in reflog:
+            h = entry.get("hash")
+            ts_str = entry.get("timestamp", "")
+            if h and ts_str and h not in seen:
+                dt = _parse_iso_timestamp(ts_str)
+                if dt:
+                    seen.add(h)
+                    timeline.append((dt, h))
+
+        # Also walk from HEAD and all branches to get full history
+        def walk_commits(commit_hash: str) -> None:
+            current = commit_hash
+            while current and current not in seen:
+                commit = Commit.load(self.object_store, current)
+                if not commit:
+                    break
+                seen.add(current)
+                dt = _parse_iso_timestamp(commit.timestamp)
+                if dt:
+                    timeline.append((dt, current))
+                if not commit.parents:
+                    break
+                current = commit.parents[0]
+
+        head = refs.get_head()
+        if head["type"] == "branch":
+            h = refs.get_branch_commit(head["value"])
+        else:
+            h = head.get("value")
+        if h:
+            walk_commits(h)
+
+        for branch in refs.list_branches():
+            bh = refs.get_branch_commit(branch)
+            if bh:
+                walk_commits(bh)
+
+        timeline.sort(key=lambda x: x[0])
+        return timeline
+
+    def resolve_at(self, timestamp_str: str) -> Optional[str]:
+        """
+        Resolve timestamp to nearest commit at or before that time.
+
+        Args:
+            timestamp_str: ISO 8601 date or datetime (e.g., "2025-12-01", "2025-12-01T14:00:00")
+
+        Returns:
+            Commit hash or None if no commit found
+        """
+        dt = _parse_iso_timestamp(timestamp_str)
+        if not dt:
+            return None
+
+        timeline = self._build_commit_timeline()
+        if not timeline:
+            return None
+
+        timestamps = [t[0] for t in timeline]
+        idx = bisect.bisect_right(timestamps, dt)
+        if idx == 0:
+            return None  # All commits are after the requested time
+        return timeline[idx - 1][1]
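
temporal_index.py is new in 0.1.2. Its resolve_at() reduces a time-travel query to a bisect.bisect_right over the sorted commit timeline. A standalone sketch of that lookup, using hypothetical commit hashes and timestamps:

import bisect
from datetime import datetime

# Hypothetical timeline, shaped like _build_commit_timeline()'s return value:
# (timestamp, commit_hash) tuples sorted ascending by timestamp.
timeline = [
    (datetime(2025, 11, 30, 9, 0), "a1f3"),
    (datetime(2025, 12, 1, 13, 59), "b2e4"),
    (datetime(2025, 12, 2, 8, 30), "c3d5"),
]
timestamps = [t for t, _ in timeline]

query = datetime.fromisoformat("2025-12-01T14:00:00")
idx = bisect.bisect_right(timestamps, query)  # index of first commit strictly after query
print(timeline[idx - 1][1] if idx > 0 else None)  # "b2e4": newest commit at or before query

bisect_right (rather than bisect_left) is what gives the "at or before" semantics: a commit stamped exactly at the query time still lands to the left of the insertion point and is returned.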