agmem 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. agmem-0.1.1.dist-info/METADATA +656 -0
  2. agmem-0.1.1.dist-info/RECORD +67 -0
  3. agmem-0.1.1.dist-info/WHEEL +5 -0
  4. agmem-0.1.1.dist-info/entry_points.txt +2 -0
  5. agmem-0.1.1.dist-info/licenses/LICENSE +21 -0
  6. agmem-0.1.1.dist-info/top_level.txt +1 -0
  7. memvcs/__init__.py +9 -0
  8. memvcs/cli.py +178 -0
  9. memvcs/commands/__init__.py +23 -0
  10. memvcs/commands/add.py +258 -0
  11. memvcs/commands/base.py +23 -0
  12. memvcs/commands/blame.py +169 -0
  13. memvcs/commands/branch.py +110 -0
  14. memvcs/commands/checkout.py +101 -0
  15. memvcs/commands/clean.py +76 -0
  16. memvcs/commands/clone.py +91 -0
  17. memvcs/commands/commit.py +174 -0
  18. memvcs/commands/daemon.py +267 -0
  19. memvcs/commands/diff.py +157 -0
  20. memvcs/commands/fsck.py +203 -0
  21. memvcs/commands/garden.py +107 -0
  22. memvcs/commands/graph.py +151 -0
  23. memvcs/commands/init.py +61 -0
  24. memvcs/commands/log.py +103 -0
  25. memvcs/commands/mcp.py +59 -0
  26. memvcs/commands/merge.py +88 -0
  27. memvcs/commands/pull.py +65 -0
  28. memvcs/commands/push.py +143 -0
  29. memvcs/commands/reflog.py +52 -0
  30. memvcs/commands/remote.py +51 -0
  31. memvcs/commands/reset.py +98 -0
  32. memvcs/commands/search.py +163 -0
  33. memvcs/commands/serve.py +54 -0
  34. memvcs/commands/show.py +125 -0
  35. memvcs/commands/stash.py +97 -0
  36. memvcs/commands/status.py +112 -0
  37. memvcs/commands/tag.py +117 -0
  38. memvcs/commands/test.py +132 -0
  39. memvcs/commands/tree.py +156 -0
  40. memvcs/core/__init__.py +21 -0
  41. memvcs/core/config_loader.py +245 -0
  42. memvcs/core/constants.py +12 -0
  43. memvcs/core/diff.py +380 -0
  44. memvcs/core/gardener.py +466 -0
  45. memvcs/core/hooks.py +151 -0
  46. memvcs/core/knowledge_graph.py +381 -0
  47. memvcs/core/merge.py +474 -0
  48. memvcs/core/objects.py +323 -0
  49. memvcs/core/pii_scanner.py +343 -0
  50. memvcs/core/refs.py +447 -0
  51. memvcs/core/remote.py +278 -0
  52. memvcs/core/repository.py +522 -0
  53. memvcs/core/schema.py +414 -0
  54. memvcs/core/staging.py +227 -0
  55. memvcs/core/storage/__init__.py +72 -0
  56. memvcs/core/storage/base.py +359 -0
  57. memvcs/core/storage/gcs.py +308 -0
  58. memvcs/core/storage/local.py +182 -0
  59. memvcs/core/storage/s3.py +369 -0
  60. memvcs/core/test_runner.py +371 -0
  61. memvcs/core/vector_store.py +313 -0
  62. memvcs/integrations/__init__.py +5 -0
  63. memvcs/integrations/mcp_server.py +267 -0
  64. memvcs/integrations/web_ui/__init__.py +1 -0
  65. memvcs/integrations/web_ui/server.py +352 -0
  66. memvcs/utils/__init__.py +9 -0
  67. memvcs/utils/helpers.py +178 -0
memvcs/core/storage/s3.py @@ -0,0 +1,369 @@
+ """
+ S3/MinIO storage adapter for agmem.
+
+ Supports Amazon S3, MinIO, and any S3-compatible storage.
+ Credentials are resolved from config via env var names only (never stored in config).
+ """
+
+ import time
+ import uuid
+ from typing import Any, Dict, List, Optional
+
+ try:
+     import boto3
+     from botocore.exceptions import ClientError
+     BOTO3_AVAILABLE = True
+ except ImportError:
+     BOTO3_AVAILABLE = False
+
+ from .base import StorageAdapter, StorageError, LockError, FileInfo
+
+
+ def _apply_s3_config(kwargs: Dict[str, Any], config: Optional[Dict[str, Any]]) -> None:
+     """Merge S3 options from agmem config into kwargs; credentials from env only."""
+     if not config:
+         return
+     try:
+         from memvcs.core.config_loader import get_s3_options_from_config
+         opts = get_s3_options_from_config(config)
+         for key in ("region", "endpoint_url", "lock_table"):
+             if opts.get(key) is not None:
+                 kwargs[key] = opts[key]
+         if opts.get("access_key") is not None and opts.get("secret_key") is not None:
+             kwargs["access_key"] = opts["access_key"]
+             kwargs["secret_key"] = opts["secret_key"]
+     except ImportError:
+         pass
+
+
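For orientation, a minimal sketch of how this helper is fed. The field names under "cloud.s3" are illustrative guesses inferred from the keys consumed above; the authoritative schema is get_s3_options_from_config in memvcs/core/config_loader.py, which resolves the actual secrets from the named environment variables:

    # Hypothetical config shape; key names are assumptions, not the package's schema.
    config = {
        "cloud": {
            "s3": {
                "region": "us-east-1",
                "endpoint_url": "http://localhost:9000",  # e.g. MinIO
                "lock_table": "agmem-locks",
                # Names of env vars holding credentials -- never the secrets themselves.
                "access_key_env": "AGMEM_S3_ACCESS_KEY",
                "secret_key_env": "AGMEM_S3_SECRET_KEY",
            }
        }
    }

    kwargs = {"bucket": "my-bucket", "prefix": "memories"}
    _apply_s3_config(kwargs, config)
    adapter = S3StorageAdapter(**kwargs)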
+ class S3StorageAdapter(StorageAdapter):
+     """Storage adapter for S3 and S3-compatible storage (MinIO, etc.)."""
+
+     def __init__(
+         self,
+         bucket: str,
+         prefix: str = "",
+         region: Optional[str] = None,
+         endpoint_url: Optional[str] = None,
+         access_key: Optional[str] = None,
+         secret_key: Optional[str] = None,
+         lock_table: Optional[str] = None,
+     ):
+         """
+         Initialize S3 storage adapter.
+
+         Args:
+             bucket: S3 bucket name
+             prefix: Key prefix for all operations
+             region: AWS region
+             endpoint_url: Custom endpoint URL (for MinIO)
+             access_key: AWS access key (optional; falls back to boto3's default credential chain)
+             secret_key: AWS secret key (optional, paired with access_key)
+             lock_table: DynamoDB table for distributed locks (optional)
+         """
+         if not BOTO3_AVAILABLE:
+             raise ImportError("boto3 is required for S3 storage. Install with: pip install agmem[cloud]")
+
+         self.bucket = bucket
+         self.prefix = prefix.strip('/')
+         self.lock_table = lock_table
+         self._lock_id = str(uuid.uuid4())  # Unique ID for this adapter instance
+
+         # Build S3 client
+         client_kwargs = {}
+         if region:
+             client_kwargs['region_name'] = region
+         if endpoint_url:
+             client_kwargs['endpoint_url'] = endpoint_url
+         if access_key and secret_key:
+             client_kwargs['aws_access_key_id'] = access_key
+             client_kwargs['aws_secret_access_key'] = secret_key
+
+         self.s3 = boto3.client('s3', **client_kwargs)
+
+         # DynamoDB client for distributed locks (optional)
+         if lock_table:
+             self.dynamodb = boto3.client('dynamodb', **client_kwargs)
+         else:
+             self.dynamodb = None
+
+     @classmethod
+     def from_url(cls, url: str, config: Optional[Dict[str, Any]] = None) -> 'S3StorageAdapter':
+         """
+         Create adapter from S3 URL. Optional config supplies region, endpoint,
+         and env var names for credentials; credentials are resolved from env only.
+
+         Args:
+             url: S3 URL (s3://bucket/prefix)
+             config: Optional agmem config dict (cloud.s3); credentials from env vars
+
+         Returns:
+             S3StorageAdapter instance
+         """
+         if not url.startswith('s3://'):
+             raise ValueError(f"Invalid S3 URL: {url}")
+         path = url[5:]  # Remove 's3://'
+         parts = path.split('/', 1)
+         bucket = parts[0]
+         prefix = parts[1] if len(parts) > 1 else ""
+         kwargs: Dict[str, Any] = {"bucket": bucket, "prefix": prefix}
+         _apply_s3_config(kwargs, config)
+         return cls(**kwargs)
+
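A minimal usage sketch, assuming a bucket and paths that are purely illustrative (with no config, boto3's default credential chain applies):

    adapter = S3StorageAdapter.from_url("s3://my-bucket/agents/alice")
    adapter.write_file("refs/heads/main", b"abc123\n")
    assert adapter.read_file("refs/heads/main") == b"abc123\n"
    assert adapter.exists("refs/heads/main")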
+     def _key(self, path: str) -> str:
+         """Convert relative path to S3 key."""
+         if not path:
+             return self.prefix
+         if self.prefix:
+             return f"{self.prefix}/{path}"
+         return path
+
+     def _path(self, key: str) -> str:
+         """Convert S3 key to relative path."""
+         if self.prefix and key.startswith(self.prefix + '/'):
+             return key[len(self.prefix) + 1:]
+         return key
+
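The two helpers are inverses under a fixed prefix; a sketch with an illustrative prefix (they are private, so this is for understanding, not API use):

    adapter = S3StorageAdapter(bucket="my-bucket", prefix="agents/alice")
    assert adapter._key("objects/ab/cd") == "agents/alice/objects/ab/cd"
    assert adapter._path("agents/alice/objects/ab/cd") == "objects/ab/cd"
    assert adapter._key("") == "agents/alice"  # empty path maps to the bare prefix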
+     def read_file(self, path: str) -> bytes:
+         """Read a file's contents from S3."""
+         key = self._key(path)
+         try:
+             response = self.s3.get_object(Bucket=self.bucket, Key=key)
+             return response['Body'].read()
+         except ClientError as e:
+             if e.response['Error']['Code'] == 'NoSuchKey':
+                 raise StorageError(f"File not found: {path}")
+             raise StorageError(f"Error reading {path}: {e}")
+
+     def write_file(self, path: str, data: bytes) -> None:
+         """Write data to S3."""
+         key = self._key(path)
+         try:
+             self.s3.put_object(Bucket=self.bucket, Key=key, Body=data)
+         except ClientError as e:
+             raise StorageError(f"Error writing {path}: {e}")
+
+     def exists(self, path: str) -> bool:
+         """Check if a key exists in S3."""
+         key = self._key(path)
+         try:
+             self.s3.head_object(Bucket=self.bucket, Key=key)
+             return True
+         except ClientError:
+             # Check if it's a "directory" (has keys with this prefix)
+             response = self.s3.list_objects_v2(
+                 Bucket=self.bucket,
+                 Prefix=key + '/',
+                 MaxKeys=1
+             )
+             return response.get('KeyCount', 0) > 0
+
+     def delete(self, path: str) -> bool:
+         """Delete an object from S3."""
+         key = self._key(path)
+         try:
+             self.s3.delete_object(Bucket=self.bucket, Key=key)
+             return True
+         except ClientError:
+             return False
+
+     def list_dir(self, path: str = "") -> List[FileInfo]:
+         """List contents of a "directory" in S3."""
+         prefix = self._key(path)
+         if prefix and not prefix.endswith('/'):
+             prefix += '/'
+
+         result = []
+         seen_dirs = set()
+
+         try:
+             paginator = self.s3.get_paginator('list_objects_v2')
+
+             for page in paginator.paginate(Bucket=self.bucket, Prefix=prefix, Delimiter='/'):
+                 # Add "directories" (common prefixes)
+                 for cp in page.get('CommonPrefixes', []):
+                     dir_prefix = cp['Prefix'].rstrip('/')
+                     dir_name = dir_prefix.split('/')[-1]
+                     if dir_name not in seen_dirs:
+                         seen_dirs.add(dir_name)
+                         result.append(FileInfo(
+                             path=self._path(dir_prefix),
+                             size=0,
+                             is_dir=True
+                         ))
+
+                 # Add files
+                 for obj in page.get('Contents', []):
+                     key = obj['Key']
+                     if key == prefix:
+                         continue  # Skip the prefix itself
+
+                     result.append(FileInfo(
+                         path=self._path(key),
+                         size=obj['Size'],
+                         modified=obj['LastModified'].isoformat(),
+                         is_dir=False
+                     ))
+
+         except ClientError as e:
+             raise StorageError(f"Error listing {path}: {e}")
+
+         return result
+
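Listing a repository root might then print something like the following (entries illustrative; directories come from CommonPrefixes with size 0, files from Contents):

    for info in adapter.list_dir(""):
        print(f"{info.path}{'/' if info.is_dir else ''}\t{info.size}")
    # objects/   0
    # refs/      0
    # HEAD       23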
+     def makedirs(self, path: str) -> None:
+         """
+         Create a "directory" in S3.
+
+         S3 doesn't have real directories, so this is a no-op.
+         Directories are created implicitly when objects are written.
+         """
+         pass
+
+     def is_dir(self, path: str) -> bool:
+         """Check if path is a "directory" in S3."""
+         key = self._key(path)
+         if not key:
+             return True  # Root is always a directory
+
+         # Check if there are any keys with this prefix
+         response = self.s3.list_objects_v2(
+             Bucket=self.bucket,
+             Prefix=key + '/',
+             MaxKeys=1
+         )
+         return response.get('KeyCount', 0) > 0
+
+     def acquire_lock(self, lock_name: str, timeout: int = 30) -> bool:
+         """
+         Acquire a distributed lock.
+
+         Uses DynamoDB conditional writes if a lock table is configured;
+         otherwise falls back to a best-effort S3 lock object.
+         """
+         if self.dynamodb and self.lock_table:
+             return self._acquire_dynamodb_lock(lock_name, timeout)
+         else:
+             return self._acquire_s3_lock(lock_name, timeout)
+
+     def _acquire_dynamodb_lock(self, lock_name: str, timeout: int) -> bool:
+         """Acquire lock using a DynamoDB conditional write."""
+         start_time = time.time()
+         lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
+
+         while True:
+             try:
+                 # Try to create the lock item with a conditional write
+                 self.dynamodb.put_item(
+                     TableName=self.lock_table,
+                     Item={
+                         'LockKey': {'S': lock_key},
+                         'LockId': {'S': self._lock_id},
+                         'Timestamp': {'N': str(int(time.time()))},
+                         'TTL': {'N': str(int(time.time()) + 300)}  # 5 min TTL
+                     },
+                     ConditionExpression='attribute_not_exists(LockKey)'
+                 )
+                 return True
+
+             except ClientError as e:
+                 if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
+                     # Lock is held; stale entries are reaped by DynamoDB's TTL
+                     if time.time() - start_time >= timeout:
+                         raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
+                     time.sleep(0.5)
+                 else:
+                     raise StorageError(f"Error acquiring lock: {e}")
+
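The lock table itself is assumed to exist with a string partition key LockKey and TTL enabled on the TTL attribute. A one-time setup sketch using standard boto3 calls (table name illustrative):

    import boto3

    ddb = boto3.client("dynamodb", region_name="us-east-1")
    ddb.create_table(
        TableName="agmem-locks",
        AttributeDefinitions=[{"AttributeName": "LockKey", "AttributeType": "S"}],
        KeySchema=[{"AttributeName": "LockKey", "KeyType": "HASH"}],
        BillingMode="PAY_PER_REQUEST",
    )
    ddb.get_waiter("table_exists").wait(TableName="agmem-locks")
    ddb.update_time_to_live(
        TableName="agmem-locks",
        TimeToLiveSpecification={"Enabled": True, "AttributeName": "TTL"},
    )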
+     def _acquire_s3_lock(self, lock_name: str, timeout: int) -> bool:
+         """
+         Acquire lock using an S3 lock object (best effort).
+
+         S3 offers no conditional write here, so this writes the lock object
+         and reads it back to verify ownership.
+         """
+         start_time = time.time()
+         lock_key = self._key(f".locks/{lock_name}.lock")
+
+         while True:
+             try:
+                 lock_data = f"{self._lock_id}:{int(time.time())}".encode()
+
+                 # Check if lock exists and is not stale (> 5 minutes old)
+                 try:
+                     response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
+                     existing = response['Body'].read().decode()
+                     _, ts = existing.split(':')
+                     if int(time.time()) - int(ts) < 300:  # Lock is fresh
+                         if time.time() - start_time >= timeout:
+                             raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
+                         time.sleep(0.5)
+                         continue
+                 except ClientError:
+                     pass  # Lock doesn't exist
+
+                 # Create or overwrite stale lock
+                 self.s3.put_object(Bucket=self.bucket, Key=lock_key, Body=lock_data)
+
+                 # Verify we own the lock
+                 time.sleep(0.1)
+                 response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
+                 if response['Body'].read().decode().startswith(self._lock_id):
+                     return True
+
+                 # Someone else got it
+                 if time.time() - start_time >= timeout:
+                     raise LockError(f"Could not acquire lock '{lock_name}' within {timeout}s")
+                 time.sleep(0.5)
+
+             except ClientError as e:
+                 raise StorageError(f"Error acquiring lock: {e}")
+
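Either backend returns True on success and raises LockError once the timeout elapses, so callers pair acquisition with release in a try/finally. A sketch with an illustrative lock name:

    if adapter.acquire_lock("refs", timeout=10):
        try:
            head = adapter.read_file("refs/heads/main")
            adapter.write_file("refs/heads/main", head)  # ...mutate shared state...
        finally:
            adapter.release_lock("refs")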
+     def release_lock(self, lock_name: str) -> None:
+         """Release a distributed lock."""
+         if self.dynamodb and self.lock_table:
+             self._release_dynamodb_lock(lock_name)
+         else:
+             self._release_s3_lock(lock_name)
+
+     def _release_dynamodb_lock(self, lock_name: str) -> None:
+         """Release DynamoDB lock."""
+         lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
+         try:
+             self.dynamodb.delete_item(
+                 TableName=self.lock_table,
+                 Key={'LockKey': {'S': lock_key}},
+                 ConditionExpression='LockId = :id',
+                 ExpressionAttributeValues={':id': {'S': self._lock_id}}
+             )
+         except ClientError:
+             pass  # Lock may have expired or been released
+
+     def _release_s3_lock(self, lock_name: str) -> None:
+         """Release S3 lock."""
+         lock_key = self._key(f".locks/{lock_name}.lock")
+         try:
+             # Only delete if we own the lock
+             response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
+             if response['Body'].read().decode().startswith(self._lock_id):
+                 self.s3.delete_object(Bucket=self.bucket, Key=lock_key)
+         except ClientError:
+             pass
+
+     def is_locked(self, lock_name: str) -> bool:
+         """Check if a lock is currently held."""
+         if self.dynamodb and self.lock_table:
+             lock_key = f"{self.prefix}/{lock_name}" if self.prefix else lock_name
+             try:
+                 response = self.dynamodb.get_item(
+                     TableName=self.lock_table,
+                     Key={'LockKey': {'S': lock_key}}
+                 )
+                 return 'Item' in response
+             except ClientError:
+                 return False
+         else:
+             lock_key = self._key(f".locks/{lock_name}.lock")
+             try:
+                 response = self.s3.get_object(Bucket=self.bucket, Key=lock_key)
+                 existing = response['Body'].read().decode()
+                 _, ts = existing.split(':')
+                 # Lock is valid if less than 5 minutes old
+                 return int(time.time()) - int(ts) < 300
+             except ClientError:
+                 return False
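Putting it together against a local MinIO instance; endpoint and credentials below are MinIO's well-known defaults, shown purely for illustration (in agmem proper, credentials come from the environment via the config's env var names):

    adapter = S3StorageAdapter(
        bucket="agmem-test",
        prefix="memories",
        endpoint_url="http://localhost:9000",  # MinIO
        access_key="minioadmin",               # illustrative defaults; never hard-code real keys
        secret_key="minioadmin",
    )
    adapter.write_file("objects/ab/cd", b"blob")
    assert adapter.exists("objects/ab/cd")
    assert adapter.is_dir("objects")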