agmem 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
- {agmem-0.1.1.dist-info → agmem-0.1.3.dist-info}/METADATA +157 -16
- agmem-0.1.3.dist-info/RECORD +105 -0
- memvcs/__init__.py +1 -1
- memvcs/cli.py +45 -31
- memvcs/commands/__init__.py +9 -9
- memvcs/commands/add.py +83 -76
- memvcs/commands/audit.py +59 -0
- memvcs/commands/blame.py +46 -53
- memvcs/commands/branch.py +13 -33
- memvcs/commands/checkout.py +27 -32
- memvcs/commands/clean.py +18 -23
- memvcs/commands/clone.py +11 -1
- memvcs/commands/commit.py +40 -39
- memvcs/commands/daemon.py +109 -76
- memvcs/commands/decay.py +77 -0
- memvcs/commands/diff.py +56 -57
- memvcs/commands/distill.py +90 -0
- memvcs/commands/federated.py +53 -0
- memvcs/commands/fsck.py +86 -61
- memvcs/commands/garden.py +40 -35
- memvcs/commands/gc.py +51 -0
- memvcs/commands/graph.py +41 -48
- memvcs/commands/init.py +16 -24
- memvcs/commands/log.py +25 -40
- memvcs/commands/merge.py +69 -27
- memvcs/commands/pack.py +129 -0
- memvcs/commands/prove.py +66 -0
- memvcs/commands/pull.py +31 -1
- memvcs/commands/push.py +4 -2
- memvcs/commands/recall.py +145 -0
- memvcs/commands/reflog.py +13 -22
- memvcs/commands/remote.py +1 -0
- memvcs/commands/repair.py +66 -0
- memvcs/commands/reset.py +23 -33
- memvcs/commands/resolve.py +130 -0
- memvcs/commands/resurrect.py +82 -0
- memvcs/commands/search.py +3 -4
- memvcs/commands/serve.py +2 -1
- memvcs/commands/show.py +66 -36
- memvcs/commands/stash.py +34 -34
- memvcs/commands/status.py +27 -35
- memvcs/commands/tag.py +23 -47
- memvcs/commands/test.py +30 -44
- memvcs/commands/timeline.py +111 -0
- memvcs/commands/tree.py +26 -27
- memvcs/commands/verify.py +110 -0
- memvcs/commands/when.py +115 -0
- memvcs/core/access_index.py +167 -0
- memvcs/core/audit.py +124 -0
- memvcs/core/config_loader.py +3 -1
- memvcs/core/consistency.py +214 -0
- memvcs/core/crypto_verify.py +280 -0
- memvcs/core/decay.py +185 -0
- memvcs/core/diff.py +158 -143
- memvcs/core/distiller.py +277 -0
- memvcs/core/encryption.py +169 -0
- memvcs/core/federated.py +86 -0
- memvcs/core/gardener.py +176 -145
- memvcs/core/hooks.py +48 -14
- memvcs/core/ipfs_remote.py +39 -0
- memvcs/core/knowledge_graph.py +135 -138
- memvcs/core/llm/__init__.py +10 -0
- memvcs/core/llm/anthropic_provider.py +50 -0
- memvcs/core/llm/base.py +27 -0
- memvcs/core/llm/factory.py +30 -0
- memvcs/core/llm/openai_provider.py +36 -0
- memvcs/core/merge.py +260 -170
- memvcs/core/objects.py +110 -101
- memvcs/core/pack.py +92 -0
- memvcs/core/pii_scanner.py +147 -146
- memvcs/core/privacy_budget.py +63 -0
- memvcs/core/refs.py +132 -115
- memvcs/core/remote.py +38 -0
- memvcs/core/repository.py +254 -164
- memvcs/core/schema.py +155 -113
- memvcs/core/staging.py +60 -65
- memvcs/core/storage/__init__.py +20 -18
- memvcs/core/storage/base.py +74 -70
- memvcs/core/storage/gcs.py +70 -68
- memvcs/core/storage/local.py +42 -40
- memvcs/core/storage/s3.py +105 -110
- memvcs/core/temporal_index.py +121 -0
- memvcs/core/test_runner.py +101 -93
- memvcs/core/trust.py +103 -0
- memvcs/core/vector_store.py +56 -36
- memvcs/core/zk_proofs.py +26 -0
- memvcs/integrations/mcp_server.py +1 -3
- memvcs/integrations/web_ui/server.py +25 -26
- memvcs/retrieval/__init__.py +22 -0
- memvcs/retrieval/base.py +54 -0
- memvcs/retrieval/pack.py +128 -0
- memvcs/retrieval/recaller.py +105 -0
- memvcs/retrieval/strategies.py +314 -0
- memvcs/utils/__init__.py +3 -3
- memvcs/utils/helpers.py +52 -52
- agmem-0.1.1.dist-info/RECORD +0 -67
- {agmem-0.1.1.dist-info → agmem-0.1.3.dist-info}/WHEEL +0 -0
- {agmem-0.1.1.dist-info → agmem-0.1.3.dist-info}/entry_points.txt +0 -0
- {agmem-0.1.1.dist-info → agmem-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {agmem-0.1.1.dist-info → agmem-0.1.3.dist-info}/top_level.txt +0 -0
memvcs/core/merge.py
CHANGED
Changed regions, shown as they appear in 0.1.3:
@@ -18,24 +18,29 @@ from .schema import FrontmatterParser, FrontmatterData, compare_timestamps
class MergeStrategy(Enum):
    """Merge strategies for different memory types."""

    EPISODIC = "episodic"  # Append chronologically
    SEMANTIC = "semantic"  # Smart consolidation with conflict detection
    PROCEDURAL = "procedural"  # Prefer newer, validate compatibility


@dataclass
class Conflict:
    """Represents a merge conflict."""

    path: str
    base_content: Optional[str]
    ours_content: Optional[str]
    theirs_content: Optional[str]
    message: str
    memory_type: Optional[str] = None  # episodic, semantic, procedural
    payload: Optional[Dict[str, Any]] = None  # type-specific (e.g. fact strings, step diffs)


@dataclass
class MergeResult:
    """Result of a merge operation."""

    success: bool
    commit_hash: Optional[str]
    conflicts: List[Conflict]
@@ -44,215 +49,283 @@ class MergeResult:
class MergeEngine:
    """Engine for merging memory branches."""

    def __init__(self, repo: Repository):
        self.repo = repo
        self.object_store = repo.object_store

    def detect_memory_type(self, filepath: str) -> MergeStrategy:
        """
        Detect the memory type from file path.

        Args:
            filepath: Path to the file

        Returns:
            MergeStrategy for this file type
        """
        path_lower = filepath.lower()

        if "episodic" in path_lower:
            return MergeStrategy.EPISODIC
        elif "semantic" in path_lower:
            return MergeStrategy.SEMANTIC
        elif "procedural" in path_lower or "workflow" in path_lower:
            return MergeStrategy.PROCEDURAL

        # Default to semantic for unknown types
        return MergeStrategy.SEMANTIC

    def find_common_ancestor(self, commit1: str, commit2: str) -> Optional[str]:
        """
        Find the common ancestor of two commits.

        Args:
            commit1: First commit hash
            commit2: Second commit hash

        Returns:
            Common ancestor commit hash or None
        """
        # Build ancestor chain for commit1
        ancestors1 = set()
        current = commit1

        while current:
            ancestors1.add(current)
            commit = Commit.load(self.object_store, current)
            if not commit or not commit.parents:
                break
            current = commit.parents[0]  # Follow first parent

        # Walk back from commit2 and find first common ancestor
        current = commit2
        while current:
            if current in ancestors1:
                return current

            commit = Commit.load(self.object_store, current)
            if not commit or not commit.parents:
                break
            current = commit.parents[0]

        return None

    def get_tree_files(self, tree_hash: str) -> Dict[str, str]:
        """
        Get all files in a tree.

        Args:
            tree_hash: Hash of tree object

        Returns:
            Dict mapping file paths to blob hashes
        """
        files = {}
        tree = Tree.load(self.object_store, tree_hash)

        if tree:
            for entry in tree.entries:
                path = entry.path + "/" + entry.name if entry.path else entry.name
                files[path] = entry.hash

        return files

    def merge_episodic(
        self,
        base_content: Optional[str],
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """
        Merge episodic memory (append chronologically).

        Returns:
            Tuple of (merged_content, had_conflict)
        """
        # Episodic logs are append-only
        parts = []

        if base_content:
            parts.append(base_content)

        # Add ours if different from base
        if ours_content and ours_content != base_content:
            parts.append(ours_content)

        # Add theirs if different from base and ours
        if theirs_content and theirs_content != base_content and theirs_content != ours_content:
            parts.append(theirs_content)

        # Combine with clear separators
        merged = "\n\n---\n\n".join(parts)
        return merged, False  # Episodic never conflicts

    def _get_semantic_merge_config(self) -> Dict[str, Any]:
        """Get merge config for semantic memory."""
        config = self.repo.get_config()
        return config.get("merge", {}).get("semantic", {})

    def merge_semantic(
        self,
        base_content: Optional[str],
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """
        Merge semantic memory (smart consolidation).

        Dispatches to strategy from config: recency-wins, confidence-wins,
        append-both, or llm-arbitrate.
        """
        # If ours == theirs, no conflict
        if ours_content == theirs_content:
            return ours_content or "", False

        # If one is same as base, use the other
        if ours_content == base_content:
            return theirs_content or "", False
        if theirs_content == base_content:
            return ours_content or "", False

        cfg = self._get_semantic_merge_config()
        strategy = cfg.get("strategy", "recency-wins")
        threshold = float(cfg.get("auto_resolve_threshold", 0.8))

        if strategy == "recency-wins":
            return self._merge_semantic_recency(ours_content, theirs_content)
        if strategy == "confidence-wins":
            return self._merge_semantic_confidence(ours_content, theirs_content, threshold)
        if strategy == "append-both":
            return self._merge_semantic_append(ours_content, theirs_content)
        if strategy == "llm-arbitrate":
            return self._merge_semantic_llm(ours_content, theirs_content)
        # Default
        return self._merge_semantic_recency(ours_content, theirs_content)

    def _merge_semantic_recency(
        self,
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """Recency-wins: newer memory wins, keep older as deprecated."""
        ours_fm, _ = FrontmatterParser.parse(ours_content or "")
        theirs_fm, _ = FrontmatterParser.parse(theirs_content or "")
        if ours_fm and theirs_fm and ours_fm.last_updated and theirs_fm.last_updated:
            c = compare_timestamps(ours_fm.last_updated, theirs_fm.last_updated)
            if c > 0:
                return ours_content or "", False
            if c < 0:
                return theirs_content or "", False
        return ours_content or "", False  # Fallback to ours

    def _merge_semantic_confidence(
        self,
        ours_content: Optional[str],
        theirs_content: Optional[str],
        threshold: float,
    ) -> Tuple[str, bool]:
        """Confidence-wins: user-stated (high confidence) > inferred."""
        ours_fm, _ = FrontmatterParser.parse(ours_content or "")
        theirs_fm, _ = FrontmatterParser.parse(theirs_content or "")
        ours_conf = ours_fm.confidence_score if ours_fm else 0.5
        theirs_conf = theirs_fm.confidence_score if theirs_fm else 0.5
        if ours_conf >= threshold and theirs_conf < threshold:
            return ours_content or "", False
        if theirs_conf >= threshold and ours_conf < threshold:
            return theirs_content or "", False
        if ours_conf >= theirs_conf:
            return ours_content or "", False
        return theirs_content or "", False

    def _merge_semantic_append(
        self,
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """Append-both: keep both with validity periods."""
        ours_fm, ours_body = FrontmatterParser.parse(ours_content or "")
        theirs_fm, theirs_body = FrontmatterParser.parse(theirs_content or "")
        parts = []
        if ours_content:
            parts.append(f"<!-- valid_from: ours -->\n{ours_content}")
        if theirs_content and theirs_content != ours_content:
            parts.append(f"<!-- valid_from: theirs -->\n{theirs_content}")
        return "\n\n---\n\n".join(parts) if parts else "", False

    def _merge_semantic_llm(
        self,
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """LLM arbitration: call LLM to resolve contradiction (multi-provider)."""
        try:
            from .llm import get_provider

            provider = get_provider()
            if provider:
                merged = provider.complete(
                    [
                        {
                            "role": "system",
                            "content": "Resolve the contradiction between two memory versions. Output the merged content that best reflects the combined truth.",
                        },
                        {
                            "role": "user",
                            "content": f"OURS:\n{ours_content}\n\nTHEIRS:\n{theirs_content}",
                        },
                    ],
                    max_tokens=1000,
                )
                return (merged or "").strip(), False
        except Exception:
            pass
        # Fallback to conflict markers
        merged = f"<<<<<<< OURS\n{ours_content}\n=======\n{theirs_content}\n>>>>>>> THEIRS"
        return merged, True

    def merge_procedural(
        self,
        base_content: Optional[str],
        ours_content: Optional[str],
        theirs_content: Optional[str],
    ) -> Tuple[str, bool]:
        """
        Merge procedural memory (prefer newer, validate).

        Uses frontmatter timestamps to determine which version is newer.
        Procedural memory is more likely to auto-resolve using Last-Write-Wins
        since workflows typically should be replaced, not merged.

        Returns:
            Tuple of (merged_content, had_conflict)
        """
        # If ours == theirs, no conflict
        if ours_content == theirs_content:
            return ours_content or "", False

        # If one is same as base, use the other
        if ours_content == base_content:
            return theirs_content or "", False
        if theirs_content == base_content:
            return ours_content or "", False

        # Both changed - try to use frontmatter timestamps
        ours_fm, _ = FrontmatterParser.parse(ours_content or "")
        theirs_fm, _ = FrontmatterParser.parse(theirs_content or "")

        # Use timestamps if available
        if ours_fm and theirs_fm and ours_fm.last_updated and theirs_fm.last_updated:
            comparison = compare_timestamps(ours_fm.last_updated, theirs_fm.last_updated)

            if comparison > 0:
                # Ours is newer - keep it
                return ours_content or "", False
            elif comparison < 0:
                # Theirs is newer - use it
                return theirs_content or "", False
            # Equal timestamps - fall through to conflict

        # No timestamps or equal - flag for manual review
        merged = f"""<<<<<<< OURS (Current)
{ours_content}
@@ -261,49 +334,50 @@ class MergeEngine:
>>>>>>> THEIRS (Incoming)
"""
        return merged, True

    def merge_files(
        self, base_files: Dict[str, str], ours_files: Dict[str, str], theirs_files: Dict[str, str]
    ) -> Tuple[Dict[str, str], List[Conflict]]:
        """
        Merge file sets from three trees.

        Returns:
            Tuple of (merged_files, conflicts)
        """
        merged = {}
        conflicts = []

        # Get all unique file paths
        all_paths = set(base_files.keys()) | set(ours_files.keys()) | set(theirs_files.keys())

        for path in all_paths:
            base_hash = base_files.get(path)
            ours_hash = ours_files.get(path)
            theirs_hash = theirs_files.get(path)

            # Get content
            base_content = None
            ours_content = None
            theirs_content = None

            if base_hash:
                blob = Blob.load(self.object_store, base_hash)
                if blob:
                    base_content = blob.content.decode("utf-8", errors="replace")

            if ours_hash:
                blob = Blob.load(self.object_store, ours_hash)
                if blob:
                    ours_content = blob.content.decode("utf-8", errors="replace")

            if theirs_hash:
                blob = Blob.load(self.object_store, theirs_hash)
                if blob:
                    theirs_content = blob.content.decode("utf-8", errors="replace")

            # Determine merge strategy
            strategy = self.detect_memory_type(path)

            # Apply merge
            if strategy == MergeStrategy.EPISODIC:
                merged_content, had_conflict = self.merge_episodic(
@@ -317,35 +391,49 @@ class MergeEngine:
                merged_content, had_conflict = self.merge_semantic(
                    base_content, ours_content, theirs_content
                )

            # Store merged content
            if merged_content is not None:
                blob = Blob(content=merged_content.encode("utf-8"))
                merged_hash = blob.store(self.object_store)
                merged[path] = merged_hash

            # Record conflict if any
            if had_conflict:
                payload = {}
                if ours_content:
                    payload["ours_preview"] = (
                        ours_content[:300] if len(ours_content) > 300 else ours_content
                    )
                if theirs_content:
                    payload["theirs_preview"] = (
                        theirs_content[:300] if len(theirs_content) > 300 else theirs_content
                    )
                conflicts.append(
                    Conflict(
                        path=path,
                        base_content=base_content,
                        ours_content=ours_content,
                        theirs_content=theirs_content,
                        message=f"{strategy.value} merge conflict in {path}",
                        memory_type=strategy.value,
                        payload=payload or None,
                    )
                )

        return merged, conflicts

    def merge(
        self, source_branch: str, target_branch: Optional[str] = None, message: Optional[str] = None
    ) -> MergeResult:
        """
        Merge source branch into target branch (or current branch).

        Args:
            source_branch: Branch to merge from
            target_branch: Branch to merge into (None for current)
            message: Merge commit message

        Returns:
            MergeResult with success status and conflicts
        """
@@ -356,9 +444,9 @@ class MergeEngine:
                success=False,
                commit_hash=None,
                conflicts=[],
                message=f"Source branch not found: {source_branch}",
            )

        if target_branch:
            target_commit_hash = self.repo.resolve_ref(target_branch)
            if not target_commit_hash:
@@ -366,109 +454,111 @@ class MergeEngine:
                success=False,
                commit_hash=None,
                conflicts=[],
                message=f"Target branch not found: {target_branch}",
            )
        else:
            head = self.repo.refs.get_head()
            if head["type"] == "branch":
                target_commit_hash = self.repo.refs.get_branch_commit(head["value"])
            else:
                target_commit_hash = head["value"]

        # Find common ancestor
        ancestor_hash = self.find_common_ancestor(source_commit_hash, target_commit_hash)

        if ancestor_hash == source_commit_hash:
            # Already up to date
            return MergeResult(
                success=True,
                commit_hash=target_commit_hash,
                conflicts=[],
                message="Already up to date",
            )

        if ancestor_hash == target_commit_hash:
            # Fast-forward
            if not target_branch:
                target_branch = self.repo.refs.get_current_branch()

            self.repo.refs.set_branch_commit(target_branch, source_commit_hash)

            return MergeResult(
                success=True,
                commit_hash=source_commit_hash,
                conflicts=[],
                message=f"Fast-forward to {source_branch}",
            )

        # Three-way merge
        # Get trees
        ancestor_commit = Commit.load(self.object_store, ancestor_hash)
        ours_commit = Commit.load(self.object_store, target_commit_hash)
        theirs_commit = Commit.load(self.object_store, source_commit_hash)

        base_files = self.get_tree_files(ancestor_commit.tree)
        ours_files = self.get_tree_files(ours_commit.tree)
        theirs_files = self.get_tree_files(theirs_commit.tree)

        # Merge files
        merged_files, conflicts = self.merge_files(base_files, ours_files, theirs_files)

        if conflicts:
            # Stage merged files for manual resolution
            for path, hash_id in merged_files.items():
                content = Blob.load(self.object_store, hash_id).content
                self.repo.staging.add(path, hash_id, content)

            return MergeResult(
                success=False,
                commit_hash=None,
                conflicts=conflicts,
                message=f"Merge conflict in {len(conflicts)} file(s). Resolve conflicts and commit.",
            )

        # Create merge commit
        # Build tree from merged files
        entries = []
        for path, hash_id in merged_files.items():
            path_obj = Path(path)
            entries.append(
                TreeEntry(
                    mode="100644",
                    obj_type="blob",
                    hash=hash_id,
                    name=path_obj.name,
                    path=str(path_obj.parent) if str(path_obj.parent) != "." else "",
                )
            )

        tree = Tree(entries=entries)
        tree_hash = tree.store(self.object_store)

        merge_message = message or f"Merge branch '{source_branch}'"

        merge_commit = Commit(
            tree=tree_hash,
            parents=[target_commit_hash, source_commit_hash],
            author=self.repo.get_author(),
            timestamp=datetime.utcnow().isoformat() + "Z",
            message=merge_message,
            metadata={"merge": True, "source_branch": source_branch},
        )

        merge_hash = merge_commit.store(self.object_store)

        # Update target branch
        if not target_branch:
            target_branch = self.repo.refs.get_current_branch()

        if target_branch:
            self.repo.refs.set_branch_commit(target_branch, merge_hash)
        else:
            # Detached HEAD
            self.repo.refs.set_head_detached(merge_hash)

        return MergeResult(
            success=True,
            commit_hash=merge_hash,
            conflicts=[],
            message=f"Successfully merged {source_branch}",
        )