roampal 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- roampal/__init__.py +29 -0
- roampal/__main__.py +6 -0
- roampal/backend/__init__.py +1 -0
- roampal/backend/modules/__init__.py +1 -0
- roampal/backend/modules/memory/__init__.py +43 -0
- roampal/backend/modules/memory/chromadb_adapter.py +623 -0
- roampal/backend/modules/memory/config.py +102 -0
- roampal/backend/modules/memory/content_graph.py +543 -0
- roampal/backend/modules/memory/context_service.py +455 -0
- roampal/backend/modules/memory/embedding_service.py +96 -0
- roampal/backend/modules/memory/knowledge_graph_service.py +1052 -0
- roampal/backend/modules/memory/memory_bank_service.py +433 -0
- roampal/backend/modules/memory/memory_types.py +296 -0
- roampal/backend/modules/memory/outcome_service.py +400 -0
- roampal/backend/modules/memory/promotion_service.py +473 -0
- roampal/backend/modules/memory/routing_service.py +444 -0
- roampal/backend/modules/memory/scoring_service.py +324 -0
- roampal/backend/modules/memory/search_service.py +646 -0
- roampal/backend/modules/memory/tests/__init__.py +1 -0
- roampal/backend/modules/memory/tests/conftest.py +12 -0
- roampal/backend/modules/memory/tests/unit/__init__.py +1 -0
- roampal/backend/modules/memory/tests/unit/conftest.py +7 -0
- roampal/backend/modules/memory/tests/unit/test_knowledge_graph_service.py +517 -0
- roampal/backend/modules/memory/tests/unit/test_memory_bank_service.py +504 -0
- roampal/backend/modules/memory/tests/unit/test_outcome_service.py +485 -0
- roampal/backend/modules/memory/tests/unit/test_scoring_service.py +255 -0
- roampal/backend/modules/memory/tests/unit/test_search_service.py +413 -0
- roampal/backend/modules/memory/tests/unit/test_unified_memory_system.py +418 -0
- roampal/backend/modules/memory/unified_memory_system.py +1277 -0
- roampal/cli.py +638 -0
- roampal/hooks/__init__.py +16 -0
- roampal/hooks/session_manager.py +587 -0
- roampal/hooks/stop_hook.py +176 -0
- roampal/hooks/user_prompt_submit_hook.py +103 -0
- roampal/mcp/__init__.py +7 -0
- roampal/mcp/server.py +611 -0
- roampal/server/__init__.py +7 -0
- roampal/server/main.py +744 -0
- roampal-0.1.4.dist-info/METADATA +179 -0
- roampal-0.1.4.dist-info/RECORD +44 -0
- roampal-0.1.4.dist-info/WHEEL +5 -0
- roampal-0.1.4.dist-info/entry_points.txt +2 -0
- roampal-0.1.4.dist-info/licenses/LICENSE +190 -0
- roampal-0.1.4.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,485 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Unit Tests for OutcomeService
|
|
3
|
+
|
|
4
|
+
Tests the extracted outcome recording logic.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import sys
|
|
8
|
+
import os
|
|
9
|
+
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..')))
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import pytest
|
|
13
|
+
from unittest.mock import MagicMock, AsyncMock, patch
|
|
14
|
+
from datetime import datetime, timedelta
|
|
15
|
+
|
|
16
|
+
from roampal.backend.modules.memory.outcome_service import OutcomeService
|
|
17
|
+
from roampal.backend.modules.memory.config import MemoryConfig
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TestOutcomeServiceInit:
    """Construction-time behavior of OutcomeService."""

    def test_init_with_defaults(self):
        """A bare service falls back to the default MemoryConfig values."""
        svc = OutcomeService(collections={})
        assert svc.config.promotion_score_threshold == 0.7

    def test_init_with_custom_config(self):
        """An explicitly supplied MemoryConfig overrides the defaults."""
        svc = OutcomeService(
            collections={},
            config=MemoryConfig(promotion_score_threshold=0.8),
        )
        assert svc.config.promotion_score_threshold == 0.8

    def test_init_with_services(self):
        """Injected KG and promotion collaborators are stored as given."""
        kg, promoter = MagicMock(), MagicMock()
        svc = OutcomeService(
            collections={},
            kg_service=kg,
            promotion_service=promoter,
        )
        assert svc.kg_service == kg
        assert svc.promotion_service == promoter
|
47
|
+
class TestRecordOutcome:
    """Behavior of record_outcome() across outcomes and safeguards."""

    @pytest.fixture
    def mock_collections(self):
        """Mocked collection map holding one scorable fragment in 'working'."""
        store = MagicMock()
        store.get_fragment = MagicMock(return_value={
            "content": "test content",
            "metadata": {
                "text": "test content",
                "score": 0.5,
                "uses": 0,
                "outcome_history": "[]",
            },
        })
        store.update_fragment_metadata = MagicMock()
        store.collection = MagicMock()
        store.collection.count = MagicMock(return_value=10)
        return {"working": store, "history": MagicMock()}

    @pytest.fixture
    def service(self, mock_collections):
        """OutcomeService wired to the mocked collections."""
        return OutcomeService(collections=mock_collections)

    @pytest.mark.asyncio
    async def test_record_worked_outcome(self, service, mock_collections):
        """A 'worked' outcome raises the score and bumps the use count."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="worked",
        )

        assert updated is not None
        assert updated["score"] > 0.5  # reward applied
        assert updated["uses"] == 1
        assert updated["last_outcome"] == "worked"
        mock_collections["working"].update_fragment_metadata.assert_called_once()

    @pytest.mark.asyncio
    async def test_record_failed_outcome(self, service, mock_collections):
        """A 'failed' outcome lowers the score without counting a use."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="failed",
            failure_reason="Did not work",
        )

        assert updated is not None
        assert updated["score"] < 0.5  # penalty applied
        assert updated["uses"] == 0  # failures do not count as uses
        assert updated["last_outcome"] == "failed"

    @pytest.mark.asyncio
    async def test_record_partial_outcome(self, service, mock_collections):
        """A 'partial' outcome nudges the score upward and counts a use."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="partial",
        )

        assert updated is not None
        assert updated["score"] > 0.5  # small reward
        assert updated["uses"] == 1  # partial still counts as a use
        assert updated["last_outcome"] == "partial"

    @pytest.mark.asyncio
    async def test_safeguard_books(self, service):
        """Book chunks are reference material and must never be scored."""
        updated = await service.record_outcome(
            doc_id="books_test123",
            outcome="worked",
        )
        assert updated is None

    @pytest.mark.asyncio
    async def test_safeguard_memory_bank(self, service):
        """Memory-bank items are protected from outcome scoring."""
        updated = await service.record_outcome(
            doc_id="memory_bank_test123",
            outcome="worked",
        )
        assert updated is None

    @pytest.mark.asyncio
    async def test_not_found(self, service, mock_collections):
        """An unknown doc_id yields None instead of raising."""
        mock_collections["working"].get_fragment = MagicMock(return_value=None)

        updated = await service.record_outcome(
            doc_id="working_nonexistent",
            outcome="worked",
        )
        assert updated is None

    @pytest.mark.asyncio
    async def test_outcome_history_tracking(self, service, mock_collections):
        """Each recorded outcome is appended to the JSON history."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="worked",
        )

        entries = json.loads(updated["outcome_history"])
        assert len(entries) == 1
        assert entries[0]["outcome"] == "worked"

    @pytest.mark.asyncio
    async def test_failure_reason_tracking(self, service, mock_collections):
        """Failure reasons are persisted alongside the failed outcome."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="failed",
            failure_reason="Test failure",
        )

        recorded = json.loads(updated["failure_reasons"])
        assert len(recorded) == 1
        assert recorded[0]["reason"] == "Test failure"

    @pytest.mark.asyncio
    async def test_success_context_tracking(self, service, mock_collections):
        """The context passed with a success is stored for later routing."""
        updated = await service.record_outcome(
            doc_id="working_test123",
            outcome="worked",
            context={"topic": "test"},
        )

        stored = json.loads(updated["success_contexts"])
        assert len(stored) == 1
        assert stored[0]["topic"] == "test"
|
183
|
+
class TestTimeWeighting:
    """Decay behavior of the time weight applied to score updates."""

    @pytest.fixture
    def service(self):
        return OutcomeService(collections={})

    def test_recent_full_weight(self, service):
        """A just-created timestamp has decayed almost nothing."""
        assert service._calculate_time_weight(datetime.now().isoformat()) > 0.9

    def test_old_reduced_weight(self, service):
        """Thirty-day-old timestamps sit near half weight."""
        stamp = (datetime.now() - timedelta(days=30)).isoformat()
        weight = service._calculate_time_weight(stamp)
        assert 0.4 < weight < 0.6  # expected ~0.5 after 30 days

    def test_none_full_weight(self, service):
        """A missing timestamp defaults to full weight."""
        assert service._calculate_time_weight(None) == 1.0

    def test_invalid_full_weight(self, service):
        """An unparseable timestamp falls back to full weight."""
        assert service._calculate_time_weight("invalid") == 1.0
|
+
class TestScoreCalculation:
    """Direct checks of the _calculate_score_update() arithmetic."""

    @pytest.fixture
    def service(self):
        return OutcomeService(collections={})

    def test_worked_increases_score(self, service):
        """'worked' yields a positive delta and counts a use."""
        delta, score, uses = service._calculate_score_update("worked", 0.5, 0, 1.0)
        assert delta > 0
        assert score > 0.5
        assert uses == 1

    def test_failed_decreases_score(self, service):
        """'failed' yields a negative delta and no use."""
        delta, score, uses = service._calculate_score_update("failed", 0.5, 0, 1.0)
        assert delta < 0
        assert score < 0.5
        assert uses == 0

    def test_partial_slightly_increases(self, service):
        """'partial' gives a small positive nudge and counts a use."""
        delta, score, uses = service._calculate_score_update("partial", 0.5, 0, 1.0)
        assert 0 < delta < 0.1  # gentle nudge only
        assert score > 0.5
        assert uses == 1

    def test_score_capped_at_1(self, service):
        """Scores saturate at the 1.0 ceiling."""
        _, score, _ = service._calculate_score_update("worked", 0.95, 0, 1.0)
        assert score <= 1.0

    def test_score_capped_at_0(self, service):
        """Scores saturate at the 0.0 floor."""
        _, score, _ = service._calculate_score_update("failed", 0.1, 0, 1.0)
        assert score >= 0.0

    def test_time_weight_affects_delta(self, service):
        """Halving the time weight halves the delta (within tolerance)."""
        full, _, _ = service._calculate_score_update("worked", 0.5, 0, 1.0)
        halved, _, _ = service._calculate_score_update("worked", 0.5, 0, 0.5)

        assert abs(halved - full * 0.5) < 0.01
269
|
+
class TestCountSuccesses:
    """count_successes_from_history() scoring of serialized histories."""

    @pytest.fixture
    def service(self):
        return OutcomeService(collections={})

    def test_empty_returns_zero(self, service):
        """Blank and empty-list histories both score zero."""
        for blank in ("", "[]"):
            assert service.count_successes_from_history(blank) == 0

    def test_worked_counts_as_one(self, service):
        """A 'worked' entry contributes exactly 1."""
        payload = json.dumps([{"outcome": "worked"}])
        assert service.count_successes_from_history(payload) == 1.0

    def test_partial_counts_as_half(self, service):
        """A 'partial' entry contributes 0.5."""
        payload = json.dumps([{"outcome": "partial"}])
        assert service.count_successes_from_history(payload) == 0.5

    def test_failed_counts_as_zero(self, service):
        """A 'failed' entry contributes nothing."""
        payload = json.dumps([{"outcome": "failed"}])
        assert service.count_successes_from_history(payload) == 0

    def test_mixed_outcomes(self, service):
        """worked=1, partial=0.5, failed=0 -> 2.5 for this sequence."""
        sequence = [
            {"outcome": label}
            for label in ("worked", "partial", "failed", "worked")
        ]
        assert service.count_successes_from_history(json.dumps(sequence)) == 2.5

    def test_invalid_json_returns_zero(self, service):
        """Corrupt history text is treated as no successes."""
        assert service.count_successes_from_history("invalid") == 0
|
311
|
+
class TestOutcomeStats:
    """get_outcome_stats() aggregation over a fragment's metadata."""

    @pytest.fixture
    def mock_collections(self):
        """One 'working' fragment with a five-entry outcome history."""
        history = [
            {"outcome": label}
            for label in ("worked", "worked", "partial", "failed", "worked")
        ]
        store = MagicMock()
        store.get_fragment = MagicMock(return_value={
            "content": "test",
            "metadata": {
                "score": 0.8,
                "uses": 5,
                "last_outcome": "worked",
                "outcome_history": json.dumps(history),
            },
        })
        return {"working": store}

    @pytest.fixture
    def service(self, mock_collections):
        return OutcomeService(collections=mock_collections)

    def test_get_outcome_stats(self, service):
        """Stats echo the stored metadata and tally outcomes by kind."""
        stats = service.get_outcome_stats("working_test123")

        assert stats["doc_id"] == "working_test123"
        assert stats["collection"] == "working"
        assert stats["score"] == 0.8
        assert stats["uses"] == 5
        assert stats["last_outcome"] == "worked"
        assert stats["outcomes"]["worked"] == 3
        assert stats["outcomes"]["partial"] == 1
        assert stats["outcomes"]["failed"] == 1
        assert stats["total_outcomes"] == 5

    def test_get_stats_not_found(self, service, mock_collections):
        """A missing fragment is reported via the 'not_found' error marker."""
        mock_collections["working"].get_fragment = MagicMock(return_value=None)

        assert service.get_outcome_stats("working_nonexistent")["error"] == "not_found"
|
+
class TestKGIntegration:
    """Knowledge-graph side effects of recording outcomes."""

    @pytest.fixture
    def mock_kg_service(self):
        """KG mock: synchronous hooks as MagicMock, async hooks as AsyncMock."""
        kg = MagicMock()
        kg.extract_concepts = MagicMock(return_value=["test", "concept"])
        for sync_hook in (
            "build_concept_relationships",
            "add_problem_category",
            "add_solution_pattern",
            "update_success_rate",
            "add_failure_pattern",
            "add_problem_solution",
            "add_solution_pattern_entry",
        ):
            setattr(kg, sync_hook, MagicMock())
        kg.update_kg_routing = AsyncMock()
        kg.debounced_save_kg = AsyncMock()
        return kg

    @pytest.fixture
    def mock_collections(self):
        """One 'working' fragment carrying both a solution and its query."""
        store = MagicMock()
        store.get_fragment = MagicMock(return_value={
            "content": "test solution",
            "metadata": {
                "text": "test solution",
                "query": "test problem",
                "score": 0.5,
                "uses": 0,
                "outcome_history": "[]",
            },
        })
        store.update_fragment_metadata = MagicMock()
        store.collection = MagicMock()
        store.collection.count = MagicMock(return_value=10)
        return {"working": store}

    @pytest.fixture
    def service(self, mock_collections, mock_kg_service):
        return OutcomeService(
            collections=mock_collections,
            kg_service=mock_kg_service,
        )

    @pytest.mark.asyncio
    async def test_updates_kg_routing(self, service, mock_kg_service):
        """Recording an outcome refreshes KG routing at least once."""
        await service.record_outcome("working_test123", "worked")

        # Desktop builds may invoke routing twice, so >= 1 is the contract.
        assert mock_kg_service.update_kg_routing.call_count >= 1

    @pytest.mark.asyncio
    async def test_builds_relationships_on_success(self, service, mock_kg_service):
        """A success links concepts and files the problem category."""
        await service.record_outcome("working_test123", "worked")

        mock_kg_service.build_concept_relationships.assert_called()
        mock_kg_service.add_problem_category.assert_called()

    @pytest.mark.asyncio
    async def test_tracks_failure_patterns(self, service, mock_kg_service):
        """A failure with a reason is recorded as a failure pattern."""
        await service.record_outcome(
            "working_test123",
            "failed",
            failure_reason="Test failure",
        )

        mock_kg_service.add_failure_pattern.assert_called()

    @pytest.mark.asyncio
    async def test_saves_kg_after_update(self, service, mock_kg_service):
        """KG changes are flushed through the debounced save."""
        await service.record_outcome("working_test123", "worked")

        mock_kg_service.debounced_save_kg.assert_called()
+
class TestPromotionIntegration:
    """Promotion-service hand-off after an outcome is recorded."""

    @pytest.fixture
    def mock_promotion_service(self):
        promoter = MagicMock()
        promoter.handle_promotion = AsyncMock()
        return promoter

    @pytest.fixture
    def mock_collections(self):
        store = MagicMock()
        store.get_fragment = MagicMock(return_value={
            "content": "test",
            "metadata": {"score": 0.5, "uses": 0, "outcome_history": "[]"},
        })
        store.update_fragment_metadata = MagicMock()
        store.collection = MagicMock()
        store.collection.count = MagicMock(return_value=10)
        return {"working": store}

    @pytest.fixture
    def service(self, mock_collections, mock_promotion_service):
        return OutcomeService(
            collections=mock_collections,
            promotion_service=mock_promotion_service,
        )

    @pytest.mark.asyncio
    async def test_calls_promotion_handler(self, service, mock_promotion_service):
        """Exactly one promotion check runs per recorded outcome."""
        await service.record_outcome("working_test123", "worked")

        mock_promotion_service.handle_promotion.assert_called_once()

    @pytest.mark.asyncio
    async def test_passes_correct_params_to_promotion(self, service, mock_promotion_service):
        """The handler receives the doc id, collection, score and uses."""
        await service.record_outcome("working_test123", "worked")

        kwargs = mock_promotion_service.handle_promotion.call_args[1]
        assert kwargs["doc_id"] == "working_test123"
        assert kwargs["collection"] == "working"
        assert "score" in kwargs
        assert "uses" in kwargs
|
+
# Allow running this test module directly (python test_outcome_service.py)
# instead of invoking pytest from the command line.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])