reme-ai 0.1.4__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. reme_ai/__init__.py +1 -1
  2. reme_ai/app.py +1 -1
  3. reme_ai/config/default.yaml +40 -5
  4. reme_ai/react/simple_react_op.py +11 -8
  5. reme_ai/retrieve/personal/extract_time_op.py +2 -3
  6. reme_ai/retrieve/personal/fuse_rerank_op.py +1 -1
  7. reme_ai/retrieve/personal/print_memory_op.py +1 -1
  8. reme_ai/retrieve/personal/read_message_op.py +1 -1
  9. reme_ai/retrieve/personal/retrieve_memory_op.py +34 -4
  10. reme_ai/retrieve/personal/semantic_rank_op.py +4 -4
  11. reme_ai/retrieve/personal/set_query_op.py +1 -1
  12. reme_ai/retrieve/task/build_query_op.py +2 -2
  13. reme_ai/retrieve/task/merge_memory_op.py +1 -1
  14. reme_ai/retrieve/task/rerank_memory_op.py +4 -4
  15. reme_ai/retrieve/task/rewrite_memory_op.py +6 -6
  16. reme_ai/service/__init__.py +0 -0
  17. reme_ai/service/base_memory_service.py +112 -0
  18. reme_ai/service/personal_memory_service.py +128 -0
  19. reme_ai/service/task_memory_service.py +126 -0
  20. reme_ai/summary/personal/contra_repeat_op.py +2 -2
  21. reme_ai/summary/personal/get_observation_op.py +4 -4
  22. reme_ai/summary/personal/get_observation_with_time_op.py +4 -4
  23. reme_ai/summary/personal/get_reflection_subject_op.py +4 -4
  24. reme_ai/summary/personal/info_filter_op.py +4 -4
  25. reme_ai/summary/personal/load_today_memory_op.py +6 -7
  26. reme_ai/summary/personal/long_contra_repeat_op.py +4 -4
  27. reme_ai/summary/personal/update_insight_op.py +4 -4
  28. reme_ai/summary/task/comparative_extraction_op.py +9 -7
  29. reme_ai/summary/task/failure_extraction_op.py +7 -5
  30. reme_ai/summary/task/memory_deduplication_op.py +6 -6
  31. reme_ai/summary/task/memory_validation_op.py +8 -6
  32. reme_ai/summary/task/simple_comparative_summary_op.py +6 -4
  33. reme_ai/summary/task/simple_summary_op.py +6 -4
  34. reme_ai/summary/task/success_extraction_op.py +7 -5
  35. reme_ai/summary/task/trajectory_preprocess_op.py +1 -1
  36. reme_ai/summary/task/trajectory_segmentation_op.py +6 -4
  37. reme_ai/vector_store/delete_memory_op.py +1 -1
  38. reme_ai/vector_store/recall_vector_store_op.py +3 -3
  39. reme_ai/vector_store/update_memory_freq_op.py +1 -1
  40. reme_ai/vector_store/update_memory_utility_op.py +1 -1
  41. reme_ai/vector_store/update_vector_store_op.py +3 -3
  42. reme_ai/vector_store/vector_store_action_op.py +21 -18
  43. {reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/METADATA +8 -6
  44. reme_ai-0.1.7.dist-info/RECORD +87 -0
  45. reme_ai-0.1.4.dist-info/RECORD +0 -83
  46. {reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/WHEEL +0 -0
  47. {reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/entry_points.txt +0 -0
  48. {reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/licenses/LICENSE +0 -0
  49. {reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/top_level.txt +0 -0
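
The op-level changes in this release follow one pattern: synchronous `execute()` entry points become `async_execute()` coroutines, prompts are sent as flowllm `Message` objects with an explicit role, and blocking `llm.chat` / `vector_store.search` calls are replaced by awaited async variants. Below is a minimal sketch of the new shape, assuming the flowllm base classes and imports that appear in the hunks that follow; the class and prompt here are illustrative, not code shipped in the package.

```python
from typing import List

from flowllm import C, BaseLLMOp
from flowllm.enumeration.role import Role
from flowllm.schema.message import Message as FlowMessage

from reme_ai.schema.memory import BaseMemory


@C.register_op()
class ExampleSummaryOp(BaseLLMOp):  # hypothetical op, for illustration only
    file_path: str = __file__

    # 0.1.4: def execute(self)  ->  0.1.7: async def async_execute(self)
    async def async_execute(self):
        prompt = "..."  # the real ops build this via self.prompt_format(...)

        def parse_content(content: str) -> List[BaseMemory]:
            # parse the LLM response into memories (omitted in this sketch)
            return []

        # 0.1.4: self.llm.chat(messages=[Message(content=prompt)], ...)
        # 0.1.7: awaited achat with an explicit role on a flowllm Message
        memories = await self.llm.achat(
            messages=[FlowMessage(role=Role.USER, content=prompt)],
            callback_fn=parse_content)
        self.context.response.metadata["memory_list"] = memories
```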

reme_ai/summary/task/memory_deduplication_op.py

@@ -10,7 +10,7 @@ from reme_ai.schema.memory import BaseMemory
 class MemoryDeduplicationOp(BaseOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         """Remove duplicate task memories"""
         # Get task memories to deduplicate
         task_memories: List[BaseMemory] = self.context.memory_list
@@ -22,7 +22,7 @@ class MemoryDeduplicationOp(BaseOp):
         logger.info(f"Starting deduplication for {len(task_memories)} task memories")

         # Perform deduplication
-        deduplicated_task_memories = self._deduplicate_task_memories(task_memories)
+        deduplicated_task_memories = await self._deduplicate_task_memories(task_memories)

         logger.info(
             f"Deduplication complete: {len(deduplicated_task_memories)} deduplicated task memories out of {len(task_memories)}")
@@ -30,7 +30,7 @@ class MemoryDeduplicationOp(BaseOp):
         # Update context
         self.context.memory_list = deduplicated_task_memories

-    def _deduplicate_task_memories(self, task_memories: List[BaseMemory]) -> List[BaseMemory]:
+    async def _deduplicate_task_memories(self, task_memories: List[BaseMemory]) -> List[BaseMemory]:
         """Remove duplicate task memories"""
         if not task_memories:
             return task_memories
@@ -41,7 +41,7 @@ class MemoryDeduplicationOp(BaseOp):
         unique_task_memories = []

         # Get existing task memory embeddings
-        existing_embeddings = self._get_existing_task_memory_embeddings(workspace_id)
+        existing_embeddings = await self._get_existing_task_memory_embeddings(workspace_id)

         for task_memory in task_memories:
             # Generate embedding for current task memory
@@ -67,14 +67,14 @@ class MemoryDeduplicationOp(BaseOp):

         return unique_task_memories

-    def _get_existing_task_memory_embeddings(self, workspace_id: str) -> List[List[float]]:
+    async def _get_existing_task_memory_embeddings(self, workspace_id: str) -> List[List[float]]:
         """Get embeddings of existing task memories"""
         try:
             if not hasattr(self.context, 'vector_store') or not self.context.vector_store or not workspace_id:
                 return []

             # Query existing task memory nodes
-            existing_nodes = self.context.vector_store.search(
+            existing_nodes = await self.context.vector_store.async_search(
                 query="...",  # Empty query to get all
                 workspace_id=workspace_id,
                 top_k=self.op_params.get("max_existing_task_memories", 1000)

reme_ai/summary/task/memory_validation_op.py

@@ -3,6 +3,8 @@ import re
 from typing import List, Dict, Any

 from flowllm import C, BaseLLMOp
+from flowllm.enumeration.role import Role
+from flowllm.schema.message import Message as FlowMessage
 from loguru import logger

 from reme_ai.schema import Message
@@ -13,7 +15,7 @@ from reme_ai.schema.memory import BaseMemory
 class MemoryValidationOp(BaseLLMOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         """Validate quality of extracted task memories"""

         task_memories: List[BaseMemory] = []
@@ -31,7 +33,7 @@ class MemoryValidationOp(BaseLLMOp):
         validated_task_memories = []

         for task_memory in task_memories:
-            validation_result = self._validate_single_task_memory(task_memory)
+            validation_result = await self._validate_single_task_memory(task_memory)
             if validation_result and validation_result.get("is_valid", False):
                 task_memory.score = validation_result.get("score", 0.0)
                 validated_task_memories.append(task_memory)
@@ -45,13 +47,13 @@ class MemoryValidationOp(BaseLLMOp):
         self.context.response.answer = json.dumps([x.model_dump() for x in validated_task_memories])
         self.context.response.metadata["memory_list"] = validated_task_memories

-    def _validate_single_task_memory(self, task_memory: BaseMemory) -> Dict[str, Any]:
+    async def _validate_single_task_memory(self, task_memory: BaseMemory) -> Dict[str, Any]:
         """Validate single task memory"""
-        validation_info = self._llm_validate_task_memory(task_memory)
+        validation_info = await self._llm_validate_task_memory(task_memory)
         logger.info(f"Validating: {validation_info}")
         return validation_info

-    def _llm_validate_task_memory(self, task_memory: BaseMemory) -> Dict[str, Any]:
+    async def _llm_validate_task_memory(self, task_memory: BaseMemory) -> Dict[str, Any]:
         """Validate task memory using LLM"""
         try:
             prompt = self.prompt_format(
@@ -96,7 +98,7 @@ class MemoryValidationOp(BaseLLMOp):
                         "reason": f"Parse error: {str(e_inner)}"
                     }

-            return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_validation)
+            return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_validation)

         except Exception as e:
             logger.error(f"LLM validation failed: {e}")

reme_ai/summary/task/simple_comparative_summary_op.py

@@ -2,6 +2,8 @@ import json
 from typing import List, Dict

 from flowllm import C, BaseLLMOp
+from flowllm.enumeration.role import Role
+from flowllm.schema.message import Message as FlowMessage
 from loguru import logger

 from reme_ai.schema import Message, Trajectory
@@ -13,7 +15,7 @@ from reme_ai.utils.op_utils import merge_messages_content
 class SimpleComparativeSummaryOp(BaseLLMOp):
     file_path: str = __file__

-    def compare_summary_trajectory(self, trajectory_a: Trajectory, trajectory_b: Trajectory) -> List[BaseMemory]:
+    async def compare_summary_trajectory(self, trajectory_a: Trajectory, trajectory_b: Trajectory) -> List[BaseMemory]:
         summary_prompt = self.prompt_format(prompt_name="summary_prompt",
                                             execution_process_a=merge_messages_content(trajectory_a.messages),
                                             execution_process_b=merge_messages_content(trajectory_b.messages),
@@ -42,9 +44,9 @@ class SimpleComparativeSummaryOp(BaseLLMOp):
                 logger.exception(f"parse content failed!\n{content}")
                 raise e

-        return self.llm.chat(messages=[Message(content=summary_prompt)], callback_fn=parse_content)
+        return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=summary_prompt)], callback_fn=parse_content)

-    def execute(self):
+    async def async_execute(self):
         trajectories: list = self.context.get("trajectories", [])
         trajectories: List[Trajectory] = [Trajectory(**x) if isinstance(x, dict) else x for x in trajectories]

@@ -61,7 +63,7 @@ class SimpleComparativeSummaryOp(BaseLLMOp):
                 continue

             if task_trajectories[0].score > task_trajectories[-1].score:
-                task_memories = self.compare_summary_trajectory(trajectory_a=task_trajectories[0],
+                task_memories = await self.compare_summary_trajectory(trajectory_a=task_trajectories[0],
                                                                 trajectory_b=task_trajectories[-1])
                 memory_list.extend(task_memories)


reme_ai/summary/task/simple_summary_op.py

@@ -2,6 +2,8 @@ import json
 from typing import List

 from flowllm import C, BaseLLMOp
+from flowllm.enumeration.role import Role
+from flowllm.schema.message import Message as FlowMessage
 from loguru import logger

 from reme_ai.schema import Message, Trajectory
@@ -13,7 +15,7 @@ from reme_ai.utils.op_utils import merge_messages_content
 class SimpleSummaryOp(BaseLLMOp):
     file_path: str = __file__

-    def summary_trajectory(self, trajectory: Trajectory) -> List[BaseMemory]:
+    async def summary_trajectory(self, trajectory: Trajectory) -> List[BaseMemory]:
         execution_process = merge_messages_content(trajectory.messages)
         success_score_threshold: float = self.op_params.get("success_score_threshold", 0.9)
         logger.info(f"success_score_threshold={success_score_threshold}")
@@ -49,15 +51,15 @@ class SimpleSummaryOp(BaseLLMOp):
                 logger.exception(f"parse content failed!\n{content}")
                 raise e

-        return self.llm.chat(messages=[Message(content=summary_prompt)], callback_fn=parse_content)
+        return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=summary_prompt)], callback_fn=parse_content)

-    def execute(self):
+    async def async_execute(self):
         trajectories: list = self.context.trajectories
         trajectories: List[Trajectory] = [Trajectory(**x) if isinstance(x, dict) else x for x in trajectories]

         memory_list: List[BaseMemory] = []
         for trajectory in trajectories:
-            memories = self.summary_trajectory(trajectory)
+            memories = await self.summary_trajectory(trajectory)
             if memories:
                 memory_list.extend(memories)


reme_ai/summary/task/success_extraction_op.py

@@ -1,6 +1,8 @@
 from typing import List

 from flowllm import C, BaseLLMOp
+from flowllm.enumeration.role import Role
+from flowllm.schema.message import Message as FlowMessage
 from loguru import logger

 from reme_ai.schema import Message, Trajectory
@@ -12,7 +14,7 @@ from reme_ai.utils.op_utils import merge_messages_content, parse_json_experience
 class SuccessExtractionOp(BaseLLMOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         """Extract task memories from successful trajectories"""
         success_trajectories: List[Trajectory] = self.context.success_trajectories

@@ -29,11 +31,11 @@ class SuccessExtractionOp(BaseLLMOp):
             if "segments" in trajectory.metadata:
                 # Process segmented step sequences
                 for segment in trajectory.metadata["segments"]:
-                    task_memories = self._extract_success_task_memory_from_steps(segment, trajectory)
+                    task_memories = await self._extract_success_task_memory_from_steps(segment, trajectory)
                     success_task_memories.extend(task_memories)
             else:
                 # Process entire trajectory
-                task_memories = self._extract_success_task_memory_from_steps(trajectory.messages, trajectory)
+                task_memories = await self._extract_success_task_memory_from_steps(trajectory.messages, trajectory)
                 success_task_memories.extend(task_memories)

         logger.info(f"Extracted {len(success_task_memories)} success task memories")
@@ -41,7 +43,7 @@ class SuccessExtractionOp(BaseLLMOp):
         # Add task memories to context
         self.context.success_task_memories = success_task_memories

-    def _extract_success_task_memory_from_steps(self, steps: List[Message], trajectory: Trajectory) -> List[BaseMemory]:
+    async def _extract_success_task_memory_from_steps(self, steps: List[Message], trajectory: Trajectory) -> List[BaseMemory]:
         """Extract task memory from successful step sequences"""
         step_content = merge_messages_content(steps)
         context = get_trajectory_context(trajectory, steps)
@@ -70,4 +72,4 @@ class SuccessExtractionOp(BaseLLMOp):

             return task_memories

-        return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_task_memories)
+        return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_task_memories)

reme_ai/summary/task/trajectory_preprocess_op.py

@@ -10,7 +10,7 @@ from reme_ai.schema import Trajectory
 class TrajectoryPreprocessOp(BaseOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         """Preprocess trajectories: validate and classify"""
         trajectories: list = self.context.get("trajectories", [])
         trajectories: List[Trajectory] = [Trajectory(**x) if isinstance(x, dict) else x for x in trajectories]

reme_ai/summary/task/trajectory_segmentation_op.py

@@ -3,6 +3,8 @@ import re
 from typing import List

 from flowllm import C, BaseLLMOp
+from flowllm.enumeration.role import Role
+from flowllm.schema.message import Message as FlowMessage
 from loguru import logger

 from reme_ai.schema import Message, Trajectory
@@ -12,7 +14,7 @@ from reme_ai.schema import Message, Trajectory
 class TrajectorySegmentationOp(BaseLLMOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         """Segment trajectories into meaningful steps"""
         # Get trajectories from context
         all_trajectories: List[Trajectory] = self.context.get("all_trajectories", [])
@@ -30,7 +32,7 @@ class TrajectorySegmentationOp(BaseLLMOp):
         # Add segmentation info to trajectories
         segmented_count = 0
         for trajectory in target_trajectories:
-            segments = self._llm_segment_trajectory(trajectory)
+            segments = await self._llm_segment_trajectory(trajectory)
             trajectory.metadata["segments"] = segments
             segmented_count += 1

@@ -51,7 +53,7 @@ class TrajectorySegmentationOp(BaseLLMOp):
         else:
             return all_trajectories

-    def _llm_segment_trajectory(self, trajectory: Trajectory) -> List[List[Message]]:
+    async def _llm_segment_trajectory(self, trajectory: Trajectory) -> List[List[Message]]:
         """Use LLM for trajectory segmentation"""
         trajectory_content = self._format_trajectory_content(trajectory)

@@ -80,7 +82,7 @@ class TrajectorySegmentationOp(BaseLLMOp):

             return segments if segments else [trajectory.messages]

-        return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_segmentation,
+        return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_segmentation,
                              default_value=[trajectory.messages])

     @staticmethod

reme_ai/vector_store/delete_memory_op.py

@@ -8,7 +8,7 @@ from flowllm.schema.vector_node import VectorNode
 class DeleteMemoryOp(BaseLLMOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         workspace_id: str = self.context.workspace_id
         freq_threshold: int = self.context.freq_threshold
         utility_threshold: float = self.context.utility_threshold

reme_ai/vector_store/recall_vector_store_op.py

@@ -10,15 +10,15 @@ from reme_ai.schema.memory import BaseMemory, vector_node_to_memory
 @C.register_op()
 class RecallVectorStoreOp(BaseLLMOp):

-    def execute(self):
+    async def async_execute(self):
         recall_key: str = self.op_params.get("recall_key", "query")
-        top_k: int = self.op_params.get("top_k", 3)
+        top_k: int = self.context.get("top_k", 3)

         query: str = self.context[recall_key]
         assert query, "query should be not empty!"

         workspace_id: str = self.context.workspace_id
-        nodes: List[VectorNode] = self.vector_store.search(query=query, workspace_id=workspace_id, top_k=top_k)
+        nodes: List[VectorNode] = await self.vector_store.async_search(query=query, workspace_id=workspace_id, top_k=top_k)
         memory_list: List[BaseMemory] = []
         memory_content_list: List[str] = []
         for node in nodes:
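
Note the behavioral change in RecallVectorStoreOp above: `top_k` is now read from the per-request flow context instead of the op's static `op_params`, so each retrieval call can set its own value (the default of 3 is unchanged). A minimal sketch of what the lookup amounts to; the function name here is illustrative only:

```python
def resolve_top_k_017(request_context: dict) -> int:
    """Sketch: in 0.1.7 top_k comes from the per-request context (default 3);
    0.1.4 read it from the op's static op_params instead."""
    return request_context.get("top_k", 3)
```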

reme_ai/vector_store/update_memory_freq_op.py

@@ -10,7 +10,7 @@ from reme_ai.schema.memory import BaseMemory, dict_to_memory
 class UpdateMemoryFreqOp(BaseOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         memory_dicts: List[dict] = self.context.memory_dicts

         if not memory_dicts:

reme_ai/vector_store/update_memory_utility_op.py

@@ -10,7 +10,7 @@ from reme_ai.schema.memory import BaseMemory
 class UpdateMemoryUtilityOp(BaseOp):
     file_path: str = __file__

-    def execute(self):
+    async def async_execute(self):
         memory_dicts: List[dict] = self.context.memory_dicts
         update_utility = self.context.update_utility


reme_ai/vector_store/update_vector_store_op.py

@@ -11,18 +11,18 @@ from reme_ai.schema.memory import BaseMemory
 @C.register_op()
 class UpdateVectorStoreOp(BaseLLMOp):

-    def execute(self):
+    async def async_execute(self):
         workspace_id: str = self.context.workspace_id

         deleted_memory_ids: List[str] = self.context.response.metadata.get("deleted_memory_ids", [])
         if deleted_memory_ids:
-            self.vector_store.delete(node_ids=deleted_memory_ids, workspace_id=workspace_id)
+            await self.vector_store.async_delete(node_ids=deleted_memory_ids, workspace_id=workspace_id)
             logger.info(f"delete memory_ids={json.dumps(deleted_memory_ids, indent=2)}")

         insert_memory_list: List[BaseMemory] = self.context.response.metadata.get("memory_list", [])
         if insert_memory_list:
             insert_nodes: List[VectorNode] = [x.to_vector_node() for x in insert_memory_list]
-            self.vector_store.insert(nodes=insert_nodes, workspace_id=workspace_id)
+            await self.vector_store.async_insert(nodes=insert_nodes, workspace_id=workspace_id)
             logger.info(f"insert insert_node.size={len(insert_nodes)}")

         # Store results in context
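
Across the two vector-store ops above, the store calls are renamed one-for-one to awaited `async_*` methods. A hedged summary of that surface as it is used in these hunks (keyword arguments copied from the diff; return types other than `async_search` are not visible in the diff and are typed loosely here):

```python
from typing import Any, List, Protocol

from flowllm.schema.vector_node import VectorNode


class AsyncVectorStore(Protocol):
    """Illustrative protocol only; names and kwargs mirror the 0.1.7 ops."""

    async def async_search(self, query: str, workspace_id: str, top_k: int) -> List[VectorNode]: ...

    async def async_insert(self, nodes: List[VectorNode], workspace_id: str) -> Any: ...

    async def async_delete(self, node_ids: List[str], workspace_id: str) -> Any: ...
```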

reme_ai/vector_store/vector_store_action_op.py

@@ -7,22 +7,22 @@ from reme_ai.schema.memory import vector_node_to_memory, dict_to_memory, BaseMem
 @C.register_op()
 class VectorStoreActionOp(BaseLLMOp):

-    def execute(self):
+    async def async_execute(self):
         workspace_id: str = self.context.workspace_id
         action: str = self.context.action
-
+        result = ""
         if action == "copy":
             src_workspace_id: str = self.context.src_workspace_id
-            result = self.vector_store.copy_workspace(src_workspace_id=src_workspace_id,
-                                                      dest_workspace_id=workspace_id)
+            result = await self.vector_store.async_copy_workspace(src_workspace_id=src_workspace_id,
+                                                                  dest_workspace_id=workspace_id)

         elif action == "delete":
-            if self.vector_store.exist_workspace(workspace_id):
-                result = self.vector_store.delete_workspace(workspace_id=workspace_id)
+            if await self.vector_store.async_exist_workspace(workspace_id):
+                result = await self.vector_store.async_delete_workspace(workspace_id=workspace_id)

         elif action == "delete_ids":
             memory_ids: list = self.context.memory_ids
-            result = self.vector_store.delete(workspace_id=workspace_id, node_ids=memory_ids)
+            result = await self.vector_store.async_delete(workspace_id=workspace_id, node_ids=memory_ids)

         elif action == "dump":
             path: str = self.context.path
@@ -30,9 +30,16 @@ class VectorStoreActionOp(BaseLLMOp):
             def node_to_memory(node: VectorNode) -> dict:
                 return vector_node_to_memory(node).model_dump()

-            result = self.vector_store.dump_workspace(workspace_id=workspace_id,
-                                                      path=path,
-                                                      callback_fn=node_to_memory)
+            result = await self.vector_store.async_dump_workspace(workspace_id=workspace_id,
+                                                                  path=path,
+                                                                  callback_fn=node_to_memory)
+
+        elif action == "list":
+            def node_to_memory(node: VectorNode) -> dict:
+                return vector_node_to_memory(node).model_dump()
+
+            result = await self.vector_store.async_iter_workspace_nodes(workspace_id=workspace_id)
+            result = [node_to_memory(node) for node in result]

         elif action == "load":
             path: str = self.context.path
@@ -41,15 +48,11 @@ class VectorStoreActionOp(BaseLLMOp):
                 memory: BaseMemory = dict_to_memory(memory_dict=memory_dict)
                 return memory.to_vector_node()

-            result = self.vector_store.load_workspace(workspace_id=workspace_id,
-                                                      path=path,
-                                                      callback_fn=memory_dict_to_node)
+            result = await self.vector_store.async_load_workspace(workspace_id=workspace_id,
+                                                                  path=path,
+                                                                  callback_fn=memory_dict_to_node)

         else:
             raise ValueError(f"invalid action={action}")

-        # Store results in context
-        if isinstance(result, dict):
-            self.context.response.metadata["action_result"] = result
-        else:
-            self.context.response.metadata["action_result"] = {"result": str(result)}
+        self.context.response.metadata["action_result"] = result
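
VectorStoreActionOp gains a `list` action in 0.1.7 that returns the workspace's memories as dictionaries, alongside the existing copy/delete/delete_ids/dump/load actions, and `action_result` is now stored in the response metadata as-is rather than being wrapped in a dict. A hedged usage sketch against the HTTP service, modeled on the `/vector_store` "load" call in the README excerpt further down this diff; the exact response layout is not confirmed by the diff:

```python
import requests

# Assumes the ReMe HTTP service is running locally, as in the README example below.
response = requests.post("http://localhost:8002/vector_store", json={
    "workspace_id": "appworld",
    "action": "list",  # new in 0.1.7; returns the workspace's memories as dicts
})

# The op stores its result under metadata["action_result"]; the JSON shape here is assumed.
print(response.json())
```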

{reme_ai-0.1.4.dist-info → reme_ai-0.1.7.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reme_ai
-Version: 0.1.4
+Version: 0.1.7
 Summary: Remember me
 Author-email: "jinli.yl" <jinli.yl@alibaba-inc.com>, "dengjiaji.djj" <dengjiaji.djj@alibaba-inc.com>, "caozouying.czy" <caozouying.czy@alibaba-inc.com>
 License: Apache License
@@ -212,16 +212,18 @@ Classifier: Operating System :: OS Independent
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: flowllm>=0.1.3
+Requires-Dist: flowllm>=0.1.6
 Dynamic: license-file

+English | [**中文**](./README_ZH.md)
+
 <p align="center">
   <img src="docs/figure/reme_logo.png" alt="ReMe Logo" width="50%">
 </p>

 <p align="center">
   <a href="https://pypi.org/project/reme-ai/"><img src="https://img.shields.io/badge/python-3.12+-blue" alt="Python Version"></a>
-  <a href="https://pypi.org/project/reme-ai/"><img src="https://img.shields.io/badge/pypi-v0.1.x-blue?logo=pypi" alt="PyPI Version"></a>
+  <a href="https://pypi.org/project/reme-ai/"><img src="https://img.shields.io/badge/pypi-v0.1-blue?logo=pypi" alt="PyPI Version"></a>
   <a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache--2.0-black" alt="License"></a>
   <a href="https://github.com/modelscope/ReMe"><img src="https://img.shields.io/github/stars/modelscope/ReMe?style=social" alt="GitHub Stars"></a>
 </p>
@@ -255,7 +257,7 @@ Personal memory helps "**understand user preferences**", while task memory helps
 - **[2025-08]** 🚀 MCP protocol support is now available -> [MCP Quick Start](docs/mcp_quick_start.md).
 - **[2025-06]** 🚀 Multiple backend vector storage support (Elasticsearch &
   ChromaDB) -> [Vector DB quick start](docs/vector_store_api_guide.md).
-- **[2024-09]** 🧠 [MemoryScope](https://github.com/modelscope/Reme/tree/memoryscope_branch) v0.1.x released,
+- **[2024-09]** 🧠 [MemoryScope](https://github.com/modelscope/Reme/tree/memoryscope_branch) v0.1 released,
   personalized and time-aware memory storage and usage.

 ---
@@ -549,7 +551,7 @@ ReMe provides pre-built memory libraries that agents can immediately use with ve
 response = requests.post("http://localhost:8002/vector_store", json={
     "workspace_id": "appworld",
     "action": "load",
-    "path": "./library/"
+    "path": "./docs/library/"
 })

 # Query relevant memories
@@ -633,7 +635,7 @@ We believe the best memory systems come from collective wisdom. Contributions we

 ```bibtex
 @software{ReMe2025,
-  title = {ReMe: Memory Framework for AI Agent},
+  title = {ReMe: Memory Management Framework for Agents},
   author = {Li Yu, Jiaji Deng, Zouying Cao},
   url = {https://github.com/modelscope/ReMe},
   year = {2025}

reme_ai-0.1.7.dist-info/RECORD

@@ -0,0 +1,87 @@
+reme_ai/__init__.py,sha256=u-F1nTDZa6oqZy_J-Yo94qB-W9VEr7xaSz47-LYynsE,139
+reme_ai/app.py,sha256=vzcC8cZdeTl6JZJK_JhJE8Kt7MBsIXlAQvg90bq8g-Q,325
+reme_ai/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+reme_ai/config/config_parser.py,sha256=gk9bsMNnAlLLjChdalHWHgpOazyvZh5bItTUZcIGqbU,189
+reme_ai/config/default.yaml,sha256=fxe9f43Lf6Affn-6cF-ZpuLkZCz0JKyzmRJGpaqBTnc,6324
+reme_ai/constants/__init__.py,sha256=HdNweT3fTmdsCfoyTVKpBIPF9EELepLVNCrpKpJymY4,128
+reme_ai/constants/common_constants.py,sha256=0JuJS8y--bdQ9Knx2f8f0bnNaWTBFLLMYYp2xhathjc,1038
+reme_ai/constants/language_constants.py,sha256=bCNJJ8by5aNIaClDT6q2WqF7Xia6pXdHSMEK8DKLtfA,4754
+reme_ai/enumeration/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+reme_ai/enumeration/language_enum.py,sha256=zaWc0L1Etb4r3QcfNvVrkX6hwYNAHUms962PlUKLtas,261
+reme_ai/react/__init__.py,sha256=-EEF7Moo56-R6O2CoWMbkXz12l8tsbpMht5vSUdRnbU,43
+reme_ai/react/simple_react_op.py,sha256=H_rbqB03A9sqAq2qXqxD0p7jD80atA2aoHEJJ68GBsA,608
+reme_ai/retrieve/__init__.py,sha256=K3qulFpRGZU_UsyYjIouR1F2CXwDBf9NkGiqYmIU2hQ,42
+reme_ai/retrieve/personal/__init__.py,sha256=v5rpUQgBuMBMAodUzXPaRHg27ckGz82Vq5PXFSwHbks,462
+reme_ai/retrieve/personal/extract_time_op.py,sha256=22znvAwW2gPIZ8gTzKW-f87MrqecRZ60EmrxJ1z_tYE,4154
+reme_ai/retrieve/personal/extract_time_prompt.yaml,sha256=gO1RM4yFIDE5zr7EM6B_R-5a7mrtgqpeMRnKtzkeaOQ,5247
+reme_ai/retrieve/personal/fuse_rerank_op.py,sha256=vDkkphPnN-Lph0yl5rL8u04xit_MKgn7ejO16G9llpw,7601
+reme_ai/retrieve/personal/print_memory_op.py,sha256=X-iilW-LX0MJwaCqqlB6sV9ANItPQdYFyd0RtSy4dyY,4359
+reme_ai/retrieve/personal/print_memory_prompt.yaml,sha256=fJ1DVLwu7crKLnNy9tsb-BYY56Y-kLRkuqthuEakgmM,487
+reme_ai/retrieve/personal/read_message_op.py,sha256=_k-kNPPs8X3oC4R7A0qJr0xNVlhXJwvrLVcQ1G_2GBM,1724
+reme_ai/retrieve/personal/retrieve_memory_op.py,sha256=KlpPmYKKxynwiTLjoc7AIhuz4wMX5Qo1SnOgpa4Th0k,1888
+reme_ai/retrieve/personal/semantic_rank_op.py,sha256=ezCIQCSOuV6wfx2U78Q4c5z6Sz2TK1O1aNzFXTlEeHY,6614
+reme_ai/retrieve/personal/set_query_op.py,sha256=UPq8_65x2AewBWZOxLhqNhw1fYAAClb9aUnW2pdtcc8,1322
+reme_ai/retrieve/task/__init__.py,sha256=jTB7b3WpDbU3cyloqPD96TAdZkCIRs5enjSXybebbII,176
+reme_ai/retrieve/task/build_query_op.py,sha256=8se7mzhXgHHOkqk7mmCXr9Iag5Q9OulaRZIVgbg-2HU,1492
+reme_ai/retrieve/task/build_query_prompt.yaml,sha256=NQTvGe6u9w5k4J_tMo-AeXhRD4h7_urrl3NFhJlc8_8,323
+reme_ai/retrieve/task/merge_memory_op.py,sha256=bR5kTBHsOS8NIlVwbh-dcEqzKAgQGFQkrYP2hznhztc,913
+reme_ai/retrieve/task/rerank_memory_op.py,sha256=8DNuVufujOyGyVfGWrfxgDBRSR6jBqLu7bvCqksVl-Y,5567
+reme_ai/retrieve/task/rerank_memory_prompt.yaml,sha256=_YuJCGa6N5gJ8LVcbeC8y6AFJHGkWuAyorB8NkUla34,965
+reme_ai/retrieve/task/rewrite_memory_op.py,sha256=_yiyikbyRcJBKZueyyYYhzQz1TH3hPaFSP8Rmts5iJw,5519
+reme_ai/retrieve/task/rewrite_memory_prompt.yaml,sha256=JY4gmTplmfxxZzDfdaUDyJoXXqWHXpc7Z-4_ry_Zy74,1594
+reme_ai/schema/__init__.py,sha256=tbBjMfV_ojRr2yybA-XSPzN71A6K9HXZ8Gaw7I-2hBM,69
+reme_ai/schema/memory.py,sha256=ur1ldmrrm3YZermeb6zhV9LgrBoUEJ29Ppz6J2Q8yhM,5483
+reme_ai/service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+reme_ai/service/base_memory_service.py,sha256=nBDVAgoEqyb98t1Kl43u1v0aCStcsy7XbcWmEuk6dac,3212
+reme_ai/service/personal_memory_service.py,sha256=AbTBbu4GY_me_mUxj5p9TBA8ME1AzIm6wATccUaC5tM,5038
+reme_ai/service/task_memory_service.py,sha256=NjfWa5Z3R9v2iOxgb_De07bhMydeKf3qFhQOUFPmEJk,4891
+reme_ai/summary/__init__.py,sha256=K3qulFpRGZU_UsyYjIouR1F2CXwDBf9NkGiqYmIU2hQ,42
+reme_ai/summary/personal/__init__.py,sha256=z2taU9ejTWvZaAJ6Mr0jKKRh4fcBdZVxpiFWreTDhTg,417
+reme_ai/summary/personal/contra_repeat_op.py,sha256=cs3gnOkmFJgIH3KOS17FrxIiscW7lNqZiicCXZkgu9A,6297
+reme_ai/summary/personal/contra_repeat_prompt.yaml,sha256=u5sp8MN944pU0HEeTKQmehdVlnqLXsIA7rAAh-bMJTw,7205
+reme_ai/summary/personal/get_observation_op.py,sha256=IOVFnKgvzPREdwTjrvOLHodjQ4erSudDbb5DEqzn9mY,6431
+reme_ai/summary/personal/get_observation_prompt.yaml,sha256=PK5iBDPMt6MqgsQ9Mj6YKp5Iq1PCA-MOYOp-rEFd-b8,15032
+reme_ai/summary/personal/get_observation_with_time_op.py,sha256=2AE0HwxJzwoqTR0-OW42s92M3Ql6dXLlhQRBVbWRPBo,7905
+reme_ai/summary/personal/get_observation_with_time_prompt.yaml,sha256=lRIlkU7BmgLzkghLLX-hx7hxibDTgsqa-IuIUIwyeMs,16389
+reme_ai/summary/personal/get_reflection_subject_op.py,sha256=wP4zVOC2kBJlxWgXEHRmoSMvy6tgoXhrSc5y0LWhcg8,7712
+reme_ai/summary/personal/get_reflection_subject_prompt.yaml,sha256=YYSCsgZhD7H7zkzRWrj6zzxOlprfuB1setUokjgvfK0,11139
+reme_ai/summary/personal/info_filter_op.py,sha256=c0f91IpXDtYo4EgUWAleV9SgTlGVO1DP6mUHRo_7N6U,7897
+reme_ai/summary/personal/info_filter_prompt.yaml,sha256=rgVLz_ptumi6lLYgLKRmTuap8vA5jQsfeFGpPiMpjMo,11192
+reme_ai/summary/personal/load_today_memory_op.py,sha256=N37X9iDKdgnrGqZhzo46Jr3DVjdvNy9RHRHrt__9-IM,4060
+reme_ai/summary/personal/long_contra_repeat_op.py,sha256=H2vodsFbLGmdlkEqGbAbTbBnRyOY-ywsRFo_nY6iEgM,9089
+reme_ai/summary/personal/long_contra_repeat_prompt.yaml,sha256=6q3Y4xZx3liZwXLcJsOgeCIQZzAE2PwqVNQCFMNAWsI,7516
+reme_ai/summary/personal/update_insight_op.py,sha256=S3bdRQaUoYVxcS4ts_mO08-h1vUpMdiSVYAL34VuesQ,11025
+reme_ai/summary/personal/update_insight_prompt.yaml,sha256=02EPEC5vQGTEYOfHeq29GUEhlWxdLAcMnaj6jOgeI8c,10295
+reme_ai/summary/task/__init__.py,sha256=GApTaFsmX7q-NbGtk5cTfx23cf-X-DBt-RORy0UMcvI,528
+reme_ai/summary/task/comparative_extraction_op.py,sha256=vedE7i8UsZEW9hxWAc-0ew0SLG5c63TEE7EASVHIse8,10845
+reme_ai/summary/task/comparative_extraction_prompt.yaml,sha256=EgsveH1fHcXle5sHDqyxZ_KxTfTUn2IGbkZ0KjZRl1I,3331
+reme_ai/summary/task/failure_extraction_op.py,sha256=yWwC4_rNknAMAHDHNci92JRpBgpzkSTu9VuDTbf-tAQ,3199
+reme_ai/summary/task/failure_extraction_prompt.yaml,sha256=kWz55BRxtEd_CjoaKWliCdYR_pzNCZwBETA0Ajdwggk,1534
+reme_ai/summary/task/memory_deduplication_op.py,sha256=rg_k4tJkCKCERUV0Uuy5eVCBr4hEz3rhDBMg-J3DyiQ,6886
+reme_ai/summary/task/memory_validation_op.py,sha256=pUk3mu4FpeQrC5bSIFqXwO8HJ9RIpbAMFp4PSuOPd9I,4525
+reme_ai/summary/task/memory_validation_prompt.yaml,sha256=CwqT76ktjnkCXcZFEe0XtvJPkhZTpt32_N--A0gH3k0,1230
+reme_ai/summary/task/simple_comparative_summary_op.py,sha256=4dAX0QqyIfu9UIuouSrhv30dqelMnI7Vm7toD56WRow,3587
+reme_ai/summary/task/simple_comparative_summary_prompt.yaml,sha256=FGGj-jE8SvgEDEJAiq33ptB_-pI2qmBulsLDQyy8_bM,1140
+reme_ai/summary/task/simple_summary_op.py,sha256=fsOogsmu6Na-UdRkxb2xOTqx7jBwXhEolGhKn4OBKPY,3126
+reme_ai/summary/task/simple_summary_prompt.yaml,sha256=o0JbPBtGqKJ6_GIDhI_wGdBOSOy74bpaehLAKqtBYco,943
+reme_ai/summary/task/success_extraction_op.py,sha256=W5rtY-KNovn9yvW6Q7-XDPWk_yrVbReUeJGVwDvK8no,3194
+reme_ai/summary/task/success_extraction_prompt.yaml,sha256=rr_5sm9j2r_Ea5JgBBPG-yCnFMmCdTi391rqWovqraw,1527
+reme_ai/summary/task/trajectory_preprocess_op.py,sha256=vbs-0dvN1EYg5_t_aY6pUBdLv6heWyaS1eUtdTXA6tE,1722
+reme_ai/summary/task/trajectory_segmentation_op.py,sha256=nG-ri3UxXD85uSFokRi95iHom-5-xaRB495WNcaKn5Y,4777
+reme_ai/summary/task/trajectory_segmentation_prompt.yaml,sha256=8E5nDQn9x7DHC6P1dsF3l_bd8UITla9AyfmSLoyFLpM,1193
+reme_ai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+reme_ai/utils/datetime_handler.py,sha256=mTf-c7Ko7crrTzh1hU5v-A7Hbg_8gQRCeN2c-aoP3Tw,14451
+reme_ai/utils/op_utils.py,sha256=tYsAl5LcQKe0I7YyEe8VXjrjJGVZ7Q_oD28qorJsCB0,4175
+reme_ai/vector_store/__init__.py,sha256=1Yh14F5UZirKHxM7cgWiATpoZUqWGo147KlJK_wD03Q,327
+reme_ai/vector_store/delete_memory_op.py,sha256=MqV-0UCt4S7h7yvR5EfmLQes-COgK1m6SqHIiWtGG08,917
+reme_ai/vector_store/recall_vector_store_op.py,sha256=AUcEOTTllvppcClkSjksERnihekZIi2FOx_XBG036QY,1476
+reme_ai/vector_store/update_memory_freq_op.py,sha256=nZ7WIqC8hN-Pv9QZrTviax6DA1CVyoCX2cF0XrmTAV4,1021
+reme_ai/vector_store/update_memory_utility_op.py,sha256=fXYAtgfUxmhJkgKA2kHucOBalXAk9O6vvG8Bn9asOTQ,1025
+reme_ai/vector_store/update_vector_store_op.py,sha256=_8T94tHe2nd5phsgmPZQdrYfVR7FvydMm9MJzlrESl4,1333
+reme_ai/vector_store/vector_store_action_op.py,sha256=qeoB_e0WBTnB9yQUkyc8jNNRO9JK_bcCwS2oU0LTJxs,2535
+reme_ai-0.1.7.dist-info/licenses/LICENSE,sha256=zFTWearO11HAlvEgtmY1XBBtk5TSj5P23zU5c_bNfb4,11343
+reme_ai-0.1.7.dist-info/METADATA,sha256=5GAggIi4CcLZa41Hz1b4WPtZ--1ylraHTl7QrkhIiKw,27137
+reme_ai-0.1.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+reme_ai-0.1.7.dist-info/entry_points.txt,sha256=6SP3ncXOMyKotdT4LHWPeaXo3-Sv-1qmK7OhVw76Xhw,42
+reme_ai-0.1.7.dist-info/top_level.txt,sha256=3ca2UBk97aSfPmGdg8LlVqyeLikb5qEnBEbfGgtzao0,8
+reme_ai-0.1.7.dist-info/RECORD,,