reme-ai 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (57)
  1. reme_ai/__init__.py +1 -1
  2. reme_ai/app.py +1 -1
  3. reme_ai/config/default.yaml +56 -5
  4. reme_ai/constants/common_constants.py +0 -2
  5. reme_ai/constants/language_constants.py +1 -1
  6. reme_ai/enumeration/language_enum.py +14 -0
  7. reme_ai/react/simple_react_op.py +11 -8
  8. reme_ai/retrieve/personal/extract_time_op.py +2 -3
  9. reme_ai/retrieve/personal/fuse_rerank_op.py +1 -1
  10. reme_ai/retrieve/personal/print_memory_op.py +1 -1
  11. reme_ai/retrieve/personal/read_message_op.py +1 -1
  12. reme_ai/retrieve/personal/retrieve_memory_op.py +34 -4
  13. reme_ai/retrieve/personal/semantic_rank_op.py +4 -4
  14. reme_ai/retrieve/personal/set_query_op.py +1 -1
  15. reme_ai/retrieve/task/build_query_op.py +2 -2
  16. reme_ai/retrieve/task/merge_memory_op.py +1 -1
  17. reme_ai/retrieve/task/rerank_memory_op.py +4 -4
  18. reme_ai/retrieve/task/rewrite_memory_op.py +6 -6
  19. reme_ai/service/__init__.py +0 -0
  20. reme_ai/service/base_memory_service.py +112 -0
  21. reme_ai/service/personal_memory_service.py +128 -0
  22. reme_ai/service/task_memory_service.py +126 -0
  23. reme_ai/summary/personal/contra_repeat_op.py +2 -2
  24. reme_ai/summary/personal/get_observation_op.py +4 -4
  25. reme_ai/summary/personal/get_observation_with_time_op.py +4 -4
  26. reme_ai/summary/personal/get_reflection_subject_op.py +4 -4
  27. reme_ai/summary/personal/info_filter_op.py +4 -4
  28. reme_ai/summary/personal/load_today_memory_op.py +6 -7
  29. reme_ai/summary/personal/long_contra_repeat_op.py +4 -4
  30. reme_ai/summary/personal/update_insight_op.py +4 -4
  31. reme_ai/summary/task/__init__.py +0 -1
  32. reme_ai/summary/task/comparative_extraction_op.py +9 -7
  33. reme_ai/summary/task/failure_extraction_op.py +7 -5
  34. reme_ai/summary/task/memory_deduplication_op.py +6 -6
  35. reme_ai/summary/task/memory_validation_op.py +8 -6
  36. reme_ai/summary/task/simple_comparative_summary_op.py +6 -4
  37. reme_ai/summary/task/simple_summary_op.py +6 -4
  38. reme_ai/summary/task/success_extraction_op.py +7 -5
  39. reme_ai/summary/task/trajectory_preprocess_op.py +3 -32
  40. reme_ai/summary/task/trajectory_segmentation_op.py +6 -4
  41. reme_ai/utils/datetime_handler.py +1 -1
  42. reme_ai/vector_store/delete_memory_op.py +1 -1
  43. reme_ai/vector_store/recall_vector_store_op.py +3 -3
  44. reme_ai/vector_store/update_memory_freq_op.py +1 -1
  45. reme_ai/vector_store/update_memory_utility_op.py +1 -1
  46. reme_ai/vector_store/update_vector_store_op.py +3 -3
  47. reme_ai/vector_store/vector_store_action_op.py +21 -18
  48. {reme_ai-0.1.3.dist-info → reme_ai-0.1.5.dist-info}/METADATA +298 -133
  49. reme_ai-0.1.5.dist-info/RECORD +87 -0
  50. reme_ai/enumeration/language_constants.py +0 -215
  51. reme_ai/summary/task/pdf_preprocess_op_wrapper.py +0 -50
  52. reme_ai/utils/miner_u_pdf_processor.py +0 -726
  53. reme_ai-0.1.3.dist-info/RECORD +0 -85
  54. {reme_ai-0.1.3.dist-info → reme_ai-0.1.5.dist-info}/WHEEL +0 -0
  55. {reme_ai-0.1.3.dist-info → reme_ai-0.1.5.dist-info}/entry_points.txt +0 -0
  56. {reme_ai-0.1.3.dist-info → reme_ai-0.1.5.dist-info}/licenses/LICENSE +0 -0
  57. {reme_ai-0.1.3.dist-info → reme_ai-0.1.5.dist-info}/top_level.txt +0 -0
reme_ai/service/personal_memory_service.py (new file)
@@ -0,0 +1,128 @@
+ import asyncio
+ from typing import Optional, Dict, Any, List
+
+ from flowllm import C
+ from flowllm.flow import BaseToolFlow
+ from flowllm.schema.flow_response import FlowResponse
+ from loguru import logger
+ from pydantic import Field, BaseModel
+
+ from reme_ai.config.config_parser import ConfigParser
+ from reme_ai.schema.memory import PersonalMemory
+ from reme_ai.service.base_memory_service import BaseMemoryService
+
+
+ class PersonalMemoryService(BaseMemoryService):
+
+     async def start(self) -> None:
+         C.set_service_config(parser=ConfigParser, config_name="config=default").init_by_service_config()
+
+     async def stop(self) -> None:
+         C.stop_by_service_config()
+
+     async def health(self) -> bool:
+         return True
+
+     async def add_memory(self, user_id: str, messages: list, session_id: Optional[str] = None) -> None:
+         summary_flow: BaseToolFlow = C.flow_dict["summary_personal_memory"]
+
+         new_messages: List[dict] = []
+         for message in messages:
+             if isinstance(message, dict):
+                 new_messages.append(message)
+             elif isinstance(message, BaseModel):
+                 new_messages.append(message.model_dump())
+             else:
+                 raise ValueError(f"Invalid message type={type(message)}")
+
+         kwargs = {
+             "workspace_id": user_id,
+             "trajectories": [
+                 {"messages": messages, "score": 1.0}
+             ]
+         }
+
+         result: FlowResponse = await summary_flow(**kwargs)
+         memory_list: List[PersonalMemory] = result.metadata.get("memory_list", [])
+         for memory in memory_list:
+             memory_id = memory.memory_id
+             self.add_session_memory_id(session_id, memory_id)
+             logger.info(f"[personal_memory_service] user_id={user_id} session_id={session_id} add memory: {memory}")
+
+     async def search_memory(self, user_id: str, messages: list, filters: Optional[Dict[str, Any]] = Field(
+         description="Associated filters for the messages, "
+                     "such as top_k, score etc.",
+         default=None,
+     )) -> list:
+
+         retrieve_flow: BaseToolFlow = C.flow_dict["retrieve_personal_memory"]
+
+         new_messages: List[dict] = []
+         for message in messages:
+             if isinstance(message, dict):
+                 new_messages.append(message)
+             elif isinstance(message, BaseModel):
+                 new_messages.append(message.model_dump())
+             else:
+                 raise ValueError(f"Invalid message type={type(message)}")
+
+         # Extract query from the last message
+         query = messages[-1]["content"] if messages else ""
+
+         kwargs = {
+             "workspace_id": user_id,
+             "query": query,
+             "top_k": filters.get("top_k", 1) if filters else 1
+         }
+
+         result: FlowResponse = await retrieve_flow(**kwargs)
+         logger.info(f"[personal_memory_service] user_id={user_id} search result: {result.model_dump_json()}")
+
+         return [result.answer]
+
+     async def list_memory(self, user_id: str, filters: Optional[Dict[str, Any]] = Field(
+         description="Associated filters for the messages, "
+                     "such as top_k, score etc.",
+         default=None,
+     )) -> list:
+         vector_store_flow: BaseToolFlow = C.flow_dict["vector_store"]
+         result = await vector_store_flow(workspace_id=user_id, action="list")
+         logger.info(f"[personal_memory_service] list_memory result: {result}")
+
+         result = result.metadata["action_result"]
+         for i, line in enumerate(result):
+             logger.info(f"[personal_memory_service] list memory.{i}={line}")
+         return result
+
+     async def delete_memory(self, user_id: str, session_id: Optional[str] = None) -> None:
+         delete_ids = self.session_id_dict.get(session_id, [])
+         if not delete_ids:
+             return
+
+         vector_store_flow: BaseToolFlow = C.flow_dict["vector_store"]
+         result = await vector_store_flow(workspace_id=user_id, action="delete_ids", memory_ids=delete_ids)
+         result = result.metadata["action_result"]
+         logger.info(f"[personal_memory_service] delete memory result={result}")
+
+
+ async def main():
+     async with PersonalMemoryService() as service:
+         logger.info("========== start personal memory service ==========")
+
+         await service.add_memory(user_id="u_12345",
+                                  messages=[{"content": "I really enjoy playing tennis on weekends"}],
+                                  session_id="s_123456")
+
+         await service.search_memory(user_id="u_12345",
+                                     messages=[{"content": "What do I like to do for fun?"}],
+                                     filters={"top_k": 1})
+
+         await service.list_memory(user_id="u_12345")
+         await service.delete_memory(user_id="u_12345", session_id="s_123456")
+         await service.list_memory(user_id="u_12345")
+
+         logger.info("========== end personal memory service ==========")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
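Both service hunks call `self.add_session_memory_id(...)`, read `self.session_id_dict`, and are driven through `async with`, all of which must come from `reme_ai/service/base_memory_service.py` (+112 lines, whose hunk is not reproduced in this view). A minimal sketch of the base-class surface those calls imply; this is an inference from the subclasses, not the shipped file:

# Hedged sketch: only the members that PersonalMemoryService/TaskMemoryService
# visibly rely on, inferred from the hunks above; not the shipped base class.
from typing import Dict, List, Optional


class BaseMemoryService:

    def __init__(self) -> None:
        # session_id -> ids of memories created in that session (used by delete_memory)
        self.session_id_dict: Dict[Optional[str], List[str]] = {}

    async def start(self) -> None:  # overridden by subclasses
        ...

    async def stop(self) -> None:  # overridden by subclasses
        ...

    def add_session_memory_id(self, session_id: Optional[str], memory_id: str) -> None:
        # Track which memories belong to a session so they can be deleted later
        self.session_id_dict.setdefault(session_id, []).append(memory_id)

    async def __aenter__(self) -> "BaseMemoryService":
        # Enables the `async with PersonalMemoryService() as service:` usage in main()
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self.stop()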
reme_ai/service/task_memory_service.py (new file)
@@ -0,0 +1,126 @@
+ import asyncio
+ from typing import Optional, Dict, Any, List
+
+ from flowllm import C
+ from flowllm.flow import BaseToolFlow
+ from flowllm.schema.flow_response import FlowResponse
+ from loguru import logger
+ from pydantic import Field, BaseModel
+
+ from reme_ai.config.config_parser import ConfigParser
+ from reme_ai.schema.memory import TaskMemory
+ from reme_ai.service.base_memory_service import BaseMemoryService
+
+
+ class TaskMemoryService(BaseMemoryService):
+
+     async def start(self) -> None:
+         C.set_service_config(parser=ConfigParser, config_name="config=default").init_by_service_config()
+
+     async def stop(self) -> None:
+         C.stop_by_service_config()
+
+     async def health(self) -> bool:
+         return True
+
+     async def add_memory(self, user_id: str, messages: list, session_id: Optional[str] = None) -> None:
+         summary_flow: BaseToolFlow = C.flow_dict["summary_task_memory"]
+
+         new_messages: List[dict] = []
+         for message in messages:
+             if isinstance(message, dict):
+                 new_messages.append(message)
+             elif isinstance(message, BaseModel):
+                 new_messages.append(message.model_dump())
+             else:
+                 raise ValueError(f"Invalid message type={type(message)}")
+
+         kwargs = {
+             "workspace_id": user_id,
+             "trajectories": [
+                 {"messages": messages, "score": 1.0}
+             ]
+         }
+
+         result: FlowResponse = await summary_flow(**kwargs)
+         memory_list: List[TaskMemory] = result.metadata.get("memory_list", [])
+         for memory in memory_list:
+             memory_id = memory.memory_id
+             self.add_session_memory_id(session_id, memory_id)
+             logger.info(f"[task_memory_service] user_id={user_id} session_id={session_id} add memory: {memory}")
+
+     async def search_memory(self, user_id: str, messages: list, filters: Optional[Dict[str, Any]] = Field(
+         description="Associated filters for the messages, "
+                     "such as top_k, score etc.",
+         default=None,
+     )) -> list:
+
+         retrieve_flow: BaseToolFlow = C.flow_dict["retrieve_task_memory"]
+
+         new_messages: List[dict] = []
+         for message in messages:
+             if isinstance(message, dict):
+                 new_messages.append(message)
+             elif isinstance(message, BaseModel):
+                 new_messages.append(message.model_dump())
+             else:
+                 raise ValueError(f"Invalid message type={type(message)}")
+
+         kwargs = {
+             "workspace_id": user_id,
+             "messages": messages,
+             "top_k": filters.get("top_k", 1)
+         }
+
+         result: FlowResponse = await retrieve_flow(**kwargs)
+         logger.info(f"[task_memory_service] user_id={user_id} add result: {result.model_dump_json()}")
+
+         return [result.answer]
+
+     async def list_memory(self, user_id: str, filters: Optional[Dict[str, Any]] = Field(
+         description="Associated filters for the messages, "
+                     "such as top_k, score etc.",
+         default=None,
+     )) -> list:
+         vector_store_flow: BaseToolFlow = C.flow_dict["vector_store"]
+         result = await vector_store_flow(workspace_id=user_id, action="list")
+         print("list_memory result:", result)
+
+
+         result = result.metadata["action_result"]
+         for i, line in enumerate(result):
+             logger.info(f"[task_memory_service] list memory.{i}={line}")
+         return result
+
+     async def delete_memory(self, user_id: str, session_id: Optional[str] = None) -> None:
+         delete_ids = self.session_id_dict.get(session_id, [])
+         if not delete_ids:
+             return
+
+         vector_store_flow: BaseToolFlow = C.flow_dict["vector_store"]
+         result = await vector_store_flow(workspace_id=user_id, action="delete_ids", memory_ids=delete_ids)
+         result = result.metadata["action_result"]
+         logger.info(f"[task_memory_service] delete memory result={result}")
+
+
+ async def main():
+     async with TaskMemoryService() as service:
+         logger.info("========== start task memory service ==========")
+
+         await service.add_memory(user_id="u_123456",
+                                  messages=[{"content": "please use web search tool to search financial news:"}],
+                                  session_id="s_123456")
+
+         await service.search_memory(user_id="u_123456",
+                                     messages=[{"content": "please use web search tool to search financial news"}],
+                                     filters={"top_k": 1})
+
+         await service.list_memory(user_id="u_123456")
+         await service.delete_memory(user_id="u_123456", session_id="s_123456")
+         await service.list_memory(user_id="u_123456")
+
+         logger.info("========== end task memory service ==========")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
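One caller-side detail from the two service hunks: both accept messages as plain dicts or as Pydantic `BaseModel` instances (normalized via `model_dump()`), but `TaskMemoryService.search_memory` calls `filters.get(...)` without the `if filters else` guard the personal service has, so it needs an explicit filters dict. A hedged usage sketch; only the dict-form messages are taken verbatim from the bundled demos:

# Caller-side sketch; runnable only with reme-ai 0.1.5 installed and configured.
import asyncio

from reme_ai.service.task_memory_service import TaskMemoryService


async def demo() -> None:
    async with TaskMemoryService() as service:
        # dict-form messages, exactly as in the bundled main() demos
        await service.add_memory(user_id="u_123456",
                                 messages=[{"content": "please use web search tool to search financial news"}],
                                 session_id="s_123456")

        # Always pass filters here: TaskMemoryService.search_memory does
        # filters.get("top_k", 1) with no None guard, unlike the personal service.
        answers = await service.search_memory(user_id="u_123456",
                                              messages=[{"content": "search financial news"}],
                                              filters={"top_k": 1})
        print(answers)


if __name__ == "__main__":
    asyncio.run(demo())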
reme_ai/summary/personal/contra_repeat_op.py
@@ -24,7 +24,7 @@ class ContraRepeatOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """
          Executes the primary routine of the ContraRepeatOp which involves:
          1. Gets memory list from context
@@ -82,7 +82,7 @@ class ContraRepeatOp(BaseLLMOp):
          logger.info(f"contra_repeat_prompt={full_prompt}")
 
          # Call LLM
-         response = self.llm.chat([Message(role=Role.USER, content=full_prompt)])
+         response = await self.llm.achat([Message(role=Role.USER, content=full_prompt)])
 
          # Return if empty
          if not response or not response.content:
reme_ai/summary/personal/get_observation_op.py
@@ -16,7 +16,7 @@ class GetObservationOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """Extract personal observations from chat messages"""
          # Get messages from context - guaranteed to exist by flow input
          messages: List[Message] = self.context.messages
@@ -34,7 +34,7 @@ class GetObservationOp(BaseLLMOp):
          logger.info(f"Extracting observations from {len(filtered_messages)} filtered messages")
 
          # Extract observations using LLM
-         observation_memories = self._extract_observations_from_messages(filtered_messages)
+         observation_memories = await self._extract_observations_from_messages(filtered_messages)
 
          # Store results in context using standardized key
          self.context.observation_memories = observation_memories
@@ -58,7 +58,7 @@ class GetObservationOp(BaseLLMOp):
          logger.info(f"Filtered messages from {len(messages)} to {len(filtered_messages)}")
          return filtered_messages
 
-     def _extract_observations_from_messages(self, filtered_messages: List[Message]) -> List[BaseMemory]:
+     async def _extract_observations_from_messages(self, filtered_messages: List[Message]) -> List[BaseMemory]:
          """Extract observations from filtered messages using LLM"""
          user_name = self.context.get("user_name", "user")
 
@@ -113,7 +113,7 @@ class GetObservationOp(BaseLLMOp):
              return observation_memories
 
          # Use LLM chat with callback function
-         return self.llm.chat(messages=[Message(content=full_prompt)], callback_fn=parse_observations)
+         return await self.llm.achat(messages=[Message(content=full_prompt)], callback_fn=parse_observations)
 
      @staticmethod
      def parse_observation_response(response_text: str) -> List[dict]:
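The `callback_fn` idiom these personal-summary ops now pair with `achat` is worth spelling out: the op builds a prompt, hands a parsing closure to the LLM call, and the awaited call returns the closure's parsed result rather than the raw response. A self-contained sketch of that contract, with stand-in classes since flowllm's real `achat` signature is not shown in this diff:

# Sketch of the assumed callback_fn contract: achat awaits the model, then
# returns callback_fn(response) instead of the raw response. All names here
# are illustrative stand-ins, not flowllm's real classes.
import asyncio
from dataclasses import dataclass
from typing import Any, Callable, List, Optional


@dataclass
class Response:
    content: str


class SketchLLM:
    async def achat(self, messages: List[dict],
                    callback_fn: Optional[Callable[[Response], Any]] = None) -> Any:
        response = Response(content="likes tennis\nplays on weekends")  # stand-in completion
        return callback_fn(response) if callback_fn else response


def parse_observations(response: Response) -> List[dict]:
    # Turn the raw completion into structured records; an empty list keeps
    # the flow degrading gracefully on unparseable output.
    return [{"observation": line} for line in response.content.splitlines() if line.strip()]


async def main():
    memories = await SketchLLM().achat(messages=[{"content": "prompt"}],
                                       callback_fn=parse_observations)
    print(memories)  # [{'observation': 'likes tennis'}, {'observation': 'plays on weekends'}]


if __name__ == "__main__":
    asyncio.run(main())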
reme_ai/summary/personal/get_observation_with_time_op.py
@@ -16,7 +16,7 @@ class GetObservationWithTimeOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """Extract personal observations with time information from chat messages"""
          # Get messages from context - guaranteed to exist by flow input
          messages: List[Message] = self.context.messages
@@ -34,7 +34,7 @@ class GetObservationWithTimeOp(BaseLLMOp):
          logger.info(f"Extracting observations with time from {len(filtered_messages)} filtered messages")
 
          # Extract observations using LLM
-         observation_memories_with_time = self._extract_observations_with_time_from_messages(filtered_messages)
+         observation_memories_with_time = await self._extract_observations_with_time_from_messages(filtered_messages)
 
          # Store results in context using standardized key
          self.context.observation_memories_with_time = observation_memories_with_time
@@ -58,7 +58,7 @@ class GetObservationWithTimeOp(BaseLLMOp):
          logger.info(f"Filtered messages from {len(messages)} to {len(filtered_messages)}")
          return filtered_messages
 
-     def _extract_observations_with_time_from_messages(self, filtered_messages: List[Message]) -> List[BaseMemory]:
+     async def _extract_observations_with_time_from_messages(self, filtered_messages: List[Message]) -> List[BaseMemory]:
          """Extract observations with time information from filtered messages using LLM"""
          user_name = self.context.get("user_name", "user")
 
@@ -123,7 +123,7 @@ class GetObservationWithTimeOp(BaseLLMOp):
              return observation_memories
 
          # Use LLM chat with callback function
-         return self.llm.chat(messages=[Message(content=full_prompt)], callback_fn=parse_observations)
+         return await self.llm.achat(messages=[Message(content=full_prompt)], callback_fn=parse_observations)
 
      def _get_colon_word(self) -> str:
          """Get language-specific colon word"""
reme_ai/summary/personal/get_reflection_subject_op.py
@@ -39,7 +39,7 @@ class GetReflectionSubjectOp(BaseLLMOp):
          }
      )
 
-     def execute(self):
+     async def async_execute(self):
          """
          Generate reflection subjects (topics) from personal memories for insight extraction.
 
@@ -85,7 +85,7 @@ class GetReflectionSubjectOp(BaseLLMOp):
              return
 
          # Generate reflection subjects using LLM
-         insight_memories = self._generate_reflection_subjects(
+         insight_memories = await self._generate_reflection_subjects(
              memory_contents, existing_subjects, user_name, reflect_num_questions
          )
 
@@ -93,7 +93,7 @@ class GetReflectionSubjectOp(BaseLLMOp):
          self.context.response.metadata["insight_memories"] = insight_memories
          logger.info(f"Generated {len(insight_memories)} new reflection subject memories")
 
-     def _generate_reflection_subjects(self, memory_contents: List[str], existing_subjects: List[str],
+     async def _generate_reflection_subjects(self, memory_contents: List[str], existing_subjects: List[str],
                                        user_name: str, num_questions: int) -> List[BaseMemory]:
          """
          Generate new reflection subjects using LLM analysis of memory contents.
@@ -148,7 +148,7 @@ class GetReflectionSubjectOp(BaseLLMOp):
              return insight_memories
 
          # Generate subjects using LLM
-         return self.llm.chat(messages=[Message(content=full_prompt)], callback_fn=parse_reflection_response)
+         return await self.llm.achat(messages=[Message(content=full_prompt)], callback_fn=parse_reflection_response)
 
      def get_language_value(self, value_dict: dict):
          """Get language-specific value from dictionary"""
reme_ai/summary/personal/info_filter_op.py
@@ -16,7 +16,7 @@ class InfoFilterOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """Filter messages based on information content scores"""
          # Get messages from context - guaranteed to exist by flow input
          trajectories: list = self.context.trajectories
@@ -45,7 +45,7 @@ class InfoFilterOp(BaseLLMOp):
          logger.info(f"Filtering {len(info_messages)} messages for information content")
 
          # Filter messages using LLM
-         filtered_memories = self._filter_messages_with_llm(info_messages, user_name, preserved_scores)
+         filtered_memories = await self._filter_messages_with_llm(info_messages, user_name, preserved_scores)
 
          # Store results in context using standardized key
          self.context.messages = filtered_memories
@@ -81,7 +81,7 @@ class InfoFilterOp(BaseLLMOp):
          logger.info(f"Filtered messages from {len(messages)} to {len(info_messages)}")
          return info_messages
 
-     def _filter_messages_with_llm(self, info_messages: List[Message], user_name: str, preserved_scores: str) -> List[
+     async def _filter_messages_with_llm(self, info_messages: List[Message], user_name: str, preserved_scores: str) -> List[
          PersonalMemory]:
          """Filter messages using LLM to score information content"""
 
@@ -146,7 +146,7 @@ class InfoFilterOp(BaseLLMOp):
              return filtered_memories
 
          # Use LLM chat with callback function
-         return self.llm.chat(messages=[Message(content=full_prompt)], callback_fn=parse_and_filter)
+         return await self.llm.achat(messages=[Message(content=full_prompt)], callback_fn=parse_and_filter)
 
      def _get_colon_word(self) -> str:
          """Get language-specific colon word"""
reme_ai/summary/personal/load_today_memory_op.py
@@ -16,7 +16,7 @@ class LoadTodayMemoryOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """
          Load today's memories from vector store and perform deduplication.
 
@@ -36,7 +36,7 @@ class LoadTodayMemoryOp(BaseLLMOp):
          logger.info(f"Loading today's memories for user: {user_name} (top_k: {top_k})")
 
          # Get today's memories from vector store
-         today_memories = self._retrieve_today_memories(workspace_id, user_name, top_k)
+         today_memories = await self._retrieve_today_memories(workspace_id, user_name, top_k)
 
          if not today_memories:
              logger.info("No memories found for today")
@@ -47,7 +47,7 @@ class LoadTodayMemoryOp(BaseLLMOp):
          self.context.today_memories = today_memories
          logger.info(f"Final today's memory list size: {len(today_memories)}")
 
-     def _retrieve_today_memories(self, workspace_id: str, user_name: str, top_k: int) -> List[BaseMemory]:
+     async def _retrieve_today_memories(self, workspace_id: str, user_name: str, top_k: int) -> List[BaseMemory]:
          """
          Retrieve memories from today using vector store with date filtering.
 
@@ -74,12 +74,11 @@ class LoadTodayMemoryOp(BaseLLMOp):
          }
 
          # Search vector store with date filter
-         nodes: List[VectorNode] = self.vector_store.search(
-             query="",  # Empty query to get all results for today
+         nodes: List[VectorNode] = await self.vector_store.async_search(
+             query=" ",
              workspace_id=workspace_id,
              top_k=top_k,
-             filter_dict=filter_dict
-         )
+             filter_dict=filter_dict)
 
          logger.info(f"Vector store returned {len(nodes)} nodes for today")
 
reme_ai/summary/personal/long_contra_repeat_op.py
@@ -19,7 +19,7 @@ class LongContraRepeatOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """
          Analyze memories for contradictions and redundancies, resolving conflicts.
 
@@ -62,13 +62,13 @@ class LongContraRepeatOp(BaseLLMOp):
          logger.info(f"Processing {len(sorted_memories)} memories for contradictions and redundancies")
 
          # Analyze and resolve contradictions
-         filtered_memories = self._analyze_and_resolve_conflicts(sorted_memories)
+         filtered_memories = await self._analyze_and_resolve_conflicts(sorted_memories)
 
          # Store results in context
          self.context.response.metadata["memory_list"] = filtered_memories
          logger.info(f"Conflict resolution: {len(sorted_memories)} -> {len(filtered_memories)} memories")
 
-     def _analyze_and_resolve_conflicts(self, memories: List[BaseMemory]) -> List[BaseMemory]:
+     async def _analyze_and_resolve_conflicts(self, memories: List[BaseMemory]) -> List[BaseMemory]:
          """
          Analyze memories for contradictions and redundancies using LLM.
 
@@ -104,7 +104,7 @@ class LongContraRepeatOp(BaseLLMOp):
          logger.info(f"Contradiction analysis prompt length: {len(full_prompt)} chars")
 
          # Get LLM analysis
-         response = self.llm.chat([Message(role=Role.USER, content=full_prompt)])
+         response = await self.llm.achat([Message(role=Role.USER, content=full_prompt)])
 
          if not response or not response.content:
              logger.warning("Empty response from LLM, keeping all memories")
reme_ai/summary/personal/update_insight_op.py
@@ -17,7 +17,7 @@ class UpdateInsightOp(BaseLLMOp):
      """
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """
          Update insight values based on new observation memories.
 
@@ -65,7 +65,7 @@ class UpdateInsightOp(BaseLLMOp):
          # Update each selected insight
          updated_insights = []
          for insight_memory, relevance_score, relevant_observations in top_insights:
-             updated_insight = self._update_insight_with_observations(
+             updated_insight = await self._update_insight_with_observations(
                  insight_memory, relevant_observations, user_name
              )
              if updated_insight:
@@ -135,7 +135,7 @@ class UpdateInsightOp(BaseLLMOp):
 
          return intersection / union if union > 0 else 0.0
 
-     def _update_insight_with_observations(self, insight_memory: PersonalMemory,
+     async def _update_insight_with_observations(self, insight_memory: PersonalMemory,
                                            relevant_observations: List[PersonalMemory],
                                            user_name: str) -> PersonalMemory:
          """
@@ -208,7 +208,7 @@ class UpdateInsightOp(BaseLLMOp):
 
          # Use LLM chat with callback function
          try:
-             return self.llm.chat(messages=[Message(content=full_prompt)], callback_fn=parse_update_response)
+             return await self.llm.achat(messages=[Message(content=full_prompt)], callback_fn=parse_update_response)
          except Exception as e:
              logger.error(f"Error updating insight: {e}")
              return insight_memory
reme_ai/summary/task/__init__.py
@@ -2,7 +2,6 @@ from .comparative_extraction_op import ComparativeExtractionOp
  from .failure_extraction_op import FailureExtractionOp
  from .memory_deduplication_op import MemoryDeduplicationOp
  from .memory_validation_op import MemoryValidationOp
- from .pdf_preprocess_op_wrapper import PDFPreprocessOp
  from .simple_comparative_summary_op import SimpleComparativeSummaryOp
  from .simple_summary_op import SimpleSummaryOp
  from .success_extraction_op import SuccessExtractionOp
reme_ai/summary/task/comparative_extraction_op.py
@@ -1,6 +1,8 @@
  from typing import List, Tuple, Optional
 
  from flowllm import C, BaseLLMOp
+ from flowllm.enumeration.role import Role
+ from flowllm.schema.message import Message as FlowMessage
  from loguru import logger
 
  from reme_ai.schema import Message, Trajectory
@@ -12,7 +14,7 @@ from reme_ai.utils.op_utils import merge_messages_content, parse_json_experience
  class ComparativeExtractionOp(BaseLLMOp):
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """Extract comparative task memories by comparing different scoring trajectories"""
          all_trajectories: List[Trajectory] = self.context.get("all_trajectories", [])
          success_trajectories: List[Trajectory] = self.context.get("success_trajectories", [])
@@ -26,7 +28,7 @@ class ComparativeExtractionOp(BaseLLMOp):
          if highest_traj and lowest_traj and highest_traj.score > lowest_traj.score:
              logger.info(
                  f"Extracting soft comparative task memories: highest ({highest_traj.score:.2f}) vs lowest ({lowest_traj.score:.2f})")
-             soft_task_memories = self._extract_soft_comparative_task_memory(highest_traj, lowest_traj)
+             soft_task_memories = await self._extract_soft_comparative_task_memory(highest_traj, lowest_traj)
              comparative_task_memories.extend(soft_task_memories)
 
          # Hard comparison: success vs failure (if similarity search is enabled)
@@ -37,7 +39,7 @@ class ComparativeExtractionOp(BaseLLMOp):
              logger.info(f"Found {len(similar_pairs)} similar pairs for hard comparison")
 
              for success_steps, failure_steps, similarity_score in similar_pairs:
-                 hard_task_memories = self._extract_hard_comparative_task_memory(success_steps, failure_steps,
+                 hard_task_memories = await self._extract_hard_comparative_task_memory(success_steps, failure_steps,
                                                                                  similarity_score)
                  comparative_task_memories.extend(hard_task_memories)
 
@@ -73,7 +75,7 @@ class ComparativeExtractionOp(BaseLLMOp):
          """Get trajectory score"""
          return trajectory.score
 
-     def _extract_soft_comparative_task_memory(self, higher_traj: Trajectory, lower_traj: Trajectory) -> List[
+     async def _extract_soft_comparative_task_memory(self, higher_traj: Trajectory, lower_traj: Trajectory) -> List[
          BaseMemory]:
          """Extract soft comparative task memory (high score vs low score)"""
          higher_steps = self._get_trajectory_steps(higher_traj)
@@ -105,9 +107,9 @@
 
              return task_memories
 
-         return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_task_memories)
+         return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_task_memories)
 
-     def _extract_hard_comparative_task_memory(self, success_steps: List[Message],
+     async def _extract_hard_comparative_task_memory(self, success_steps: List[Message],
                                                failure_steps: List[Message], similarity_score: float) -> List[
          BaseMemory]:
          """Extract hard comparative task memory (success vs failure)"""
@@ -134,7 +136,7 @@
 
          return task_memories
 
-         return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_task_memories)
+         return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_task_memories)
 
      @staticmethod
      def _get_trajectory_steps(trajectory: Trajectory) -> List[Message]:
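Note the import hunk at the top of this file: `reme_ai.schema.Message` keeps describing trajectory steps, while the LLM call now needs flowllm's own message type with an explicit role, imported as `FlowMessage` to avoid the name clash. A toy illustration of that two-schema boundary, using stand-in dataclasses rather than the real schemas:

# Illustrative stand-ins for the two Message types this op now distinguishes.
from dataclasses import dataclass
from typing import List


@dataclass
class Message:  # stands in for reme_ai.schema.Message (trajectory steps)
    content: str


@dataclass
class FlowMessage:  # stands in for flowllm.schema.message.Message (chat payload)
    role: str
    content: str


def to_chat_payload(steps: List[Message]) -> List[FlowMessage]:
    # Merge step contents into one prompt, then wrap it in the flowllm type
    # with an explicit user role, mirroring the achat(...) calls above.
    prompt = "\n".join(step.content for step in steps)
    return [FlowMessage(role="user", content=prompt)]


print(to_chat_payload([Message("step 1"), Message("step 2")]))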
reme_ai/summary/task/failure_extraction_op.py
@@ -1,6 +1,8 @@
  from typing import List
 
  from flowllm import C, BaseLLMOp
+ from flowllm.enumeration.role import Role
+ from flowllm.schema.message import Message as FlowMessage
  from loguru import logger
 
  from reme_ai.schema import Message, Trajectory
@@ -12,7 +14,7 @@ from reme_ai.utils.op_utils import merge_messages_content, parse_json_experience
  class FailureExtractionOp(BaseLLMOp):
      file_path: str = __file__
 
-     def execute(self):
+     async def async_execute(self):
          """Extract task memories from failed trajectories"""
          failure_trajectories: List[Trajectory] = self.context.get("failure_trajectories", [])
 
@@ -29,11 +31,11 @@
              if hasattr(trajectory, 'segments') and trajectory.segments:
                  # Process segmented step sequences
                  for segment in trajectory.segments:
-                     task_memories = self._extract_failure_task_memory_from_steps(segment, trajectory)
+                     task_memories = await self._extract_failure_task_memory_from_steps(segment, trajectory)
                      failure_task_memories.extend(task_memories)
              else:
                  # Process entire trajectory
-                 task_memories = self._extract_failure_task_memory_from_steps(trajectory.messages, trajectory)
+                 task_memories = await self._extract_failure_task_memory_from_steps(trajectory.messages, trajectory)
                  failure_task_memories.extend(task_memories)
 
          logger.info(f"Extracted {len(failure_task_memories)} failure task memories")
@@ -41,7 +43,7 @@
 
          # Add task memories to context
          self.context.failure_task_memories = failure_task_memories
-     def _extract_failure_task_memory_from_steps(self, steps: List[Message], trajectory: Trajectory) -> List[BaseMemory]:
+     async def _extract_failure_task_memory_from_steps(self, steps: List[Message], trajectory: Trajectory) -> List[BaseMemory]:
          """Extract task memory from failed step sequences"""
          step_content = merge_messages_content(steps)
          context = get_trajectory_context(trajectory, steps)
@@ -70,4 +72,4 @@
 
              return task_memories
 
-         return self.llm.chat(messages=[Message(content=prompt)], callback_fn=parse_task_memories)
+         return await self.llm.achat(messages=[FlowMessage(role=Role.USER, content=prompt)], callback_fn=parse_task_memories)
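Taken together, every op hunk in this release applies the same mechanical migration: `def execute` becomes `async def async_execute`, blocking `self.llm.chat(...)` and `self.vector_store.search(...)` become awaited `achat(...)` and `async_search(...)`, and the private helpers on the call path turn into coroutines. A schematic before/after of the pattern, using stub classes rather than flowllm's real API:

# Schematic, runnable sketch of the 0.1.5 async migration; the stubs below
# stand in for flowllm's BaseLLMOp/LLM and are not the real API.
import asyncio


class StubLLM:
    def chat(self, messages):                 # 0.1.3-era blocking call
        return {"content": "reply"}

    async def achat(self, messages):          # 0.1.5-era awaitable call
        return {"content": "reply"}


class OpV013:
    llm = StubLLM()

    def execute(self):                        # old entry point
        return self.llm.chat([{"role": "user", "content": "..."}])


class OpV015:
    llm = StubLLM()

    async def async_execute(self):            # new entry point
        # Helpers that touch the LLM or vector store become coroutines too,
        # and are awaited all the way up the call chain.
        return await self.llm.achat([{"role": "user", "content": "..."}])


print(OpV013().execute())
print(asyncio.run(OpV015().async_execute()))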