MemoryOS 0.0.1-py3-none-any.whl → 0.1.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MemoryOS might be problematic.

Files changed (124)
  1. memoryos-0.1.13.dist-info/METADATA +288 -0
  2. memoryos-0.1.13.dist-info/RECORD +122 -0
  3. memos/__init__.py +20 -1
  4. memos/api/start_api.py +420 -0
  5. memos/chunkers/__init__.py +4 -0
  6. memos/chunkers/base.py +24 -0
  7. memos/chunkers/factory.py +22 -0
  8. memos/chunkers/sentence_chunker.py +35 -0
  9. memos/configs/__init__.py +0 -0
  10. memos/configs/base.py +82 -0
  11. memos/configs/chunker.py +45 -0
  12. memos/configs/embedder.py +53 -0
  13. memos/configs/graph_db.py +45 -0
  14. memos/configs/internet_retriever.py +81 -0
  15. memos/configs/llm.py +71 -0
  16. memos/configs/mem_chat.py +81 -0
  17. memos/configs/mem_cube.py +89 -0
  18. memos/configs/mem_os.py +74 -0
  19. memos/configs/mem_reader.py +53 -0
  20. memos/configs/mem_scheduler.py +78 -0
  21. memos/configs/memory.py +195 -0
  22. memos/configs/parser.py +38 -0
  23. memos/configs/utils.py +8 -0
  24. memos/configs/vec_db.py +64 -0
  25. memos/deprecation.py +262 -0
  26. memos/embedders/__init__.py +0 -0
  27. memos/embedders/base.py +15 -0
  28. memos/embedders/factory.py +23 -0
  29. memos/embedders/ollama.py +74 -0
  30. memos/embedders/sentence_transformer.py +40 -0
  31. memos/exceptions.py +30 -0
  32. memos/graph_dbs/__init__.py +0 -0
  33. memos/graph_dbs/base.py +215 -0
  34. memos/graph_dbs/factory.py +21 -0
  35. memos/graph_dbs/neo4j.py +827 -0
  36. memos/hello_world.py +97 -0
  37. memos/llms/__init__.py +0 -0
  38. memos/llms/base.py +16 -0
  39. memos/llms/factory.py +25 -0
  40. memos/llms/hf.py +231 -0
  41. memos/llms/ollama.py +82 -0
  42. memos/llms/openai.py +34 -0
  43. memos/llms/utils.py +14 -0
  44. memos/log.py +78 -0
  45. memos/mem_chat/__init__.py +0 -0
  46. memos/mem_chat/base.py +30 -0
  47. memos/mem_chat/factory.py +21 -0
  48. memos/mem_chat/simple.py +200 -0
  49. memos/mem_cube/__init__.py +0 -0
  50. memos/mem_cube/base.py +29 -0
  51. memos/mem_cube/general.py +146 -0
  52. memos/mem_cube/utils.py +24 -0
  53. memos/mem_os/client.py +5 -0
  54. memos/mem_os/core.py +819 -0
  55. memos/mem_os/main.py +503 -0
  56. memos/mem_os/product.py +89 -0
  57. memos/mem_reader/__init__.py +0 -0
  58. memos/mem_reader/base.py +27 -0
  59. memos/mem_reader/factory.py +21 -0
  60. memos/mem_reader/memory.py +298 -0
  61. memos/mem_reader/simple_struct.py +241 -0
  62. memos/mem_scheduler/__init__.py +0 -0
  63. memos/mem_scheduler/base_scheduler.py +164 -0
  64. memos/mem_scheduler/general_scheduler.py +305 -0
  65. memos/mem_scheduler/modules/__init__.py +0 -0
  66. memos/mem_scheduler/modules/base.py +74 -0
  67. memos/mem_scheduler/modules/dispatcher.py +103 -0
  68. memos/mem_scheduler/modules/monitor.py +82 -0
  69. memos/mem_scheduler/modules/redis_service.py +146 -0
  70. memos/mem_scheduler/modules/retriever.py +41 -0
  71. memos/mem_scheduler/modules/schemas.py +146 -0
  72. memos/mem_scheduler/scheduler_factory.py +21 -0
  73. memos/mem_scheduler/utils.py +26 -0
  74. memos/mem_user/user_manager.py +488 -0
  75. memos/memories/__init__.py +0 -0
  76. memos/memories/activation/__init__.py +0 -0
  77. memos/memories/activation/base.py +42 -0
  78. memos/memories/activation/item.py +25 -0
  79. memos/memories/activation/kv.py +232 -0
  80. memos/memories/base.py +19 -0
  81. memos/memories/factory.py +34 -0
  82. memos/memories/parametric/__init__.py +0 -0
  83. memos/memories/parametric/base.py +19 -0
  84. memos/memories/parametric/item.py +11 -0
  85. memos/memories/parametric/lora.py +41 -0
  86. memos/memories/textual/__init__.py +0 -0
  87. memos/memories/textual/base.py +89 -0
  88. memos/memories/textual/general.py +286 -0
  89. memos/memories/textual/item.py +167 -0
  90. memos/memories/textual/naive.py +185 -0
  91. memos/memories/textual/tree.py +321 -0
  92. memos/memories/textual/tree_text_memory/__init__.py +0 -0
  93. memos/memories/textual/tree_text_memory/organize/__init__.py +0 -0
  94. memos/memories/textual/tree_text_memory/organize/manager.py +305 -0
  95. memos/memories/textual/tree_text_memory/retrieve/__init__.py +0 -0
  96. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +263 -0
  97. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +89 -0
  98. memos/memories/textual/tree_text_memory/retrieve/reasoner.py +61 -0
  99. memos/memories/textual/tree_text_memory/retrieve/recall.py +158 -0
  100. memos/memories/textual/tree_text_memory/retrieve/reranker.py +111 -0
  101. memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +13 -0
  102. memos/memories/textual/tree_text_memory/retrieve/searcher.py +208 -0
  103. memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +68 -0
  104. memos/memories/textual/tree_text_memory/retrieve/utils.py +48 -0
  105. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +335 -0
  106. memos/parsers/__init__.py +0 -0
  107. memos/parsers/base.py +15 -0
  108. memos/parsers/factory.py +19 -0
  109. memos/parsers/markitdown.py +22 -0
  110. memos/settings.py +8 -0
  111. memos/templates/__init__.py +0 -0
  112. memos/templates/mem_reader_prompts.py +98 -0
  113. memos/templates/mem_scheduler_prompts.py +65 -0
  114. memos/templates/mos_prompts.py +63 -0
  115. memos/types.py +55 -0
  116. memos/vec_dbs/__init__.py +0 -0
  117. memos/vec_dbs/base.py +105 -0
  118. memos/vec_dbs/factory.py +21 -0
  119. memos/vec_dbs/item.py +43 -0
  120. memos/vec_dbs/qdrant.py +292 -0
  121. memoryos-0.0.1.dist-info/METADATA +0 -53
  122. memoryos-0.0.1.dist-info/RECORD +0 -5
  123. {memoryos-0.0.1.dist-info → memoryos-0.1.13.dist-info}/LICENSE +0 -0
  124. {memoryos-0.0.1.dist-info → memoryos-0.1.13.dist-info}/WHEEL +0 -0
memos/parsers/markitdown.py ADDED
@@ -0,0 +1,22 @@
+ from markitdown import MarkItDown
+
+ from memos.configs.parser import MarkItDownParserConfig
+ from memos.log import get_logger
+ from memos.parsers.base import BaseParser
+
+
+ logger = get_logger(__name__)
+
+
+ class MarkItDownParser(BaseParser):
+     """MarkItDown Parser class."""
+
+     def __init__(self, config: MarkItDownParserConfig):
+         self.config = config
+
+     def parse(self, file_path: str) -> str:
+         """Parse the file at the given path and return its content as a MarkDown string."""
+         md = MarkItDown(enable_plugins=False)
+         result = md.convert(file_path)
+
+         return result.text_content
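As an illustrative sketch (not part of the diff itself), the parser added above could be driven as follows. The default construction of MarkItDownParserConfig is an assumption, since its fields live in memos/configs/parser.py and are not shown in this hunk.

# Hypothetical usage sketch; assumes MarkItDownParserConfig can be built with defaults.
from memos.configs.parser import MarkItDownParserConfig
from memos.parsers.markitdown import MarkItDownParser

config = MarkItDownParserConfig()   # assumption: default construction is valid
parser = MarkItDownParser(config)

# Convert any file MarkItDown supports (PDF, DOCX, HTML, ...) into Markdown text.
markdown_text = parser.parse("example.pdf")
print(markdown_text[:500])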
memos/settings.py ADDED
@@ -0,0 +1,8 @@
+ from pathlib import Path
+
+
+ MEMOS_DIR = Path.cwd() / ".memos"
+ DEBUG = False
+
+ # "memos" or "memos.submodules" ... to filter logs from specific packages
+ LOG_FILTER_TREE_PREFIX = ""
File without changes
memos/templates/mem_reader_prompts.py ADDED
@@ -0,0 +1,98 @@
+ SIMPLE_STRUCT_MEM_READER_PROMPT = """
+ You are a memory extraction expert.
+
+ Your task is to extract memories from the perspective of ${user_a}, based on a conversation between ${user_a} and ${user_b}. This means identifying what ${user_a} would plausibly remember — including their own experiences, thoughts, plans, or relevant statements and actions made by others (such as ${user_b}) that impacted or were acknowledged by ${user_a}.
+
+ Please perform:
+ 1. Identify information that reflects ${user_a}'s experiences, beliefs, concerns, decisions, plans, or reactions — including meaningful input from ${user_b} that ${user_a} acknowledged or responded to.
+ 2. Resolve all time, person, and event references clearly:
+ - Convert relative time expressions (e.g., “yesterday,” “next Friday”) into absolute dates using the message timestamp if possible.
+ - Clearly distinguish between event time and message time.
+ - If uncertainty exists, state it explicitly (e.g., “around June 2025,” “exact date unclear”).
+ - Include specific locations if mentioned.
+ - Resolve all pronouns, aliases, and ambiguous references into full names or identities.
+ - Disambiguate people with the same name if applicable.
+ 3. Always write from a third-person perspective, referring to ${user_a} as
+ "The user" or by name if name mentioned, rather than using first-person ("I", "me", "my").
+ For example, write "The user felt exhausted..." instead of "I felt exhausted...".
+ 4. Do not omit any information that ${user_a} is likely to remember.
+ - Include all key experiences, thoughts, emotional responses, and plans — even if they seem minor.
+ - Prioritize completeness and fidelity over conciseness.
+ - Do not generalize or skip details that could be personally meaningful to ${user_a}.
+
+ Return a single valid JSON object with the following structure:
+
+ {
+ "memory list": [
+ {
+ "key": <string, a unique, concise memory title in English>,
+ "memory_type": <string, Either "LongTermMemory" or "UserMemory">,
+ "value": <A detailed, self-contained, and unambiguous memory statement — written in English if the input conversation is in English, or in Chinese if the conversation is in Chinese>,
+ "tags": <A list of relevant English thematic keywords (e.g.,
+ ["deadline", "team", "planning"])>
+ },
+ ...
+ ],
+ "summary": <a natural paragraph summarizing the above memories from ${user_a}'s perspective, 120–200 words, same language as the input>
+ }
+
+ Language rules:
+ - The `value` fields and `summary` must match the language of the input conversation.
+ - All metadata fields (`key`, `memory_type`, `tags`) must be in English.
+
+ Example:
+ Conversation:
+ user: [June 26, 2025 at 3:00 PM]: Hi Jerry! Yesterday at 3 PM I had a meeting with my team about the new project.
+ assistant: Oh Tom! Do you think the team can finish by December 15?
+ user: [June 26, 2025 at 3:00 PM]: I’m worried. The backend won’t be done until
+ December 10, so testing will be tight.
+ assistant: [June 26, 2025 at 3:00 PM]: Maybe propose an extension?
+ user: [June 26, 2025 at 4:21 PM]: Good idea. I’ll raise it in tomorrow’s 9:30 AM meeting—maybe shift the deadline to January 5.
+
+ Output:
+ {
+ "memory list": [
+ {
+ "key": "Initial project meeting",
+ "memory_type": "LongTermMemory",
+ "value": "On June 25, 2025 at 3:00 PM, Tom held a meeting with their team to discuss a new project. The conversation covered the timeline and raised concerns about the feasibility of the December 15, 2025 deadline.",
+ "tags": ["project", "timeline", "meeting", "deadline"]
+ },
+ {
+ "key": "Planned scope adjustment",
+ "memory_type": "UserMemory",
+ "value": "Tom planned to suggest in a meeting on June 27, 2025 at 9:30 AM that the team should prioritize features and propose shifting the project deadline to January 5, 2026.",
+ "tags": ["planning", "deadline change", "feature prioritization"]
+ },
+ ],
+ "summary": "Tom is currently focused on managing a new project with a tight schedule. After a team meeting on June 25, 2025, he realized the original deadline of December 15 might not be feasible due to backend delays. Concerned about insufficient testing time, he welcomed Jerry’s suggestion of proposing an extension. Tom plans to raise the idea of shifting the deadline to January 5, 2026 in the next morning’s meeting. His actions reflect both stress about timelines and a proactive, team-oriented problem-solving approach."
+ }
+
+ Conversation:
+ ${conversation}
+
+ Your Output:
+ """
+
+ SIMPLE_STRUCT_DOC_READER_PROMPT = """
+ You are an expert text analyst for a search and retrieval system. Your task is to process a document chunk and generate a single, structured JSON object.
+ The input is a single piece of text: `[DOCUMENT_CHUNK]`.
+ You must generate a single JSON object with two top-level keys: `summary` and `tags`.
+ 1. `summary`:
+ - A dense, searchable summary of the ENTIRE `[DOCUMENT_CHUNK]`.
+ - The purpose is for semantic search embedding.
+ - A clear and accurate sentence that comprehensively summarizes the main points, arguments, and information within the `[DOCUMENT_CHUNK]`.
+ - The goal is to create a standalone overview that allows a reader to fully understand the essence of the chunk without reading the original text.
+ - The summary should be **no more than 50 words**.
+ 2. `tags`:
+ - A concise list of **3 to 5 high-level, summative tags**.
+ - **Each tag itself should be a short phrase, ideally 2 to 4 words long.**
+ - These tags must represent the core abstract themes of the text, suitable for broad categorization.
+ - **Crucially, prioritize abstract concepts** over specific entities or phrases mentioned in the text. For example, prefer "Supply Chain Resilience" over "Reshoring Strategies".
+
+ Here is the document chunk to process:
+ `[DOCUMENT_CHUNK]`
+ {chunk_text}
+
+ Produce ONLY the JSON object as your response.
+ """
memos/templates/mem_scheduler_prompts.py ADDED
@@ -0,0 +1,65 @@
+ INTENT_RECOGNIZING_PROMPT = """You are a user intent recognizer, and your task is to determine whether the user's current question has been satisfactorily answered.
+
+ You will receive the following information:
+
+ The user’s current question list (q_list), arranged in chronological order (currently contains only one question);
+ The memory information currently present in the system’s workspace (working_memory_list), i.e., the currently known contextual clues.
+ Your tasks are:
+
+ Determine whether the user is satisfied with the existing answer;
+
+ If the user is satisfied, explain the reason and return:
+
+ "trigger_retrieval": false
+ If the user is not satisfied, meaning the system's answer did not meet their actual needs, please return:
+
+ "trigger_retrieval": true
+ "missing_evidence": ["Information you infer is missing and needs to be supplemented, such as specific experiences of someone, health records, etc."]
+ Please return strictly according to the following JSON format:
+
+ {{
+ "trigger_retrieval": true or false,
+ "missing_evidence": ["The missing evidence needed for the next step of retrieval and completion"]
+ }}
+ The user's question list is:
+ {q_list}
+
+ The memory information currently present in the system’s workspace is:
+ {working_memory_list}
+ """
+
+ MEMORY_RERANKEING_PROMPT = """You are a memory sorter. Your task is to reorder the evidence according to the user's question, placing the evidence that best supports the user's query as close to the front as possible.
+
+ Please return the newly reordered memory sequence according to the query in the following format, which must be in JSON:
+
+ {{
+ "new_order": [...]
+ }}
+ Now the user's question is:
+ {query}
+
+ The current order is:
+ {current_order}"""
+
+ FREQ_DETECTING_PROMPT = """You are a memory frequency monitor. Your task is to check which memories in the activation memory list appear in the given answer, and increment their count by 1 for each occurrence.
+
+ Please return strictly according to the following JSON format:
+
+ [
+ {{"memory": ..., "count": ...}}, {{"memory": ..., "count": ...}}, ...
+ ]
+
+ The answer is:
+ {answer}
+
+ The activation memory list is:
+ {activation_memory_freq_list}
+ """
+
+ PROMPT_MAPPING = {
+     "intent_recognizing": INTENT_RECOGNIZING_PROMPT,
+     "memory_reranking": MEMORY_RERANKEING_PROMPT,
+     "freq_detecting": FREQ_DETECTING_PROMPT,
+ }
+
+ MEMORY_ASSEMBLY_TEMPLATE = """The retrieved memories are listed as follows:\n\n {memory_text}"""
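A small rendering sketch for the scheduler templates above (not part of the diff). The scheduler's actual call sites live elsewhere in the package, so the flow below is an assumption; what is grounded in this hunk is that the {{ }} sequences are escaped braces and the single-brace fields are str.format placeholders.

# Hedged sketch: str.format fills {q_list}, {working_memory_list}, {query}, etc.,
# while {{ }} keeps the JSON skeletons literal.
from memos.templates.mem_scheduler_prompts import MEMORY_ASSEMBLY_TEMPLATE, PROMPT_MAPPING

intent_prompt = PROMPT_MAPPING["intent_recognizing"].format(
    q_list=["What did I plan for Friday?"],
    working_memory_list=["User scheduled a team meeting on Friday at 9:30 AM."],
)

rerank_prompt = PROMPT_MAPPING["memory_reranking"].format(
    query="What did I plan for Friday?",
    current_order=["memory_0", "memory_1", "memory_2"],
)

assembled = MEMORY_ASSEMBLY_TEMPLATE.format(memory_text="- Friday 9:30 AM team meeting")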
memos/templates/mos_prompts.py ADDED
@@ -0,0 +1,63 @@
+ COT_DECOMPOSE_PROMPT = """
+ I am an 8-year-old student who needs help analyzing and breaking down complex questions. Your task is to help me understand whether a question is complex enough to be broken down into smaller parts.
+
+ Requirements:
+ 1. First, determine if the question is a decomposable problem. If it is a decomposable problem, set 'is_complex' to True.
+ 2. If the question needs to be decomposed, break it down into 1-3 sub-questions. The number should be controlled by the model based on the complexity of the question.
+ 3. For decomposable questions, break them down into sub-questions and put them in the 'sub_questions' list. Each sub-question should contain only one question content without any additional notes.
+ 4. If the question is not a decomposable problem, set 'is_complex' to False and set 'sub_questions' to an empty list.
+ 5. You must return ONLY a valid JSON object. Do not include any other text, explanations, or formatting.
+
+ Here are some examples:
+
+ Question: Who is the current head coach of the gymnastics team in the capital of the country that Lang Ping represents?
+ Answer: {{"is_complex": true, "sub_questions": ["Which country does Lang Ping represent in volleyball?", "What is the capital of this country?", "Who is the current head coach of the gymnastics team in this capital?"]}}
+
+ Question: Which country's cultural heritage is the Great Wall?
+ Answer: {{"is_complex": false, "sub_questions": []}}
+
+ Question: How did the trade relationship between Madagascar and China develop, and how does this relationship affect the market expansion of the essential oil industry on Nosy Be Island?
+ Answer: {{"is_complex": true, "sub_questions": ["How did the trade relationship between Madagascar and China develop?", "How does this trade relationship affect the market expansion of the essential oil industry on Nosy Be Island?"]}}
+
+ Please analyze the following question and respond with ONLY a valid JSON object:
+ Question: {query}
+ Answer:"""
+
+ PRO_MODE_WELCOME_MESSAGE = """
+ ============================================================
+ 🚀 MemOS PRO Mode Activated!
+ ============================================================
+ ✅ Chain of Thought (CoT) enhancement is now enabled by default
+ ✅ Complex queries will be automatically decomposed and enhanced
+
+ 🌐 To enable Internet search capabilities:
+ 1. Go to your cube's textual memory configuration
+ 2. Set the backend to 'google' in the internet_retriever section
+ 3. Configure the following parameters:
+ - api_key: Your Google Search API key
+ - cse_id: Your Custom Search Engine ID
+ - num_results: Number of search results (default: 5)
+
+ 📝 Example configuration at cube config for tree_text_memory :
+ internet_retriever:
+ backend: 'google'
+ config:
+ api_key: 'your_google_api_key_here'
+ cse_id: 'your_custom_search_engine_id'
+ num_results: 5
+ details: https://github.com/memos-ai/memos/blob/main/examples/core_memories/tree_textual_w_internet_memoy.py
+ ============================================================
+ """
+
+ SYNTHESIS_PROMPT = """
+ exclude memory information, synthesizing information from multiple sources to provide comprehensive answers.
+ I will give you chain of thought for sub-questions and their answers.
+ Sub-questions and their answers:
+ {qa_text}
+
+ Please synthesize these answers into a comprehensive response that:
+ 1. Addresses the original question completely
+ 2. Integrates information from all sub-questions
+ 3. Provides clear reasoning and connections
+ 4. Is well-structured and easy to understand
+ 5. Maintains a natural conversational tone"""
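For orientation, here is a sketch of rendering COT_DECOMPOSE_PROMPT and parsing the JSON reply it asks for (not part of the diff). The LLM call is replaced by a hard-coded reply; wiring this to memos.llms is an assumption not shown in this hunk.

# Hedged sketch: format the CoT decomposition prompt and parse the expected JSON answer.
import json

from memos.templates.mos_prompts import COT_DECOMPOSE_PROMPT

query = "Who coached the team that won the 2016 volleyball gold medal?"
prompt = COT_DECOMPOSE_PROMPT.format(query=query)  # {{ }} keeps the example JSON literal

# Placeholder standing in for a model response that follows the prompt's contract.
raw_reply = '{"is_complex": true, "sub_questions": ["Which team won the 2016 gold?", "Who coached that team?"]}'
decomposition = json.loads(raw_reply)
if decomposition["is_complex"]:
    for sub_question in decomposition["sub_questions"]:
        print(sub_question)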
memos/types.py ADDED
@@ -0,0 +1,55 @@
+ """Type definitions and custom types for the MemOS library.
+
+ This module defines commonly used type aliases, protocols, and custom types
+ used throughout the MemOS project to improve type safety and code clarity.
+ """
+
+ from datetime import datetime
+ from typing import Literal, TypeAlias
+
+ from pydantic import BaseModel
+ from typing_extensions import TypedDict
+
+ from memos.memories.activation.item import ActivationMemoryItem
+ from memos.memories.parametric.item import ParametricMemoryItem
+ from memos.memories.textual.item import TextualMemoryItem
+
+
+ # ─── Message Types ──────────────────────────────────────────────────────────────
+
+ # Chat message roles
+ MessageRole: TypeAlias = Literal["user", "assistant", "system"]
+
+
+ # Message structure
+ class MessageDict(TypedDict):
+     """Typed dictionary for chat message dictionaries."""
+
+     role: MessageRole
+     content: str
+
+
+ # Message collections
+ MessageList: TypeAlias = list[MessageDict]
+
+
+ # Chat history structure
+ class ChatHistory(BaseModel):
+     """Model to represent chat history for export."""
+
+     user_id: str
+     session_id: str
+     created_at: datetime
+     total_messages: int
+     chat_history: MessageList
+
+
+ # ─── MemOS ────────────────────────────────────────────────────────────────────
+
+
+ class MOSSearchResult(TypedDict):
+     """Model to represent memory search result."""
+
+     text_mem: list[dict[str, str | list[TextualMemoryItem]]]
+     act_mem: list[dict[str, str | list[ActivationMemoryItem]]]
+     para_mem: list[dict[str, str | list[ParametricMemoryItem]]]
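An illustrative construction of the message and chat-history types defined above (not part of the diff); only the example values are invented.

# Build a typed message list and wrap it in the ChatHistory export model.
from datetime import datetime

from memos.types import ChatHistory, MessageDict, MessageList

messages: MessageList = [
    MessageDict(role="user", content="Remind me what I planned for Friday."),
    MessageDict(role="assistant", content="You planned a 9:30 AM team meeting."),
]

history = ChatHistory(
    user_id="user-123",
    session_id="session-456",
    created_at=datetime.now(),
    total_messages=len(messages),
    chat_history=messages,
)
print(history.model_dump_json(indent=2))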
File without changes
memos/vec_dbs/base.py ADDED
@@ -0,0 +1,105 @@
+ from abc import ABC, abstractmethod
+ from typing import Any
+
+ from memos.configs.vec_db import BaseVecDBConfig
+ from memos.vec_dbs.item import VecDBItem
+
+
+ class BaseVecDB(ABC):
+     """Base class for all vector databases."""
+
+     @abstractmethod
+     def __init__(self, config: BaseVecDBConfig):
+         """Initialize the vector database with the given configuration."""
+
+     # Collection management methods
+
+     @abstractmethod
+     def create_collection(self) -> None:
+         """Create a new collection/index with specified parameters."""
+
+     @abstractmethod
+     def list_collections(self) -> list[str]:
+         """List all collections/indexes."""
+
+     @abstractmethod
+     def delete_collection(self, name: str) -> None:
+         """Delete a collection/index."""
+
+     @abstractmethod
+     def collection_exists(self, name: str) -> bool:
+         """Check if a collection/index exists."""
+
+     # Vector management methods
+
+     @abstractmethod
+     def search(
+         self,
+         query_vector: list[float],
+         top_k: int,
+         filter: dict[str, Any] | None = None,
+     ) -> list[VecDBItem]:
+         """
+         Search for similar items in the vector database.
+
+         Args:
+             query_vector: Single vector to search
+             top_k: Number of results to return
+             filter: payload filters (may not be supported by all implementations)
+
+         Returns:
+             List of search results with distance scores and payloads.
+         """
+
+     @abstractmethod
+     def get_by_id(self, id: str) -> VecDBItem | None:
+         """Get an item from the vector database."""
+
+     @abstractmethod
+     def get_by_filter(self, filter: dict[str, Any]) -> list[VecDBItem]:
+         """
+         Retrieve all items that match the given filter criteria.
+
+         Args:
+             filter: Payload filters to match against stored items
+
+         Returns:
+             List of items including vectors and payloads that match the filter
+         """
+
+     @abstractmethod
+     def get_all(self) -> list[VecDBItem]:
+         """Retrieve all items in the vector database."""
+
+     @abstractmethod
+     def count(self, filter: dict[str, Any] | None = None) -> int:
+         """Count items in the database, optionally with filter."""
+
+     @abstractmethod
+     def add(self, data: list[VecDBItem | dict[str, Any]]) -> None:
+         """
+         Add data to the vector database.
+
+         Args:
+             data: List of VecDBItem objects or dictionaries containing:
+                 - 'id': unique identifier
+                 - 'vector': embedding vector
+                 - 'payload': additional fields for filtering/retrieval
+         """
+
+     @abstractmethod
+     def update(self, id: str, data: VecDBItem | dict[str, Any]) -> None:
+         """Update an item in the vector database."""
+
+     @abstractmethod
+     def upsert(self, data: list[VecDBItem | dict[str, Any]]) -> None:
+         """
+         Add or update data in the vector database.
+
+         If an item with the same ID exists, it will be updated.
+         Otherwise, it will be added as a new item.
+         """
+
+     @abstractmethod
+     def delete(self, ids: list[str]) -> None:
+         """Delete items from the vector database."""
memos/vec_dbs/factory.py ADDED
@@ -0,0 +1,21 @@
+ from typing import Any, ClassVar
+
+ from memos.configs.vec_db import VectorDBConfigFactory
+ from memos.vec_dbs.base import BaseVecDB
+ from memos.vec_dbs.qdrant import QdrantVecDB
+
+
+ class VecDBFactory(BaseVecDB):
+     """Factory class for creating Vector Database instances."""
+
+     backend_to_class: ClassVar[dict[str, Any]] = {
+         "qdrant": QdrantVecDB,
+     }
+
+     @classmethod
+     def from_config(cls, config_factory: VectorDBConfigFactory) -> BaseVecDB:
+         backend = config_factory.backend
+         if backend not in cls.backend_to_class:
+             raise ValueError(f"Invalid backend: {backend}")
+         vec_db_class = cls.backend_to_class[backend]
+         return vec_db_class(config_factory.config)
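A hedged sketch of building a backend through the factory (not part of the diff). It assumes VectorDBConfigFactory is a Pydantic model, as other MemOS configs are; its exact fields live in memos/configs/vec_db.py, which is not shown in this hunk, so the nested config keys below are assumptions rather than documented values.

# Construct a vector DB via VecDBFactory; field names inside "config" are hypothetical.
from memos.configs.vec_db import VectorDBConfigFactory
from memos.vec_dbs.factory import VecDBFactory

config_factory = VectorDBConfigFactory.model_validate(
    {
        "backend": "qdrant",
        "config": {
            "collection_name": "memories",   # assumed field name
            "vector_dimension": 768,         # assumed field name
        },
    }
)
vec_db = VecDBFactory.from_config(config_factory)  # dispatches to QdrantVecDB for "qdrant"

Because from_config dispatches on the backend name through the backend_to_class mapping, registering a new backend class in that dictionary is all the factory needs to support it.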
memos/vec_dbs/item.py ADDED
@@ -0,0 +1,43 @@
+ """Defines vector database item types."""
+
+ import uuid
+
+ from typing import Any
+
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+
+ class VecDBItem(BaseModel):
+     """Represents a single item in the vector database.
+
+     This serves as a standardized format for vector database items across different
+     vector database implementations (Qdrant, FAISS, Weaviate, etc.).
+     """
+
+     id: str = Field(default=str(uuid.uuid4()), description="Unique identifier for the item")
+     vector: list[float] | None = Field(default=None, description="Embedding vector")
+     payload: dict[str, Any] | None = Field(
+         default=None, description="Additional payload for filtering/retrieval"
+     )
+     score: float | None = Field(
+         default=None, description="Similarity score (used in search results)"
+     )
+
+     model_config = ConfigDict(extra="forbid")
+
+     @field_validator("id")
+     @classmethod
+     def validate_id(cls, v):
+         """Validate that ID is a valid UUID."""
+         if not isinstance(v, str) or not uuid.UUID(v, version=4):
+             raise ValueError("ID must be a valid UUID string")
+         return v
+
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> "VecDBItem":
+         """Create VecDBItem from dictionary."""
+         return cls(**data)
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to dictionary format."""
+         return self.model_dump(exclude_none=True)
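An illustrative round trip for VecDBItem using the helpers defined above (not part of the diff); only the example values are invented.

# Create an item, serialize it with to_dict, and rebuild it with from_dict.
import uuid

from memos.vec_dbs.item import VecDBItem

item = VecDBItem(
    id=str(uuid.uuid4()),   # must be a valid UUID string per validate_id
    vector=[0.1, 0.2, 0.3],
    payload={"user_id": "user-123", "memory_type": "UserMemory"},
)

as_dict = item.to_dict()            # model_dump(exclude_none=True): the unset score is omitted
restored = VecDBItem.from_dict(as_dict)
assert restored.id == item.id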