MemoryOS 2.0.3 (memoryos-2.0.3-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (315)
  1. memoryos-2.0.3.dist-info/METADATA +418 -0
  2. memoryos-2.0.3.dist-info/RECORD +315 -0
  3. memoryos-2.0.3.dist-info/WHEEL +4 -0
  4. memoryos-2.0.3.dist-info/entry_points.txt +3 -0
  5. memoryos-2.0.3.dist-info/licenses/LICENSE +201 -0
  6. memos/__init__.py +20 -0
  7. memos/api/client.py +571 -0
  8. memos/api/config.py +1018 -0
  9. memos/api/context/dependencies.py +50 -0
  10. memos/api/exceptions.py +53 -0
  11. memos/api/handlers/__init__.py +62 -0
  12. memos/api/handlers/add_handler.py +158 -0
  13. memos/api/handlers/base_handler.py +194 -0
  14. memos/api/handlers/chat_handler.py +1401 -0
  15. memos/api/handlers/component_init.py +388 -0
  16. memos/api/handlers/config_builders.py +190 -0
  17. memos/api/handlers/feedback_handler.py +93 -0
  18. memos/api/handlers/formatters_handler.py +237 -0
  19. memos/api/handlers/memory_handler.py +316 -0
  20. memos/api/handlers/scheduler_handler.py +497 -0
  21. memos/api/handlers/search_handler.py +222 -0
  22. memos/api/handlers/suggestion_handler.py +117 -0
  23. memos/api/mcp_serve.py +614 -0
  24. memos/api/middleware/request_context.py +101 -0
  25. memos/api/product_api.py +38 -0
  26. memos/api/product_models.py +1206 -0
  27. memos/api/routers/__init__.py +1 -0
  28. memos/api/routers/product_router.py +477 -0
  29. memos/api/routers/server_router.py +394 -0
  30. memos/api/server_api.py +44 -0
  31. memos/api/start_api.py +433 -0
  32. memos/chunkers/__init__.py +4 -0
  33. memos/chunkers/base.py +24 -0
  34. memos/chunkers/charactertext_chunker.py +41 -0
  35. memos/chunkers/factory.py +24 -0
  36. memos/chunkers/markdown_chunker.py +62 -0
  37. memos/chunkers/sentence_chunker.py +54 -0
  38. memos/chunkers/simple_chunker.py +50 -0
  39. memos/cli.py +113 -0
  40. memos/configs/__init__.py +0 -0
  41. memos/configs/base.py +82 -0
  42. memos/configs/chunker.py +59 -0
  43. memos/configs/embedder.py +88 -0
  44. memos/configs/graph_db.py +236 -0
  45. memos/configs/internet_retriever.py +100 -0
  46. memos/configs/llm.py +151 -0
  47. memos/configs/mem_agent.py +54 -0
  48. memos/configs/mem_chat.py +81 -0
  49. memos/configs/mem_cube.py +105 -0
  50. memos/configs/mem_os.py +83 -0
  51. memos/configs/mem_reader.py +91 -0
  52. memos/configs/mem_scheduler.py +385 -0
  53. memos/configs/mem_user.py +70 -0
  54. memos/configs/memory.py +324 -0
  55. memos/configs/parser.py +38 -0
  56. memos/configs/reranker.py +18 -0
  57. memos/configs/utils.py +8 -0
  58. memos/configs/vec_db.py +80 -0
  59. memos/context/context.py +355 -0
  60. memos/dependency.py +52 -0
  61. memos/deprecation.py +262 -0
  62. memos/embedders/__init__.py +0 -0
  63. memos/embedders/ark.py +95 -0
  64. memos/embedders/base.py +106 -0
  65. memos/embedders/factory.py +29 -0
  66. memos/embedders/ollama.py +77 -0
  67. memos/embedders/sentence_transformer.py +49 -0
  68. memos/embedders/universal_api.py +51 -0
  69. memos/exceptions.py +30 -0
  70. memos/graph_dbs/__init__.py +0 -0
  71. memos/graph_dbs/base.py +274 -0
  72. memos/graph_dbs/factory.py +27 -0
  73. memos/graph_dbs/item.py +46 -0
  74. memos/graph_dbs/nebular.py +1794 -0
  75. memos/graph_dbs/neo4j.py +1942 -0
  76. memos/graph_dbs/neo4j_community.py +1058 -0
  77. memos/graph_dbs/polardb.py +5446 -0
  78. memos/hello_world.py +97 -0
  79. memos/llms/__init__.py +0 -0
  80. memos/llms/base.py +25 -0
  81. memos/llms/deepseek.py +13 -0
  82. memos/llms/factory.py +38 -0
  83. memos/llms/hf.py +443 -0
  84. memos/llms/hf_singleton.py +114 -0
  85. memos/llms/ollama.py +135 -0
  86. memos/llms/openai.py +222 -0
  87. memos/llms/openai_new.py +198 -0
  88. memos/llms/qwen.py +13 -0
  89. memos/llms/utils.py +14 -0
  90. memos/llms/vllm.py +218 -0
  91. memos/log.py +237 -0
  92. memos/mem_agent/base.py +19 -0
  93. memos/mem_agent/deepsearch_agent.py +391 -0
  94. memos/mem_agent/factory.py +36 -0
  95. memos/mem_chat/__init__.py +0 -0
  96. memos/mem_chat/base.py +30 -0
  97. memos/mem_chat/factory.py +21 -0
  98. memos/mem_chat/simple.py +200 -0
  99. memos/mem_cube/__init__.py +0 -0
  100. memos/mem_cube/base.py +30 -0
  101. memos/mem_cube/general.py +240 -0
  102. memos/mem_cube/navie.py +172 -0
  103. memos/mem_cube/utils.py +169 -0
  104. memos/mem_feedback/base.py +15 -0
  105. memos/mem_feedback/feedback.py +1192 -0
  106. memos/mem_feedback/simple_feedback.py +40 -0
  107. memos/mem_feedback/utils.py +230 -0
  108. memos/mem_os/client.py +5 -0
  109. memos/mem_os/core.py +1203 -0
  110. memos/mem_os/main.py +582 -0
  111. memos/mem_os/product.py +1608 -0
  112. memos/mem_os/product_server.py +455 -0
  113. memos/mem_os/utils/default_config.py +359 -0
  114. memos/mem_os/utils/format_utils.py +1403 -0
  115. memos/mem_os/utils/reference_utils.py +162 -0
  116. memos/mem_reader/__init__.py +0 -0
  117. memos/mem_reader/base.py +47 -0
  118. memos/mem_reader/factory.py +53 -0
  119. memos/mem_reader/memory.py +298 -0
  120. memos/mem_reader/multi_modal_struct.py +965 -0
  121. memos/mem_reader/read_multi_modal/__init__.py +43 -0
  122. memos/mem_reader/read_multi_modal/assistant_parser.py +311 -0
  123. memos/mem_reader/read_multi_modal/base.py +273 -0
  124. memos/mem_reader/read_multi_modal/file_content_parser.py +826 -0
  125. memos/mem_reader/read_multi_modal/image_parser.py +359 -0
  126. memos/mem_reader/read_multi_modal/multi_modal_parser.py +252 -0
  127. memos/mem_reader/read_multi_modal/string_parser.py +139 -0
  128. memos/mem_reader/read_multi_modal/system_parser.py +327 -0
  129. memos/mem_reader/read_multi_modal/text_content_parser.py +131 -0
  130. memos/mem_reader/read_multi_modal/tool_parser.py +210 -0
  131. memos/mem_reader/read_multi_modal/user_parser.py +218 -0
  132. memos/mem_reader/read_multi_modal/utils.py +358 -0
  133. memos/mem_reader/simple_struct.py +912 -0
  134. memos/mem_reader/strategy_struct.py +163 -0
  135. memos/mem_reader/utils.py +157 -0
  136. memos/mem_scheduler/__init__.py +0 -0
  137. memos/mem_scheduler/analyzer/__init__.py +0 -0
  138. memos/mem_scheduler/analyzer/api_analyzer.py +714 -0
  139. memos/mem_scheduler/analyzer/eval_analyzer.py +219 -0
  140. memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +571 -0
  141. memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
  142. memos/mem_scheduler/base_scheduler.py +1319 -0
  143. memos/mem_scheduler/general_modules/__init__.py +0 -0
  144. memos/mem_scheduler/general_modules/api_misc.py +137 -0
  145. memos/mem_scheduler/general_modules/base.py +80 -0
  146. memos/mem_scheduler/general_modules/init_components_for_scheduler.py +425 -0
  147. memos/mem_scheduler/general_modules/misc.py +313 -0
  148. memos/mem_scheduler/general_modules/scheduler_logger.py +389 -0
  149. memos/mem_scheduler/general_modules/task_threads.py +315 -0
  150. memos/mem_scheduler/general_scheduler.py +1495 -0
  151. memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
  152. memos/mem_scheduler/memory_manage_modules/memory_filter.py +306 -0
  153. memos/mem_scheduler/memory_manage_modules/retriever.py +547 -0
  154. memos/mem_scheduler/monitors/__init__.py +0 -0
  155. memos/mem_scheduler/monitors/dispatcher_monitor.py +366 -0
  156. memos/mem_scheduler/monitors/general_monitor.py +394 -0
  157. memos/mem_scheduler/monitors/task_schedule_monitor.py +254 -0
  158. memos/mem_scheduler/optimized_scheduler.py +410 -0
  159. memos/mem_scheduler/orm_modules/__init__.py +0 -0
  160. memos/mem_scheduler/orm_modules/api_redis_model.py +518 -0
  161. memos/mem_scheduler/orm_modules/base_model.py +729 -0
  162. memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
  163. memos/mem_scheduler/orm_modules/redis_model.py +699 -0
  164. memos/mem_scheduler/scheduler_factory.py +23 -0
  165. memos/mem_scheduler/schemas/__init__.py +0 -0
  166. memos/mem_scheduler/schemas/analyzer_schemas.py +52 -0
  167. memos/mem_scheduler/schemas/api_schemas.py +233 -0
  168. memos/mem_scheduler/schemas/general_schemas.py +55 -0
  169. memos/mem_scheduler/schemas/message_schemas.py +173 -0
  170. memos/mem_scheduler/schemas/monitor_schemas.py +406 -0
  171. memos/mem_scheduler/schemas/task_schemas.py +132 -0
  172. memos/mem_scheduler/task_schedule_modules/__init__.py +0 -0
  173. memos/mem_scheduler/task_schedule_modules/dispatcher.py +740 -0
  174. memos/mem_scheduler/task_schedule_modules/local_queue.py +247 -0
  175. memos/mem_scheduler/task_schedule_modules/orchestrator.py +74 -0
  176. memos/mem_scheduler/task_schedule_modules/redis_queue.py +1385 -0
  177. memos/mem_scheduler/task_schedule_modules/task_queue.py +162 -0
  178. memos/mem_scheduler/utils/__init__.py +0 -0
  179. memos/mem_scheduler/utils/api_utils.py +77 -0
  180. memos/mem_scheduler/utils/config_utils.py +100 -0
  181. memos/mem_scheduler/utils/db_utils.py +50 -0
  182. memos/mem_scheduler/utils/filter_utils.py +176 -0
  183. memos/mem_scheduler/utils/metrics.py +125 -0
  184. memos/mem_scheduler/utils/misc_utils.py +290 -0
  185. memos/mem_scheduler/utils/monitor_event_utils.py +67 -0
  186. memos/mem_scheduler/utils/status_tracker.py +229 -0
  187. memos/mem_scheduler/webservice_modules/__init__.py +0 -0
  188. memos/mem_scheduler/webservice_modules/rabbitmq_service.py +485 -0
  189. memos/mem_scheduler/webservice_modules/redis_service.py +380 -0
  190. memos/mem_user/factory.py +94 -0
  191. memos/mem_user/mysql_persistent_user_manager.py +271 -0
  192. memos/mem_user/mysql_user_manager.py +502 -0
  193. memos/mem_user/persistent_factory.py +98 -0
  194. memos/mem_user/persistent_user_manager.py +260 -0
  195. memos/mem_user/redis_persistent_user_manager.py +225 -0
  196. memos/mem_user/user_manager.py +488 -0
  197. memos/memories/__init__.py +0 -0
  198. memos/memories/activation/__init__.py +0 -0
  199. memos/memories/activation/base.py +42 -0
  200. memos/memories/activation/item.py +56 -0
  201. memos/memories/activation/kv.py +292 -0
  202. memos/memories/activation/vllmkv.py +219 -0
  203. memos/memories/base.py +19 -0
  204. memos/memories/factory.py +42 -0
  205. memos/memories/parametric/__init__.py +0 -0
  206. memos/memories/parametric/base.py +19 -0
  207. memos/memories/parametric/item.py +11 -0
  208. memos/memories/parametric/lora.py +41 -0
  209. memos/memories/textual/__init__.py +0 -0
  210. memos/memories/textual/base.py +92 -0
  211. memos/memories/textual/general.py +236 -0
  212. memos/memories/textual/item.py +304 -0
  213. memos/memories/textual/naive.py +187 -0
  214. memos/memories/textual/prefer_text_memory/__init__.py +0 -0
  215. memos/memories/textual/prefer_text_memory/adder.py +504 -0
  216. memos/memories/textual/prefer_text_memory/config.py +106 -0
  217. memos/memories/textual/prefer_text_memory/extractor.py +221 -0
  218. memos/memories/textual/prefer_text_memory/factory.py +85 -0
  219. memos/memories/textual/prefer_text_memory/retrievers.py +177 -0
  220. memos/memories/textual/prefer_text_memory/spliter.py +132 -0
  221. memos/memories/textual/prefer_text_memory/utils.py +93 -0
  222. memos/memories/textual/preference.py +344 -0
  223. memos/memories/textual/simple_preference.py +161 -0
  224. memos/memories/textual/simple_tree.py +69 -0
  225. memos/memories/textual/tree.py +459 -0
  226. memos/memories/textual/tree_text_memory/__init__.py +0 -0
  227. memos/memories/textual/tree_text_memory/organize/__init__.py +0 -0
  228. memos/memories/textual/tree_text_memory/organize/handler.py +184 -0
  229. memos/memories/textual/tree_text_memory/organize/manager.py +518 -0
  230. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +238 -0
  231. memos/memories/textual/tree_text_memory/organize/reorganizer.py +622 -0
  232. memos/memories/textual/tree_text_memory/retrieve/__init__.py +0 -0
  233. memos/memories/textual/tree_text_memory/retrieve/advanced_searcher.py +364 -0
  234. memos/memories/textual/tree_text_memory/retrieve/bm25_util.py +186 -0
  235. memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +419 -0
  236. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +270 -0
  237. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +102 -0
  238. memos/memories/textual/tree_text_memory/retrieve/reasoner.py +61 -0
  239. memos/memories/textual/tree_text_memory/retrieve/recall.py +497 -0
  240. memos/memories/textual/tree_text_memory/retrieve/reranker.py +111 -0
  241. memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +16 -0
  242. memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py +472 -0
  243. memos/memories/textual/tree_text_memory/retrieve/searcher.py +848 -0
  244. memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +135 -0
  245. memos/memories/textual/tree_text_memory/retrieve/utils.py +54 -0
  246. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +387 -0
  247. memos/memos_tools/dinding_report_bot.py +453 -0
  248. memos/memos_tools/lockfree_dict.py +120 -0
  249. memos/memos_tools/notification_service.py +44 -0
  250. memos/memos_tools/notification_utils.py +142 -0
  251. memos/memos_tools/singleton.py +174 -0
  252. memos/memos_tools/thread_safe_dict.py +310 -0
  253. memos/memos_tools/thread_safe_dict_segment.py +382 -0
  254. memos/multi_mem_cube/__init__.py +0 -0
  255. memos/multi_mem_cube/composite_cube.py +86 -0
  256. memos/multi_mem_cube/single_cube.py +874 -0
  257. memos/multi_mem_cube/views.py +54 -0
  258. memos/parsers/__init__.py +0 -0
  259. memos/parsers/base.py +15 -0
  260. memos/parsers/factory.py +21 -0
  261. memos/parsers/markitdown.py +28 -0
  262. memos/reranker/__init__.py +4 -0
  263. memos/reranker/base.py +25 -0
  264. memos/reranker/concat.py +103 -0
  265. memos/reranker/cosine_local.py +102 -0
  266. memos/reranker/factory.py +72 -0
  267. memos/reranker/http_bge.py +324 -0
  268. memos/reranker/http_bge_strategy.py +327 -0
  269. memos/reranker/noop.py +19 -0
  270. memos/reranker/strategies/__init__.py +4 -0
  271. memos/reranker/strategies/base.py +61 -0
  272. memos/reranker/strategies/concat_background.py +94 -0
  273. memos/reranker/strategies/concat_docsource.py +110 -0
  274. memos/reranker/strategies/dialogue_common.py +109 -0
  275. memos/reranker/strategies/factory.py +31 -0
  276. memos/reranker/strategies/single_turn.py +107 -0
  277. memos/reranker/strategies/singleturn_outmem.py +98 -0
  278. memos/settings.py +10 -0
  279. memos/templates/__init__.py +0 -0
  280. memos/templates/advanced_search_prompts.py +211 -0
  281. memos/templates/cloud_service_prompt.py +107 -0
  282. memos/templates/instruction_completion.py +66 -0
  283. memos/templates/mem_agent_prompts.py +85 -0
  284. memos/templates/mem_feedback_prompts.py +822 -0
  285. memos/templates/mem_reader_prompts.py +1096 -0
  286. memos/templates/mem_reader_strategy_prompts.py +238 -0
  287. memos/templates/mem_scheduler_prompts.py +626 -0
  288. memos/templates/mem_search_prompts.py +93 -0
  289. memos/templates/mos_prompts.py +403 -0
  290. memos/templates/prefer_complete_prompt.py +735 -0
  291. memos/templates/tool_mem_prompts.py +139 -0
  292. memos/templates/tree_reorganize_prompts.py +230 -0
  293. memos/types/__init__.py +34 -0
  294. memos/types/general_types.py +151 -0
  295. memos/types/openai_chat_completion_types/__init__.py +15 -0
  296. memos/types/openai_chat_completion_types/chat_completion_assistant_message_param.py +56 -0
  297. memos/types/openai_chat_completion_types/chat_completion_content_part_image_param.py +27 -0
  298. memos/types/openai_chat_completion_types/chat_completion_content_part_input_audio_param.py +23 -0
  299. memos/types/openai_chat_completion_types/chat_completion_content_part_param.py +43 -0
  300. memos/types/openai_chat_completion_types/chat_completion_content_part_refusal_param.py +16 -0
  301. memos/types/openai_chat_completion_types/chat_completion_content_part_text_param.py +16 -0
  302. memos/types/openai_chat_completion_types/chat_completion_message_custom_tool_call_param.py +27 -0
  303. memos/types/openai_chat_completion_types/chat_completion_message_function_tool_call_param.py +32 -0
  304. memos/types/openai_chat_completion_types/chat_completion_message_param.py +18 -0
  305. memos/types/openai_chat_completion_types/chat_completion_message_tool_call_union_param.py +15 -0
  306. memos/types/openai_chat_completion_types/chat_completion_system_message_param.py +36 -0
  307. memos/types/openai_chat_completion_types/chat_completion_tool_message_param.py +30 -0
  308. memos/types/openai_chat_completion_types/chat_completion_user_message_param.py +34 -0
  309. memos/utils.py +123 -0
  310. memos/vec_dbs/__init__.py +0 -0
  311. memos/vec_dbs/base.py +117 -0
  312. memos/vec_dbs/factory.py +23 -0
  313. memos/vec_dbs/item.py +50 -0
  314. memos/vec_dbs/milvus.py +654 -0
  315. memos/vec_dbs/qdrant.py +355 -0

memos/templates/tool_mem_prompts.py
@@ -0,0 +1,139 @@
+ TOOL_TRAJECTORY_PROMPT_ZH = """
+ 你是一个专业的工具经验提取专家。你的任务是从给定的对话消息中提取完整的工具调用轨迹经验。
+
+ ## 分析判断步骤:
+ **步骤1:判断任务完成度**
+ 根据用户反馈,判定correctness:success(成功)或 failed(失败),用户反馈决定权大于执行结果,用户反馈有误,则判定为failed
+
+ **步骤2:成功轨迹(success)- 经验提炼**
+ 从成功模式中提炼通用原则或规则,采用"when...then..."结构:
+ - when: 明确描述触发该经验的场景特征(任务类型、工具环境、参数特征等)
+ - then: 总结有效的参数模式、调用策略、最佳实践
+ 注意:经验是解决整个轨迹问题级别的,不仅仅针对单个工具
+
+ **步骤3:失败轨迹(failed)- 错误分析与经验提炼**
+ 3.1 工具需求判断
+ - 任务是否需要工具?(需要/直接回答/误调用)
+ 3.2 工具调用检查
+ - 工具存在性:是否在system中提供
+ - 工具选择:是否选对工具
+ - 参数正确性:是否符合类型定义
+ - 幻觉检测:是否调用不存在的工具
+ 3.3 错误根因定位
+ 结合消息中的错误反馈信息和上述分析,精准输出根本原因
+ 3.4 经验提炼(核心)
+ 从失败模式中提炼通用原则或规则,采用"when...then..."结构:
+ - when: 明确描述触发该经验的场景特征(任务类型、工具环境、参数特征等)
+ - then: 给出避免错误的通用策略、正确调用方式或决策规则
+ 注意:经验是解决整个轨迹问题级别的,不仅仅针对单个工具
+
+ ## 输出格式:
+ 返回一个JSON数组,格式如下:
+
+ ```json
+ [
+     {
+         "correctness": "success 或 failed",
+         "trajectory": "精炼完整的自然语言总结,包含:[任务(用户任务) -> 执行动作(调用的工具/直接回答) -> 执行结果] (可能多轮) -> 最终回答",
+         "experience": "采用when...then...格式,例如:'when 遇到XX的任务时,应该YY'",
+         "tool_used_status": [
+             {
+                 "used_tool": "工具名称(如果调用了工具)",
+                 "success_rate": "0.0-1.0之间的数值,表示该工具在本次轨迹中的成功率",
+                 "error_type": "调用失败时的错误类型和描述,成功时为空字符串",
+                 "tool_experience": "调用该工具的经验,包括可能的前置条件和可能的后置效果"
+             }
+         ]
+     }
+ ]
+ ```
+
+ ## 注意事项:
+ - 每个轨迹必须是独立的完整过程
+ - 一个轨迹中可能涉及多个工具的使用,每个工具在tool_used_status中独立记录
+ - 如果没有调用工具,tool_used_status为空数组[]
+ - 如果多条轨迹存在顺序依赖关系,需要将它们视为一条轨迹
+ - 只提取事实内容,不要添加任何解释或额外信息
+ - 确保返回的是有效的JSON格式
+ - 输出的trajectory需要按照messages的发展顺序排列
+ - experience必须是通用的、可复用的经验规则,而不是针对具体案例的描述
+ - 无论成功或失败,都要提炼经验并使用when...then...格式
+
+ 请分析以下对话消息并提取工具调用轨迹,基于以下对话消息:
+ <messages>
+ {messages}
+ </messages>
+ """
+
+
+ TOOL_TRAJECTORY_PROMPT_EN = """
+ You are a professional tool experience extraction expert. Your task is to extract complete tool call trajectory experiences from given conversation messages.
+
+ ## Analysis and Judgment Steps:
+
+ **Step 1: Assess Task Completion**
+ Determine correctness based on user feedback: success or failed, user feedback has higher priority than execution results, if user feedback is incorrect, then determine as failed
+
+ **Step 2: Successful Trajectory (success) - Experience Extraction**
+ Extract general principles or rules from success patterns, using "when...then..." structure:
+ - when: clearly describe the scenario characteristics that trigger this experience (task type, tool environment, parameter characteristics, etc.)
+ - then: summarize effective parameter patterns, calling strategies, and best practices
+ Note: Experience is at the trajectory-level problem-solving, not just for a single tool
+
+ **Step 3: Failed Trajectory (failed) - Error Analysis and Experience Extraction**
+
+ 3.1 Tool Requirement Assessment
+ - Does the task require tools? (required/direct answer/unnecessary call)
+
+ 3.2 Tool Call Verification
+ - Tool availability: provided in system?
+ - Tool selection: correct tool chosen?
+ - Parameter correctness: conform to type definitions?
+ - Hallucination detection: calling non-existent tools?
+
+ 3.3 Root Cause Identification
+ Combine error feedback from messages with above analysis to precisely output root cause
+
+ 3.4 Experience Extraction (Core)
+ Extract general principles or rules from failure patterns, using "when...then..." structure:
+ - when: clearly describe the scenario characteristics that trigger this experience (task type, tool environment, parameter characteristics, etc.)
+ - then: provide general strategies to avoid errors, correct calling approaches, or decision rules
+ Note: Experience is at the trajectory-level problem-solving, not just for a single tool
+
+ ## Output Format:
+ Return a JSON array in the following format:
+
+ ```json
+ [
+     {
+         "correctness": "success or failed",
+         "trajectory": "Concise and complete natural language summary including: [task (user task) -> execution action (tool called/direct answer) -> execution result] (possibly multiple rounds) -> final answer",
+         "experience": "Use when...then... format, e.g., 'when encountering XX tasks, should do YY'",
+         "tool_used_status": [
+             {
+                 "used_tool": "Tool name (if tool was called)",
+                 "success_rate": "Numerical value between 0.0-1.0, indicating the success rate of this tool in current trajectory",
+                 "error_type": "Error type and description when call fails, empty string when successful",
+                 "tool_experience": "Experience of using this tool, including possible preconditions and possible post-effects"
+             }
+         ]
+     }
+ ]
+ ```
+
+ ## Notes:
+ - Each trajectory must be an independent complete process
+ - A trajectory may involve multiple tools, each recorded independently in tool_used_status
+ - If no tool was called, tool_used_status is an empty array []
+ - If multiple trajectories have sequential dependencies, treat them as one trajectory
+ - Only extract factual content, do not add any explanations or extra information
+ - Ensure the returned content is valid JSON format
+ - The trajectory should be arranged according to the development order of messages
+ - Experience must be general and reusable rules, not descriptions specific to concrete cases
+ - Whether success or failed, always extract experience using when...then... format
+
+ Please analyze the following conversation messages and extract tool call trajectories based on:
+ <messages>
+ {messages}
+ </messages>
+ """

memos/templates/tree_reorganize_prompts.py
@@ -0,0 +1,230 @@
+ REORGANIZE_PROMPT = """You are a memory clustering and summarization expert.
+
+ Given the following child memory items:
+
+ {memory_items_text}
+
+ Please perform:
+ 1. Identify information that reflects user's experiences, beliefs, concerns, decisions, plans, or reactions — including meaningful input from assistant that user acknowledged or responded to.
+ 2. Resolve all time, person, and event references clearly:
+     - Convert relative time expressions (e.g., “yesterday,” “next Friday”) into absolute dates using the message timestamp if possible.
+     - Clearly distinguish between event time and message time.
+     - If uncertainty exists, state it explicitly (e.g., “around June 2025,” “exact date unclear”).
+     - Include specific locations if mentioned.
+     - Resolve all pronouns, aliases, and ambiguous references into full names or identities.
+     - Disambiguate people with the same name if applicable.
+ 3. Always write from a third-person perspective, referring to user as
+     "The user" or by name if name mentioned, rather than using first-person ("I", "me", "my").
+     For example, write "The user felt exhausted..." instead of "I felt exhausted...".
+ 4. Do not omit any information that user is likely to remember.
+     - Include all key experiences, thoughts, emotional responses, and plans — even if they seem minor.
+     - Prioritize completeness and fidelity over conciseness.
+     - Do not generalize or skip details that could be personally meaningful to user.
+ 5. Summarize all child memory items into one memory item.
+
+ Language rules:
+ - The `key`, `value`, `tags`, `summary` fields must match the mostly used language of the input memory items. **如果输入是中文,请输出中文**
+ - Keep `memory_type` in English.
+
+ Return valid JSON:
+ {
+     "key": <string, a concise title of the `value` field>,
+     "memory_type": <string, Either "LongTermMemory" or "UserMemory">,
+     "value": <A detailed, self-contained, and unambiguous memory statement, only contain detailed, unaltered information extracted and consolidated from the input `value` fields, do not include summary content — written in English if the input memory items are in English, or in Chinese if the input is in Chinese>,
+     "tags": <A list of relevant thematic keywords (e.g., ["deadline", "team", "planning"])>,
+     "summary": <a natural paragraph summarizing the above memories from user's perspective, only contain information from the input `summary` fields, 120–200 words, same language as the input>
+ }
+
+ """
+
+ DOC_REORGANIZE_PROMPT = """You are a document summarization and knowledge extraction expert.
+
+ Given the following summarized document items:
+
+ {memory_items_text}
+
+ Please perform:
+ 1. Identify key information that reflects factual content, insights, decisions, or implications from the documents — including any notable themes, conclusions, or data points.
+ 2. Resolve all time, person, location, and event references clearly:
+     - Convert relative time expressions (e.g., “last year,” “next quarter”) into absolute dates if context allows.
+     - Clearly distinguish between event time and document time.
+     - If uncertainty exists, state it explicitly (e.g., “around 2024,” “exact date unclear”).
+     - Include specific locations if mentioned.
+     - Resolve all pronouns, aliases, and ambiguous references into full names or identities.
+     - Disambiguate entities with the same name if applicable.
+ 3. Always write from a third-person perspective, referring to the subject or content clearly rather than using first-person ("I", "me", "my").
+ 4. Do not omit any information that is likely to be important or memorable from the document summaries.
+     - Include all key facts, insights, emotional tones, and plans — even if they seem minor.
+     - Prioritize completeness and fidelity over conciseness.
+     - Do not generalize or skip details that could be contextually meaningful.
+ 5. Summarize all document summaries into one integrated memory item.
+
+ Language rules:
+ - The `key`, `value`, `tags`, `summary` fields must match the mostly used language of the input document summaries. **如果输入是中文,请输出中文**
+ - Keep `memory_type` in English.
+
+ Return valid JSON:
+ {
+     "key": <string, a concise title of the `value` field>,
+     "memory_type": "LongTermMemory",
+     "value": <A detailed, self-contained, and unambiguous memory statement, only contain detailed, unaltered information extracted and consolidated from the input `value` fields, do not include summary content — written in English if the input memory items are in English, or in Chinese if the input is in Chinese>,
+     "tags": <A list of relevant thematic keywords (e.g., ["deadline", "team", "planning"])>,
+     "summary": <a natural paragraph summarizing the above memories from user's perspective, only contain information from the input `summary` fields, 120–200 words, same language as the input>
+ }
+
+ """
+
+
+ LOCAL_SUBCLUSTER_PROMPT = """You are a memory organization expert.
+
+ You are given a cluster of memory items, each with an ID and content.
+ Your task is to divide these into smaller, semantically meaningful sub-clusters.
+
+ Instructions:
+ - Identify natural topics by analyzing common time, place, people, and event elements.
+ - Each sub-cluster must reflect a coherent theme that helps retrieval.
+ - Each sub-cluster should have 2–10 items. Discard singletons.
+ - Each item ID must appear in exactly one sub-cluster or be discarded. No duplicates are allowed.
+ - All IDs in the output must be from the provided Memory items.
+ - Return strictly valid JSON only.
+
+ Example: If you have items about a project across multiple phases, group them by milestone, team, or event.
+
+ Language rules:
+ - The `key` fields must match the mostly used language of the clustered memories. **如果输入是中文,请输出中文**
+
+ Return valid JSON:
+ {
+     "clusters": [
+         {
+             "ids": ["<id1>", "<id2>", ...],
+             "key": "<string, a unique, concise memory title>"
+         },
+         ...
+     ]
+ }
+
+ Memory items:
+ {joined_scene}
+ """
+
+ PAIRWISE_RELATION_PROMPT = """
+ You are a reasoning assistant.
+
+ Given two memory units:
+ - Node 1: "{node1}"
+ - Node 2: "{node2}"
+
+ Your task:
+ - Determine their relationship ONLY if it reveals NEW usable reasoning or retrieval knowledge that is NOT already explicit in either unit.
+ - Focus on whether combining them adds new temporal, causal, conditional, or conflict information.
+
+ Valid options:
+ - CAUSE: One clearly leads to the other.
+ - CONDITION: One happens only if the other condition holds.
+ - RELATE: They are semantically related by shared people, time, place, or event, but neither causes the other.
+ - CONFLICT: They logically contradict each other.
+ - NONE: No clear useful connection.
+
+ Example:
+ - Node 1: "The marketing campaign ended in June."
+ - Node 2: "Product sales dropped in July."
+ Answer: CAUSE
+
+ Another Example:
+ - Node 1: "The conference was postponed to August due to the venue being unavailable."
+ - Node 2: "The venue was booked for a wedding in August."
+ Answer: CONFLICT
+
+ Always respond with ONE word, no matter what language is for the input nodes: [CAUSE | CONDITION | RELATE | CONFLICT | NONE]
+ """
+
+ INFER_FACT_PROMPT = """
+ You are an inference expert.
+
+ Source Memory: "{source}"
+ Target Memory: "{target}"
+
+ They are connected by a {relation_type} relation.
+ Derive ONE new factual statement that clearly combines them in a way that is NOT a trivial restatement.
+
+ Requirements:
+ - Include relevant time, place, people, and event details if available.
+ - If the inference is a logical guess, explicitly use phrases like "It can be inferred that...".
+
+ Example:
+ Source: "John missed the team meeting on Monday."
+ Target: "Important project deadlines were discussed in that meeting."
+ Relation: CAUSE
+ Inference: "It can be inferred that John may not know the new project deadlines."
+
+ If there is NO new useful fact that combines them, reply exactly: "None"
+ """
+
+ AGGREGATE_PROMPT = """
+ You are a concept summarization assistant.
+
+ Below is a list of memory items:
+ {joined}
+
+ Your task:
+ - Identify if they can be meaningfully grouped under a new, higher-level concept that clarifies their shared time, place, people, or event context.
+ - Do NOT aggregate if the overlap is trivial or obvious from each unit alone.
+ - If the summary involves any plausible interpretation, explicitly note it (e.g., "This suggests...").
+
+ Example:
+ Input Memories:
+ - "Mary organized the 2023 sustainability summit in Berlin."
+ - "Mary presented a keynote on renewable energy at the same summit."
+
+ Language rules:
+ - The `key`, `value`, `tags`, `background` fields must match the language of the input.
+
+ Good Aggregate:
+ {
+     "key": "Mary's Sustainability Summit Role",
+     "value": "Mary organized and spoke at the 2023 sustainability summit in Berlin, highlighting renewable energy initiatives.",
+     "tags": ["Mary", "summit", "Berlin", "2023"],
+     "background": "Combined from multiple memories about Mary's activities at the summit."
+ }
+
+ If you find NO useful higher-level concept, reply exactly: "None".
+ """
+
+ REDUNDANCY_MERGE_PROMPT = """You are given two pieces of text joined by the marker `⟵MERGED⟶`. Please carefully read both sides of the merged text. Your task is to summarize and consolidate all the factual details from both sides into a single, coherent text, without omitting any information. You must include every distinct detail mentioned in either text. Do not provide any explanation or analysis — only return the merged summary. Don't use pronouns or subjective language, just the facts as they are presented.\n{merged_text}"""
+
+
+ MEMORY_RELATION_DETECTOR_PROMPT = """You are a memory relationship analyzer.
+ You are given two plaintext statements. Determine the relationship between them. Classify the relationship into one of the following categories:
+
+ contradictory: The two statements describe the same event or related aspects of it but contain factually conflicting details.
+ redundant: The two statements describe essentially the same event or information with significant overlap in content and details, conveying the same core information (even if worded differently).
+ independent: The two statements are either about different events/topics (unrelated) OR describe different, non-overlapping aspects or perspectives of the same event without conflict (complementary). In both sub-cases, they provide distinct information without contradiction.
+ Respond only with one of the three labels: contradictory, redundant, or independent.
+ Do not provide any explanation or additional text.
+
+ Statement 1: {statement_1}
+ Statement 2: {statement_2}
+ """
+
+
+ MEMORY_RELATION_RESOLVER_PROMPT = """You are a memory fusion expert. You are given two statements and their associated metadata. The statements have been identified as {relation}. Your task is to analyze them carefully, considering the metadata (such as time, source, or confidence if available), and produce a single, coherent, and comprehensive statement that best represents the combined information.
+
+ If the statements are redundant, merge them by preserving all unique details and removing duplication, forming a richer, consolidated version.
+ If the statements are contradictory, attempt to resolve the conflict by prioritizing more recent information, higher-confidence data, or logically reconciling the differences based on context. If the contradiction is fundamental and cannot be logically resolved, output <answer>No</answer>.
+ Do not include any explanations, reasoning, or extra text. Only output the final result enclosed in <answer></answer> tags.
+ Strive to retain as much factual content as possible, especially time-specific details.
+ Use objective language and avoid pronouns.
+ Output Example 1 (unresolvable conflict):
+ <answer>No</answer>
+
+ Output Example 2 (successful fusion):
+ <answer>The meeting took place on 2023-10-05 at 14:00 in the main conference room, as confirmed by the updated schedule, and included a presentation on project milestones followed by a Q&A session.</answer>
+
+ Now, reconcile the following two statements:
+ Relation Type: {relation}
+ Statement 1: {statement_1}
+ Metadata 1: {metadata_1}
+ Statement 2: {statement_2}
+ Metadata 2: {metadata_2}
+ """

memos/types/__init__.py
@@ -0,0 +1,34 @@
+ from .general_types import (
+     FINE_STRATEGY,
+     ChatHistory,
+     FineStrategy,
+     MemCubeID,
+     MessageDict,
+     MessageList,
+     MessageRole,
+     MessagesType,
+     MOSSearchResult,
+     Permission,
+     PermissionDict,
+     SearchMode,
+     UserContext,
+     UserID,
+ )
+
+
+ __all__ = [
+     "FINE_STRATEGY",
+     "ChatHistory",
+     "FineStrategy",
+     "MOSSearchResult",
+     "MemCubeID",
+     "MessageDict",
+     "MessageList",
+     "MessageRole",
+     "MessagesType",
+     "Permission",
+     "PermissionDict",
+     "SearchMode",
+     "UserContext",
+     "UserID",
+ ]

memos/types/general_types.py
@@ -0,0 +1,151 @@
+ """Type definitions and custom types for the MemOS library.
+
+ This module defines commonly used type aliases, protocols, and custom types
+ used throughout the MemOS project to improve type safety and code clarity.
+ """
+
+ import os
+
+ from datetime import datetime
+ from enum import Enum
+ from typing import Literal, NewType, TypeAlias
+
+ from pydantic import BaseModel
+ from typing_extensions import TypedDict
+
+ from memos.memories.activation.item import ActivationMemoryItem
+ from memos.memories.parametric.item import ParametricMemoryItem
+ from memos.memories.textual.item import TextualMemoryItem
+
+ from .openai_chat_completion_types import (
+     ChatCompletionContentPartTextParam,
+     ChatCompletionMessageParam,
+     File,
+ )
+
+
+ __all__ = [
+     "FINE_STRATEGY",
+     "ChatHistory",
+     "FineStrategy",
+     "MOSSearchResult",
+     "MemCubeID",
+     "MessageDict",
+     "MessageList",
+     "MessageRole",
+     "MessagesType",
+     "Permission",
+     "PermissionDict",
+     "SearchMode",
+     "UserContext",
+     "UserID",
+ ]
+
+ # ─── Message Types ──────────────────────────────────────────────────────────────
+
+ # Chat message roles
+ MessageRole: TypeAlias = Literal["user", "assistant", "system"]
+
+
+ # Message structure
+ class MessageDict(TypedDict, total=False):
+     """Typed dictionary for chat message dictionaries."""
+
+     role: MessageRole
+     content: str
+     chat_time: str | None  # Optional timestamp for the message, format is not
+     # restricted, it can be any vague or precise time string.
+     message_id: str | None  # Optional unique identifier for the message
+
+
+ RawMessageDict: TypeAlias = ChatCompletionContentPartTextParam | File
+
+
+ # Message collections
+ MessageList: TypeAlias = list[ChatCompletionMessageParam]
+ RawMessageList: TypeAlias = list[RawMessageDict]
+
+
+ # Messages Type
+ MessagesType: TypeAlias = str | MessageList | RawMessageList
+
+
+ # Chat history structure
+ class ChatHistory(BaseModel):
+     """Model to represent chat history for export."""
+
+     user_id: str
+     session_id: str
+     created_at: datetime
+     total_messages: int
+     chat_history: MessageList
+
+
+ # ─── Search ────────────────────────────────────────────────────────────────────
+ # new types
+ UserID = NewType("UserID", str)
+ MemCubeID = NewType("CubeID", str)
+
+
+ class SearchMode(str, Enum):
+     """Enumeration for search modes."""
+
+     FAST = "fast"
+     FINE = "fine"
+     MIXTURE = "mixture"
+
+
+ class FineStrategy(str, Enum):
+     """Enumeration for fine strategies."""
+
+     REWRITE = "rewrite"
+     RECREATE = "recreate"
+     DEEP_SEARCH = "deep_search"
+     AGENTIC_SEARCH = "agentic_search"
+
+
+ # algorithm strategies
+ DEFAULT_FINE_STRATEGY = FineStrategy.RECREATE
+ FINE_STRATEGY = DEFAULT_FINE_STRATEGY
+
+ # Read fine strategy from environment variable `FINE_STRATEGY`.
+ # If provided and valid, use it; otherwise fall back to default.
+ _env_fine_strategy = os.getenv("FINE_STRATEGY")
+ if _env_fine_strategy:
+     try:
+         FINE_STRATEGY = FineStrategy(_env_fine_strategy)
+     except ValueError:
+         FINE_STRATEGY = DEFAULT_FINE_STRATEGY
+
+
+ # ─── MemOS ────────────────────────────────────────────────────────────────────
+
+
+ class MOSSearchResult(TypedDict):
+     """Model to represent memory search result."""
+
+     text_mem: list[dict[str, str | list[TextualMemoryItem]]]
+     act_mem: list[dict[str, str | list[ActivationMemoryItem]]]
+     para_mem: list[dict[str, str | list[ParametricMemoryItem]]]
+
+
+ # ─── API Types ────────────────────────────────────────────────────────────────────
+ # for API Permission
+ Permission: TypeAlias = Literal["read", "write", "delete", "execute"]
+
+
+ # Message structure
+ class PermissionDict(TypedDict, total=False):
+     """Typed dictionary for chat message dictionaries."""
+
+     permissions: list[Permission]
+     mem_cube_id: str
+
+
+ class UserContext(BaseModel):
+     """Model to represent user context."""
+
+     user_id: str | None = None
+     mem_cube_id: str | None = None
+     session_id: str | None = None
+     operation: list[PermissionDict] | None = None
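
The FINE_STRATEGY override above is evaluated once, at module import time. A hypothetical check of that behavior, assuming the wheel is installed and the module has not already been imported:

```python
# Hypothetical usage of the FINE_STRATEGY environment override.
import os

os.environ["FINE_STRATEGY"] = "deep_search"  # must be set before first import

from memos.types.general_types import FINE_STRATEGY, FineStrategy

assert FINE_STRATEGY is FineStrategy.DEEP_SEARCH
# An unrecognized value (e.g. "fuzzy") is silently ignored and the module
# falls back to the default, FineStrategy.RECREATE.
```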

memos/types/openai_chat_completion_types/__init__.py
@@ -0,0 +1,15 @@
+ # ruff: noqa: F403, F401
+
+ from .chat_completion_assistant_message_param import *
+ from .chat_completion_content_part_image_param import *
+ from .chat_completion_content_part_input_audio_param import *
+ from .chat_completion_content_part_param import *
+ from .chat_completion_content_part_refusal_param import *
+ from .chat_completion_content_part_text_param import *
+ from .chat_completion_message_custom_tool_call_param import *
+ from .chat_completion_message_function_tool_call_param import *
+ from .chat_completion_message_param import *
+ from .chat_completion_message_tool_call_union_param import *
+ from .chat_completion_system_message_param import *
+ from .chat_completion_tool_message_param import *
+ from .chat_completion_user_message_param import *

memos/types/openai_chat_completion_types/chat_completion_assistant_message_param.py
@@ -0,0 +1,56 @@
+ # ruff: noqa: TC001, TC003
+
+ from __future__ import annotations
+
+ from typing import Literal, TypeAlias
+
+ from typing_extensions import Required, TypedDict
+
+ from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+ from .chat_completion_message_tool_call_union_param import ChatCompletionMessageToolCallUnionParam
+
+
+ __all__ = ["Audio", "ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart"]
+
+
+ class Audio(TypedDict, total=False):
+     id: Required[str]
+     """Unique identifier for a previous audio response from the model."""
+
+
+ ContentArrayOfContentPart: TypeAlias = (
+     ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam
+ )
+
+
+ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
+     role: Required[Literal["assistant"]]
+     """The role of the messages author, in this case `assistant`."""
+
+     audio: Audio | None
+     """
+     Data about a previous audio response from the model.
+     [Learn more](https://platform.openai.com/docs/guides/audio).
+     """
+
+     content: str | list[ContentArrayOfContentPart] | ContentArrayOfContentPart | None
+     """The contents of the assistant message.
+
+     Required unless `tool_calls` or `function_call` is specified.
+     """
+
+     refusal: str | None
+     """The refusal message by the assistant."""
+
+     tool_calls: (
+         list[ChatCompletionMessageToolCallUnionParam] | ChatCompletionMessageToolCallUnionParam
+     )
+     """The tool calls generated by the model, such as function calls."""
+
+     chat_time: str | None
+     """Optional timestamp for the message, format is not
+     restricted, it can be any vague or precise time string."""
+
+     message_id: str | None
+     """Optional unique identifier for the message"""

memos/types/openai_chat_completion_types/chat_completion_content_part_image_param.py
@@ -0,0 +1,27 @@
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from typing_extensions import Required, TypedDict
+
+
+ __all__ = ["ChatCompletionContentPartImageParam", "ImageURL"]
+
+
+ class ImageURL(TypedDict, total=False):
+     url: Required[str]
+     """Either a URL of the image or the base64 encoded image data."""
+
+     detail: Literal["auto", "low", "high"]
+     """Specifies the detail level of the image.
+
+     Learn more in the
+     [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+     """
+
+
+ class ChatCompletionContentPartImageParam(TypedDict, total=False):
+     image_url: Required[ImageURL]
+
+     type: Required[Literal["image_url"]]
+     """The type of the content part."""

memos/types/openai_chat_completion_types/chat_completion_content_part_input_audio_param.py
@@ -0,0 +1,23 @@
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from typing_extensions import Required, TypedDict
+
+
+ __all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
+
+
+ class InputAudio(TypedDict, total=False):
+     data: Required[str]
+     """Base64 encoded audio data."""
+
+     format: Required[Literal["wav", "mp3"]]
+     """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
+
+
+ class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
+     input_audio: Required[InputAudio]
+
+     type: Required[Literal["input_audio"]]
+     """The type of the content part. Always `input_audio`."""