evermemos 0.3.9__py3-none-any.whl → 0.3.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Files changed (35)
  1. evermemos/_base_client.py +5 -2
  2. evermemos/_compat.py +3 -3
  3. evermemos/_utils/_json.py +35 -0
  4. evermemos/_version.py +1 -1
  5. evermemos/resources/v1/__init__.py +14 -0
  6. evermemos/resources/v1/memories/conversation_meta.py +138 -10
  7. evermemos/resources/v1/memories/memories.py +165 -182
  8. evermemos/resources/v1/status/__init__.py +33 -0
  9. evermemos/resources/v1/status/request.py +175 -0
  10. evermemos/resources/v1/status/status.py +102 -0
  11. evermemos/resources/v1/v1.py +32 -0
  12. evermemos/types/v1/__init__.py +2 -4
  13. evermemos/types/v1/memories/conversation_meta_create_params.py +112 -6
  14. evermemos/types/v1/memories/conversation_meta_create_response.py +74 -6
  15. evermemos/types/v1/memories/conversation_meta_get_response.py +74 -6
  16. evermemos/types/v1/memories/conversation_meta_update_params.py +85 -2
  17. evermemos/types/v1/memories/conversation_meta_update_response.py +11 -0
  18. evermemos/types/v1/memory_add_params.py +60 -0
  19. evermemos/types/v1/{memory_create_response.py → memory_add_response.py} +2 -2
  20. evermemos/types/v1/memory_delete_params.py +9 -0
  21. evermemos/types/v1/memory_delete_response.py +7 -0
  22. evermemos/types/v1/memory_get_response.py +12 -5
  23. evermemos/types/v1/memory_search_response.py +5 -0
  24. evermemos/types/v1/status/__init__.py +6 -0
  25. evermemos/types/v1/status/request_get_params.py +13 -0
  26. evermemos/types/v1/status/request_get_response.py +21 -0
  27. {evermemos-0.3.9.dist-info → evermemos-0.3.10.dist-info}/METADATA +24 -26
  28. {evermemos-0.3.9.dist-info → evermemos-0.3.10.dist-info}/RECORD +30 -27
  29. evermemos/types/v1/global_user_profile/__init__.py +0 -3
  30. evermemos/types/v1/memory_create_params.py +0 -32
  31. evermemos/types/v1/memory_load_params.py +0 -56
  32. evermemos/types/v1/memory_load_response.py +0 -19
  33. evermemos/types/v1/stats/__init__.py +0 -3
  34. {evermemos-0.3.9.dist-info → evermemos-0.3.10.dist-info}/WHEEL +0 -0
  35. {evermemos-0.3.9.dist-info → evermemos-0.3.10.dist-info}/licenses/LICENSE +0 -0
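The headline change for callers: `client.v1.memories.create(...)` is renamed to `client.v1.memories.add(...)` (with `MemoryCreateResponse` becoming `MemoryAddResponse`), and the `memories.load(...)` helper is removed. A minimal before/after sketch based on the README changes further down in this diff:

```python
from evermemos import EverMemOS

client = EverMemOS()  # reads EVERMEMOS_API_KEY from the environment by default

# 0.3.9: memory = client.v1.memories.create(...)
# 0.3.10:
response = client.v1.memories.add(
    content="Let's discuss the technical solution for the new feature today",
    create_time="2025-01-15T10:00:00+00:00",  # ISO 8601
    message_id="msg_001",
    sender="user_001",  # also used internally as user_id for memory ownership
)
print(response.request_id)
```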
evermemos/types/v1/memories/conversation_meta_get_response.py
@@ -4,38 +4,106 @@ from typing import Dict, List, Optional
 
 from ...._models import BaseModel
 
-__all__ = ["ConversationMetaGetResponse", "Result"]
+__all__ = [
+    "ConversationMetaGetResponse",
+    "Result",
+    "ResultLlmCustomSetting",
+    "ResultLlmCustomSettingBoundary",
+    "ResultLlmCustomSettingExtraction",
+]
 
 
-class Result(BaseModel):
-    id: str
+class ResultLlmCustomSettingBoundary(BaseModel):
+    """LLM config for boundary detection (fast, cheap model recommended)"""
+
+    model: str
+    """Model name"""
+
+    provider: str
+    """LLM provider name"""
+
+    extra: Optional[Dict[str, object]] = None
+    """Additional provider-specific configuration"""
+
+
+class ResultLlmCustomSettingExtraction(BaseModel):
+    """LLM config for memory extraction (high quality model recommended)"""
+
+    model: str
+    """Model name"""
+
+    provider: str
+    """LLM provider name"""
+
+    extra: Optional[Dict[str, object]] = None
+    """Additional provider-specific configuration"""
+
 
-    conversation_created_at: str
+class ResultLlmCustomSetting(BaseModel):
+    """LLM custom settings (only for global config)"""
 
-    name: str
+    boundary: Optional[ResultLlmCustomSettingBoundary] = None
+    """LLM config for boundary detection (fast, cheap model recommended)"""
 
-    scene: str
+    extra: Optional[Dict[str, object]] = None
+    """Additional task-specific LLM configurations"""
+
+    extraction: Optional[ResultLlmCustomSettingExtraction] = None
+    """LLM config for memory extraction (high quality model recommended)"""
+
+
+class Result(BaseModel):
+    """Conversation metadata"""
+
+    id: str
+    """Document ID"""
+
+    conversation_created_at: Optional[str] = None
+    """Conversation creation time"""
 
     created_at: Optional[str] = None
+    """Record creation time"""
 
     default_timezone: Optional[str] = None
+    """Default timezone"""
+
+    description: Optional[str] = None
+    """Description"""
 
     group_id: Optional[str] = None
+    """Group ID (null for global config)"""
 
     is_default: Optional[bool] = None
+    """Whether this is the global (default) config"""
+
+    llm_custom_setting: Optional[ResultLlmCustomSetting] = None
+    """LLM custom settings (only for global config)"""
+
+    name: Optional[str] = None
+    """Group/conversation name (only for group config)"""
+
+    scene: Optional[str] = None
+    """Scene identifier (only for global config)"""
 
     scene_desc: Optional[Dict[str, object]] = None
+    """Scene description (only for global config)"""
 
     tags: Optional[List[str]] = None
+    """Tags"""
 
     updated_at: Optional[str] = None
+    """Record update time"""
 
     user_details: Optional[Dict[str, Dict[str, object]]] = None
+    """User details"""
 
 
 class ConversationMetaGetResponse(BaseModel):
     result: Result
+    """Conversation metadata"""
 
     message: Optional[str] = None
+    """Response message"""
 
     status: Optional[str] = None
+    """Response status"""
evermemos/types/v1/memories/conversation_meta_update_params.py
@@ -3,32 +3,115 @@
 from __future__ import annotations
 
 from typing import Dict, Optional
-from typing_extensions import TypedDict
+from typing_extensions import Required, TypedDict
 
 from ...._types import SequenceNotStr
 
-__all__ = ["ConversationMetaUpdateParams", "UserDetails"]
+__all__ = [
+    "ConversationMetaUpdateParams",
+    "LlmCustomSetting",
+    "LlmCustomSettingBoundary",
+    "LlmCustomSettingExtraction",
+    "UserDetails",
+]
 
 
 class ConversationMetaUpdateParams(TypedDict, total=False):
     default_timezone: Optional[str]
+    """New default timezone"""
+
+    description: Optional[str]
+    """New description"""
 
     group_id: Optional[str]
+    """Group ID to update. When null, updates the global (default) config."""
+
+    llm_custom_setting: Optional[LlmCustomSetting]
+    """New LLM custom settings.
+
+    **Only allowed for global config (group_id=null). Not allowed for group config
+    (inherited from global config).**
+    """
 
     name: Optional[str]
+    """New group/conversation name.
+
+    **Only allowed for group config (group_id provided). Not allowed for global
+    config.**
+    """
 
     scene_desc: Optional[Dict[str, object]]
+    """New scene description.
+
+    **Only allowed for global config (group_id=null). Not allowed for group config
+    (inherited from global config).**
+    """
 
     tags: Optional[SequenceNotStr[str]]
+    """New tag list"""
 
     user_details: Optional[Dict[str, UserDetails]]
+    """New user details (will completely replace existing user_details)"""
+
+
+class LlmCustomSettingBoundary(TypedDict, total=False):
+    """LLM config for boundary detection (fast, cheap model recommended)"""
+
+    model: Required[str]
+    """Model name"""
+
+    provider: Required[str]
+    """LLM provider name"""
+
+    extra: Optional[Dict[str, object]]
+    """Additional provider-specific configuration"""
+
+
+class LlmCustomSettingExtraction(TypedDict, total=False):
+    """LLM config for memory extraction (high quality model recommended)"""
+
+    model: Required[str]
+    """Model name"""
+
+    provider: Required[str]
+    """LLM provider name"""
+
+    extra: Optional[Dict[str, object]]
+    """Additional provider-specific configuration"""
+
+
+class LlmCustomSetting(TypedDict, total=False):
+    """New LLM custom settings.
+
+    **Only allowed for global config (group_id=null).
+    Not allowed for group config (inherited from global config).**
+    """
+
+    boundary: Optional[LlmCustomSettingBoundary]
+    """LLM config for boundary detection (fast, cheap model recommended)"""
+
+    extra: Optional[Dict[str, object]]
+    """Additional task-specific LLM configurations"""
+
+    extraction: Optional[LlmCustomSettingExtraction]
+    """LLM config for memory extraction (high quality model recommended)"""
 
 
 class UserDetails(TypedDict, total=False):
     custom_role: Optional[str]
+    """User's job/position role (e.g. developer, designer, manager)"""
 
     extra: Optional[Dict[str, object]]
+    """Additional information"""
 
     full_name: Optional[str]
+    """User full name"""
 
     role: Optional[str]
+    """
+    User type role, used to identify if this user is a human or AI. Enum values from
+    MessageSenderRole:
+
+    - user: Human user
+    - assistant: AI assistant/bot
+    """
evermemos/types/v1/memories/conversation_meta_update_response.py
@@ -8,22 +8,33 @@ __all__ = ["ConversationMetaUpdateResponse", "Result"]
 
 
 class Result(BaseModel):
+    """Patch result with updated fields"""
+
     id: str
+    """Document ID"""
 
     group_id: Optional[str] = None
+    """Group ID (null for default config)"""
 
     name: Optional[str] = None
+    """Conversation name"""
 
     scene: Optional[str] = None
+    """Scene identifier"""
 
     updated_at: Optional[str] = None
+    """Record update time"""
 
     updated_fields: Optional[List[str]] = None
+    """List of updated field names"""
 
 
 class ConversationMetaUpdateResponse(BaseModel):
     result: Result
+    """Patch result with updated fields"""
 
     message: Optional[str] = None
+    """Response message"""
 
     status: Optional[str] = None
+    """Response status"""
evermemos/types/v1/memory_add_params.py (new file)
@@ -0,0 +1,60 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["MemoryAddParams"]
+
+
+class MemoryAddParams(TypedDict, total=False):
+    content: Required[str]
+    """Message content"""
+
+    create_time: Required[str]
+    """Message creation time (ISO 8601 format)"""
+
+    message_id: Required[str]
+    """Message unique identifier"""
+
+    sender: Required[str]
+    """Sender user ID (required).
+
+    Also used as user_id internally for memory ownership.
+    """
+
+    flush: bool
+    """Force boundary trigger.
+
+    When True, immediately triggers memory extraction instead of waiting for natural
+    boundary detection.
+    """
+
+    group_id: Optional[str]
+    """Group ID.
+
+    If not provided, will automatically generate based on hash(sender) + '\\__group'
+    suffix, representing single-user mode where each user's messages are extracted
+    into separate memory spaces.
+    """
+
+    group_name: Optional[str]
+    """Group name"""
+
+    refer_list: Optional[SequenceNotStr[str]]
+    """List of referenced message IDs"""
+
+    role: Optional[str]
+    """
+    Message sender role, used to identify the source of the message. Enum values
+    from MessageSenderRole:
+
+    - user: Message from a human user
+    - assistant: Message from an AI assistant
+    """
+
+    sender_name: Optional[str]
+    """Sender name (uses sender if not provided)"""
evermemos/types/v1/{memory_create_response.py → memory_add_response.py}
@@ -2,10 +2,10 @@
 
 from ..._models import BaseModel
 
-__all__ = ["MemoryCreateResponse"]
+__all__ = ["MemoryAddResponse"]
 
 
-class MemoryCreateResponse(BaseModel):
+class MemoryAddResponse(BaseModel):
     message: str
 
     request_id: str
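Code with type annotations needs the matching rename. A sketch, assuming the type is re-exported from `evermemos.types.v1` in the usual Stainless layout:

```python
# 0.3.9: from evermemos.types.v1 import MemoryCreateResponse
from evermemos.types.v1 import MemoryAddResponse  # renamed type


def track(resp: MemoryAddResponse) -> str:
    # Shape is unchanged; only the class name differs.
    return resp.request_id
```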
evermemos/types/v1/memory_delete_params.py
@@ -9,8 +9,17 @@ __all__ = ["MemoryDeleteParams"]
 
 
 class MemoryDeleteParams(TypedDict, total=False):
+    id: Optional[str]
+    """Alias for memory_id (backward compatibility)"""
+
+    event_id: Optional[str]
+    """Alias for memory_id (backward compatibility)"""
+
     group_id: Optional[str]
+    """Group ID (filter condition)"""
 
     memory_id: Optional[str]
+    """Memory id (filter condition)"""
 
     user_id: Optional[str]
+    """User ID (filter condition)"""
evermemos/types/v1/memory_delete_response.py
@@ -8,14 +8,21 @@ __all__ = ["MemoryDeleteResponse", "Result"]
 
 
 class Result(BaseModel):
+    """Delete operation result"""
+
     count: Optional[int] = None
+    """Number of memories deleted"""
 
     filters: Optional[List[str]] = None
+    """List of filter types used for deletion"""
 
 
 class MemoryDeleteResponse(BaseModel):
     result: Result
+    """Delete operation result"""
 
     message: Optional[str] = None
+    """Response message"""
 
     status: Optional[str] = None
+    """Response status"""
evermemos/types/v1/memory_get_response.py
@@ -125,23 +125,21 @@ class ResultMemoryEpisodicMemoryModel(BaseModel):
 
     episode_id: str
 
-    summary: str
-
-    title: str
-
     user_id: str
 
     created_at: Optional[datetime] = None
 
     end_time: Optional[datetime] = None
 
+    episode: Optional[str] = None
+
     extend: Optional[Dict[str, object]] = None
 
     group_id: Optional[str] = None
 
     group_name: Optional[str] = None
 
-    key_events: Optional[List[str]] = None
+    keywords: Optional[List[str]] = None
 
     location: Optional[str] = None
 
@@ -157,6 +155,8 @@ class ResultMemoryEpisodicMemoryModel(BaseModel):
 
     subject: Optional[str] = None
 
+    summary: Optional[str] = None
+
     timestamp: Optional[datetime] = None
 
     updated_at: Optional[datetime] = None
@@ -203,6 +203,8 @@ class ResultMemoryForesightModel(BaseModel):
 
     content: str
 
+    foresight: str
+
     parent_id: str
 
     parent_type: str
@@ -249,6 +251,8 @@ ResultMemory: TypeAlias = Union[
 
 
 class Result(BaseModel):
+    """Memory fetch result"""
+
     has_more: Optional[bool] = None
 
     memories: Optional[List[ResultMemory]] = None
@@ -260,7 +264,10 @@ class Result(BaseModel):
 
 class MemoryGetResponse(BaseModel):
     result: Result
+    """Memory fetch result"""
 
     message: Optional[str] = None
+    """Response message"""
 
     status: Optional[str] = None
+    """Response status"""
evermemos/types/v1/memory_search_response.py
@@ -74,6 +74,8 @@ class ResultPendingMessage(BaseModel):
 
 
 class Result(BaseModel):
+    """Memory search result"""
+
     has_more: Optional[bool] = None
 
     memories: Optional[List[ResultMemory]] = None
@@ -93,7 +95,10 @@ class Result(BaseModel):
 
 class MemorySearchResponse(BaseModel):
     result: Result
+    """Memory search result"""
 
     message: Optional[str] = None
+    """Response message"""
 
     status: Optional[str] = None
+    """Response status"""
evermemos/types/v1/status/__init__.py (new file)
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .request_get_params import RequestGetParams as RequestGetParams
+from .request_get_response import RequestGetResponse as RequestGetResponse
evermemos/types/v1/status/request_get_params.py (new file)
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RequestGetParams"]
+
+
+class RequestGetParams(TypedDict, total=False):
+    request_id: Required[Optional[str]]
+    """which is returned by add_memories api"""
evermemos/types/v1/status/request_get_response.py (new file)
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from ...._models import BaseModel
+
+__all__ = ["RequestGetResponse"]
+
+
+class RequestGetResponse(BaseModel):
+    success: bool
+    """Whether the query was successful"""
+
+    data: Optional[Dict[str, object]] = None
+    """Request status data"""
+
+    found: Optional[bool] = None
+    """Whether the request status was found"""
+
+    message: Optional[str] = None
+    """Message"""
evermemos-0.3.10.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: evermemos
-Version: 0.3.9
+Version: 0.3.10
 Summary: The official Python library for the EverMemOS API
 Project-URL: Homepage, https://github.com/evermemos/evermemos-python
 Project-URL: Repository, https://github.com/evermemos/evermemos-python
@@ -67,13 +67,13 @@ client = EverMemOS(
     api_key=os.environ.get("EVERMEMOS_API_KEY"),  # This is the default and can be omitted
 )
 
-memory = client.v1.memories.create(
+response = client.v1.memories.add(
     content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
     sender="user_001",
 )
-print(memory.request_id)
+print(response.request_id)
 ```
 
 While you can provide an `api_key` keyword argument,
@@ -96,13 +96,13 @@ client = AsyncEverMemOS(
 
 
 async def main() -> None:
-    memory = await client.v1.memories.create(
+    response = await client.v1.memories.add(
         content="Let's discuss the technical solution for the new feature today",
         create_time="2025-01-15T10:00:00+00:00",
         message_id="msg_001",
         sender="user_001",
     )
-    print(memory.request_id)
+    print(response.request_id)
 
 
 asyncio.run(main())
@@ -135,13 +135,13 @@ async def main() -> None:
         api_key=os.environ.get("EVERMEMOS_API_KEY"),  # This is the default and can be omitted
         http_client=DefaultAioHttpClient(),
     ) as client:
-        memory = await client.v1.memories.create(
+        response = await client.v1.memories.add(
            content="Let's discuss the technical solution for the new feature today",
            create_time="2025-01-15T10:00:00+00:00",
            message_id="msg_001",
            sender="user_001",
        )
-        print(memory.request_id)
+        print(response.request_id)
 
 
 asyncio.run(main())
@@ -165,22 +165,20 @@ from evermemos import EverMemOS
 
 client = EverMemOS()
 
-response = client.v1.memories.load(
-    conversation_meta={
-        "group_id": "chat_user_001_assistant",
-        "created_at": "2025-06-26T00:00:00Z",
-        "default_timezone": "UTC",
-        "name": "User Support Chat",
-        "scene": "assistant",
-        "scene_desc": {},
-        "tags": ["support"],
-        "user_details": {
-            "user_001": "bar",
-            "robot_001": "bar",
+conversation_meta = client.v1.memories.conversation_meta.create(
+    created_at="2025-01-15T10:00:00+00:00",
+    llm_custom_setting={
+        "boundary": {
+            "model": "gpt-4o-mini",
+            "provider": "openai",
+        },
+        "extraction": {
+            "model": "gpt-4o",
+            "provider": "openai",
         },
     },
 )
-print(response.conversation_meta)
+print(conversation_meta.llm_custom_setting)
 ```
 
 ## Handling errors
@@ -199,7 +197,7 @@ from evermemos import EverMemOS
 client = EverMemOS()
 
 try:
-    client.v1.memories.create(
+    client.v1.memories.add(
         content="Let's discuss the technical solution for the new feature today",
         create_time="2025-01-15T10:00:00+00:00",
         message_id="msg_001",
@@ -247,7 +245,7 @@ client = EverMemOS(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).v1.memories.create(
+client.with_options(max_retries=5).v1.memories.add(
     content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
@@ -275,7 +273,7 @@ client = EverMemOS(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).v1.memories.create(
+client.with_options(timeout=5.0).v1.memories.add(
     content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
@@ -321,7 +319,7 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from evermemos import EverMemOS
 
 client = EverMemOS()
-response = client.v1.memories.with_raw_response.create(
+response = client.v1.memories.with_raw_response.add(
     content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
@@ -329,7 +327,7 @@ response = client.v1.memories.with_raw_response.create(
 )
 print(response.headers.get('X-My-Header'))
 
-memory = response.parse()  # get the object that `v1.memories.create()` would have returned
+memory = response.parse()  # get the object that `v1.memories.add()` would have returned
 print(memory.request_id)
 ```
 
@@ -344,7 +342,7 @@ The above interface eagerly reads the full response body when you make the reque
 To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 
 ```python
-with client.v1.memories.with_streaming_response.create(
+with client.v1.memories.with_streaming_response.add(
     content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",