evermemos-0.3.6-py3-none-any.whl → evermemos-0.3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evermemos/_version.py +1 -1
- evermemos/resources/v1/__init__.py +28 -0
- evermemos/resources/v1/global_user_profile/__init__.py +33 -0
- evermemos/resources/v1/global_user_profile/custom.py +185 -0
- evermemos/resources/v1/global_user_profile/global_user_profile.py +102 -0
- evermemos/resources/v1/memories/conversation_meta.py +2 -78
- evermemos/resources/v1/memories/memories.py +172 -101
- evermemos/resources/v1/stats/__init__.py +33 -0
- evermemos/resources/v1/stats/request.py +175 -0
- evermemos/resources/v1/stats/stats.py +102 -0
- evermemos/resources/v1/v1.py +64 -0
- evermemos/types/v1/__init__.py +3 -1
- evermemos/types/v1/global_user_profile/__init__.py +6 -0
- evermemos/types/v1/global_user_profile/custom_upsert_params.py +24 -0
- evermemos/types/v1/global_user_profile/custom_upsert_response.py +23 -0
- evermemos/types/v1/memories/conversation_meta_create_params.py +0 -3
- evermemos/types/v1/memories/conversation_meta_create_response.py +0 -3
- evermemos/types/v1/memories/conversation_meta_get_response.py +0 -3
- evermemos/types/v1/memory_create_params.py +10 -2
- evermemos/types/v1/{memory_list_response.py → memory_get_response.py} +84 -3
- evermemos/types/v1/memory_load_params.py +83 -0
- evermemos/types/v1/memory_load_response.py +26 -0
- evermemos/types/v1/stats/__init__.py +6 -0
- evermemos/types/v1/stats/request_get_params.py +13 -0
- evermemos/types/v1/stats/request_get_response.py +26 -0
- {evermemos-0.3.6.dist-info → evermemos-0.3.7.dist-info}/METADATA +45 -17
- {evermemos-0.3.6.dist-info → evermemos-0.3.7.dist-info}/RECORD +29 -15
- {evermemos-0.3.6.dist-info → evermemos-0.3.7.dist-info}/WHEEL +0 -0
- {evermemos-0.3.6.dist-info → evermemos-0.3.7.dist-info}/licenses/LICENSE +0 -0
evermemos/resources/v1/v1.py
CHANGED
@@ -4,6 +4,14 @@ from __future__ import annotations
 
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
+from .stats.stats import (
+    StatsResource,
+    AsyncStatsResource,
+    StatsResourceWithRawResponse,
+    AsyncStatsResourceWithRawResponse,
+    StatsResourceWithStreamingResponse,
+    AsyncStatsResourceWithStreamingResponse,
+)
 from .memories.memories import (
     MemoriesResource,
     AsyncMemoriesResource,
@@ -12,6 +20,14 @@ from .memories.memories import (
     MemoriesResourceWithStreamingResponse,
     AsyncMemoriesResourceWithStreamingResponse,
 )
+from .global_user_profile.global_user_profile import (
+    GlobalUserProfileResource,
+    AsyncGlobalUserProfileResource,
+    GlobalUserProfileResourceWithRawResponse,
+    AsyncGlobalUserProfileResourceWithRawResponse,
+    GlobalUserProfileResourceWithStreamingResponse,
+    AsyncGlobalUserProfileResourceWithStreamingResponse,
+)
 
 __all__ = ["V1Resource", "AsyncV1Resource"]
 
@@ -21,6 +37,14 @@ class V1Resource(SyncAPIResource):
     def memories(self) -> MemoriesResource:
         return MemoriesResource(self._client)
 
+    @cached_property
+    def global_user_profile(self) -> GlobalUserProfileResource:
+        return GlobalUserProfileResource(self._client)
+
+    @cached_property
+    def stats(self) -> StatsResource:
+        return StatsResource(self._client)
+
     @cached_property
     def with_raw_response(self) -> V1ResourceWithRawResponse:
         """
@@ -46,6 +70,14 @@ class AsyncV1Resource(AsyncAPIResource):
     def memories(self) -> AsyncMemoriesResource:
         return AsyncMemoriesResource(self._client)
 
+    @cached_property
+    def global_user_profile(self) -> AsyncGlobalUserProfileResource:
+        return AsyncGlobalUserProfileResource(self._client)
+
+    @cached_property
+    def stats(self) -> AsyncStatsResource:
+        return AsyncStatsResource(self._client)
+
     @cached_property
     def with_raw_response(self) -> AsyncV1ResourceWithRawResponse:
         """
@@ -74,6 +106,14 @@ class V1ResourceWithRawResponse:
     def memories(self) -> MemoriesResourceWithRawResponse:
         return MemoriesResourceWithRawResponse(self._v1.memories)
 
+    @cached_property
+    def global_user_profile(self) -> GlobalUserProfileResourceWithRawResponse:
+        return GlobalUserProfileResourceWithRawResponse(self._v1.global_user_profile)
+
+    @cached_property
+    def stats(self) -> StatsResourceWithRawResponse:
+        return StatsResourceWithRawResponse(self._v1.stats)
+
 
 class AsyncV1ResourceWithRawResponse:
     def __init__(self, v1: AsyncV1Resource) -> None:
@@ -83,6 +123,14 @@ class AsyncV1ResourceWithRawResponse:
     def memories(self) -> AsyncMemoriesResourceWithRawResponse:
         return AsyncMemoriesResourceWithRawResponse(self._v1.memories)
 
+    @cached_property
+    def global_user_profile(self) -> AsyncGlobalUserProfileResourceWithRawResponse:
+        return AsyncGlobalUserProfileResourceWithRawResponse(self._v1.global_user_profile)
+
+    @cached_property
+    def stats(self) -> AsyncStatsResourceWithRawResponse:
+        return AsyncStatsResourceWithRawResponse(self._v1.stats)
+
 
 class V1ResourceWithStreamingResponse:
     def __init__(self, v1: V1Resource) -> None:
@@ -92,6 +140,14 @@ class V1ResourceWithStreamingResponse:
     def memories(self) -> MemoriesResourceWithStreamingResponse:
         return MemoriesResourceWithStreamingResponse(self._v1.memories)
 
+    @cached_property
+    def global_user_profile(self) -> GlobalUserProfileResourceWithStreamingResponse:
+        return GlobalUserProfileResourceWithStreamingResponse(self._v1.global_user_profile)
+
+    @cached_property
+    def stats(self) -> StatsResourceWithStreamingResponse:
+        return StatsResourceWithStreamingResponse(self._v1.stats)
+
 
 class AsyncV1ResourceWithStreamingResponse:
     def __init__(self, v1: AsyncV1Resource) -> None:
@@ -100,3 +156,11 @@ class AsyncV1ResourceWithStreamingResponse:
     @cached_property
     def memories(self) -> AsyncMemoriesResourceWithStreamingResponse:
         return AsyncMemoriesResourceWithStreamingResponse(self._v1.memories)
+
+    @cached_property
+    def global_user_profile(self) -> AsyncGlobalUserProfileResourceWithStreamingResponse:
+        return AsyncGlobalUserProfileResourceWithStreamingResponse(self._v1.global_user_profile)
+
+    @cached_property
+    def stats(self) -> AsyncStatsResourceWithStreamingResponse:
+        return AsyncStatsResourceWithStreamingResponse(self._v1.stats)
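The effect of the `v1.py` changes is that the two new resources hang off the existing `client.v1` namespace exactly like `memories`. A minimal sketch, assuming credentials are configured the same way as in the README examples further down:

```python
from evermemos import EverMemOS

client = EverMemOS()

# New accessors added in 0.3.7, mirroring the existing `client.v1.memories`.
stats = client.v1.stats
global_profile = client.v1.global_user_profile

# The raw-response wrapper gained matching properties as well.
raw_stats = client.v1.with_raw_response.stats
```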
evermemos/types/v1/__init__.py
CHANGED
@@ -4,9 +4,11 @@ from __future__ import annotations
 
 from .metadata import Metadata as Metadata
 from .memory_type import MemoryType as MemoryType
+from .memory_load_params import MemoryLoadParams as MemoryLoadParams
+from .memory_get_response import MemoryGetResponse as MemoryGetResponse
 from .memory_create_params import MemoryCreateParams as MemoryCreateParams
 from .memory_delete_params import MemoryDeleteParams as MemoryDeleteParams
-from .memory_list_response import MemoryListResponse as MemoryListResponse
+from .memory_load_response import MemoryLoadResponse as MemoryLoadResponse
 from .memory_create_response import MemoryCreateResponse as MemoryCreateResponse
 from .memory_delete_response import MemoryDeleteResponse as MemoryDeleteResponse
 from .memory_search_response import MemorySearchResponse as MemorySearchResponse
evermemos/types/v1/global_user_profile/__init__.py
ADDED

@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .custom_upsert_params import CustomUpsertParams as CustomUpsertParams
+from .custom_upsert_response import CustomUpsertResponse as CustomUpsertResponse
evermemos/types/v1/global_user_profile/custom_upsert_params.py
ADDED

@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ...._types import SequenceNotStr
+
+__all__ = ["CustomUpsertParams", "CustomProfileData"]
+
+
+class CustomUpsertParams(TypedDict, total=False):
+    custom_profile_data: Required[CustomProfileData]
+    """Custom profile data to upsert"""
+
+    user_id: Required[str]
+    """User ID"""
+
+
+class CustomProfileData(TypedDict, total=False):
+    """Custom profile data to upsert"""
+
+    initial_profile: Required[SequenceNotStr[str]]
+    """List of profile sentences describing the user"""
evermemos/types/v1/global_user_profile/custom_upsert_response.py
ADDED

@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from ...._models import BaseModel
+
+__all__ = ["CustomUpsertResponse"]
+
+
+class CustomUpsertResponse(BaseModel):
+    """Upsert custom profile response
+
+    Response for upsert custom profile API.
+    """
+
+    success: bool
+    """Whether the operation was successful"""
+
+    data: Optional[Dict[str, object]] = None
+    """Created/updated profile data"""
+
+    message: Optional[str] = None
+    """Message"""
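Taken together, `CustomUpsertParams` and `CustomUpsertResponse` describe an upsert call on the new `global_user_profile.custom` sub-resource. The sketch below is hypothetical: the method name `upsert` and the exact call shape are inferred from the generated type names and are not shown in this diff.

```python
from evermemos import EverMemOS

client = EverMemOS()

# Hypothetical call shape inferred from CustomUpsertParams; the concrete method
# in custom.py is not visible in this diff.
result = client.v1.global_user_profile.custom.upsert(
    user_id="user_001",
    custom_profile_data={
        "initial_profile": [
            "Prefers concise answers",
            "Works on data infrastructure",
        ],
    },
)

# CustomUpsertResponse fields from the diff: success, data, message.
if result.success:
    print(result.data)
else:
    print(result.message)
```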
evermemos/types/v1/memories/conversation_meta_create_params.py
CHANGED

@@ -29,9 +29,6 @@ class ConversationMetaCreateParams(TypedDict, total=False):
     scene_desc: Required[Dict[str, object]]
     """Scene description object, can include fields like description"""
 
-    version: Required[str]
-    """Metadata version number"""
-
     default_timezone: Optional[str]
     """Default timezone"""
 
evermemos/types/v1/memory_create_params.py
CHANGED

@@ -21,10 +21,18 @@ class MemoryCreateParams(TypedDict, total=False):
     """Message unique identifier"""
 
     sender: Required[str]
-    """Sender user ID"""
+    """Sender user ID (required).
+
+    Also used as user_id internally for memory ownership.
+    """
 
     group_id: Optional[str]
-    """Group ID"""
+    """Group ID.
+
+    If not provided, will automatically generate based on hash(sender) + '\\__group'
+    suffix, representing single-user mode where each user's messages are extracted
+    into separate memory spaces.
+    """
 
     group_name: Optional[str]
     """Group name"""
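The new `group_id` docstring documents a default worth calling out: omit it and the service derives a per-sender group from `hash(sender)` plus a `__group` suffix. A short sketch using only the `create` signature confirmed in the README examples below:

```python
from evermemos import EverMemOS

client = EverMemOS()

# No group_id passed: per the updated docstring, the service generates one from
# the sender hash, so each sender gets an isolated memory space (single-user mode).
memory = client.v1.memories.create(
    content="Remember that my favorite editor is Neovim",
    create_time="2025-01-15T10:05:00+00:00",
    message_id="msg_002",
    sender="user_001",
)
print(memory.message)
```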
evermemos/types/v1/{memory_list_response.py → memory_get_response.py}
RENAMED

@@ -8,10 +8,14 @@ from .metadata import Metadata
 from ..._models import BaseModel
 
 __all__ = [
-    "MemoryListResponse",
+    "MemoryGetResponse",
     "Result",
     "ResultMemory",
     "ResultMemoryProfileModel",
+    "ResultMemoryGlobalUserProfileModel",
+    "ResultMemoryCombinedProfileModel",
+    "ResultMemoryCombinedProfileModelGlobalProfile",
+    "ResultMemoryCombinedProfileModelProfile",
     "ResultMemoryEpisodicMemoryModel",
     "ResultMemoryEventLogModel",
     "ResultMemoryForesightModel",
@@ -44,6 +48,78 @@ class ResultMemoryProfileModel(BaseModel):
     version: Optional[int] = None
 
 
+class ResultMemoryGlobalUserProfileModel(BaseModel):
+    id: str
+
+    user_id: str
+
+    confidence: Optional[float] = None
+
+    created_at: Optional[datetime] = None
+
+    custom_profile_data: Optional[Dict[str, object]] = None
+
+    memcell_count: Optional[int] = None
+
+    profile_data: Optional[Dict[str, object]] = None
+
+    updated_at: Optional[datetime] = None
+
+
+class ResultMemoryCombinedProfileModelGlobalProfile(BaseModel):
+    id: str
+
+    user_id: str
+
+    confidence: Optional[float] = None
+
+    created_at: Optional[datetime] = None
+
+    custom_profile_data: Optional[Dict[str, object]] = None
+
+    memcell_count: Optional[int] = None
+
+    profile_data: Optional[Dict[str, object]] = None
+
+    updated_at: Optional[datetime] = None
+
+
+class ResultMemoryCombinedProfileModelProfile(BaseModel):
+    id: str
+
+    group_id: str
+
+    user_id: str
+
+    cluster_ids: Optional[List[str]] = None
+
+    confidence: Optional[float] = None
+
+    created_at: Optional[datetime] = None
+
+    last_updated_cluster: Optional[str] = None
+
+    memcell_count: Optional[int] = None
+
+    profile_data: Optional[Dict[str, object]] = None
+
+    scenario: Optional[str] = None
+
+    updated_at: Optional[datetime] = None
+
+    version: Optional[int] = None
+
+
+class ResultMemoryCombinedProfileModel(BaseModel):
+    user_id: str
+
+    global_profile: Optional[ResultMemoryCombinedProfileModelGlobalProfile] = None
+
+    group_id: Optional[str] = None
+
+    profiles: Optional[List[ResultMemoryCombinedProfileModelProfile]] = None
+
+
 class ResultMemoryEpisodicMemoryModel(BaseModel):
     id: str
 
@@ -161,7 +237,12 @@ class ResultMemoryForesightModel(BaseModel):
 
 
 ResultMemory: TypeAlias = Union[
-    ResultMemoryProfileModel, ResultMemoryEpisodicMemoryModel, ResultMemoryEventLogModel, ResultMemoryForesightModel
+    ResultMemoryProfileModel,
+    ResultMemoryGlobalUserProfileModel,
+    ResultMemoryCombinedProfileModel,
+    ResultMemoryEpisodicMemoryModel,
+    ResultMemoryEventLogModel,
+    ResultMemoryForesightModel,
 ]
 
 
@@ -177,7 +258,7 @@ class Result(BaseModel):
     total_count: Optional[int] = None
 
 
-class MemoryListResponse(BaseModel):
+class MemoryGetResponse(BaseModel):
     """Memory fetch API response
 
     Response for GET /api/v1/memories endpoint.
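The renamed response module now carries two extra profile shapes in the `ResultMemory` union. An illustrative helper (not part of the SDK) that narrows the union with `isinstance`, touching only fields visible in the diff above:

```python
from evermemos.types.v1.memory_get_response import (
    ResultMemory,
    ResultMemoryCombinedProfileModel,
    ResultMemoryGlobalUserProfileModel,
)


def describe_memory(memory: ResultMemory) -> str:
    # ResultMemory is a Union of response models; isinstance picks the concrete class.
    if isinstance(memory, ResultMemoryCombinedProfileModel):
        group_profiles = memory.profiles or []
        return f"combined profile for {memory.user_id} with {len(group_profiles)} group profile(s)"
    if isinstance(memory, ResultMemoryGlobalUserProfileModel):
        return f"global profile for {memory.user_id}"
    return f"other memory type: {type(memory).__name__}"
```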
evermemos/types/v1/memory_load_params.py
ADDED

@@ -0,0 +1,83 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Required, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = ["MemoryLoadParams", "ConversationMeta", "ConversationList"]
+
+
+class MemoryLoadParams(TypedDict, total=False):
+    conversation_meta: Required[ConversationMeta]
+    """Conversation metadata for batch import."""
+
+    conversation_list: Optional[Iterable[ConversationList]]
+    """Message list."""
+
+    version: Optional[str]
+    """Format version."""
+
+
+class ConversationMeta(TypedDict, total=False):
+    """Conversation metadata for batch import."""
+
+    group_id: Required[str]
+    """Conversation group ID (unique)."""
+
+    created_at: Optional[str]
+    """Conversation creation time (ISO 8601)."""
+
+    default_timezone: Optional[str]
+    """Default timezone."""
+
+    description: Optional[str]
+    """Conversation description."""
+
+    name: Optional[str]
+    """Conversation name."""
+
+    scene: Optional[str]
+    """Scene type (e.g., assistant, group_chat)."""
+
+    scene_desc: Optional[Dict[str, object]]
+    """Scene description object."""
+
+    tags: Optional[SequenceNotStr[str]]
+    """Tags for the conversation."""
+
+    user_details: Optional[Dict[str, object]]
+    """User details map keyed by user ID."""
+
+
+class ConversationList(TypedDict, total=False):
+    """Message item for batch import."""
+
+    content: Optional[str]
+    """Message content."""
+
+    create_time: Union[str, int, None]
+    """Creation time (ISO 8601 or timestamp)."""
+
+    extra: Optional[Dict[str, object]]
+    """Additional data."""
+
+    message_id: Optional[str]
+    """Message ID."""
+
+    refer_list: Optional[Iterable[Dict[str, object]]]
+    """Referenced messages."""
+
+    role: Optional[str]
+    """Role (e.g., user, assistant)."""
+
+    sender: Optional[str]
+    """Sender ID."""
+
+    sender_name: Optional[str]
+    """Sender name."""
+
+    type: Optional[str]
+    """Message type (e.g., text, image)."""
evermemos/types/v1/memory_load_response.py
ADDED

@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["MemoryLoadResponse"]
+
+
+class MemoryLoadResponse(BaseModel):
+    """Batch import response."""
+
+    imported_meta: Optional[bool] = None
+    """Whether conversation metadata was imported."""
+
+    message: Optional[str] = None
+    """Status message."""
+
+    request_id: Optional[str] = None
+    """Request ID of the last imported message."""
+
+    status: Optional[str] = None
+    """Queue status."""
+
+    total_count: Optional[int] = None
+    """Total number of messages."""
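`MemoryLoadParams` and `MemoryLoadResponse` back the new batch-import call, `client.v1.memories.load`, which also appears in the README further down. A sketch that additionally passes `conversation_list` (field names taken from the TypedDicts above; the message values are illustrative):

```python
from evermemos import EverMemOS

client = EverMemOS()

# conversation_meta is required; conversation_list carries the messages to import.
response = client.v1.memories.load(
    conversation_meta={
        "group_id": "chat_user_001_assistant",
        "scene": "assistant",
        "default_timezone": "UTC",
    },
    conversation_list=[
        {
            "message_id": "msg_001",
            "sender": "user_001",
            "role": "user",
            "type": "text",
            "content": "Let's discuss the technical solution for the new feature today",
            "create_time": "2025-01-15T10:00:00+00:00",
        },
    ],
)

# MemoryLoadResponse fields from the diff: status, total_count, request_id.
print(response.status, response.total_count, response.request_id)
```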
evermemos/types/v1/stats/__init__.py
ADDED

@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .request_get_params import RequestGetParams as RequestGetParams
+from .request_get_response import RequestGetResponse as RequestGetResponse
evermemos/types/v1/stats/request_get_params.py
ADDED

@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["RequestGetParams"]
+
+
+class RequestGetParams(TypedDict, total=False):
+    request_id: Required[Optional[str]]
+    """Request ID (recommended)"""
evermemos/types/v1/stats/request_get_response.py
ADDED

@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from ...._models import BaseModel
+
+__all__ = ["RequestGetResponse"]
+
+
+class RequestGetResponse(BaseModel):
+    """Request status response
+
+    Contains detailed status information of the request.
+    """
+
+    success: bool
+    """Whether the query was successful"""
+
+    data: Optional[Dict[str, object]] = None
+    """Request status data"""
+
+    found: Optional[bool] = None
+    """Whether the request status was found"""
+
+    message: Optional[str] = None
+    """Message"""
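`RequestGetParams` takes the `request_id` that `memories.load` returns, so the new stats endpoint looks intended for polling import status. The method name `get` on `client.v1.stats.request` is an assumption based on the generated file and type names (`stats/request.py`, `RequestGetParams`, `RequestGetResponse`); it is not shown in this diff.

```python
from evermemos import EverMemOS

client = EverMemOS()

load_response = client.v1.memories.load(
    conversation_meta={"group_id": "chat_user_001_assistant"},
)

# Hypothetical: poll the request status with the request_id from the batch import.
status = client.v1.stats.request.get(request_id=load_response.request_id)

# RequestGetResponse fields from the diff: success, found, data, message.
if status.success and status.found:
    print(status.data)
else:
    print(status.message)
```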
{evermemos-0.3.6.dist-info → evermemos-0.3.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: evermemos
-Version: 0.3.6
+Version: 0.3.7
 Summary: The official Python library for the EverMemOS API
 Project-URL: Homepage, https://github.com/evermemos/evermemos-python
 Project-URL: Repository, https://github.com/evermemos/evermemos-python
@@ -68,10 +68,10 @@ client = EverMemOS(
 )
 
 memory = client.v1.memories.create(
-    content="
+    content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
-    sender="
+    sender="user_001",
 )
 print(memory.message)
 ```
@@ -97,10 +97,10 @@ client = AsyncEverMemOS(
 
 async def main() -> None:
     memory = await client.v1.memories.create(
-        content="
+        content="Let's discuss the technical solution for the new feature today",
         create_time="2025-01-15T10:00:00+00:00",
         message_id="msg_001",
-        sender="
+        sender="user_001",
     )
     print(memory.message)
 
@@ -136,10 +136,10 @@ async def main() -> None:
         http_client=DefaultAioHttpClient(),
     ) as client:
         memory = await client.v1.memories.create(
-            content="
+            content="Let's discuss the technical solution for the new feature today",
             create_time="2025-01-15T10:00:00+00:00",
             message_id="msg_001",
-            sender="
+            sender="user_001",
         )
         print(memory.message)
 
@@ -156,6 +156,34 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
 
 Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
 
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from evermemos import EverMemOS
+
+client = EverMemOS()
+
+response = client.v1.memories.load(
+    conversation_meta={
+        "group_id": "chat_user_001_assistant",
+        "created_at": "2025-06-26T00:00:00Z",
+        "default_timezone": "UTC",
+        "description": "Conversation between user and assistant",
+        "name": "User Support Chat",
+        "scene": "assistant",
+        "scene_desc": {"description": "bar"},
+        "tags": ["support"],
+        "user_details": {
+            "user_001": "bar",
+            "robot_001": "bar",
+        },
+    },
+)
+print(response.conversation_meta)
+```
+
 ## Handling errors
 
 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `evermemos.APIConnectionError` is raised.
@@ -173,10 +201,10 @@ client = EverMemOS()
 
 try:
     client.v1.memories.create(
-        content="
+        content="Let's discuss the technical solution for the new feature today",
         create_time="2025-01-15T10:00:00+00:00",
         message_id="msg_001",
-        sender="
+        sender="user_001",
     )
 except evermemos.APIConnectionError as e:
     print("The server could not be reached")
@@ -221,10 +249,10 @@ client = EverMemOS(
 
 # Or, configure per-request:
 client.with_options(max_retries=5).v1.memories.create(
-    content="
+    content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
-    sender="
+    sender="user_001",
 )
 ```
 
@@ -249,10 +277,10 @@ client = EverMemOS(
 
 # Override per-request:
 client.with_options(timeout=5.0).v1.memories.create(
-    content="
+    content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
-    sender="
+    sender="user_001",
 )
 ```
 
@@ -295,10 +323,10 @@ from evermemos import EverMemOS
 
 client = EverMemOS()
 response = client.v1.memories.with_raw_response.create(
-    content="
+    content="Let's discuss the technical solution for the new feature today",
    create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
-    sender="
+    sender="user_001",
 )
 print(response.headers.get('X-My-Header'))
 
@@ -318,10 +346,10 @@ To stream the response body, use `.with_streaming_response` instead, which requi
 
 ```python
 with client.v1.memories.with_streaming_response.create(
-    content="
+    content="Let's discuss the technical solution for the new feature today",
     create_time="2025-01-15T10:00:00+00:00",
     message_id="msg_001",
-    sender="
+    sender="user_001",
 ) as response:
     print(response.headers.get("X-My-Header"))
 