jl-ecms-client 0.2.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jl_ecms_client-0.2.0.dist-info/METADATA +295 -0
- jl_ecms_client-0.2.0.dist-info/RECORD +51 -0
- jl_ecms_client-0.2.0.dist-info/WHEEL +5 -0
- jl_ecms_client-0.2.0.dist-info/licenses/LICENSE +190 -0
- jl_ecms_client-0.2.0.dist-info/top_level.txt +1 -0
- mirix/client/__init__.py +72 -0
- mirix/client/client.py +2594 -0
- mirix/client/remote_client.py +1136 -0
- mirix/helpers/__init__.py +1 -0
- mirix/helpers/converters.py +429 -0
- mirix/helpers/datetime_helpers.py +90 -0
- mirix/helpers/json_helpers.py +47 -0
- mirix/helpers/message_helpers.py +74 -0
- mirix/helpers/tool_rule_solver.py +166 -0
- mirix/schemas/__init__.py +1 -0
- mirix/schemas/agent.py +400 -0
- mirix/schemas/block.py +188 -0
- mirix/schemas/cloud_file_mapping.py +29 -0
- mirix/schemas/embedding_config.py +114 -0
- mirix/schemas/enums.py +69 -0
- mirix/schemas/environment_variables.py +82 -0
- mirix/schemas/episodic_memory.py +170 -0
- mirix/schemas/file.py +57 -0
- mirix/schemas/health.py +10 -0
- mirix/schemas/knowledge_vault.py +181 -0
- mirix/schemas/llm_config.py +187 -0
- mirix/schemas/memory.py +318 -0
- mirix/schemas/message.py +1315 -0
- mirix/schemas/mirix_base.py +107 -0
- mirix/schemas/mirix_message.py +411 -0
- mirix/schemas/mirix_message_content.py +230 -0
- mirix/schemas/mirix_request.py +39 -0
- mirix/schemas/mirix_response.py +183 -0
- mirix/schemas/openai/__init__.py +1 -0
- mirix/schemas/openai/chat_completion_request.py +122 -0
- mirix/schemas/openai/chat_completion_response.py +144 -0
- mirix/schemas/openai/chat_completions.py +127 -0
- mirix/schemas/openai/embedding_response.py +11 -0
- mirix/schemas/openai/openai.py +229 -0
- mirix/schemas/organization.py +38 -0
- mirix/schemas/procedural_memory.py +151 -0
- mirix/schemas/providers.py +816 -0
- mirix/schemas/resource_memory.py +134 -0
- mirix/schemas/sandbox_config.py +132 -0
- mirix/schemas/semantic_memory.py +162 -0
- mirix/schemas/source.py +96 -0
- mirix/schemas/step.py +53 -0
- mirix/schemas/tool.py +241 -0
- mirix/schemas/tool_rule.py +209 -0
- mirix/schemas/usage.py +31 -0
- mirix/schemas/user.py +67 -0
@@ -0,0 +1,144 @@
+import datetime
+from typing import Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel
+
+
+# class ToolCallFunction(BaseModel):
+#     name: str
+#     arguments: str
+
+
+class FunctionCall(BaseModel):
+    arguments: str
+    name: str
+
+
+class ToolCall(BaseModel):
+    id: str
+    # "Currently, only function is supported"
+    type: Literal["function"] = "function"
+    # function: ToolCallFunction
+    function: FunctionCall
+
+
+class LogProbToken(BaseModel):
+    token: str
+    logprob: float
+    bytes: Optional[List[int]]
+
+
+class MessageContentLogProb(BaseModel):
+    token: str
+    logprob: float
+    bytes: Optional[List[int]]
+    top_logprobs: Optional[List[LogProbToken]]
+
+
+class Message(BaseModel):
+    content: Optional[str] = None
+    tool_calls: Optional[List[ToolCall]] = None
+    role: str
+    function_call: Optional[FunctionCall] = None  # Deprecated
+
+
+class Choice(BaseModel):
+    finish_reason: str
+    index: int
+    message: Message
+    logprobs: Optional[Dict[str, Union[List[MessageContentLogProb], None]]] = None
+    seed: Optional[int] = None  # found in TogetherAI
+
+
+class UsageStatistics(BaseModel):
+    completion_tokens: int = 0
+    prompt_tokens: int = 0
+    total_tokens: int = 0
+    last_prompt_tokens: int = 0
+    last_completion_tokens: int = 0
+
+    def __add__(self, other: "UsageStatistics") -> "UsageStatistics":
+        return UsageStatistics(
+            completion_tokens=self.completion_tokens + other.completion_tokens,
+            prompt_tokens=self.prompt_tokens + other.prompt_tokens,
+            total_tokens=self.total_tokens + other.total_tokens,
+        )
+
+
+class ChatCompletionResponse(BaseModel):
+    """https://platform.openai.com/docs/api-reference/chat/object"""
+
+    id: str
+    choices: List[Choice]
+    created: datetime.datetime
+    model: Optional[str] = (
+        None  # NOTE: this is not consistent with OpenAI API standard, however is necessary to support local LLMs
+    )
+    # system_fingerprint: str  # docs say this is mandatory, but in reality API returns None
+    system_fingerprint: Optional[str] = None
+    # object: str = Field(default="chat.completion")
+    object: Literal["chat.completion"] = "chat.completion"
+    usage: UsageStatistics
+
+    def __str__(self):
+        return self.model_dump_json(indent=4)
+
+
+class FunctionCallDelta(BaseModel):
+    # arguments: Optional[str] = None
+    name: Optional[str] = None
+    arguments: str
+    # name: str
+
+
+class ToolCallDelta(BaseModel):
+    index: int
+    id: Optional[str] = None
+    # "Currently, only function is supported"
+    type: Literal["function"] = "function"
+    # function: ToolCallFunction
+    function: Optional[FunctionCallDelta] = None
+
+
+class MessageDelta(BaseModel):
+    """Partial delta stream of a Message
+
+    Example ChunkResponse:
+    {
+        'id': 'chatcmpl-9EOCkKdicNo1tiL1956kPvCnL2lLS',
+        'object': 'chat.completion.chunk',
+        'created': 1713216662,
+        'model': 'gpt-4-0613',
+        'system_fingerprint': None,
+        'choices': [{
+            'index': 0,
+            'delta': {'content': 'User'},
+            'logprobs': None,
+            'finish_reason': None
+        }]
+    }
+    """
+
+    content: Optional[str] = None
+    tool_calls: Optional[List[ToolCallDelta]] = None
+    # role: Optional[str] = None
+    function_call: Optional[FunctionCallDelta] = None  # Deprecated
+
+
+class ChunkChoice(BaseModel):
+    finish_reason: Optional[str] = None  # NOTE: when streaming will be null
+    index: int
+    delta: MessageDelta
+    logprobs: Optional[Dict[str, Union[List[MessageContentLogProb], None]]] = None
+
+
+class ChatCompletionChunkResponse(BaseModel):
+    """https://platform.openai.com/docs/api-reference/chat/streaming"""
+
+    id: str
+    choices: List[ChunkChoice]
+    created: datetime.datetime
+    model: str
+    # system_fingerprint: str  # docs say this is mandatory, but in reality API returns None
+    system_fingerprint: Optional[str] = None
+    # object: str = Field(default="chat.completion")
+    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
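A minimal usage sketch for the models in this hunk, assuming it corresponds to mirix/schemas/openai/chat_completion_response.py from the file listing above (the +144 entry; adjust the import path if it differs): validate a raw OpenAI-style payload, then combine token counts with UsageStatistics.__add__.

    from mirix.schemas.openai.chat_completion_response import (  # assumed module path
        ChatCompletionResponse,
        UsageStatistics,
    )

    payload = {
        "id": "chatcmpl-123",
        "created": 1713216662,  # pydantic coerces the unix timestamp into datetime
        "model": "gpt-4-0613",
        "choices": [
            {
                "finish_reason": "stop",
                "index": 0,
                "message": {"role": "assistant", "content": "Hello!"},
            }
        ],
        "usage": {"completion_tokens": 2, "prompt_tokens": 9, "total_tokens": 11},
    }

    response = ChatCompletionResponse(**payload)
    running_total = response.usage + UsageStatistics(prompt_tokens=5, total_tokens=5)
    print(running_total.total_tokens)  # 16; __add__ sums prompt/completion/total counts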
@@ -0,0 +1,127 @@
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, Field
+
+
+class SystemMessage(BaseModel):
+    content: str
+    role: str = "system"
+    name: Optional[str] = None
+
+
+class UserMessage(BaseModel):
+    content: Union[str, List[str]]
+    role: str = "user"
+    name: Optional[str] = None
+
+
+class ToolCallFunction(BaseModel):
+    name: str = Field(..., description="The name of the function to call")
+    arguments: str = Field(
+        ..., description="The arguments to pass to the function (JSON dump)"
+    )
+
+
+class ToolCall(BaseModel):
+    id: str = Field(..., description="The ID of the tool call")
+    type: str = "function"
+    function: ToolCallFunction = Field(
+        ..., description="The arguments and name for the function"
+    )
+
+
+class AssistantMessage(BaseModel):
+    content: Optional[str] = None
+    role: str = "assistant"
+    name: Optional[str] = None
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+class ToolMessage(BaseModel):
+    content: str
+    role: str = "tool"
+    tool_call_id: str
+
+
+ChatMessage = Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]
+
+
+# TODO: this might not be necessary with the validator
+def cast_message_to_subtype(m_dict: dict) -> ChatMessage:
+    """Cast a dictionary to one of the individual message types"""
+    role = m_dict.get("role")
+    if role == "system":
+        return SystemMessage(**m_dict)
+    elif role == "user":
+        return UserMessage(**m_dict)
+    elif role == "assistant":
+        return AssistantMessage(**m_dict)
+    elif role == "tool":
+        return ToolMessage(**m_dict)
+    else:
+        raise ValueError("Unknown message role")
+
+
+class ResponseFormat(BaseModel):
+    type: str = Field(default="text", pattern="^(text|json_object)$")
+
+
+## tool_choice ##
+class FunctionCall(BaseModel):
+    name: str
+
+
+class ToolFunctionChoice(BaseModel):
+    # The type of the tool. Currently, only function is supported
+    type: Literal["function"] = "function"
+    # type: str = Field(default="function", const=True)
+    function: FunctionCall
+
+
+ToolChoice = Union[Literal["none", "auto"], ToolFunctionChoice]
+
+
+## tools ##
+class FunctionSchema(BaseModel):
+    name: str
+    description: Optional[str] = None
+    parameters: Optional[Dict[str, Any]] = None  # JSON Schema for the parameters
+
+
+class Tool(BaseModel):
+    # The type of the tool. Currently, only function is supported
+    type: Literal["function"] = "function"
+    # type: str = Field(default="function", const=True)
+    function: FunctionSchema
+
+
+## function_call ##
+FunctionCallChoice = Union[Literal["none", "auto"], FunctionCall]
+
+
+class ChatCompletionRequest(BaseModel):
+    """https://platform.openai.com/docs/api-reference/chat/create"""
+
+    model: str
+    messages: List[ChatMessage]
+    frequency_penalty: Optional[float] = 0
+    logit_bias: Optional[Dict[str, int]] = None
+    logprobs: Optional[bool] = False
+    top_logprobs: Optional[int] = None
+    max_tokens: Optional[int] = None
+    n: Optional[int] = 1
+    presence_penalty: Optional[float] = 0
+    response_format: Optional[ResponseFormat] = None
+    seed: Optional[int] = None
+    stop: Optional[Union[str, List[str]]] = None
+    stream: Optional[bool] = False
+    temperature: Optional[float] = 1
+    top_p: Optional[float] = 1
+    user: Optional[str] = None  # unique ID of the end-user (for monitoring)
+
+    # function-calling related
+    tools: Optional[List[Tool]] = None
+    tool_choice: Optional[ToolChoice] = "none"
+    # deprecated scheme
+    functions: Optional[List[FunctionSchema]] = None
+    function_call: Optional[FunctionCallChoice] = None
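A minimal request-building sketch with the schemas in this hunk. The module path is an assumption (the +127 count in the listing matches mirix/schemas/openai/chat_completions.py), and get_weather is a made-up tool name used only for illustration.

    from mirix.schemas.openai.chat_completions import (  # assumed module path
        ChatCompletionRequest,
        FunctionSchema,
        Tool,
        cast_message_to_subtype,
    )

    messages = [
        cast_message_to_subtype({"role": "system", "content": "You are a helpful agent."}),
        cast_message_to_subtype({"role": "user", "content": "What is the weather in Paris?"}),
    ]

    request = ChatCompletionRequest(
        model="gpt-4",
        messages=messages,
        tools=[
            Tool(
                function=FunctionSchema(
                    name="get_weather",  # hypothetical tool
                    description="Look up the current weather for a city",
                    parameters={
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                )
            )
        ],
        tool_choice="auto",
    )
    print(request.model_dump_json(exclude_none=True))  # ready to POST to a chat-completions endpoint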
@@ -0,0 +1,11 @@
+from typing import List, Literal
+
+from pydantic import BaseModel
+
+
+class EmbeddingResponse(BaseModel):
+    """OpenAI embedding response model: https://platform.openai.com/docs/api-reference/embeddings/object"""
+
+    index: int  # the index of the embedding in the list of embeddings
+    embedding: List[float]
+    object: Literal["embedding"] = "embedding"
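A one-line construction sketch, assuming the module path from the listing (mirix/schemas/openai/embedding_response.py):

    from mirix.schemas.openai.embedding_response import EmbeddingResponse  # assumed path

    item = EmbeddingResponse(index=0, embedding=[0.1, -0.2, 0.3])
    assert item.object == "embedding"  # fixed literal, set by default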
@@ -0,0 +1,229 @@
+from enum import Enum
+from typing import Dict, List, Optional, Union
+
+from pydantic import BaseModel, Field
+
+
+class ImageFile(BaseModel):
+    type: str = "image_file"
+    file_id: str
+
+
+class Text(BaseModel):
+    object: str = "text"
+    text: str = Field(..., description="The text content to be processed by the agent.")
+
+
+class MessageRoleType(str, Enum):
+    user = "user"
+    system = "system"
+
+
+class OpenAIAssistant(BaseModel):
+    """Represents an OpenAI assistant (equivalent to Mirix preset)"""
+
+    id: str = Field(..., description="The unique identifier of the assistant.")
+    name: str = Field(..., description="The name of the assistant.")
+    object: str = "assistant"
+    description: Optional[str] = Field(
+        None, description="The description of the assistant."
+    )
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the assistant was created."
+    )
+    model: str = Field(..., description="The model used by the assistant.")
+    instructions: str = Field(..., description="The instructions for the assistant.")
+    tools: Optional[List[str]] = Field(
+        None, description="The tools used by the assistant."
+    )
+    file_ids: Optional[List[str]] = Field(
+        None, description="List of file IDs associated with the assistant."
+    )
+    metadata: Optional[dict] = Field(
+        None, description="Metadata associated with the assistant."
+    )
+
+
+class OpenAIMessage(BaseModel):
+    id: str = Field(..., description="The unique identifier of the message.")
+    object: str = "thread.message"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the message was created."
+    )
+    thread_id: str = Field(..., description="The unique identifier of the thread.")
+    role: str = Field(
+        ..., description="Role of the message sender (either 'user' or 'system')"
+    )
+    content: List[Union[Text, ImageFile]] = Field(
+        None, description="The message content to be processed by the agent."
+    )
+    assistant_id: str = Field(
+        ..., description="The unique identifier of the assistant."
+    )
+    run_id: Optional[str] = Field(None, description="The unique identifier of the run.")
+    file_ids: Optional[List[str]] = Field(
+        None, description="List of file IDs associated with the message."
+    )
+    metadata: Optional[Dict] = Field(
+        None, description="Metadata associated with the message."
+    )
+
+
+class OpenAIThread(BaseModel):
+    """Represents an OpenAI thread (equivalent to Mirix agent)"""
+
+    id: str = Field(..., description="The unique identifier of the thread.")
+    object: str = "thread"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the thread was created."
+    )
+    metadata: dict = Field(None, description="Metadata associated with the thread.")
+
+
+class AssistantFile(BaseModel):
+    id: str = Field(..., description="The unique identifier of the file.")
+    object: str = "assistant.file"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the file was created."
+    )
+    assistant_id: str = Field(
+        ..., description="The unique identifier of the assistant."
+    )
+
+
+class MessageFile(BaseModel):
+    id: str = Field(..., description="The unique identifier of the file.")
+    object: str = "thread.message.file"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the file was created."
+    )
+    message_id: str = Field(..., description="The unique identifier of the message.")
+
+
+class Function(BaseModel):
+    name: str = Field(..., description="The name of the function.")
+    arguments: str = Field(..., description="The arguments of the function.")
+
+
+class ToolCall(BaseModel):
+    id: str = Field(..., description="The unique identifier of the tool call.")
+    type: str = "function"
+    function: Function = Field(..., description="The function call.")
+
+
+class ToolCallOutput(BaseModel):
+    tool_call_id: str = Field(
+        ..., description="The unique identifier of the tool call."
+    )
+    output: str = Field(..., description="The output of the tool call.")
+
+
+class RequiredAction(BaseModel):
+    type: str = "submit_tool_outputs"
+    submit_tool_outputs: List[ToolCall]
+
+
+class OpenAIError(BaseModel):
+    code: str = Field(..., description="The error code.")
+    message: str = Field(..., description="The error message.")
+
+
+class OpenAIUsage(BaseModel):
+    completion_tokens: int = Field(
+        ..., description="The number of tokens used for the run."
+    )
+    prompt_tokens: int = Field(
+        ..., description="The number of tokens used for the prompt."
+    )
+    total_tokens: int = Field(
+        ..., description="The total number of tokens used for the run."
+    )
+
+
+class OpenAIMessageCreationStep(BaseModel):
+    type: str = "message_creation"
+    message_id: str = Field(..., description="The unique identifier of the message.")
+
+
+class OpenAIToolCallsStep(BaseModel):
+    type: str = "tool_calls"
+    tool_calls: List[ToolCall] = Field(..., description="The tool calls.")
+
+
+class OpenAIRun(BaseModel):
+    id: str = Field(..., description="The unique identifier of the run.")
+    object: str = "thread.run"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the run was created."
+    )
+    thread_id: str = Field(..., description="The unique identifier of the thread.")
+    assistant_id: str = Field(
+        ..., description="The unique identifier of the assistant."
+    )
+    status: str = Field(..., description="The status of the run.")
+    required_action: Optional[RequiredAction] = Field(
+        None, description="The required action of the run."
+    )
+    last_error: Optional[OpenAIError] = Field(
+        None, description="The last error of the run."
+    )
+    expires_at: int = Field(
+        ..., description="The unix timestamp of when the run expires."
+    )
+    started_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run started."
+    )
+    cancelled_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run was cancelled."
+    )
+    failed_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run failed."
+    )
+    completed_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run completed."
+    )
+    model: str = Field(..., description="The model used by the run.")
+    instructions: str = Field(..., description="The instructions for the run.")
+    tools: Optional[List[ToolCall]] = Field(
+        None, description="The tools used by the run."
+    )  # TODO: also add code interpreter / retrieval
+    file_ids: Optional[List[str]] = Field(
+        None, description="List of file IDs associated with the run."
+    )
+    metadata: Optional[dict] = Field(
+        None, description="Metadata associated with the run."
+    )
+    usage: Optional[OpenAIUsage] = Field(None, description="The usage of the run.")
+
+
+class OpenAIRunStep(BaseModel):
+    id: str = Field(..., description="The unique identifier of the run step.")
+    object: str = "thread.run.step"
+    created_at: int = Field(
+        ..., description="The unix timestamp of when the run step was created."
+    )
+    assistant_id: str = Field(
+        ..., description="The unique identifier of the assistant."
+    )
+    thread_id: str = Field(..., description="The unique identifier of the thread.")
+    run_id: str = Field(..., description="The unique identifier of the run.")
+    type: str = Field(
+        ..., description="The type of the run step."
+    )  # message_creation, tool_calls
+    status: str = Field(..., description="The status of the run step.")
+    step_defaults: Union[OpenAIToolCallsStep, OpenAIMessageCreationStep] = Field(
+        ..., description="The step defaults."
+    )
+    last_error: Optional[OpenAIError] = Field(
+        None, description="The last error of the run step."
+    )
+    expired_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run step expired."
+    )
+    failed_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run failed."
+    )
+    completed_at: Optional[int] = Field(
+        None, description="The unix timestamp of when the run completed."
+    )
+    usage: Optional[OpenAIUsage] = Field(None, description="The usage of the run.")
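A minimal sketch of composing the Assistants-style models above into a thread message, assuming the module path from the listing (mirix/schemas/openai/openai.py); all IDs are made up.

    from mirix.schemas.openai.openai import OpenAIMessage, Text  # assumed path

    msg = OpenAIMessage(
        id="msg-001",  # hypothetical IDs
        created_at=1713216662,
        thread_id="thread-001",
        role="user",
        content=[Text(text="Summarize yesterday's meeting notes.")],
        assistant_id="asst-001",
    )
    print(msg.object)  # "thread.message"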
@@ -0,0 +1,38 @@
+from datetime import datetime
+from typing import Optional
+import uuid
+
+from pydantic import Field
+
+from mirix.schemas.mirix_base import MirixBase
+from mirix.utils import create_random_username, get_utc_time
+
+
+class OrganizationBase(MirixBase):
+    __id_prefix__ = "org"
+
+
+def _generate_org_id() -> str:
+    """Generate a random organization ID."""
+    return f"org-{uuid.uuid4().hex[:8]}"
+
+
+class Organization(OrganizationBase):
+    id: str = Field(
+        default_factory=_generate_org_id,
+        description="The unique identifier of the organization.",
+    )
+    name: str = Field(
+        create_random_username(),
+        description="The name of the organization.",
+        json_schema_extra={"default": "SincereYogurt"},
+    )
+    created_at: Optional[datetime] = Field(
+        default_factory=get_utc_time,
+        description="The creation date of the organization.",
+    )
+
+
+class OrganizationCreate(OrganizationBase):
+    id: Optional[str] = Field(None, description="The unique identifier of the organization.")
+    name: Optional[str] = Field(None, description="The name of the organization.")
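A minimal sketch, assuming the module path from the listing (mirix/schemas/organization.py); id and created_at are filled by the default factories shown above, and the organization name here is hypothetical.

    from mirix.schemas.organization import Organization, OrganizationCreate  # assumed path

    org = Organization(name="acme-labs")
    print(org.id)  # e.g. "org-1a2b3c4d", produced by _generate_org_id
    create_payload = OrganizationCreate(name="acme-labs")  # all fields optional on create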
@@ -0,0 +1,151 @@
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from pydantic import Field, field_validator
+
+from mirix.constants import MAX_EMBEDDING_DIM
+from mirix.schemas.embedding_config import EmbeddingConfig
+from mirix.schemas.mirix_base import MirixBase
+from mirix.utils import get_utc_time
+
+
+class ProceduralMemoryItemBase(MirixBase):
+    """
+    Base schema for storing procedural knowledge (e.g., workflows, methods).
+    """
+
+    __id_prefix__ = "proc_item"
+    entry_type: str = Field(
+        ..., description="Category (e.g., 'workflow', 'guide', 'script')"
+    )
+    summary: str = Field(..., description="Short descriptive text about the procedure")
+    steps: List[str] = Field(
+        ..., description="Step-by-step instructions as a list of strings"
+    )
+
+
+class ProceduralMemoryItem(ProceduralMemoryItemBase):
+    """
+    Full procedural memory item schema, with database-related fields.
+    """
+
+    id: Optional[str] = Field(
+        None, description="Unique identifier for the procedural memory item"
+    )
+    agent_id: Optional[str] = Field(
+        None, description="The id of the agent this procedural memory item belongs to"
+    )
+    user_id: str = Field(
+        ..., description="The id of the user who generated the procedure"
+    )
+    created_at: datetime = Field(
+        default_factory=get_utc_time, description="Creation timestamp"
+    )
+    updated_at: Optional[datetime] = Field(None, description="Last update timestamp")
+    last_modify: Dict[str, Any] = Field(
+        default_factory=lambda: {
+            "timestamp": get_utc_time().isoformat(),
+            "operation": "created",
+        },
+        description="Last modification info including timestamp and operation type",
+    )
+    organization_id: str = Field(
+        ..., description="The unique identifier of the organization"
+    )
+    summary_embedding: Optional[List[float]] = Field(
+        None, description="The embedding of the summary"
+    )
+    steps_embedding: Optional[List[float]] = Field(
+        None, description="The embedding of the steps"
+    )
+    embedding_config: Optional[EmbeddingConfig] = Field(
+        None, description="The embedding configuration used by the event"
+    )
+
+    # NEW: Filter tags for flexible filtering and categorization
+    filter_tags: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description="Custom filter tags for filtering and categorization",
+        examples=[
+            {
+                "project_id": "proj-abc",
+                "session_id": "sess-xyz",
+                "tags": ["important", "work"],
+                "priority": "high"
+            }
+        ]
+    )
+
+    # need to validate both steps_embedding and summary_embedding to ensure they are the same size
+    # NEW: Filter tags for flexible filtering and categorization
+    filter_tags: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description="Custom filter tags for filtering and categorization",
+        examples=[
+            {
+                "project_id": "proj-abc",
+                "session_id": "sess-xyz",
+                "tags": ["important", "work"],
+                "priority": "high"
+            }
+        ]
+    )
+
+    @field_validator("summary_embedding", "steps_embedding")
+    @classmethod
+    def pad_embeddings(cls, embedding: List[float]) -> List[float]:
+        """Pad embeddings to `MAX_EMBEDDING_SIZE`. This is necessary to ensure all stored embeddings are the same size."""
+        import numpy as np
+
+        if embedding and len(embedding) != MAX_EMBEDDING_DIM:
+            np_embedding = np.array(embedding)
+            padded_embedding = np.pad(
+                np_embedding,
+                (0, MAX_EMBEDDING_DIM - np_embedding.shape[0]),
+                mode="constant",
+            )
+            return padded_embedding.tolist()
+        return embedding
+
+
+class ProceduralMemoryItemUpdate(MirixBase):
+    """Schema for updating an existing procedural memory item."""
+
+    id: str = Field(..., description="Unique ID for this procedural memory entry")
+    agent_id: Optional[str] = Field(
+        None, description="The id of the agent this procedural memory item belongs to"
+    )
+    entry_type: Optional[str] = Field(
+        None, description="Category (e.g., 'workflow', 'guide', 'script')"
+    )
+    summary: Optional[str] = Field(None, description="Short descriptive text")
+    steps: Optional[List[str]] = Field(
+        None, description="Step-by-step instructions as a list of strings"
+    )
+    organization_id: Optional[str] = Field(None, description="The organization ID")
+    updated_at: datetime = Field(
+        default_factory=get_utc_time, description="Update timestamp"
+    )
+    last_modify: Optional[Dict[str, Any]] = Field(
+        None,
+        description="Last modification info including timestamp and operation type",
+    )
+    steps_embedding: Optional[List[float]] = Field(
+        None, description="The embedding of the event"
+    )
+    summary_embedding: Optional[List[float]] = Field(
+        None, description="The embedding of the summary"
+    )
+    embedding_config: Optional[EmbeddingConfig] = Field(
+        None, description="The embedding configuration used by the event"
+    )
+
+
+    filter_tags: Optional[Dict[str, Any]] = Field(
+        None, description="Custom filter tags for filtering and categorization"
+    )
+
+class ProceduralMemoryItemResponse(ProceduralMemoryItem):
+    """Response schema for procedural memory item."""
+
+    pass
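A minimal sketch of the embedding-padding behaviour above, assuming the module path from the listing (mirix/schemas/procedural_memory.py) and that MAX_EMBEDDING_DIM is larger than the toy vector used here; IDs are hypothetical.

    from mirix.constants import MAX_EMBEDDING_DIM
    from mirix.schemas.procedural_memory import ProceduralMemoryItem  # assumed path

    item = ProceduralMemoryItem(
        entry_type="workflow",
        summary="Deploy the staging environment",
        steps=["Run the test suite", "Build the image", "Apply the manifests"],
        user_id="user-123",  # hypothetical IDs
        organization_id="org-123",
        summary_embedding=[0.12, 0.34, 0.56],  # shorter than MAX_EMBEDDING_DIM
    )
    # pad_embeddings zero-pads short vectors so all stored embeddings share one length
    assert len(item.summary_embedding) == MAX_EMBEDDING_DIM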