jl-ecms-client 0.2.0 (jl_ecms_client-0.2.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. jl_ecms_client-0.2.0.dist-info/METADATA +295 -0
  2. jl_ecms_client-0.2.0.dist-info/RECORD +51 -0
  3. jl_ecms_client-0.2.0.dist-info/WHEEL +5 -0
  4. jl_ecms_client-0.2.0.dist-info/licenses/LICENSE +190 -0
  5. jl_ecms_client-0.2.0.dist-info/top_level.txt +1 -0
  6. mirix/client/__init__.py +72 -0
  7. mirix/client/client.py +2594 -0
  8. mirix/client/remote_client.py +1136 -0
  9. mirix/helpers/__init__.py +1 -0
  10. mirix/helpers/converters.py +429 -0
  11. mirix/helpers/datetime_helpers.py +90 -0
  12. mirix/helpers/json_helpers.py +47 -0
  13. mirix/helpers/message_helpers.py +74 -0
  14. mirix/helpers/tool_rule_solver.py +166 -0
  15. mirix/schemas/__init__.py +1 -0
  16. mirix/schemas/agent.py +400 -0
  17. mirix/schemas/block.py +188 -0
  18. mirix/schemas/cloud_file_mapping.py +29 -0
  19. mirix/schemas/embedding_config.py +114 -0
  20. mirix/schemas/enums.py +69 -0
  21. mirix/schemas/environment_variables.py +82 -0
  22. mirix/schemas/episodic_memory.py +170 -0
  23. mirix/schemas/file.py +57 -0
  24. mirix/schemas/health.py +10 -0
  25. mirix/schemas/knowledge_vault.py +181 -0
  26. mirix/schemas/llm_config.py +187 -0
  27. mirix/schemas/memory.py +318 -0
  28. mirix/schemas/message.py +1315 -0
  29. mirix/schemas/mirix_base.py +107 -0
  30. mirix/schemas/mirix_message.py +411 -0
  31. mirix/schemas/mirix_message_content.py +230 -0
  32. mirix/schemas/mirix_request.py +39 -0
  33. mirix/schemas/mirix_response.py +183 -0
  34. mirix/schemas/openai/__init__.py +1 -0
  35. mirix/schemas/openai/chat_completion_request.py +122 -0
  36. mirix/schemas/openai/chat_completion_response.py +144 -0
  37. mirix/schemas/openai/chat_completions.py +127 -0
  38. mirix/schemas/openai/embedding_response.py +11 -0
  39. mirix/schemas/openai/openai.py +229 -0
  40. mirix/schemas/organization.py +38 -0
  41. mirix/schemas/procedural_memory.py +151 -0
  42. mirix/schemas/providers.py +816 -0
  43. mirix/schemas/resource_memory.py +134 -0
  44. mirix/schemas/sandbox_config.py +132 -0
  45. mirix/schemas/semantic_memory.py +162 -0
  46. mirix/schemas/source.py +96 -0
  47. mirix/schemas/step.py +53 -0
  48. mirix/schemas/tool.py +241 -0
  49. mirix/schemas/tool_rule.py +209 -0
  50. mirix/schemas/usage.py +31 -0
  51. mirix/schemas/user.py +67 -0
mirix/schemas/file.py ADDED
@@ -0,0 +1,57 @@
+ from datetime import datetime
+ from typing import Optional
+
+ from pydantic import Field
+
+ from mirix.helpers.datetime_helpers import get_utc_time
+ from mirix.schemas.mirix_base import MirixBase
+
+
+ class FileMetadataBase(MirixBase):
+     """Base class for FileMetadata schemas"""
+
+     __id_prefix__ = "file"
+
+
+ class FileMetadata(FileMetadataBase):
+     """Representation of a single FileMetadata"""
+
+     id: str = FileMetadataBase.generate_id_field()
+     organization_id: Optional[str] = Field(
+         None,
+         description="The unique identifier of the organization associated with the document.",
+     )
+     source_id: Optional[str] = Field(
+         None,
+         description="The unique identifier of the source associated with the file.",
+     )
+     file_name: Optional[str] = Field(None, description="The name of the file.")
+     file_path: Optional[str] = Field(
+         None, description="The path to the file on the local filesystem."
+     )
+     source_url: Optional[str] = Field(
+         None, description="The URL of the remote file (for files not stored locally)."
+     )
+     google_cloud_url: Optional[str] = Field(
+         None,
+         description="The Google Cloud URI for files stored in Google Cloud (e.g., Google Gemini files).",
+     )
+     file_type: Optional[str] = Field(
+         None, description="The type of the file (MIME type)."
+     )
+     file_size: Optional[int] = Field(None, description="The size of the file in bytes.")
+     file_creation_date: Optional[str] = Field(
+         None, description="The creation date of the file."
+     )
+     file_last_modified_date: Optional[str] = Field(
+         None, description="The last modified date of the file."
+     )
+
+     # orm metadata, optional fields
+     created_at: Optional[datetime] = Field(
+         default_factory=get_utc_time, description="The creation date of the file."
+     )
+     updated_at: Optional[datetime] = Field(
+         default_factory=get_utc_time, description="The update date of the file."
+     )
+     is_deleted: bool = Field(False, description="Whether this file is deleted or not.")
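For orientation, a minimal sketch of constructing a FileMetadata record from the schema above. The field values are hypothetical, and it assumes MirixBase is a Pydantic v2 model whose generate_id_field() supplies a default id (consistent with the usage shown here, but not visible in this diff):

from mirix.schemas.file import FileMetadata

# Every field except the generated id is optional, so a sparse record is valid.
meta = FileMetadata(
    file_name="report.pdf",               # hypothetical values
    file_type="application/pdf",
    file_size=102_400,
    source_url="https://example.com/report.pdf",
)
print(meta.id)                             # default id from generate_id_field(); "file" prefix assumed
print(meta.model_dump(exclude_none=True))  # model_dump assumes Pydantic v2
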
mirix/schemas/health.py ADDED
@@ -0,0 +1,10 @@
+ from pydantic import BaseModel
+
+
+ class Health(BaseModel):
+     """
+     Health check response body
+     """
+
+     version: str
+     status: str
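Health is a plain two-field response model; a minimal sketch of parsing a health-check payload with it (the payload values are illustrative):

from mirix.schemas.health import Health

payload = {"version": "0.2.0", "status": "ok"}  # e.g. a decoded health-check response
health = Health(**payload)
print(health.version, health.status)
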
mirix/schemas/knowledge_vault.py ADDED
@@ -0,0 +1,181 @@
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional
+
+ from pydantic import Field, field_validator
+
+ from mirix.constants import MAX_EMBEDDING_DIM
+ from mirix.schemas.embedding_config import EmbeddingConfig
+ from mirix.schemas.mirix_base import MirixBase
+ from mirix.utils import get_utc_time
+
+
+ class KnowledgeVaultItemBase(MirixBase):
+     """
+     Base schema for knowledge vault items containing common fields.
+     """
+
+     __id_prefix__ = "kv_item"
+     entry_type: str = Field(
+         ..., description="Category (e.g., 'credential', 'bookmark', 'api_key')"
+     )
+     source: str = Field(..., description="Information on who/where it was provided")
+     sensitivity: str = Field(
+         ..., description="Data sensitivity level ('low', 'medium', 'high')"
+     )
+     secret_value: str = Field(..., description="The actual credential or data value")
+     caption: str = Field(
+         ...,
+         description="Description of the knowledge vault item (e.g. 'API key for OpenAI Service')",
+     )
+
+
+ class KnowledgeVaultItem(KnowledgeVaultItemBase):
+     """
+     Representation of a knowledge vault item for storing credentials, bookmarks, etc.
+
+     Additional Parameters:
+         id (str): Unique ID for this knowledge vault entry.
+         created_at (datetime): Creation timestamp.
+         updated_at (Optional[datetime]): Last update timestamp.
+     """
+
+     id: Optional[str] = Field(
+         None, description="Unique identifier for the knowledge vault item"
+     )
+     agent_id: Optional[str] = Field(
+         None, description="The id of the agent this knowledge vault item belongs to"
+     )
+     user_id: str = Field(
+         ..., description="The id of the user who generated the knowledge vault item"
+     )
+     created_at: datetime = Field(
+         default_factory=get_utc_time,
+         description="The creation date of the knowledge vault item",
+     )
+     updated_at: Optional[datetime] = Field(
+         None, description="The last update date of the knowledge vault item"
+     )
+     last_modify: Dict[str, Any] = Field(
+         default_factory=lambda: {
+             "timestamp": get_utc_time().isoformat(),
+             "operation": "created",
+         },
+         description="Last modification info including timestamp and operation type",
+     )
+     organization_id: str = Field(
+         ..., description="The unique identifier of the organization"
+     )
+     caption_embedding: Optional[List[float]] = Field(
+         None, description="The embedding of the summary"
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None, description="The embedding configuration used by the event"
+     )
+
+     # NEW: Filter tags for flexible filtering and categorization
+     filter_tags: Optional[Dict[str, Any]] = Field(
+         default=None,
+         description="Custom filter tags for filtering and categorization",
+         examples=[
+             {
+                 "project_id": "proj-abc",
+                 "session_id": "sess-xyz",
+                 "tags": ["important", "work"],
+                 "priority": "high"
+             }
+         ]
+     )
+
+     # need to pad caption_embedding to ensure all stored embeddings are the same size
+     @field_validator("caption_embedding")
+     @classmethod
+     def pad_embeddings(cls, embedding: List[float]) -> List[float]:
+         """Pad embeddings to `MAX_EMBEDDING_SIZE`. This is necessary to ensure all stored embeddings are the same size."""
+         import numpy as np
+
+         if embedding and len(embedding) != MAX_EMBEDDING_DIM:
+             np_embedding = np.array(embedding)
+             padded_embedding = np.pad(
+                 np_embedding,
+                 (0, MAX_EMBEDDING_DIM - np_embedding.shape[0]),
+                 mode="constant",
+             )
+             return padded_embedding.tolist()
+         return embedding
+
+
+ class KnowledgeVaultItemCreate(KnowledgeVaultItemBase):
+     """
+     Schema for creating a new knowledge vault item.
+
+     Inherits all required fields from KnowledgeVaultItemBase.
+     """
+
+     pass
+
+
+ class KnowledgeVaultItemUpdate(MirixBase):
+     """
+     Schema for updating an existing knowledge vault item.
+
+     All fields (except id) are optional so that only provided fields are updated.
+     """
+
+     id: str = Field(..., description="Unique ID for this knowledge vault entry")
+     agent_id: Optional[str] = Field(
+         None, description="The id of the agent this knowledge vault item belongs to"
+     )
+     entry_type: Optional[str] = Field(
+         None, description="Category (e.g., 'credential', 'bookmark', 'api_key')"
+     )
+     source: Optional[str] = Field(
+         None, description="Information on who/where it was provided"
+     )
+     sensitivity: Optional[str] = Field(
+         None, description="Data sensitivity level ('low', 'medium', 'high')"
+     )
+     secret_value: Optional[str] = Field(
+         None, description="The actual credential or data value"
+     )
+     organization_id: Optional[str] = Field(
+         None, description="The unique identifier of the organization"
+     )
+     updated_at: datetime = Field(
+         default_factory=get_utc_time, description="The update date"
+     )
+     last_modify: Optional[Dict[str, Any]] = Field(
+         None,
+         description="Last modification info including timestamp and operation type",
+     )
+     caption_embedding: Optional[List[float]] = Field(
+         None, description="The embedding of the summary"
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None, description="The embedding configuration used by the event"
+     )
+
+     filter_tags: Optional[Dict[str, Any]] = Field(
+         None, description="Custom filter tags for filtering and categorization"
+     )
+
+
+ class KnowledgeVaultItemResponse(KnowledgeVaultItem):
+     """
+     Response schema for knowledge vault items with additional fields that might be needed by the API.
+     """
+
+     pass
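As a rough usage sketch (values hypothetical, imports as declared in this file): KnowledgeVaultItemCreate only needs the five base fields, while KnowledgeVaultItemUpdate requires the id and treats everything else as optional:

from mirix.schemas.knowledge_vault import (
    KnowledgeVaultItemCreate,
    KnowledgeVaultItemUpdate,
)

# Create payload: entry_type, source, sensitivity, secret_value, caption are required.
new_item = KnowledgeVaultItemCreate(
    entry_type="api_key",
    source="user-provided",
    sensitivity="high",
    secret_value="sk-example-not-a-real-key",   # placeholder, not a real credential
    caption="API key for OpenAI Service",
)

# Update payload: only the id is required; omitted fields are left unchanged.
patch = KnowledgeVaultItemUpdate(
    id="kv_item-1234",                          # hypothetical id
    sensitivity="medium",
)

Note that caption_embedding on the full KnowledgeVaultItem is padded to MAX_EMBEDDING_DIM by the pad_embeddings validator, so stored embeddings share one length.
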
mirix/schemas/llm_config.py ADDED
@@ -0,0 +1,187 @@
+ from typing import Literal, Optional
+
+ from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+ from mirix.log import get_logger
+
+ logger = get_logger(__name__)
+
+ class LLMConfig(BaseModel):
+     """
+     Configuration for a Language Model (LLM) model. This object specifies all the information necessary to access an LLM model for use with Letta, except for secret keys.
+
+     Attributes:
+         model (str): The name of the LLM model.
+         model_endpoint_type (str): The endpoint type for the model.
+         model_endpoint (str): The endpoint for the model.
+         model_wrapper (str): The wrapper for the model. This is used to wrap additional text around the input/output of the model. This is useful for text-to-text completions, such as the Completions API in OpenAI.
+         context_window (int): The context window size for the model.
+         temperature (float): The temperature to use when generating text with the model. A higher temperature will result in more random text.
+         max_tokens (int): The maximum number of tokens to generate.
+         api_key (str, optional): Custom API key for this specific model configuration.
+         api_version (str, optional): The API version for Azure OpenAI (e.g., '2024-10-01-preview').
+         azure_endpoint (str, optional): The Azure endpoint for the model (e.g., 'https://your-resource.openai.azure.com/').
+         azure_deployment (str, optional): The Azure deployment name for the model.
+     """
+
+     # TODO: 🤮 don't default to a vendor! bug city!
+     model: str = Field(..., description="LLM model name. ")
+     model_endpoint_type: Literal[
+         "openai",
+         "anthropic",
+         "cohere",
+         "google_ai",
+         "google_vertex",
+         "azure_openai",
+         "groq",
+         "ollama",
+         "webui",
+         "webui-legacy",
+         "lmstudio",
+         "lmstudio-legacy",
+         "lmstudio-chatcompletions",
+         "llamacpp",
+         "koboldcpp",
+         "vllm",
+         "hugging-face",
+         "mistral",
+         "together",  # completions endpoint
+         "bedrock",
+         "deepseek",
+         "xai",
+     ] = Field(..., description="The endpoint type for the model.")
+     model_endpoint: Optional[str] = Field(
+         None, description="The endpoint for the model."
+     )
+     model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
+     context_window: int = Field(
+         ..., description="The context window size for the model."
+     )
+     handle: Optional[str] = Field(
+         None,
+         description="The handle for this config, in the format provider/model-name.",
+     )
+     temperature: float = Field(
+         0.7,
+         description="The temperature to use when generating text with the model. A higher temperature will result in more random text.",
+     )
+     max_tokens: Optional[int] = Field(
+         4096,
+         description="The maximum number of tokens to generate. If not set, the model will use its default value.",
+     )
+     enable_reasoner: bool = Field(
+         False,
+         description="Whether or not the model should use extended thinking if it is a 'reasoning' style model",
+     )
+     reasoning_effort: Optional[Literal["low", "medium", "high"]] = Field(
+         None,
+         description="The reasoning effort to use when generating text reasoning models",
+     )
+     max_reasoning_tokens: int = Field(
+         0,
+         description="Configurable thinking budget for extended thinking, only used if enable_reasoner is True. Minimum value is 1024.",
+     )
+     api_key: Optional[str] = Field(
+         None,
+         description="Custom API key for this specific model configuration (used for custom models)",
+     )
+
+     # Azure-specific fields (Azure OpenAI only)
+     api_version: Optional[str] = Field(
+         None,
+         description="The API version for Azure OpenAI (e.g., '2024-10-01-preview')",
+     )
+     azure_endpoint: Optional[str] = Field(
+         None,
+         description="The Azure endpoint for the model (e.g., 'https://your-resource.openai.azure.com/')",
+     )
+     azure_deployment: Optional[str] = Field(
+         None, description="The Azure deployment name for the model"
+     )
+
+     # FIXME hack to silence pydantic protected namespace warning
+     model_config = ConfigDict(protected_namespaces=())
+
+     @model_validator(mode="before")
+     @classmethod
+     def set_default_enable_reasoner(cls, values):
+         if any(
+             openai_reasoner_model in values.get("model", "")
+             for openai_reasoner_model in ["o3-mini", "o1"]
+         ):
+             values["enable_reasoner"] = True
+         return values
+
+     @model_validator(mode="after")
+     def issue_warning_for_reasoning_constraints(self) -> "LLMConfig":
+         if self.enable_reasoner:
+             if self.max_reasoning_tokens is None:
+                 logger.warning(
+                     "max_reasoning_tokens must be set when enable_reasoner is True"
+                 )
+             if (
+                 self.max_tokens is not None
+                 and self.max_reasoning_tokens >= self.max_tokens
+             ):
+                 logger.warning(
+                     "max_tokens must be greater than max_reasoning_tokens (thinking budget)"
+                 )
+         elif self.max_reasoning_tokens and not self.enable_reasoner:
+             logger.warning(
+                 "model will not use reasoning unless enable_reasoner is set to True"
+             )
+
+         return self
+
+     @classmethod
+     def default_config(cls, model_name: str):
+         """
+         Convenience function to generate a default `LLMConfig` from a model name. Only some models are supported in this function.
+
+         Args:
+             model_name (str): The name of the model (gpt-4, gpt-4o-mini, letta).
+         """
+         if model_name == "gpt-4":
+             return cls(
+                 model="gpt-4",
+                 model_endpoint_type="openai",
+                 model_endpoint="https://api.openai.com/v1",
+                 model_wrapper=None,
+                 context_window=8192,
+             )
+         elif model_name == "gpt-4o-mini":
+             return cls(
+                 model="gpt-4o-mini",
+                 model_endpoint_type="openai",
+                 model_endpoint="https://api.openai.com/v1",
+                 model_wrapper=None,
+                 context_window=128000,
+             )
+         elif model_name == "gpt-4o":
+             return cls(
+                 model="gpt-4o",
+                 model_endpoint_type="openai",
+                 model_endpoint="https://api.openai.com/v1",
+                 model_wrapper=None,
+                 context_window=128000,
+             )
+         elif model_name == "letta":
+             return cls(
+                 model="memgpt-openai",
+                 model_endpoint_type="openai",
+                 model_endpoint="https://inference.memgpt.ai",
+                 context_window=8192,
+             )
+         else:
+             raise ValueError(f"Model {model_name} not supported.")
+
+     def pretty_print(self) -> str:
+         return (
+             f"{self.model}"
+             + (
+                 f" [type={self.model_endpoint_type}]"
+                 if self.model_endpoint_type
+                 else ""
+             )
+             + (f" [ip={self.model_endpoint}]" if self.model_endpoint else "")
+         )
+ )