memorisdk 1.0.1__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of memorisdk might be problematic.

Files changed (49)
  1. {memorisdk-1.0.1/memorisdk.egg-info → memorisdk-1.0.2}/PKG-INFO +109 -26
  2. {memorisdk-1.0.1 → memorisdk-1.0.2}/README.md +108 -25
  3. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/core/memory.py +40 -5
  4. {memorisdk-1.0.1 → memorisdk-1.0.2/memorisdk.egg-info}/PKG-INFO +109 -26
  5. {memorisdk-1.0.1 → memorisdk-1.0.2}/pyproject.toml +2 -2
  6. {memorisdk-1.0.1 → memorisdk-1.0.2}/LICENSE +0 -0
  7. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/__init__.py +0 -0
  8. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/agents/__init__.py +0 -0
  9. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/agents/conscious_agent.py +0 -0
  10. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/agents/memory_agent.py +0 -0
  11. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/agents/retrieval_agent.py +0 -0
  12. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/config/__init__.py +0 -0
  13. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/config/manager.py +0 -0
  14. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/config/settings.py +0 -0
  15. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/core/__init__.py +0 -0
  16. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/core/database.py +0 -0
  17. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/__init__.py +0 -0
  18. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/connectors/__init__.py +0 -0
  19. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/connectors/mysql_connector.py +0 -0
  20. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/connectors/postgres_connector.py +0 -0
  21. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/connectors/sqlite_connector.py +0 -0
  22. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/queries/__init__.py +0 -0
  23. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/queries/base_queries.py +0 -0
  24. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/queries/chat_queries.py +0 -0
  25. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/queries/entity_queries.py +0 -0
  26. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/queries/memory_queries.py +0 -0
  27. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/templates/__init__.py +0 -0
  28. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/templates/basic_template.py +0 -0
  29. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/database/templates/schemas/__init__.py +0 -0
  30. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/integrations/__init__.py +0 -0
  31. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/integrations/anthropic_integration.py +0 -0
  32. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/integrations/litellm_integration.py +0 -0
  33. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/integrations/openai_integration.py +0 -0
  34. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/scripts/llm_text.py +0 -0
  35. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/tools/__init__.py +0 -0
  36. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/tools/memory_tool.py +0 -0
  37. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/__init__.py +0 -0
  38. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/exceptions.py +0 -0
  39. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/helpers.py +0 -0
  40. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/logging.py +0 -0
  41. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/pydantic_models.py +0 -0
  42. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/schemas.py +0 -0
  43. {memorisdk-1.0.1 → memorisdk-1.0.2}/memori/utils/validators.py +0 -0
  44. {memorisdk-1.0.1 → memorisdk-1.0.2}/memorisdk.egg-info/SOURCES.txt +0 -0
  45. {memorisdk-1.0.1 → memorisdk-1.0.2}/memorisdk.egg-info/dependency_links.txt +0 -0
  46. {memorisdk-1.0.1 → memorisdk-1.0.2}/memorisdk.egg-info/entry_points.txt +0 -0
  47. {memorisdk-1.0.1 → memorisdk-1.0.2}/memorisdk.egg-info/requires.txt +0 -0
  48. {memorisdk-1.0.1 → memorisdk-1.0.2}/memorisdk.egg-info/top_level.txt +0 -0
  49. {memorisdk-1.0.1 → memorisdk-1.0.2}/setup.cfg +0 -0
{memorisdk-1.0.1/memorisdk.egg-info → memorisdk-1.0.2}/PKG-INFO
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: memorisdk
-Version: 1.0.1
+Version: 1.0.2
 Summary: The Open-Source Memory Layer for AI Agents & Multi-Agent Systems
-Author-email: Memori Team <contact@memori.dev>
+Author-email: GibsonAI Team <noc@gibsonai.com>
 License: Apache-2.0
 Project-URL: Homepage, https://github.com/GibsonAI/memori
 Project-URL: Documentation, https://gibsonai.github.io/memori
@@ -70,15 +70,38 @@ Requires-Dist: litellm>=1.0.0; extra == "all"
 Requires-Dist: anthropic>=0.3.0; extra == "all"
 Dynamic: license-file

-# Memori
-
-**The Open-Source Memory Layer for AI Agents & Multi-Agent Systems v1.2**
-
-*Give your AI agents structured, persistent memory with intelligent context injection - no more repeating yourself!*
-
-[![PyPI version](https://badge.fury.io/py/memori.svg)](https://badge.fury.io/py/memori)
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+[![GibsonAI](https://github.com/user-attachments/assets/878e341b-5a93-4489-a398-abeca91b6b11)](https://gibsonai.com/)
+
+# memori
+
+<p align="center">
+  <strong>Open-Source Memory Engine for LLMs, AI Agents & Multi-Agent Systems</strong>
+</p>
+
+<p align="center">
+  <i>Make LLMs context-aware with human-like memory, dual-mode retrieval, and automatic context injection.</i>
+</p>
+
+<p align="center">
+  <a href="https://gibsonai.github.io/memori/">Learn more</a>
+  ·
+  <a href="https://www.gibsonai.com/discord">Join Discord</a>
+</p>
+
+<p align="center">
+  <a href="https://badge.fury.io/py/memorisdk">
+    <img src="https://badge.fury.io/py/memori.svg" alt="PyPI version">
+  </a>
+  <a href="https://pepy.tech/projects/memorisdk">
+    <img src="https://static.pepy.tech/badge/memorisdk" alt="Downloads">
+  </a>
+  <a href="https://opensource.org/licenses/MIT">
+    <img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT">
+  </a>
+  <a href="https://www.python.org/downloads/">
+    <img src="https://img.shields.io/badge/python-3.8+-blue.svg" alt="Python 3.8+">
+  </a>
+</p>

 ---

@@ -92,32 +115,69 @@ Dynamic: license-file

 ## ⚡ Quick Start

+Install Memori:
+
 ```bash
 pip install memorisdk
 ```

+### Example with LiteLLM
+
+1. Install LiteLLM:
+
+```bash
+pip install litellm
+```
+
+2. Set OpenAI API Key:
+
+```bash
+export OPENAI_API_KEY="sk-your-openai-key-here"
+```
+
+3. Run this Python script:
+
 ```python
 from memori import Memori
+from litellm import completion

-# Create your workspace memory with conscious mode
-office_work = Memori(
-    database_connect="sqlite:///office_memory.db",
-    conscious_ingest=True,  # Short-term working memory (one-shot context)
-    openai_api_key="your-key"
-)
+# Initialize memory
+memori = Memori(conscious_ingest=True)
+memori.enable()

-office_work.enable()  # Start recording conversations
+print("=== First Conversation - Establishing Context ===")
+response1 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "I'm working on a Python FastAPI project"
+    }]
+)

-# Use ANY LLM library - context automatically injected!
-from litellm import completion
+print("Assistant:", response1.choices[0].message.content)
+print("\n" + "="*50)
+print("=== Second Conversation - Memory Provides Context ===")

-response = completion(
-    model="gpt-4o",
-    messages=[{"role": "user", "content": "Help me with Python testing"}]
+response2 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "Help me add user authentication"
+    }]
 )
-# ✨ Short-term working memory automatically included once per session
+print("Assistant:", response2.choices[0].message.content)
+print("\n💡 Notice: Memori automatically knows about your FastAPI Python project!")
 ```

+---
+
+**🚀 Ready to explore more?**
+- [📖 Examples](#examples) - Basic usage patterns and code samples
+- [🔌 Framework Integrations](#framework-integrations) - LangChain, Agno & CrewAI examples
+- [🎮 Interactive Demos](#interactive-demos) - Live applications & tutorials
+
+---
+
 ## 🧠 How It Works

 ### 1. **Universal Recording**
@@ -365,7 +425,7 @@ memori/
 └── tools/ # Memory search tools
 ```

-## 🚀 Examples
+## Examples

 - **[Basic Usage](./examples/basic_usage.py)** - Simple memory setup with conscious ingestion
 - **[Personal Assistant](./examples/personal_assistant.py)** - AI assistant with intelligent memory
@@ -373,9 +433,32 @@ memori/
 - **[Advanced Config](./examples/advanced_config.py)** - Production configuration
 - **[Interactive Demo](./memori_example.py)** - Live conscious ingestion showcase

+## Framework Integrations
+
+Memori works seamlessly with popular AI frameworks:
+
+| Framework | Description | Example | Features |
+|-----------|-------------|---------|----------|
+| 🤖 [Agno](./examples/integrations/agno_example.py) | Memory-enhanced agent framework integration with persistent conversations | Simple chat agent with memory search | Memory tools, conversation persistence, contextual responses |
+| 👥 [CrewAI](./examples/integrations/crewai_example.py) | Multi-agent system with shared memory across agent interactions | Collaborative agents with memory | Agent coordination, shared memory, task-based workflows |
+| 🌊 [Digital Ocean AI](./examples/integrations/digital_ocean_example.py) | Memory-enhanced customer support using Digital Ocean's AI platform | Customer support assistant with conversation history | Context injection, session continuity, support analytics |
+| 🔗 [LangChain](./examples/integrations/langchain_example.py) | Enterprise-grade agent framework with advanced memory integration | AI assistant with LangChain tools and memory | Custom tools, agent executors, memory persistence, error handling |
+| 🚀 [Swarms](./examples/integrations/swarms_example.py) | Multi-agent system framework with persistent memory capabilities | Memory-enhanced Swarms agents with auto/conscious ingestion | Agent memory persistence, multi-agent coordination, contextual awareness |
+
+## Interactive Demos
+
+Explore Memori's capabilities through these interactive demonstrations:
+
+| Title | Description | Tools Used | Live Demo |
+|------------|-------------|------------|-----------|
+| 🌟 [Personal Diary Assistant](./demos/personal_diary_assistant/) | A comprehensive diary assistant with mood tracking, pattern analysis, and personalized recommendations. | Streamlit, LiteLLM, OpenAI, SQLite | [Run Demo](https://personal-diary-assistant.streamlit.app/) |
+| 🌍 [Travel Planner Agent](./demos/travel_planner/) | Intelligent travel planning with CrewAI agents, real-time web search, and memory-based personalization. Plans complete itineraries with budget analysis. | CrewAI, Streamlit, OpenAI, SQLite | |
+| 🧑‍🔬 [Researcher Agent](./demos/researcher_agent/) | Advanced AI research assistant with persistent memory, real-time web search, and comprehensive report generation. Builds upon previous research sessions. | Agno, Streamlit, OpenAI, ExaAI, SQLite | [Run Demo](https://researcher-agent-memori.streamlit.app/) |
+
 ## 🤝 Contributing

-See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- Community: [Discord](https://www.gibsonai.com/discord)

 ## 📄 License

{memorisdk-1.0.1 → memorisdk-1.0.2}/README.md
@@ -1,12 +1,35 @@
-# Memori
-
-**The Open-Source Memory Layer for AI Agents & Multi-Agent Systems v1.2**
-
-*Give your AI agents structured, persistent memory with intelligent context injection - no more repeating yourself!*
-
-[![PyPI version](https://badge.fury.io/py/memori.svg)](https://badge.fury.io/py/memori)
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+[![GibsonAI](https://github.com/user-attachments/assets/878e341b-5a93-4489-a398-abeca91b6b11)](https://gibsonai.com/)
+
+# memori
+
+<p align="center">
+  <strong>Open-Source Memory Engine for LLMs, AI Agents & Multi-Agent Systems</strong>
+</p>
+
+<p align="center">
+  <i>Make LLMs context-aware with human-like memory, dual-mode retrieval, and automatic context injection.</i>
+</p>
+
+<p align="center">
+  <a href="https://gibsonai.github.io/memori/">Learn more</a>
+  ·
+  <a href="https://www.gibsonai.com/discord">Join Discord</a>
+</p>
+
+<p align="center">
+  <a href="https://badge.fury.io/py/memorisdk">
+    <img src="https://badge.fury.io/py/memori.svg" alt="PyPI version">
+  </a>
+  <a href="https://pepy.tech/projects/memorisdk">
+    <img src="https://static.pepy.tech/badge/memorisdk" alt="Downloads">
+  </a>
+  <a href="https://opensource.org/licenses/MIT">
+    <img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT">
+  </a>
+  <a href="https://www.python.org/downloads/">
+    <img src="https://img.shields.io/badge/python-3.8+-blue.svg" alt="Python 3.8+">
+  </a>
+</p>

 ---

@@ -20,32 +43,69 @@

 ## ⚡ Quick Start

+Install Memori:
+
 ```bash
 pip install memorisdk
 ```

+### Example with LiteLLM
+
+1. Install LiteLLM:
+
+```bash
+pip install litellm
+```
+
+2. Set OpenAI API Key:
+
+```bash
+export OPENAI_API_KEY="sk-your-openai-key-here"
+```
+
+3. Run this Python script:
+
 ```python
 from memori import Memori
+from litellm import completion

-# Create your workspace memory with conscious mode
-office_work = Memori(
-    database_connect="sqlite:///office_memory.db",
-    conscious_ingest=True,  # Short-term working memory (one-shot context)
-    openai_api_key="your-key"
-)
+# Initialize memory
+memori = Memori(conscious_ingest=True)
+memori.enable()

-office_work.enable()  # Start recording conversations
+print("=== First Conversation - Establishing Context ===")
+response1 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "I'm working on a Python FastAPI project"
+    }]
+)

-# Use ANY LLM library - context automatically injected!
-from litellm import completion
+print("Assistant:", response1.choices[0].message.content)
+print("\n" + "="*50)
+print("=== Second Conversation - Memory Provides Context ===")

-response = completion(
-    model="gpt-4o",
-    messages=[{"role": "user", "content": "Help me with Python testing"}]
+response2 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "Help me add user authentication"
+    }]
 )
-# ✨ Short-term working memory automatically included once per session
+print("Assistant:", response2.choices[0].message.content)
+print("\n💡 Notice: Memori automatically knows about your FastAPI Python project!")
 ```

+---
+
+**🚀 Ready to explore more?**
+- [📖 Examples](#examples) - Basic usage patterns and code samples
+- [🔌 Framework Integrations](#framework-integrations) - LangChain, Agno & CrewAI examples
+- [🎮 Interactive Demos](#interactive-demos) - Live applications & tutorials
+
+---
+
 ## 🧠 How It Works

 ### 1. **Universal Recording**
@@ -293,7 +353,7 @@ memori/
 └── tools/ # Memory search tools
 ```

-## 🚀 Examples
+## Examples

 - **[Basic Usage](./examples/basic_usage.py)** - Simple memory setup with conscious ingestion
 - **[Personal Assistant](./examples/personal_assistant.py)** - AI assistant with intelligent memory
@@ -301,9 +361,32 @@ memori/
 - **[Advanced Config](./examples/advanced_config.py)** - Production configuration
 - **[Interactive Demo](./memori_example.py)** - Live conscious ingestion showcase

+## Framework Integrations
+
+Memori works seamlessly with popular AI frameworks:
+
+| Framework | Description | Example | Features |
+|-----------|-------------|---------|----------|
+| 🤖 [Agno](./examples/integrations/agno_example.py) | Memory-enhanced agent framework integration with persistent conversations | Simple chat agent with memory search | Memory tools, conversation persistence, contextual responses |
+| 👥 [CrewAI](./examples/integrations/crewai_example.py) | Multi-agent system with shared memory across agent interactions | Collaborative agents with memory | Agent coordination, shared memory, task-based workflows |
+| 🌊 [Digital Ocean AI](./examples/integrations/digital_ocean_example.py) | Memory-enhanced customer support using Digital Ocean's AI platform | Customer support assistant with conversation history | Context injection, session continuity, support analytics |
+| 🔗 [LangChain](./examples/integrations/langchain_example.py) | Enterprise-grade agent framework with advanced memory integration | AI assistant with LangChain tools and memory | Custom tools, agent executors, memory persistence, error handling |
+| 🚀 [Swarms](./examples/integrations/swarms_example.py) | Multi-agent system framework with persistent memory capabilities | Memory-enhanced Swarms agents with auto/conscious ingestion | Agent memory persistence, multi-agent coordination, contextual awareness |
+
+## Interactive Demos
+
+Explore Memori's capabilities through these interactive demonstrations:
+
+| Title | Description | Tools Used | Live Demo |
+|------------|-------------|------------|-----------|
+| 🌟 [Personal Diary Assistant](./demos/personal_diary_assistant/) | A comprehensive diary assistant with mood tracking, pattern analysis, and personalized recommendations. | Streamlit, LiteLLM, OpenAI, SQLite | [Run Demo](https://personal-diary-assistant.streamlit.app/) |
+| 🌍 [Travel Planner Agent](./demos/travel_planner/) | Intelligent travel planning with CrewAI agents, real-time web search, and memory-based personalization. Plans complete itineraries with budget analysis. | CrewAI, Streamlit, OpenAI, SQLite | |
+| 🧑‍🔬 [Researcher Agent](./demos/researcher_agent/) | Advanced AI research assistant with persistent memory, real-time web search, and comprehensive report generation. Builds upon previous research sessions. | Agno, Streamlit, OpenAI, ExaAI, SQLite | [Run Demo](https://researcher-agent-memori.streamlit.app/) |
+
 ## 🤝 Contributing

-See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- Community: [Discord](https://www.gibsonai.com/discord)

 ## 📄 License

@@ -311,4 +394,4 @@ MIT License - see [LICENSE](./LICENSE) for details.

 ---

-*Made for developers who want their AI agents to remember and learn*
+*Made for developers who want their AI agents to remember and learn*
{memorisdk-1.0.1 → memorisdk-1.0.2}/memori/core/memory.py
@@ -89,6 +89,7 @@ class Memori:
         self.search_engine = None
         self.conscious_agent = None
         self._background_task = None
+        self._conscious_init_pending = False

         if conscious_ingest or auto_ingest:
             try:
@@ -168,16 +169,44 @@
                 "Conscious-ingest: Starting conscious agent analysis at startup"
             )

-            # Run conscious agent analysis in background
-            if self._background_task is None or self._background_task.done():
-                self._background_task = asyncio.create_task(
-                    self._run_conscious_initialization()
+            # Check if there's a running event loop
+            try:
+                loop = asyncio.get_running_loop()
+                # If we're in an event loop, create the task
+                if self._background_task is None or self._background_task.done():
+                    self._background_task = loop.create_task(
+                        self._run_conscious_initialization()
+                    )
+                    logger.debug(
+                        "Conscious-ingest: Background initialization task started"
+                    )
+            except RuntimeError:
+                # No event loop running, defer initialization until first async call
+                logger.debug(
+                    "Conscious-ingest: No event loop available, deferring initialization"
                 )
-                logger.debug("Conscious-ingest: Background initialization task started")
+                self._conscious_init_pending = True

         except Exception as e:
             logger.error(f"Failed to initialize conscious memory: {e}")

+    def _check_deferred_initialization(self):
+        """Check and handle deferred conscious memory initialization"""
+        if self._conscious_init_pending and self.conscious_agent:
+            try:
+                loop = asyncio.get_running_loop()
+                if self._background_task is None or self._background_task.done():
+                    self._background_task = loop.create_task(
+                        self._run_conscious_initialization()
+                    )
+                    logger.debug(
+                        "Conscious-ingest: Deferred initialization task started"
+                    )
+                self._conscious_init_pending = False
+            except RuntimeError:
+                # Still no event loop, keep pending
+                pass
+
     async def _run_conscious_initialization(self):
         """Run conscious agent initialization in background"""
         try:
@@ -475,6 +504,8 @@
     def _inject_openai_context(self, kwargs):
         """Inject context for OpenAI calls"""
         try:
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
             # Extract user input from messages
             user_input = ""
             for msg in reversed(kwargs.get("messages", [])):
@@ -513,6 +544,8 @@
     def _inject_anthropic_context(self, kwargs):
         """Inject context for Anthropic calls"""
        try:
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
             # Extract user input from messages
             user_input = ""
             for msg in reversed(kwargs.get("messages", [])):
@@ -563,6 +596,8 @@
             mode: "conscious" (one-shot short-term) or "auto" (continuous retrieval)
         """
         try:
+            # Check for deferred conscious initialization
+            self._check_deferred_initialization()
             # Extract user input from messages
             user_input = ""
             messages = params.get("messages", [])
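
The memory.py change above makes startup safe when no asyncio event loop exists yet: the background task is only scheduled if `asyncio.get_running_loop()` succeeds, otherwise `_conscious_init_pending` defers scheduling until a later call that does run inside a loop (the `_inject_*_context` paths call `_check_deferred_initialization()` for exactly this reason). Below is a minimal standalone sketch of that schedule-or-defer pattern; the class and method names are illustrative only and are not part of the memorisdk API.

```python
import asyncio
import logging

logger = logging.getLogger(__name__)


class DeferredBackgroundInit:
    """Illustrative stand-in for the schedule-or-defer logic added in 1.0.2."""

    def __init__(self):
        self._task = None       # mirrors Memori._background_task
        self._pending = False   # mirrors Memori._conscious_init_pending

    def start(self):
        """Called from synchronous setup code that may run outside any event loop."""
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No loop yet: remember that work is owed and return immediately.
            self._pending = True
            logger.debug("No event loop available, deferring initialization")
            return
        self._schedule(loop)

    def check_deferred(self):
        """Called from code paths that execute once a loop exists."""
        if not self._pending:
            return
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            return  # still no loop; stay pending
        self._schedule(loop)
        self._pending = False

    def _schedule(self, loop):
        # Avoid stacking duplicate tasks: only schedule if none is alive.
        if self._task is None or self._task.done():
            self._task = loop.create_task(self._initialize())

    async def _initialize(self):
        logger.debug("Running background initialization")
```

Called from a plain script, `start()` only records the pending flag; inside `asyncio.run(...)` or any coroutine, either `start()` or a later `check_deferred()` schedules the task exactly once. This appears to be why 1.0.2 no longer risks a `RuntimeError` about a missing event loop when the object is constructed from synchronous code.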
{memorisdk-1.0.1 → memorisdk-1.0.2/memorisdk.egg-info}/PKG-INFO
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: memorisdk
-Version: 1.0.1
+Version: 1.0.2
 Summary: The Open-Source Memory Layer for AI Agents & Multi-Agent Systems
-Author-email: Memori Team <contact@memori.dev>
+Author-email: GibsonAI Team <noc@gibsonai.com>
 License: Apache-2.0
 Project-URL: Homepage, https://github.com/GibsonAI/memori
 Project-URL: Documentation, https://gibsonai.github.io/memori
@@ -70,15 +70,38 @@ Requires-Dist: litellm>=1.0.0; extra == "all"
 Requires-Dist: anthropic>=0.3.0; extra == "all"
 Dynamic: license-file

-# Memori
-
-**The Open-Source Memory Layer for AI Agents & Multi-Agent Systems v1.2**
-
-*Give your AI agents structured, persistent memory with intelligent context injection - no more repeating yourself!*
-
-[![PyPI version](https://badge.fury.io/py/memori.svg)](https://badge.fury.io/py/memori)
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-[![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+[![GibsonAI](https://github.com/user-attachments/assets/878e341b-5a93-4489-a398-abeca91b6b11)](https://gibsonai.com/)
+
+# memori
+
+<p align="center">
+  <strong>Open-Source Memory Engine for LLMs, AI Agents & Multi-Agent Systems</strong>
+</p>
+
+<p align="center">
+  <i>Make LLMs context-aware with human-like memory, dual-mode retrieval, and automatic context injection.</i>
+</p>
+
+<p align="center">
+  <a href="https://gibsonai.github.io/memori/">Learn more</a>
+  ·
+  <a href="https://www.gibsonai.com/discord">Join Discord</a>
+</p>
+
+<p align="center">
+  <a href="https://badge.fury.io/py/memorisdk">
+    <img src="https://badge.fury.io/py/memori.svg" alt="PyPI version">
+  </a>
+  <a href="https://pepy.tech/projects/memorisdk">
+    <img src="https://static.pepy.tech/badge/memorisdk" alt="Downloads">
+  </a>
+  <a href="https://opensource.org/licenses/MIT">
+    <img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT">
+  </a>
+  <a href="https://www.python.org/downloads/">
+    <img src="https://img.shields.io/badge/python-3.8+-blue.svg" alt="Python 3.8+">
+  </a>
+</p>

 ---

@@ -92,32 +115,69 @@ Dynamic: license-file

 ## ⚡ Quick Start

+Install Memori:
+
 ```bash
 pip install memorisdk
 ```

+### Example with LiteLLM
+
+1. Install LiteLLM:
+
+```bash
+pip install litellm
+```
+
+2. Set OpenAI API Key:
+
+```bash
+export OPENAI_API_KEY="sk-your-openai-key-here"
+```
+
+3. Run this Python script:
+
 ```python
 from memori import Memori
+from litellm import completion

-# Create your workspace memory with conscious mode
-office_work = Memori(
-    database_connect="sqlite:///office_memory.db",
-    conscious_ingest=True,  # Short-term working memory (one-shot context)
-    openai_api_key="your-key"
-)
+# Initialize memory
+memori = Memori(conscious_ingest=True)
+memori.enable()

-office_work.enable()  # Start recording conversations
+print("=== First Conversation - Establishing Context ===")
+response1 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "I'm working on a Python FastAPI project"
+    }]
+)

-# Use ANY LLM library - context automatically injected!
-from litellm import completion
+print("Assistant:", response1.choices[0].message.content)
+print("\n" + "="*50)
+print("=== Second Conversation - Memory Provides Context ===")

-response = completion(
-    model="gpt-4o",
-    messages=[{"role": "user", "content": "Help me with Python testing"}]
+response2 = completion(
+    model="gpt-4o-mini",
+    messages=[{
+        "role": "user",
+        "content": "Help me add user authentication"
+    }]
 )
-# ✨ Short-term working memory automatically included once per session
+print("Assistant:", response2.choices[0].message.content)
+print("\n💡 Notice: Memori automatically knows about your FastAPI Python project!")
 ```

+---
+
+**🚀 Ready to explore more?**
+- [📖 Examples](#examples) - Basic usage patterns and code samples
+- [🔌 Framework Integrations](#framework-integrations) - LangChain, Agno & CrewAI examples
+- [🎮 Interactive Demos](#interactive-demos) - Live applications & tutorials
+
+---
+
 ## 🧠 How It Works

 ### 1. **Universal Recording**
@@ -365,7 +425,7 @@ memori/
 └── tools/ # Memory search tools
 ```

-## 🚀 Examples
+## Examples

 - **[Basic Usage](./examples/basic_usage.py)** - Simple memory setup with conscious ingestion
 - **[Personal Assistant](./examples/personal_assistant.py)** - AI assistant with intelligent memory
@@ -373,9 +433,32 @@ memori/
 - **[Advanced Config](./examples/advanced_config.py)** - Production configuration
 - **[Interactive Demo](./memori_example.py)** - Live conscious ingestion showcase

+## Framework Integrations
+
+Memori works seamlessly with popular AI frameworks:
+
+| Framework | Description | Example | Features |
+|-----------|-------------|---------|----------|
+| 🤖 [Agno](./examples/integrations/agno_example.py) | Memory-enhanced agent framework integration with persistent conversations | Simple chat agent with memory search | Memory tools, conversation persistence, contextual responses |
+| 👥 [CrewAI](./examples/integrations/crewai_example.py) | Multi-agent system with shared memory across agent interactions | Collaborative agents with memory | Agent coordination, shared memory, task-based workflows |
+| 🌊 [Digital Ocean AI](./examples/integrations/digital_ocean_example.py) | Memory-enhanced customer support using Digital Ocean's AI platform | Customer support assistant with conversation history | Context injection, session continuity, support analytics |
+| 🔗 [LangChain](./examples/integrations/langchain_example.py) | Enterprise-grade agent framework with advanced memory integration | AI assistant with LangChain tools and memory | Custom tools, agent executors, memory persistence, error handling |
+| 🚀 [Swarms](./examples/integrations/swarms_example.py) | Multi-agent system framework with persistent memory capabilities | Memory-enhanced Swarms agents with auto/conscious ingestion | Agent memory persistence, multi-agent coordination, contextual awareness |
+
+## Interactive Demos
+
+Explore Memori's capabilities through these interactive demonstrations:
+
+| Title | Description | Tools Used | Live Demo |
+|------------|-------------|------------|-----------|
+| 🌟 [Personal Diary Assistant](./demos/personal_diary_assistant/) | A comprehensive diary assistant with mood tracking, pattern analysis, and personalized recommendations. | Streamlit, LiteLLM, OpenAI, SQLite | [Run Demo](https://personal-diary-assistant.streamlit.app/) |
+| 🌍 [Travel Planner Agent](./demos/travel_planner/) | Intelligent travel planning with CrewAI agents, real-time web search, and memory-based personalization. Plans complete itineraries with budget analysis. | CrewAI, Streamlit, OpenAI, SQLite | |
+| 🧑‍🔬 [Researcher Agent](./demos/researcher_agent/) | Advanced AI research assistant with persistent memory, real-time web search, and comprehensive report generation. Builds upon previous research sessions. | Agno, Streamlit, OpenAI, ExaAI, SQLite | [Run Demo](https://researcher-agent-memori.streamlit.app/) |
+
 ## 🤝 Contributing

-See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- See [CONTRIBUTING.md](./CONTRIBUTING.md) for development setup and guidelines.
+- Community: [Discord](https://www.gibsonai.com/discord)

 ## 📄 License

{memorisdk-1.0.1 → memorisdk-1.0.2}/pyproject.toml
@@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "memorisdk"
-version = "1.0.1"
+version = "1.0.2"
 description = "The Open-Source Memory Layer for AI Agents & Multi-Agent Systems"
-authors = [{name = "Memori Team", email = "contact@memori.dev"}]
+authors = [{name = "GibsonAI Team", email = "noc@gibsonai.com"}]
 license = {text = "Apache-2.0"}
 readme = "README.md"
 requires-python = ">=3.8"