semantio 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
semantio/agent.py CHANGED
@@ -16,6 +16,7 @@ from .tools.base_tool import BaseTool
  from pathlib import Path
  import importlib
  import os
+ from .memory import Memory
 
  # Configure logging
  logging.basicConfig(level=logging.INFO)
@@ -48,6 +49,13 @@ class Agent(BaseModel):
      semantic_model: Optional[Any] = Field(None, description="SentenceTransformer model for semantic matching.")
      team: Optional[List['Agent']] = Field(None, description="List of assistants in the team.")
      auto_tool: bool = Field(False, description="Whether to automatically detect and call tools.")
+     memory: Memory = Field(default_factory=Memory)
+     memory_config: Dict = Field(
+         default_factory=lambda: {
+             "max_context_length": 4000,
+             "summarization_threshold": 3000
+         }
+     )
 
      # Allow arbitrary types
      model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -56,6 +64,11 @@ class Agent(BaseModel):
          super().__init__(**kwargs)
          # Initialize the model and tools here if needed
          self._initialize_model()
+         # Initialize memory with config
+         self.memory = Memory(
+             max_context_length=self.memory_config.get("max_context_length", 4000),
+             summarization_threshold=self.memory_config.get("summarization_threshold", 3000)
+         )
          # Initialize tools as an empty list if not provided
          if self.tools is None:
              self.tools = []
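For illustration, a minimal sketch of how the new memory_config field feeds the Memory instance rebuilt in __init__. The name and llm arguments are hypothetical placeholders; this diff does not show the rest of Agent's fields.

    # Hypothetical construction; only memory/memory_config appear in this diff.
    agent = Agent(
        name="support-bot",  # placeholder field, not confirmed by this diff
        llm="openai",        # placeholder field, not confirmed by this diff
        memory_config={
            "max_context_length": 8000,       # overrides the 4000 default
            "summarization_threshold": 6000,  # overrides the 3000 default
        },
    )
    # __init__ then replaces the default Memory with one built from these values
    # via self.memory_config.get(...).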
@@ -218,20 +231,31 @@ class Agent(BaseModel):
          markdown: bool = False,
          team: Optional[List['Agent']] = None,
          **kwargs,
-     ) -> Union[str, Dict]: # Add return type hint
+     ) -> Union[str, Dict]:
          """Print the agent's response to the console and return it."""
+
+         # Store user message if provided
+         if message and isinstance(message, str):
+             self.memory.add_message(role="user", content=message)
 
          if stream:
              # Handle streaming response
              response = ""
              for chunk in self._stream_response(message, markdown=markdown, **kwargs):
-                 print(chunk)
+                 print(chunk, end="", flush=True)
                  response += chunk
+             # Store agent response
+             if response:
+                 self.memory.add_message(role="assistant", content=response)
+             print()  # New line after streaming
              return response
          else:
              # Generate and return the response
              response = self._generate_response(message, markdown=markdown, team=team, **kwargs)
              print(response)  # Print the response to the console
+             # Store agent response
+             if response:
+                 self.memory.add_message(role="assistant", content=response)
              return response
 
 
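A usage sketch of the new persistence behavior, assuming an already-constructed agent: each print_response call now records the user turn before generation and the assistant turn after it, on both the streaming and non-streaming paths.

    agent.print_response("What is retrieval-augmented generation?")
    agent.print_response("Give me a one-line summary of your last answer.")
    # The second call can resolve "your last answer" because both earlier turns
    # were stored via self.memory.add_message(...) and surface through
    # context["conversation_history"] on the next generation.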
@@ -294,12 +318,10 @@ class Agent(BaseModel):
          # Use the specified team if provided
          if team is not None:
              return self._generate_team_response(message, team, markdown=markdown, **kwargs)
-
          # Initialize tool_outputs as an empty dictionary
          tool_outputs = {}
          responses = []
          tool_calls = []
-
          # Use the LLM to analyze the query and dynamically select tools when auto_tool is enabled
          if self.auto_tool:
              tool_calls = self._analyze_query_and_select_tools(message)
@@ -347,13 +369,17 @@ class Agent(BaseModel):
          try:
              # Prepare the context for the LLM
              context = {
+                 "conversation_history": self.memory.get_context(self.llm_instance),
                  "tool_outputs": tool_outputs,
                  "rag_context": self.rag.retrieve(message) if self.rag else None,
-                 "knowledge_base_context": self._find_all_relevant_keys(message, self._flatten_data(self.knowledge_base)) if self.knowledge_base else None,
+                 "knowledge_base": self._get_knowledge_context(message) if self.knowledge_base else None,
              }
-
+             # Build a memory-aware prompt
+             prompt = self._build_memory_prompt(message, context)
+             # Convert MemoryEntry objects to plain dicts, dropping metadata
+             memory_entries = [{"role": e.role, "content": e.content} for e in self.memory.storage.retrieve()]
              # Generate a response using the LLM
-             llm_response = self.llm_instance.generate(prompt=message, context=context, **kwargs)
+             llm_response = self.llm_instance.generate(prompt=prompt, context=context, memory=memory_entries, **kwargs)
              responses.append(f"**Analysis:**\n\n{llm_response}")
          except Exception as e:
              logger.error(f"Failed to generate LLM response: {e}")
@@ -363,25 +389,30 @@ class Agent(BaseModel):
          # Retrieve relevant context using RAG
          rag_context = self.rag.retrieve(message) if self.rag else None
          # Retrieve relevant context from the knowledge base (API result)
-         knowledge_base_context = None
-         if self.knowledge_base:
-             # Flatten the knowledge base
-             flattened_data = self._flatten_data(self.knowledge_base)
-             # Find all relevant key-value pairs in the knowledge base
-             relevant_values = self._find_all_relevant_keys(message, flattened_data)
-             if relevant_values:
-                 knowledge_base_context = ", ".join(relevant_values)
+         # knowledge_base_context = None
+         # if self.knowledge_base:
+         #     # Flatten the knowledge base
+         #     flattened_data = self._flatten_data(self.knowledge_base)
+         #     # Find all relevant key-value pairs in the knowledge base
+         #     relevant_values = self._find_all_relevant_keys(message, flattened_data)
+         #     if relevant_values:
+         #         knowledge_base_context = ", ".join(relevant_values)
 
          # Combine both contexts (RAG and knowledge base)
          context = {
+             "conversation_history": self.memory.get_context(self.llm_instance),
              "rag_context": rag_context,
-             "knowledge_base_context": knowledge_base_context,
+             "knowledge_base": self._get_knowledge_context(message),
          }
          # Prepare the prompt with instructions, description, and context
-         prompt = self._build_prompt(message, context)
+         # Build a memory-aware prompt
+         prompt = self._build_memory_prompt(message, context)
+         # Convert MemoryEntry objects to plain dicts, dropping metadata
+         memory_entries = [{"role": e.role, "content": e.content} for e in self.memory.storage.retrieve()]
 
          # Generate the response using the LLM
-         response = self.llm_instance.generate(prompt=prompt, context=context, **kwargs)
+         response = self.llm_instance.generate(prompt=prompt, context=context, memory=memory_entries, **kwargs)
+
 
          # Format the response based on the json_output flag
          if self.json_output:
@@ -394,9 +425,37 @@ class Agent(BaseModel):
          if markdown:
              return f"**Response:**\n\n{response}"
          return response
-         # Combine all responses into a single string
          return "\n\n".join(responses)
 
+     # Modified prompt construction with memory integration
+     def _build_memory_prompt(self, user_input: str, context: dict) -> str:
+         """Enhanced prompt builder with memory context."""
+         prompt_parts = []
+
+         if self.description:
+             prompt_parts.append(f"# ROLE\n{self.description}")
+
+         if self.instructions:
+             prompt_parts.append(f"# INSTRUCTIONS\n" + "\n".join(f"- {i}" for i in self.instructions))
+
+         if context['conversation_history']:
+             prompt_parts.append(f"# CONVERSATION HISTORY\n{context['conversation_history']}")
+
+         if context['knowledge_base']:
+             prompt_parts.append(f"# KNOWLEDGE BASE\n{context['knowledge_base']}")
+
+         prompt_parts.append(f"# USER INPUT\n{user_input}")
+
+         return "\n\n".join(prompt_parts)
+
+     def _get_knowledge_context(self, message: str) -> str:
+         """Retrieve and format knowledge base context."""
+         if not self.knowledge_base:
+             return ""
+
+         flattened = self._flatten_data(self.knowledge_base)
+         relevant = self._find_all_relevant_keys(message, flattened)
+         return "\n".join(f"- {item}" for item in relevant) if relevant else ""
 
      def _generate_team_response(self, message: str, team: List['Agent'], markdown: bool = False, **kwargs) -> str:
          """Generate a response using a team of assistants."""
          responses = []
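To make the section ordering concrete, a sketch of what _build_memory_prompt produces for a small, hand-built context dict (values are illustrative):

    prompt = agent._build_memory_prompt(
        "Which plan did I pick?",
        {
            "conversation_history": "user: I chose the Pro plan.\nassistant: Noted.",
            "knowledge_base": "- Pro plan: $20/month",
        },
    )
    # Sections appear in order: # ROLE, # INSTRUCTIONS, # CONVERSATION HISTORY,
    # # KNOWLEDGE BASE, # USER INPUT, joined by blank lines; falsy sections
    # (empty history, empty knowledge base) are skipped entirely.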
@@ -543,17 +602,21 @@ class Agent(BaseModel):
          """Run the agent in a CLI app."""
          from rich.prompt import Prompt
 
+         # Print initial message if provided
          if message:
              self.print_response(message=message, **kwargs)
 
          _exit_on = exit_on or ["exit", "quit", "bye"]
          while True:
-             message = Prompt.ask(f"[bold] {self.emoji} {self.user_name} [/bold]")
-             if message in _exit_on:
+             try:
+                 message = Prompt.ask(f"[bold] {self.emoji} {self.user_name} [/bold]")
+                 if message in _exit_on:
+                     break
+                 self.print_response(message=message, **kwargs)
+             except KeyboardInterrupt:
+                 print("\n\nSession ended. Goodbye!")
                  break
 
-             self.print_response(message=message, **kwargs)
-
      def _generate_api(self):
          """Generate an API for the agent if api=True."""
          from .api.api_generator import APIGenerator
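A quick sketch of the hardened REPL loop (agent construction elided): Ctrl+C now prints a farewell and breaks out of the loop instead of propagating KeyboardInterrupt to the caller.

    agent.cli_app(message="Hello!", exit_on=["quit", "done"])
    # Typing "quit" or "done" exits via the exit_on check; Ctrl+C exits via the
    # new except KeyboardInterrupt branch.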
semantio/memory.py CHANGED
@@ -1,11 +1,54 @@
- from typing import List, Dict
-
+ from .models import MemoryEntry
+ from .storage import BaseMemoryStorage, InMemoryStorage, FileStorage
+ from typing import List, Dict, Optional
+ from .llm.base_llm import BaseLLM
  class Memory:
-     def __init__(self):
-         self.history = []
+     def __init__(
+         self,
+         storage: BaseMemoryStorage = InMemoryStorage(),
+         max_context_length: int = 4000,
+         summarization_threshold: int = 3000
+     ):
+         self.storage = storage
+         self.max_context_length = max_context_length
+         self.summarization_threshold = summarization_threshold
+         self._current_context = ""
+
+     def add_message(self, role: str, content: str, metadata: Optional[Dict] = None):
+         entry = MemoryEntry(
+             role=role,
+             content=content,
+             metadata=metadata or {}
+         )
+         self.storage.store(entry)
+         self._manage_context()
+
+     def get_context(self, llm: Optional[BaseLLM] = None) -> str:
+         if len(self._current_context) < self.summarization_threshold:
+             return self._current_context
+
+         # Automatic summarization when context grows too large
+         if llm:
+             return self.summarize(llm)
+         return self._current_context[:self.max_context_length]
+
+     def _manage_context(self):
+         # Include roles in the conversation history
+         full_history = "\n".join([f"{e.role}: {e.content}" for e in self.storage.retrieve()])
+         if len(full_history) > self.max_context_length:
+             self._current_context = full_history[-self.max_context_length:]
+         else:
+             self._current_context = full_history
 
-     def add_message(self, role: str, content: str):
-         self.history.append({"role": role, "content": content})
+     def summarize(self, llm: BaseLLM) -> str:
+         # Include roles in the history for summarization
+         history = "\n".join([f"{e.role}: {e.content}" for e in self.storage.retrieve()])
+         prompt = f"""
+         Summarize this conversation history maintaining key details and references:
+         {history[-self.summarization_threshold:]}
+         """
+         self._current_context = llm.generate(prompt)
+         return self._current_context
 
-     def get_history(self) -> List[Dict]:
-         return self.history
+     def clear(self):
+         self.storage = InMemoryStorage()
+         self._current_context = ""
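A self-contained sketch of the rewritten Memory API. Note that the storage=InMemoryStorage() default is evaluated once, when __init__ is defined, so separate Memory() instances created without an explicit storage argument share one backing list; passing storage explicitly, as below, avoids that.

    from semantio.memory import Memory
    from semantio.storage import InMemoryStorage

    mem = Memory(storage=InMemoryStorage(), max_context_length=2000, summarization_threshold=1500)
    mem.add_message(role="user", content="Hello")
    mem.add_message(role="assistant", content="Hi! How can I help?")
    print(mem.get_context())  # "user: Hello\nassistant: Hi! How can I help?"
    mem.clear()               # swaps in a fresh InMemoryStorage and empties the cached context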
semantio/models.py ADDED
@@ -0,0 +1,9 @@
+ from pydantic import BaseModel, Field
+ from datetime import datetime
+ from typing import Dict
+
+ class MemoryEntry(BaseModel):
+     role: str  # "user" or "assistant"
+     content: str
+     timestamp: datetime = Field(default_factory=datetime.now)
+     metadata: Dict = Field(default_factory=dict)
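For reference, entries round-trip cleanly through pydantic; a minimal sketch:

    from semantio.models import MemoryEntry

    entry = MemoryEntry(role="user", content="Hello", metadata={"channel": "cli"})
    entry.dict()  # includes the auto-populated timestamp alongside role/content/metadata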
semantio/storage/__init__.py CHANGED
@@ -0,0 +1,5 @@
+ from .base_storage import BaseMemoryStorage
+ from .in_memory_storage import InMemoryStorage
+ from .local_storage import FileStorage
+
+ __all__ = ['BaseMemoryStorage', 'InMemoryStorage', 'FileStorage']
semantio/storage/base_storage.py ADDED
@@ -0,0 +1,12 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Optional
+ from ..models import MemoryEntry
+
+ class BaseMemoryStorage(ABC):
+     @abstractmethod
+     def store(self, entry: MemoryEntry):
+         pass
+
+     @abstractmethod
+     def retrieve(self, query: Optional[str] = None, limit: int = 20) -> List[MemoryEntry]:
+         pass
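Any backend implementing these two methods plugs into Memory. A minimal sketch of a hypothetical capped backend (CappedStorage is illustrative, not part of the package):

    from typing import List, Optional
    from semantio.models import MemoryEntry
    from semantio.storage import BaseMemoryStorage

    class CappedStorage(BaseMemoryStorage):
        """Keeps only the most recent max_entries messages."""
        def __init__(self, max_entries: int = 100):
            self.history: List[MemoryEntry] = []
            self.max_entries = max_entries

        def store(self, entry: MemoryEntry):
            self.history.append(entry)
            del self.history[:-self.max_entries]  # drop the oldest overflow, if any

        def retrieve(self, query: Optional[str] = None, limit: int = 20) -> List[MemoryEntry]:
            return self.history[-limit:]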
semantio/storage/in_memory_storage.py ADDED
@@ -0,0 +1,14 @@
+ # hashai/storage/in_memory_storage.py
+ from typing import List, Optional
+ from ..models import MemoryEntry
+ from .base_storage import BaseMemoryStorage
+
+ class InMemoryStorage(BaseMemoryStorage):
+     def __init__(self):
+         self.history: List[MemoryEntry] = []
+
+     def store(self, entry: MemoryEntry):
+         self.history.append(entry)
+
+     def retrieve(self, query: Optional[str] = None, limit: int = 10) -> List[MemoryEntry]:
+         return self.history[-limit:]
semantio/storage/local_storage.py CHANGED
@@ -0,0 +1,29 @@
+ import json
+ from typing import List, Optional
+ from ..models import MemoryEntry
+ from .base_storage import BaseMemoryStorage
+
+ class FileStorage(BaseMemoryStorage):
+     def __init__(self, file_path: str = "memory.json"):
+         self.file_path = file_path
+         self.history = self._load_from_file()
+
+     def _load_from_file(self) -> List[MemoryEntry]:
+         try:
+             with open(self.file_path, "r") as f:
+                 data = json.load(f)
+                 return [MemoryEntry(**entry) for entry in data]
+         except (FileNotFoundError, json.JSONDecodeError):
+             return []
+
+     def _save_to_file(self):
+         with open(self.file_path, "w") as f:
+             data = [entry.dict() for entry in self.history]
+             json.dump(data, f, default=str)
+
+     def store(self, entry: MemoryEntry):
+         self.history.append(entry)
+         self._save_to_file()
+
+     def retrieve(self, query: Optional[str] = None, limit: int = 20) -> List[MemoryEntry]:
+         return self.history[-limit:]
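A round-trip sketch of the file-backed store: every store() rewrites the JSON file, and a new instance reloads it (datetime timestamps survive because json.dump serializes them with default=str and pydantic parses the strings back):

    from semantio.models import MemoryEntry
    from semantio.storage import FileStorage

    store = FileStorage("memory.json")
    store.store(MemoryEntry(role="user", content="hi"))

    # A separate process, or a later run, sees the same history:
    reloaded = FileStorage("memory.json")
    assert reloaded.retrieve(limit=1)[0].content == "hi"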
semantio-{0.0.4 → 0.0.5}.dist-info/LICENSE RENAMED
@@ -1,6 +1,6 @@
  MIT License
 
- Copyright (c) 2025 Syenah
+ Copyright (c) 2025 Syenah (Semantio)
 
  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
semantio-{0.0.4 → 0.0.5}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: semantio
- Version: 0.0.4
+ Version: 0.0.5
  Summary: A powerful SDK for building AI agents
  Home-page: https://github.com/Syenah/semantio
  Author: Rakesh
semantio-{0.0.4 → 0.0.5}.dist-info/RECORD RENAMED
@@ -1,6 +1,7 @@
  semantio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- semantio/agent.py,sha256=hKytSI5LqNnxqVvwI2hOINqPgrdhUXY9MS_90_crZPs,28584
- semantio/memory.py,sha256=eNAwyAokppHzMcIyFgOw2hT2wnLQBd9GL4T5eallNV4,281
+ semantio/agent.py,sha256=uPFz1WP2eb-z-tryQOX8necS8_tv4Il6qxNmZux9hNk,31709
+ semantio/memory.py,sha256=en9n3UySnj4rA0x3uR1sEdEzA7EkboQNbEHQ5KuEehw,2115
+ semantio/models.py,sha256=7hmP-F_aSU8WvsG3NGeC_hep-rUbiSbjUFMDVbpKxQE,289
  semantio/rag.py,sha256=ROy3Pa1NURcDs6qQZ8IMoa5Xlzt6I-msEq0C1p8UgB0,472
  semantio/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  semantio/api/api_generator.py,sha256=Q-USITEpluRESEaQuOmF7m1vhLKYU9P8eGlQppKT9J4,829
@@ -19,9 +20,11 @@ semantio/llm/gemini.py,sha256=er3zv1jOvWQBGbPuv4fS4pR_c_abHyhroe-rkXupOO4,1959
  semantio/llm/groq.py,sha256=1AH30paKzDIQjBjWPQPN44QwFHsIOVwI-a587-cDIVc,4285
  semantio/llm/mistral.py,sha256=NpvaB1cE6-jMEBdT0mTf6Ca4Qq2LS8QivDKI6AgdRjE,1061
  semantio/llm/openai.py,sha256=I3ab-d_zFxm-TDhYk6t1PzDtElPJEEQ2eSiARBNIGi4,5174
- semantio/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ semantio/storage/__init__.py,sha256=bGSJjA1qk6DUDrBijmWcQk3Y1a2K00MPoKI5KH43Ang,196
+ semantio/storage/base_storage.py,sha256=R9tQfidVZlCN6CyvnhB-Tc2lIZ7yQsyX4cbMoud64XM,336
  semantio/storage/cloud_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- semantio/storage/local_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ semantio/storage/in_memory_storage.py,sha256=aZT8rRHF6Kz_udaqf0rux7XRFKf9Hr3d4c3Ylry7J14,474
+ semantio/storage/local_storage.py,sha256=Z8jCPo2MwZ8tuhQywWkHyxTrdSyYtzAPSNd46DTCth8,1007
  semantio/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  semantio/tools/base_tool.py,sha256=xBNSa_8a8WmA4BGRLG2dE7wj9GnBcZo7-P2SyD86GvY,571
  semantio/tools/crypto.py,sha256=mut1ztvpPcUUP3b563dh_FmKtP68KmNis3Qm8WENj8w,5559
@@ -34,9 +37,9 @@ semantio/utils/date_utils.py,sha256=x3oqRGv6ee_KCJ0LvCqqZh_FSgS6YGOHBwZQS4TJetY,
  semantio/utils/file_utils.py,sha256=b_cMuJINEGk9ikNuNHSn9lsmICWwvtnCDZ03ndH_S2I,1779
  semantio/utils/logger.py,sha256=TmGbP8BRjLMWjXi2GWzZ0RIXt70x9qX3FuIqghCNlwM,510
  semantio/utils/validation_utils.py,sha256=iwoxEb4Q5ILqV6tbesMjPWPCCoL3AmPLejGUy6q8YvQ,1284
- semantio-0.0.4.dist-info/LICENSE,sha256=teQbWD2Zlcl1_Fo29o2tNbs6G26hbCQiUzds5fQGYlY,1063
- semantio-0.0.4.dist-info/METADATA,sha256=youxODbkR3gNERG-mD7zbUbe5ix-0lUiWCHUI1_Y5IY,6913
- semantio-0.0.4.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
- semantio-0.0.4.dist-info/entry_points.txt,sha256=zbPgevSLwcLpdRHqI_atE8EOt8lK2vRF1AoDflDTo18,53
- semantio-0.0.4.dist-info/top_level.txt,sha256=Yte_6mb-bh-I_lQwMjk1GijZkxPoX4Zmp3kBftC1ZlA,9
- semantio-0.0.4.dist-info/RECORD,,
+ semantio-0.0.5.dist-info/LICENSE,sha256=mziLlfb9hZ8HKxm9V6BiHpmgJvmcDvswu1QBlDB-6vU,1074
+ semantio-0.0.5.dist-info/METADATA,sha256=PtDbsZ-tWXbte0RR40K5O_OklMKZiUsb-3dxGlmjklQ,6913
+ semantio-0.0.5.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+ semantio-0.0.5.dist-info/entry_points.txt,sha256=zbPgevSLwcLpdRHqI_atE8EOt8lK2vRF1AoDflDTo18,53
+ semantio-0.0.5.dist-info/top_level.txt,sha256=Yte_6mb-bh-I_lQwMjk1GijZkxPoX4Zmp3kBftC1ZlA,9
+ semantio-0.0.5.dist-info/RECORD,,