semantio-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,70 @@
+ from fastapi import FastAPI, HTTPException
+ from fastapi.middleware.cors import CORSMiddleware
+ from typing import Optional, Dict, List
+ from ..agent import Agent
+
+ def create_fastapi_app(agent: Agent, api_config: Optional[Dict] = None) -> FastAPI:
+     """
+     Create a FastAPI app for the given agent.
+
+     Args:
+         agent (Agent): The agent instance for which the API is being created.
+         api_config (Optional[Dict]): Configuration for the API, including CORS settings.
+
+     Returns:
+         FastAPI: A FastAPI app with endpoints for interacting with the agent.
+     """
+     app = FastAPI()
+
+     # Default CORS settings (allow all)
+     cors_config = {
+         "allow_origins": ["*"],
+         "allow_credentials": True,
+         "allow_methods": ["*"],
+         "allow_headers": ["*"],
+     }
+
+     # Override the defaults with user-provided CORS settings
+     if api_config and "cors" in api_config:
+         cors_config.update(api_config["cors"])
+
+     # Add CORS middleware
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=cors_config["allow_origins"],
+         allow_credentials=cors_config["allow_credentials"],
+         allow_methods=cors_config["allow_methods"],
+         allow_headers=cors_config["allow_headers"],
+     )
+
+     @app.post("/chat")
+     async def chat(message: str):
+         """
+         Endpoint to interact with the agent.
+         """
+         response = agent.print_response(message=message)
+
+         if agent.json_output:
+             return response
+         else:
+             return {"response": response}
+
+     @app.get("/tools")
+     async def get_tools():
+         """
+         Endpoint to get the list of tools available to the agent.
+         """
+         return {"tools": agent.tools}
+
+     @app.post("/load_image")
+     async def load_image(image_url: str):
+         """
+         Endpoint to load an image from a URL.
+         """
+         try:
+             agent.load_image_from_url(image_url)
+             return {"status": "success", "image": "Image loaded successfully"}
+         except Exception as e:
+             raise HTTPException(status_code=400, detail=str(e))
+
+     return app
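A minimal serving sketch for this factory. The uvicorn invocation is standard FastAPI practice; the Agent import path and constructor arguments are assumptions, since neither appears in this diff:

    import uvicorn
    from semantio.agent import Agent  # assumed import path

    agent = Agent()  # constructor arguments are assumed
    app = create_fastapi_app(
        agent,
        api_config={"cors": {"allow_origins": ["http://localhost:3000"]}},
    )
    uvicorn.run(app, host="0.0.0.0", port=8000)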
semantio/cli/main.py ADDED
@@ -0,0 +1,31 @@
+ import argparse
+ import warnings
+ from semantio.assistant import Assistant
+ from semantio.llm import get_llm
+ from urllib3.exceptions import NotOpenSSLWarning
+
+ # Suppress the NotOpenSSLWarning
+ warnings.filterwarnings("ignore", category=NotOpenSSLWarning)
+
+ def main():
+     parser = argparse.ArgumentParser(description="opAi CLI")
+     parser.add_argument("--message", type=str, required=True, help="Message to send to the assistant")
+     parser.add_argument("--provider", type=str, required=True, help="LLM provider (e.g., groq, openai)")
+     parser.add_argument("--api-key", type=str, required=True, help="API key for the LLM provider")
+     parser.add_argument("--model", type=str, default=None, help="Model name (e.g., mixtral-8x7b-32768)")
+     args = parser.parse_args()
+
+     # Initialize the LLM
+     llm_config = {"api_key": args.api_key}
+     if args.model:
+         llm_config["model"] = args.model
+
+     llm = get_llm(provider=args.provider, **llm_config)
+
+     # Create an assistant and send the message
+     assistant = Assistant(model=args.provider, llm=llm)
+     assistant.print_response(args.message)
+
+
+ if __name__ == "__main__":
+     main()
@@ -0,0 +1,5 @@
+ from .document_loader import DocumentLoader
+ from .retriever import Retriever
+ from .vector_store import VectorStore
+
+ __all__ = ["DocumentLoader", "Retriever", "VectorStore"]
@@ -0,0 +1,61 @@
+ import json
+ from typing import List, Dict, Any
+ from pathlib import Path
+
+ import requests
+
+ class DocumentLoader:
+     """
+     A class to load documents from various sources (e.g., files, URLs) into the knowledge base.
+     """
+
+     def load_from_file(self, file_path: str) -> List[Dict[str, Any]]:
+         """
+         Load documents from a file.
+
+         Args:
+             file_path (str): The path to the file.
+
+         Returns:
+             List[Dict[str, Any]]: A list of documents, where each document is a dictionary.
+         """
+         file_path = Path(file_path)
+         if not file_path.exists():
+             raise FileNotFoundError(f"File not found: {file_path}")
+
+         # Load a JSON file (wrap a single top-level object in a list)
+         if file_path.suffix == ".json":
+             with open(file_path, "r") as f:
+                 data = json.load(f)
+             return data if isinstance(data, list) else [data]
+         # Load a plain-text file
+         elif file_path.suffix == ".txt":
+             with open(file_path, "r") as f:
+                 return [{"text": f.read()}]
+         else:
+             raise ValueError(f"Unsupported file type: {file_path.suffix}")
+
+     def load_from_url(self, url: str) -> List[Dict[str, Any]]:
+         """
+         Load documents from a URL.
+
+         Args:
+             url (str): The URL to load documents from.
+
+         Returns:
+             List[Dict[str, Any]]: A list of documents, where each document is a dictionary.
+         """
+         response = requests.get(url)
+         if response.status_code != 200:
+             raise ValueError(f"Failed to fetch data from URL: {url}")
+
+         # JSON responses may be a single object; normalize to a list
+         if "application/json" in response.headers.get("Content-Type", ""):
+             data = response.json()
+             return data if isinstance(data, list) else [data]
+         # Fall back to treating the body as plain text
+         else:
+             return [{"text": response.text}]
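For illustration, loading the two supported source types might look like this (the file name and URL are hypothetical):

    loader = DocumentLoader()
    text_docs = loader.load_from_file("notes.txt")                     # -> [{"text": "..."}]
    json_docs = loader.load_from_url("https://example.com/data.json")  # -> parsed JSON, as a list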
@@ -0,0 +1,41 @@
+ from typing import List, Dict
+ import numpy as np
+
+ class Retriever:
+     def __init__(self, vector_store):
+         self.vector_store = vector_store
+
+     def retrieve(self, query: str, k: int = 5) -> List[Dict]:
+         """
+         Retrieve relevant documents for a given query.
+
+         Args:
+             query (str): The query string.
+             k (int): Number of documents to retrieve.
+
+         Returns:
+             List[Dict]: List of relevant documents.
+         """
+         # Convert the query to an embedding (dummy implementation for now)
+         query_embedding = self._embed_query(query)
+
+         # Search the vector store for similar embeddings
+         indices = self.vector_store.search(query_embedding, k=k)
+
+         # Retrieve the documents (dummy implementation for now)
+         documents = [{"content": f"Document {i}", "score": 0.9} for i in indices]
+
+         return documents
+
+     def _embed_query(self, query: str) -> np.ndarray:
+         """
+         Convert a query string to an embedding.
+
+         Args:
+             query (str): The query string.
+
+         Returns:
+             np.ndarray: The query embedding.
+         """
+         # Dummy implementation: return a random 768-dimensional embedding
+         # (float32, since that is what FAISS expects)
+         return np.random.rand(768).astype(np.float32)
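Paired with the VectorStore defined in the next hunk, usage might look like the sketch below; the embeddings are random here, matching the dummy `_embed_query`, so the retrieved documents are placeholders:

    import numpy as np

    store = VectorStore(dimension=768)
    store.add_embeddings(np.random.rand(100, 768).astype(np.float32))
    retriever = Retriever(store)
    docs = retriever.retrieve("what is semantio?", k=3)
    # -> [{"content": "Document 42", "score": 0.9}, ...]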
@@ -0,0 +1,35 @@
+ import numpy as np
+ import faiss
+
+ class VectorStore:
+     def __init__(self, dimension: int = 768):
+         """
+         Initialize a vector store.
+
+         Args:
+             dimension (int): Dimensionality of the embeddings.
+         """
+         self.index = faiss.IndexFlatL2(dimension)
+
+     def add_embeddings(self, embeddings: np.ndarray):
+         """
+         Add embeddings to the vector store.
+
+         Args:
+             embeddings (np.ndarray): Array of embeddings to add, shape (n, dimension).
+         """
+         # FAISS requires C-contiguous float32 input
+         embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)
+         self.index.add(embeddings)
+
+     def search(self, query_embedding: np.ndarray, k: int = 5) -> np.ndarray:
+         """
+         Search for similar embeddings.
+
+         Args:
+             query_embedding (np.ndarray): The query embedding.
+             k (int): Number of nearest neighbors to retrieve.
+
+         Returns:
+             np.ndarray: Indices of the nearest neighbors.
+         """
+         query = np.ascontiguousarray(query_embedding.reshape(1, -1), dtype=np.float32)
+         distances, indices = self.index.search(query, k)
+         return indices[0]
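IndexFlatL2 performs exact brute-force L2 search, so a tiny sanity check behaves predictably:

    import numpy as np

    store = VectorStore(dimension=4)
    store.add_embeddings(np.eye(4, dtype=np.float32))  # four orthogonal unit vectors
    print(store.search(np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float32), k=2))
    # -> [0 ...]: index 0 is the exact match (distance 0)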
@@ -0,0 +1,17 @@
+ from .openai import OpenAILlm
+ from .anthropic import AnthropicLlm
+ # from .llama import LlamaLlm
+ from .groq import GroqLlm
+
+ def get_llm(provider: str, **kwargs):
+     provider = provider.lower()  # Normalize the provider name
+     if provider == "openai":
+         return OpenAILlm(**kwargs)
+     elif provider == "anthropic":
+         return AnthropicLlm(**kwargs)
+     # elif provider == "llama":
+     #     return LlamaLlm(**kwargs)
+     elif provider == "groq":
+         return GroqLlm(**kwargs)
+     else:
+         raise ValueError(f"Unsupported LLM provider: {provider}")
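Usage is a plain factory call; unknown providers raise ValueError. The key below is a placeholder, and `generate` makes a real API request:

    llm = get_llm(provider="groq", api_key="gsk_...", model="mixtral-8x7b-32768")
    reply = llm.generate(prompt="Say hello in one word.")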
@@ -0,0 +1,39 @@
+ from typing import List, Dict, Optional
+ from .base_llm import BaseLLM
+ import anthropic
+ import os
+
+ class AnthropicLlm(BaseLLM):
+     def __init__(
+         self,
+         model: str = "claude-2.1",  # Default Anthropic model
+         api_key: Optional[str] = None,
+     ):
+         self.model = model
+         self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
+         if not self.api_key:
+             raise ValueError("Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass it explicitly.")
+         self.client = anthropic.Anthropic(api_key=self.api_key)
+
+     def generate(
+         self,
+         prompt: str,
+         context: Optional[List[Dict]] = None,
+         memory: Optional[List[Dict]] = None,
+     ) -> str:
+         # Prepare messages for the Anthropic Messages API
+         messages = []
+         if memory:
+             messages.extend(memory)
+         messages.append({"role": "user", "content": prompt})
+
+         # Anthropic takes the system prompt as a separate parameter rather
+         # than as a message with role "system"; max_tokens is required.
+         request: Dict = {"model": self.model, "max_tokens": 1024, "messages": messages}
+         if context:
+             request["system"] = "Context: " + str(context)
+
+         # Call the Anthropic API
+         response = self.client.messages.create(**request)
+
+         # Extract and return the response text
+         return response.content[0].text
@@ -0,0 +1,12 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Optional
+
+ class BaseLLM(ABC):
+     @abstractmethod
+     def generate(
+         self,
+         prompt: str,
+         context: Optional[List[Dict]] = None,
+         memory: Optional[List[Dict]] = None,
+     ) -> str:
+         pass
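Providers subclass BaseLLM and implement `generate`. A minimal hypothetical stub (not part of the package, but handy for offline tests) shows the contract:

    class EchoLlm(BaseLLM):
        """Hypothetical stub that echoes the prompt instead of calling an API."""
        def generate(self, prompt, context=None, memory=None):
            return f"echo: {prompt}"

    assert EchoLlm().generate("hi") == "echo: hi"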
semantio/llm/groq.py ADDED
@@ -0,0 +1,39 @@
+ from typing import List, Dict, Optional
+ from .base_llm import BaseLLM
+ import groq
+ import os
+
+ class GroqLlm(BaseLLM):
+     def __init__(
+         self,
+         model: str = "mixtral-8x7b-32768",  # Default Groq model
+         api_key: Optional[str] = None,
+     ):
+         self.model = model
+         self.api_key = api_key or os.getenv("GROQ_API_KEY")
+         if not self.api_key:
+             raise ValueError("Groq API key is required. Set GROQ_API_KEY environment variable or pass it explicitly.")
+         self.client = groq.Groq(api_key=self.api_key)
+
+     def generate(
+         self,
+         prompt: str,
+         context: Optional[List[Dict]] = None,
+         memory: Optional[List[Dict]] = None,
+     ) -> str:
+         # Prepare messages for the Groq chat completions API
+         messages = []
+         if memory:
+             messages.extend(memory)
+         if context:
+             messages.append({"role": "system", "content": "Context: " + str(context)})
+         messages.append({"role": "user", "content": prompt})
+
+         # Call the Groq API
+         response = self.client.chat.completions.create(
+             model=self.model,
+             messages=messages,
+         )
+
+         # Extract and return the response text
+         return response.choices[0].message.content
semantio/llm/llama.py ADDED
File without changes
semantio/llm/openai.py ADDED
@@ -0,0 +1,26 @@
+ from typing import List, Dict, Optional
+ from .base_llm import BaseLLM
+ import openai
+ import os
+
+ class OpenAILlm(BaseLLM):
+     def __init__(self, model: str = "gpt-4", api_key: Optional[str] = None):
+         self.model = model
+         self.api_key = api_key or os.getenv("OPENAI_API_KEY")
+         if not self.api_key:
+             raise ValueError("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it explicitly.")
+         # openai>=1.0 uses a client object instead of the module-level API
+         self.client = openai.OpenAI(api_key=self.api_key)
+
+     def generate(self, prompt: str, context: Optional[List[Dict]] = None, memory: Optional[List[Dict]] = None) -> str:
+         messages = []
+         if memory:
+             messages.extend(memory)
+         if context:
+             messages.append({"role": "system", "content": "Context: " + str(context)})
+         messages.append({"role": "user", "content": prompt})
+
+         response = self.client.chat.completions.create(
+             model=self.model,
+             messages=messages,
+         )
+         return response.choices[0].message.content
semantio/memory.py ADDED
@@ -0,0 +1,11 @@
+ from typing import List, Dict
+
+ class Memory:
+     def __init__(self):
+         self.history = []
+
+     def add_message(self, role: str, content: str):
+         self.history.append({"role": role, "content": content})
+
+     def get_history(self) -> List[Dict]:
+         return self.history
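The history entries use the same role/content dicts the LLM classes above expect, so Memory can feed their `memory` argument directly. A sketch:

    memory = Memory()
    memory.add_message("user", "What is 2 + 2?")
    memory.add_message("assistant", "4")
    # Any BaseLLM implementation accepts this directly (llm is assumed here):
    # reply = llm.generate(prompt="Now multiply that by 3.", memory=memory.get_history())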
semantio/rag.py ADDED
@@ -0,0 +1,18 @@
+ from typing import List, Dict
+ from .knowledge_base.retriever import Retriever
+
+ class RAG:
+     def __init__(self, retriever: Retriever):
+         self.retriever = retriever
+
+     def retrieve(self, query: str) -> List[Dict]:
+         """
+         Retrieve relevant context for a given query.
+
+         Args:
+             query (str): The query string.
+
+         Returns:
+             List[Dict]: List of relevant documents.
+         """
+         return self.retriever.retrieve(query)
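An end-to-end wiring sketch; the `semantio.knowledge_base` path follows the relative import above, and the results are placeholders because the retriever's embeddings are random:

    import numpy as np
    from semantio.knowledge_base import Retriever, VectorStore
    from semantio.rag import RAG

    store = VectorStore(dimension=768)
    store.add_embeddings(np.random.rand(10, 768).astype(np.float32))
    rag = RAG(Retriever(store))
    context = rag.retrieve("price of BTC")  # -> [{"content": "Document 3", "score": 0.9}, ...]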
@@ -0,0 +1,12 @@
+ # base_tool.py
+ from typing import Dict, Any, Optional
+ from pydantic import BaseModel, Field
+
+ class BaseTool(BaseModel):
+     name: str = Field(..., description="The name of the tool.")
+     description: str = Field(..., description="A brief description of the tool's functionality.")
+     llm: Optional[Any] = Field(None, description="The LLM instance to use for tool execution.")
+
+     def execute(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
+         """Execute the tool's functionality."""
+         raise NotImplementedError("Subclasses must implement this method.")
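Since BaseTool is a pydantic model, a concrete tool supplies name and description as fields and overrides `execute`. A minimal hypothetical subclass:

    class EchoTool(BaseTool):
        def execute(self, input_data):
            return {"echo": input_data.get("query", "")}

    tool = EchoTool(name="Echo", description="Returns the query unchanged.")
    print(tool.execute({"query": "ping"}))  # {'echo': 'ping'}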
@@ -0,0 +1,133 @@
+ from .base_tool import BaseTool
+ import yfinance as yf
+ from typing import Dict, Any, List, Optional
+ from pydantic import Field
+ import logging
+ import re
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ class CryptoPriceChecker(BaseTool):
+     default_symbol: str = Field(default="BTC-USD", description="Default cryptocurrency symbol (e.g., 'BTC-USD').")
+     enable_historical_data: bool = Field(default=True, description="Enable fetching historical data.")
+     enable_multiple_symbols: bool = Field(default=True, description="Enable fetching data for multiple cryptocurrencies.")
+     enable_metrics: bool = Field(default=True, description="Enable additional metrics like volume and market cap.")
+     llm: Optional[Any] = Field(None, description="The LLM instance to use for symbol extraction.")
+
+     def __init__(self, **kwargs):
+         super().__init__(
+             name="CryptoPriceChecker",
+             description="Fetch real-time and historical cryptocurrency prices and metrics.",
+             **kwargs
+         )
+
+     def execute(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Execute the cryptocurrency price check based on the provided input.
+
+         Args:
+             input_data (Dict[str, Any]): Input data containing the query and optional parameters.
+
+         Returns:
+             Dict[str, Any]: Cryptocurrency price and additional metrics.
+         """
+         try:
+             query = input_data.get("query", "")
+             symbols = self._extract_symbols(query) if query else [self.default_symbol]
+             time_range = input_data.get("time_range", "1d")  # Default to 1 day
+             metrics = input_data.get("metrics", ["price"])  # Default to price only
+
+             results = {}
+
+             for symbol in symbols:
+                 # Validate the symbol
+                 if not self._is_valid_symbol(symbol):
+                     logger.warning(f"Invalid cryptocurrency symbol: {symbol}")
+                     results[symbol] = {"error": "Invalid symbol"}
+                     continue
+
+                 try:
+                     crypto = yf.Ticker(symbol)
+                     data = {}
+
+                     # Fetch real-time price (most recent close)
+                     if "price" in metrics:
+                         history = crypto.history(period=time_range)
+                         if not history.empty:
+                             data["price"] = history["Close"].iloc[-1]
+                         else:
+                             data["price"] = "No data available"
+
+                     # Fetch historical data
+                     if self.enable_historical_data and "history" in metrics:
+                         history = crypto.history(period=time_range)
+                         if not history.empty:
+                             data["history"] = history.to_dict()
+                         else:
+                             data["history"] = "No historical data available"
+
+                     # Fetch additional metrics
+                     if self.enable_metrics:
+                         info = crypto.info
+                         if "volume" in metrics:
+                             data["volume"] = info.get("volume24Hr", "No volume data available")
+                         if "market_cap" in metrics:
+                             data["market_cap"] = info.get("marketCap", "No market cap data available")
+
+                     results[symbol] = data
+                 except Exception as e:
+                     results[symbol] = {"error": str(e)}
+
+             return results
+         except Exception as e:
+             return {"error": str(e)}
+
+     def _extract_symbols(self, query: str) -> List[str]:
+         """
+         Use the LLM to extract cryptocurrency symbols from the user's query.
+         """
+         if not self.llm:
+             logger.error("LLM instance not available for symbol extraction.")
+             return []
+
+         # Create a prompt for the LLM
+         prompt = f"""
+         Extract the cryptocurrency symbols from the following user query.
+         Return the symbols as a comma-separated list in the format CRYPTO-FIAT (e.g., "BTC-USD,ETH-USD").
+         If no symbols are found, return "None".
+
+         User Query: "{query}"
+         """
+
+         try:
+             # Call the LLM to generate the response
+             response = self.llm.generate(prompt=prompt)
+             logger.info(f"LLM response: {response}")
+             symbols = response.strip().replace('"', '').replace("'", "")
+
+             # Fallback: extract symbols from verbose responses
+             if ":" in symbols or "\n" in symbols:
+                 # Look for patterns like "BTC-USD,ETH-USD" in the response
+                 symbol_pattern = re.compile(r"\b[A-Z]{2,5}-[A-Z]{2,5}\b")
+                 symbols = ",".join(symbol_pattern.findall(symbols))
+                 logger.info(f"Extracted symbols: {symbols}")
+
+             # Parse the response into a list of symbols
+             if symbols.lower() == "none":
+                 return []
+             return [s.strip() for s in symbols.split(",") if s.strip()]
+         except Exception as e:
+             logger.error(f"Failed to extract symbols: {e}")
+             return []
+
+     def _is_valid_symbol(self, symbol: str) -> bool:
+         """
+         Validate that the symbol follows the CRYPTO-FIAT format (e.g., BTC-USD, ETH-USD).
+         """
+         if "-" not in symbol:
+             return False
+         crypto, fiat = symbol.split("-", 1)
+         return bool(crypto.strip()) and bool(fiat.strip())
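A usage sketch for the tool; this fetches live data from Yahoo Finance through yfinance, and with no `llm` attached the tool skips query extraction and falls back to `default_symbol`:

    checker = CryptoPriceChecker()  # no llm: queries fall back to default_symbol
    result = checker.execute({"metrics": ["price", "volume"], "time_range": "5d"})
    print(result)  # {'BTC-USD': {'price': ..., 'volume': ...}}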