raw-agent 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,10 @@
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
@@ -0,0 +1 @@
1
+ 3.14
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Siddharth
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,179 @@
1
+ Metadata-Version: 2.4
2
+ Name: raw-agent
3
+ Version: 0.1.0
4
+ Summary: A powerful and flexible LLM interface library for Gemini and other models.
5
+ Project-URL: Homepage, https://github.com/siddharth200119/RAW
6
+ Project-URL: Repository, https://github.com/siddharth200119/RAW
7
+ Project-URL: Issues, https://github.com/siddharth200119/RAW/issues
8
+ Author-email: Siddharth <siddharth@example.com>
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Operating System :: OS Independent
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.14
17
+ Requires-Python: >=3.14
18
+ Requires-Dist: httpx>=0.28.1
19
+ Requires-Dist: jsonschema>=4.26.0
20
+ Requires-Dist: numpy>=2.0.0
21
+ Requires-Dist: opencv-python>=4.13.0.92
22
+ Requires-Dist: pydantic>=2.12.5
23
+ Description-Content-Type: text/markdown
24
+
25
+ # RAW Project
26
+
27
+ A powerful and flexible LLM interface library for Gemini and other models, designed for modern Python applications, featuring a robust Agent framework.
28
+
29
+ ## Features
30
+
31
+ - **Gemini Integration**: Seamless support for Google's Gemini models (default: `gemini-1.5-flash`).
32
+ - **Unified Interface**: Clean, consistent API for chat, completion, and tool usage.
33
+ - **Agent Framework**: Build autonomous agents with tool-calling capabilities.
34
+ - **Async Support**: Fully asynchronous implementation using `httpx` and `asyncio`.
35
+ - **Tool Calling**: Easy-to-use function calling/tool usage with schema generation.
36
+ - **Streaming**: Native support for streaming responses.
37
+
38
+ ## Installation
39
+
40
+ ```bash
41
+ pip install raw-agent
42
+ ```
43
+
44
+ or with `uv`:
45
+
46
+ ```bash
47
+ uv add raw-agent
48
+ ```
49
+
50
+ ## Quick Start / Documentation
51
+
52
+ ### 1. Using LLMs
53
+
54
+ The project supports LLMs via the `BaseLLM` interface. The primary implementation provided is `GeminiLLM`.
55
+
56
+ #### Initialization
57
+
58
+ To use `GeminiLLM`, you need a Google Gemini API key.
59
+
60
+ ```python
61
+ import os
62
+ from RAW.llms.gemini import GeminiLLM
63
+ from RAW.utils import Logger
64
+
65
+ # Check/Set API Key
66
+ api_key = os.environ.get("GEMINI_API_KEY")
67
+ if not api_key:
68
+ raise ValueError("GEMINI_API_KEY not set")
69
+
70
+ # Optional: Configure Logger
71
+ logger = Logger("MyLLM")
72
+
73
+ # Initialize
74
+ # Default model: gemini-1.5-flash (configurable via model argument)
75
+ llm = GeminiLLM(api_key=api_key, logger=logger, model="gemini-1.5-flash")
76
+ ```
77
+
78
+ #### Basic Chat
79
+
80
+ You can interact with the LLM using the `chat` method, which accepts a list of `Message` objects.
81
+
82
+ ```python
83
+ from RAW.modals import Message
84
+
85
+ messages = [
86
+ Message(role="user", content="Hello, who are you?")
87
+ ]
88
+
89
+ # Non-streaming
90
+ response = await llm.chat(messages=messages)
91
+ print(response.content)
92
+
93
+ # Streaming
94
+ async for chunk in await llm.chat(messages=messages, stream=True):
95
+ print(chunk.content)
96
+ ```
97
+
98
+ ### 2. Making Tools
99
+
100
+ Tools allow the Agent to perform actions or retrieve information. A `Tool` consists of a name, description, parameters, and a python function.
101
+
102
+ #### Define the Function
103
+
104
+ Create an async function that performs the desired action.
105
+
106
+ ```python
107
+ async def get_weather(location: str):
108
+ """Fetches weather for a given location."""
109
+ # Your logic here (e.g., API call)
110
+ return f"The weather in {location} is sunny."
111
+ ```
112
+
113
+ #### Define the Tool Definition
114
+
115
+ Wrap the function in a `Tool` object, specifying its schema using `ToolParam`.
116
+
117
+ ```python
118
+ from RAW.modals import Tool
119
+ from RAW.modals.tools import ToolParam
120
+
121
+ weather_tool = Tool(
122
+ name="get_weather",
123
+ description="Get the weather for a specific location",
124
+ parameters=[
125
+ ToolParam(
126
+ name="location",
127
+ type="string",
128
+ description="City and State, e.g. New York, NY",
129
+ required=True
130
+ )
131
+ ],
132
+ function=get_weather
133
+ )
134
+ ```
135
+
136
+ ### 3. Making an Agent
137
+
138
+ The `Agent` orchestrates the interaction between the LLM, Tools, and the User. It manages the conversation history and tool execution loop.
139
+
140
+ #### Initialization
141
+
142
+ Combine the LLM and Tools into an Agent.
143
+
144
+ ```python
145
+ from RAW.agent import Agent
146
+
147
+ agent = Agent(
148
+ name="WeatherBot",
149
+ base_prompt="You are a helpful assistant that can check the weather.",
150
+ tools=[weather_tool], # List of Tool objects
151
+ llm=llm, # The LLM instance
152
+ logger=logger # Optional logger
153
+ )
154
+ ```
155
+
156
+ #### Running the Agent
157
+
158
+ The agent is callable. You can run it in a loop to handle user input.
159
+
160
+ ```python
161
+ user_input = "What is the weather in London?"
162
+
163
+ # The agent returns a generator yielding chunks of the response
164
+ async for chunk in agent(user_input, stream=True):
165
+ # 'chunk' can be a dictionary containing content or tool execution status
166
+ if isinstance(chunk, dict) and 'content' in chunk:
167
+ content_obj = chunk['content']
168
+ # content_obj might be a Message dump or a string
169
+ if isinstance(content_obj, dict) and 'content' in content_obj:
170
+ print(content_obj['content'], end="", flush=True)
171
+ ```
172
+
173
+ ### Full Example
174
+
175
+ See `main.py` in the project root for a complete, runnable example of a Chatbot Agent.
176
+
177
+ ## License
178
+
179
+ MIT
@@ -0,0 +1,155 @@
1
+ # RAW Project
2
+
3
+ A powerful and flexible LLM interface library for Gemini and other models, designed for modern Python applications, featuring a robust Agent framework.
4
+
5
+ ## Features
6
+
7
+ - **Gemini Integration**: Seamless support for Google's Gemini models (default: `gemini-1.5-flash`).
8
+ - **Unified Interface**: Clean, consistent API for chat, completion, and tool usage.
9
+ - **Agent Framework**: Build autonomous agents with tool-calling capabilities.
10
+ - **Async Support**: Fully asynchronous implementation using `httpx` and `asyncio`.
11
+ - **Tool Calling**: Easy-to-use function calling/tool usage with schema generation.
12
+ - **Streaming**: Native support for streaming responses.
13
+
14
+ ## Installation
15
+
16
+ ```bash
17
+ pip install raw-agent
18
+ ```
19
+
20
+ or with `uv`:
21
+
22
+ ```bash
23
+ uv add raw-agent
24
+ ```
25
+
26
+ ## Quick Start / Documentation
27
+
28
+ ### 1. Using LLMs
29
+
30
+ The project supports LLMs via the `BaseLLM` interface. The primary implementation provided is `GeminiLLM`.
31
+
32
+ #### Initialization
33
+
34
+ To use `GeminiLLM`, you need a Google Gemini API key.
35
+
36
+ ```python
37
+ import os
38
+ from RAW.llms.gemini import GeminiLLM
39
+ from RAW.utils import Logger
40
+
41
+ # Check/Set API Key
42
+ api_key = os.environ.get("GEMINI_API_KEY")
43
+ if not api_key:
44
+ raise ValueError("GEMINI_API_KEY not set")
45
+
46
+ # Optional: Configure Logger
47
+ logger = Logger("MyLLM")
48
+
49
+ # Initialize
50
+ # Default model: gemini-1.5-flash (configurable via model argument)
51
+ llm = GeminiLLM(api_key=api_key, logger=logger, model="gemini-1.5-flash")
52
+ ```
53
+
54
+ #### Basic Chat
55
+
56
+ You can interact with the LLM using the `chat` method, which accepts a list of `Message` objects.
57
+
58
+ ```python
59
+ from RAW.modals import Message
60
+
61
+ messages = [
62
+ Message(role="user", content="Hello, who are you?")
63
+ ]
64
+
65
+ # Non-streaming
66
+ response = await llm.chat(messages=messages)
67
+ print(response.content)
68
+
69
+ # Streaming
70
+ async for chunk in await llm.chat(messages=messages, stream=True):
71
+ print(chunk.content)
72
+ ```
73
+
74
+ ### 2. Making Tools
75
+
76
+ Tools allow the Agent to perform actions or retrieve information. A `Tool` consists of a name, description, parameters, and a python function.
77
+
78
+ #### Define the Function
79
+
80
+ Create an async function that performs the desired action.
81
+
82
+ ```python
83
+ async def get_weather(location: str):
84
+ """Fetches weather for a given location."""
85
+ # Your logic here (e.g., API call)
86
+ return f"The weather in {location} is sunny."
87
+ ```
88
+
89
+ #### Define the Tool Definition
90
+
91
+ Wrap the function in a `Tool` object, specifying its schema using `ToolParam`.
92
+
93
+ ```python
94
+ from RAW.modals import Tool
95
+ from RAW.modals.tools import ToolParam
96
+
97
+ weather_tool = Tool(
98
+ name="get_weather",
99
+ description="Get the weather for a specific location",
100
+ parameters=[
101
+ ToolParam(
102
+ name="location",
103
+ type="string",
104
+ description="City and State, e.g. New York, NY",
105
+ required=True
106
+ )
107
+ ],
108
+ function=get_weather
109
+ )
110
+ ```
111
+
112
+ ### 3. Making an Agent
113
+
114
+ The `Agent` orchestrates the interaction between the LLM, Tools, and the User. It manages the conversation history and tool execution loop.
115
+
116
+ #### Initialization
117
+
118
+ Combine the LLM and Tools into an Agent.
119
+
120
+ ```python
121
+ from RAW.agent import Agent
122
+
123
+ agent = Agent(
124
+ name="WeatherBot",
125
+ base_prompt="You are a helpful assistant that can check the weather.",
126
+ tools=[weather_tool], # List of Tool objects
127
+ llm=llm, # The LLM instance
128
+ logger=logger # Optional logger
129
+ )
130
+ ```
131
+
132
+ #### Running the Agent
133
+
134
+ The agent is callable. You can run it in a loop to handle user input.
135
+
136
+ ```python
137
+ user_input = "What is the weather in London?"
138
+
139
+ # The agent returns a generator yielding chunks of the response
140
+ async for chunk in agent(user_input, stream=True):
141
+ # 'chunk' can be a dictionary containing content or tool execution status
142
+ if isinstance(chunk, dict) and 'content' in chunk:
143
+ content_obj = chunk['content']
144
+ # content_obj might be a Message dump or a string
145
+ if isinstance(content_obj, dict) and 'content' in content_obj:
146
+ print(content_obj['content'], end="", flush=True)
147
+ ```
148
+
149
+ ### Full Example
150
+
151
+ See `main.py` in the project root for a complete, runnable example of a Chatbot Agent.
152
+
153
+ ## License
154
+
155
+ MIT
@@ -0,0 +1,102 @@
1
+ import asyncio
2
+ import os
3
+ from RAW.agent import Agent
4
+ from RAW.llms.gemini import GeminiLLM
5
+ from RAW.modals import Tool
6
+ from RAW.modals.tools import ToolParam
7
+ from RAW.utils import Logger
8
+
9
# Example tool (mock implementation — no real API call is made)
async def get_weather(location: str):
    """Return a canned weather report for *location*."""
    report = f"The weather in {location} is sunny with a temperature of 25°C."
    return report
14
+
15
async def main():
    """Run an interactive command-line weather chatbot.

    Reads configuration from environment variables:
      - GEMINI_API_KEY (required): Google Gemini API key.
      - GEMINI_MODEL (optional): model name, defaults to gemini-2.5-flash-lite.
      - LOG_LEVEL (optional): logging level name, defaults to INFO.

    Loops on stdin until the user types 'exit' or 'quit', streaming the
    agent's response text to stdout as it arrives.
    """
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        print("Please set GEMINI_API_KEY environment variable.")
        return

    import logging
    # Unrecognized LOG_LEVEL names silently fall back to INFO.
    log_level_str = os.environ.get("LOG_LEVEL", "INFO").upper()
    log_level = getattr(logging, log_level_str, logging.INFO)
    logger = Logger("Chatbot", level=log_level)

    # Initialize LLM
    model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash-lite")
    llm = GeminiLLM(api_key=api_key, logger=logger, model=model_name)

    # Tool schema wrapping the mock get_weather function defined above.
    weather_tool = Tool(
        name="get_weather",
        description="Get the weather for a specific location",
        parameters=[
            ToolParam(name="location", type="string", description="The city and state, e.g. San Francisco, CA", required=True)
        ],
        function=get_weather
    )

    # Initialize Agent
    agent = Agent(
        name="WeatherBot",
        base_prompt="You are a helpful weather assistant.",
        tools=[weather_tool],
        llm=llm,
        logger=logger
    )

    print("Chatbot started! Type 'exit' to quit.")

    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit"]:
            break

        print("Bot: ", end="", flush=True)
        # The agent yields dicts of the form {"agent_name": ..., "content": ...}
        # where "content" is either a Message dump (dict) or a plain string
        # (e.g. a tool response). Only non-empty text from Message dumps is
        # echoed to the user; tool responses are consumed silently.
        async for chunk in agent(user_input, stream=True):
            if isinstance(chunk, dict) and "content" in chunk:
                content_obj = chunk["content"]
                if isinstance(content_obj, dict) and "content" in content_obj and content_obj["content"]:
                    print(content_obj["content"], end="", flush=True)

        print()  # Newline after response

if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,48 @@
1
+ [project]
2
+ name = "raw-agent"
3
+ version = "0.1.0"
4
+ description = "A powerful and flexible LLM interface library for Gemini and other models."
5
+ readme = "README.md"
6
+ requires-python = ">=3.14"
7
+ authors = [
8
+ { name = "Siddharth", email = "siddharth@example.com" }
9
+ ]
10
+ license = "MIT"
11
+ classifiers = [
12
+ "Development Status :: 4 - Beta",
13
+ "Intended Audience :: Developers",
14
+ "License :: OSI Approved :: MIT License",
15
+ "Programming Language :: Python :: 3",
16
+ "Programming Language :: Python :: 3.14",
17
+ "Operating System :: OS Independent",
18
+ ]
19
+ dependencies = [
20
+ "httpx>=0.28.1",
21
+ "jsonschema>=4.26.0",
22
+ "opencv-python>=4.13.0.92",
23
+ "pydantic>=2.12.5",
24
+ "numpy>=2.0.0",
25
+ ]
26
+
27
+ [project.urls]
28
+ Homepage = "https://github.com/siddharth200119/RAW"
29
+ Repository = "https://github.com/siddharth200119/RAW"
30
+ Issues = "https://github.com/siddharth200119/RAW/issues"
31
+
32
+ [build-system]
33
+ requires = ["hatchling"]
34
+ build-backend = "hatchling.build"
35
+
36
+ [tool.hatch.build.targets.wheel]
37
+ packages = ["src/RAW"]
38
+ [dependency-groups]
39
+ dev = [
40
+ "mypy>=1.19.1",
41
+ "pytest>=9.0.2",
42
+ "ruff>=0.15.0",
43
+ "twine>=6.2.0",
44
+ "pytest-asyncio>=0.23.0",
45
+ ]
46
+ [tool.pytest.ini_options]
47
+ pythonpath = [".", "src"]
48
+ asyncio_mode = "auto"
@@ -0,0 +1 @@
1
+ __version__ = "0.1.0"