sonika-langchain-bot 0.0.10__tar.gz → 0.0.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. sonika_langchain_bot-0.0.12/PKG-INFO +215 -0
  2. sonika_langchain_bot-0.0.12/README.md +174 -0
  3. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/setup.py +17 -17
  4. sonika_langchain_bot-0.0.12/src/sonika_langchain_bot/langchain_bot_agent.py +722 -0
  5. sonika_langchain_bot-0.0.12/src/sonika_langchain_bot.egg-info/PKG-INFO +215 -0
  6. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot.egg-info/SOURCES.txt +1 -2
  7. sonika_langchain_bot-0.0.12/src/sonika_langchain_bot.egg-info/requires.txt +19 -0
  8. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/test/test.py +11 -19
  9. sonika_langchain_bot-0.0.10/PKG-INFO +0 -145
  10. sonika_langchain_bot-0.0.10/README.md +0 -113
  11. sonika_langchain_bot-0.0.10/src/sonika_langchain_bot/langchain_bdi.py +0 -176
  12. sonika_langchain_bot-0.0.10/src/sonika_langchain_bot/langchain_bot_agent_bdi.py +0 -216
  13. sonika_langchain_bot-0.0.10/src/sonika_langchain_bot.egg-info/PKG-INFO +0 -145
  14. sonika_langchain_bot-0.0.10/src/sonika_langchain_bot.egg-info/requires.txt +0 -20
  15. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/LICENSE +0 -0
  16. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/setup.cfg +0 -0
  17. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/__init__.py +0 -0
  18. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/langchain_clasificator.py +0 -0
  19. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/langchain_class.py +0 -0
  20. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/langchain_files.py +0 -0
  21. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/langchain_models.py +0 -0
  22. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot/langchain_tools.py +0 -0
  23. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot.egg-info/dependency_links.txt +0 -0
  24. {sonika_langchain_bot-0.0.10 → sonika_langchain_bot-0.0.12}/src/sonika_langchain_bot.egg-info/top_level.txt +0 -0
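Note that 0.0.12 removes the langchain_bdi.py and langchain_bot_agent_bdi.py modules that shipped in 0.0.10 and adds langchain_bot_agent.py in their place, so imports that referenced the removed modules will fail after upgrading. Which symbols the removed BDI modules exported is not visible in this diff; the new README (shown in the hunks below) imports the agent like this:

```python
# Import path used by the 0.0.12 README; the *_bdi modules from 0.0.10 no longer exist.
from sonika_langchain_bot.langchain_bot_agent import LangChainBot
```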
@@ -0,0 +1,215 @@
+ Metadata-Version: 2.4
+ Name: sonika-langchain-bot
+ Version: 0.0.12
+ Summary: Agente langchain con LLM
+ Author: Erley Blanco Carvajal
+ License: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.6
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: langchain-mcp-adapters==0.1.9
+ Requires-Dist: langchain-community==0.3.26
+ Requires-Dist: langchain-core==0.3.66
+ Requires-Dist: langchain-openai==0.3.24
+ Requires-Dist: langgraph==0.4.8
+ Requires-Dist: langgraph-checkpoint==2.1.0
+ Requires-Dist: langgraph-sdk==0.1.70
+ Requires-Dist: dataclasses-json==0.6.7
+ Requires-Dist: python-dateutil==2.9.0.post0
+ Requires-Dist: pydantic==2.11.7
+ Requires-Dist: faiss-cpu==1.11.0
+ Requires-Dist: pypdf==5.6.1
+ Requires-Dist: python-dotenv==1.0.1
+ Requires-Dist: typing_extensions==4.14.0
+ Requires-Dist: typing-inspect==0.9.0
+ Provides-Extra: dev
+ Requires-Dist: sphinx<9.0.0,>=8.1.3; extra == "dev"
+ Requires-Dist: sphinx-rtd-theme<4.0.0,>=3.0.1; extra == "dev"
+ Dynamic: author
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Sonika LangChain Bot <a href="https://pepy.tech/projects/sonika-langchain-bot"><img src="https://static.pepy.tech/badge/sonika-langchain-bot" alt="PyPI Downloads"></a>
+
+ A Python library that implements a conversational agent using LangChain with tool execution capabilities and text classification.
+
+ ## Installation
+
+ ```bash
+ pip install sonika-langchain-bot
+ ```
+
+ ## Prerequisites
+
+ You'll need the following API keys:
+
+ - OpenAI API Key
+
+ Create a `.env` file in the root of your project with the following variables:
+
+ ```env
+ OPENAI_API_KEY=your_api_key_here
+ ```
+
+ ## Key Features
+
+ - Conversational agent with tool execution capabilities
+ - Text classification with structured output
+ - Custom tool integration
+ - Streaming responses
+ - Conversation history management
+ - Flexible instruction-based behavior
+
+ ## Basic Usage
+
+ ### Agent with Tools Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from langchain_openai import OpenAIEmbeddings
+ from sonika_langchain_bot.langchain_tools import EmailTool
+ from sonika_langchain_bot.langchain_bot_agent import LangChainBot
+ from sonika_langchain_bot.langchain_class import Message, ResponseModel
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+
+ # Load environment variables
+ load_dotenv()
+
+ # Get API key from .env file
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize language model and embeddings
+ language_model = OpenAILanguageModel(api_key, model_name='gpt-4o-mini-2024-07-18', temperature=1)
+ embeddings = OpenAIEmbeddings(api_key=api_key)
+
+ # Configure tools
+ tools = [EmailTool()]
+
+ # Create agent instance
+ bot = LangChainBot(language_model, embeddings, instructions="You are an agent", tools=tools)
+
+ # Load conversation history
+ bot.load_conversation_history([Message(content="My name is Erley", is_bot=False)])
+
+ # Get response
+ user_message = 'Send an email with the tool to erley@gmail.com with subject Hello and message Hello Erley'
+ response_model: ResponseModel = bot.get_response(user_message)
+
+ print(response_model)
+ ```
+
+ ### Streaming Response Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from langchain_openai import OpenAIEmbeddings
+ from sonika_langchain_bot.langchain_bot_agent import LangChainBot
+ from sonika_langchain_bot.langchain_class import Message
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+
+ # Load environment variables
+ load_dotenv()
+
+ # Get API key from .env file
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize language model and embeddings
+ language_model = OpenAILanguageModel(api_key, model_name='gpt-4o-mini-2024-07-18', temperature=1)
+ embeddings = OpenAIEmbeddings(api_key=api_key)
+
+ # Create agent instance
+ bot = LangChainBot(language_model, embeddings, instructions="Only answers in english", tools=[])
+
+ # Load conversation history
+ bot.load_conversation_history([Message(content="My name is Erley", is_bot=False)])
+
+ # Get streaming response
+ user_message = 'Hello, what is my name?'
+ for chunk in bot.get_response_stream(user_message):
+     print(chunk)
+ ```
+
+ ### Text Classification Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from sonika_langchain_bot.langchain_clasificator import TextClassifier
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+ from pydantic import BaseModel, Field
+
+ # Load environment variables
+ load_dotenv()
+
+ # Define classification structure with Pydantic
+ class Classification(BaseModel):
+     intention: str = Field()
+     sentiment: str = Field(..., enum=["happy", "neutral", "sad", "excited"])
+     aggressiveness: int = Field(
+         ...,
+         description="describes how aggressive the statement is, the higher the number the more aggressive",
+         enum=[1, 2, 3, 4, 5],
+     )
+     language: str = Field(
+         ..., enum=["spanish", "english", "french", "german", "italian"]
+     )
+
+ # Initialize classifier
+ api_key = os.getenv("OPENAI_API_KEY")
+ model = OpenAILanguageModel(api_key=api_key)
+ classifier = TextClassifier(llm=model, validation_class=Classification)
+
+ # Classify text
+ result = classifier.classify("how are you?")
+ print(result)
+ ```
+
+ ## Available Classes and Components
+
+ ### Core Classes
+
+ - **LangChainBot**: Main conversational agent for task execution with tools
+ - **OpenAILanguageModel**: Wrapper for OpenAI language models
+ - **TextClassifier**: Text classification using structured output
+ - **Message**: Message structure for conversation history
+ - **ResponseModel**: Response structure from agent interactions
+
+ ### Tools
+
+ - **EmailTool**: Tool for sending emails through the agent
+
+ ## Project Structure
+
+ ```
+ your_project/
+ ├── .env                         # Environment variables
+ ├── src/
+ │   └── sonika_langchain_bot/
+ │       ├── langchain_bot_agent.py
+ │       ├── langchain_clasificator.py
+ │       ├── langchain_class.py
+ │       ├── langchain_models.py
+ │       └── langchain_tools.py
+ └── tests/
+     └── test_bot.py
+ ```
+
+ ## Contributing
+
+ Contributions are welcome. Please open an issue to discuss major changes you'd like to make.
+
+ ## License
+
+ This project is licensed under the MIT License.
@@ -0,0 +1,174 @@
+ # Sonika LangChain Bot <a href="https://pepy.tech/projects/sonika-langchain-bot"><img src="https://static.pepy.tech/badge/sonika-langchain-bot" alt="PyPI Downloads"></a>
+
+ A Python library that implements a conversational agent using LangChain with tool execution capabilities and text classification.
+
+ ## Installation
+
+ ```bash
+ pip install sonika-langchain-bot
+ ```
+
+ ## Prerequisites
+
+ You'll need the following API keys:
+
+ - OpenAI API Key
+
+ Create a `.env` file in the root of your project with the following variables:
+
+ ```env
+ OPENAI_API_KEY=your_api_key_here
+ ```
+
+ ## Key Features
+
+ - Conversational agent with tool execution capabilities
+ - Text classification with structured output
+ - Custom tool integration
+ - Streaming responses
+ - Conversation history management
+ - Flexible instruction-based behavior
+
+ ## Basic Usage
+
+ ### Agent with Tools Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from langchain_openai import OpenAIEmbeddings
+ from sonika_langchain_bot.langchain_tools import EmailTool
+ from sonika_langchain_bot.langchain_bot_agent import LangChainBot
+ from sonika_langchain_bot.langchain_class import Message, ResponseModel
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+
+ # Load environment variables
+ load_dotenv()
+
+ # Get API key from .env file
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize language model and embeddings
+ language_model = OpenAILanguageModel(api_key, model_name='gpt-4o-mini-2024-07-18', temperature=1)
+ embeddings = OpenAIEmbeddings(api_key=api_key)
+
+ # Configure tools
+ tools = [EmailTool()]
+
+ # Create agent instance
+ bot = LangChainBot(language_model, embeddings, instructions="You are an agent", tools=tools)
+
+ # Load conversation history
+ bot.load_conversation_history([Message(content="My name is Erley", is_bot=False)])
+
+ # Get response
+ user_message = 'Send an email with the tool to erley@gmail.com with subject Hello and message Hello Erley'
+ response_model: ResponseModel = bot.get_response(user_message)
+
+ print(response_model)
+ ```
+
+ ### Streaming Response Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from langchain_openai import OpenAIEmbeddings
+ from sonika_langchain_bot.langchain_bot_agent import LangChainBot
+ from sonika_langchain_bot.langchain_class import Message
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+
+ # Load environment variables
+ load_dotenv()
+
+ # Get API key from .env file
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ # Initialize language model and embeddings
+ language_model = OpenAILanguageModel(api_key, model_name='gpt-4o-mini-2024-07-18', temperature=1)
+ embeddings = OpenAIEmbeddings(api_key=api_key)
+
+ # Create agent instance
+ bot = LangChainBot(language_model, embeddings, instructions="Only answers in english", tools=[])
+
+ # Load conversation history
+ bot.load_conversation_history([Message(content="My name is Erley", is_bot=False)])
+
+ # Get streaming response
+ user_message = 'Hello, what is my name?'
+ for chunk in bot.get_response_stream(user_message):
+     print(chunk)
+ ```
+
+ ### Text Classification Example
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from sonika_langchain_bot.langchain_clasificator import TextClassifier
+ from sonika_langchain_bot.langchain_models import OpenAILanguageModel
+ from pydantic import BaseModel, Field
+
+ # Load environment variables
+ load_dotenv()
+
+ # Define classification structure with Pydantic
+ class Classification(BaseModel):
+     intention: str = Field()
+     sentiment: str = Field(..., enum=["happy", "neutral", "sad", "excited"])
+     aggressiveness: int = Field(
+         ...,
+         description="describes how aggressive the statement is, the higher the number the more aggressive",
+         enum=[1, 2, 3, 4, 5],
+     )
+     language: str = Field(
+         ..., enum=["spanish", "english", "french", "german", "italian"]
+     )
+
+ # Initialize classifier
+ api_key = os.getenv("OPENAI_API_KEY")
+ model = OpenAILanguageModel(api_key=api_key)
+ classifier = TextClassifier(llm=model, validation_class=Classification)
+
+ # Classify text
+ result = classifier.classify("how are you?")
+ print(result)
+ ```
+
+ ## Available Classes and Components
+
+ ### Core Classes
+
+ - **LangChainBot**: Main conversational agent for task execution with tools
+ - **OpenAILanguageModel**: Wrapper for OpenAI language models
+ - **TextClassifier**: Text classification using structured output
+ - **Message**: Message structure for conversation history
+ - **ResponseModel**: Response structure from agent interactions
+
+ ### Tools
+
+ - **EmailTool**: Tool for sending emails through the agent
+
+ ## Project Structure
+
+ ```
+ your_project/
+ ├── .env                         # Environment variables
+ ├── src/
+ │   └── sonika_langchain_bot/
+ │       ├── langchain_bot_agent.py
+ │       ├── langchain_clasificator.py
+ │       ├── langchain_class.py
+ │       ├── langchain_models.py
+ │       └── langchain_tools.py
+ └── tests/
+     └── test_bot.py
+ ```
+
+ ## Contributing
+
+ Contributions are welcome. Please open an issue to discuss major changes you'd like to make.
+
+ ## License
+
+ This project is licensed under the MIT License.
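The README above lists custom tool integration among the key features but only demonstrates the bundled EmailTool. Below is a minimal sketch of wiring in a user-defined tool, assuming LangChainBot accepts any standard LangChain tool instance the same way it accepts `EmailTool()`; the `get_weather` tool is hypothetical and not part of this package.

```python
import os

from dotenv import load_dotenv
from langchain_core.tools import tool
from langchain_openai import OpenAIEmbeddings

from sonika_langchain_bot.langchain_bot_agent import LangChainBot
from sonika_langchain_bot.langchain_models import OpenAILanguageModel

load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# Hypothetical custom tool: the @tool decorator turns a typed, docstringed
# function into a LangChain tool the agent can call.
@tool
def get_weather(city: str) -> str:
    """Return a short weather report for the given city."""
    return f"It is sunny in {city} today."

language_model = OpenAILanguageModel(api_key, model_name='gpt-4o-mini-2024-07-18', temperature=1)
embeddings = OpenAIEmbeddings(api_key=api_key)

# Assumption: user-defined tools are passed exactly like the built-in EmailTool.
bot = LangChainBot(language_model, embeddings, instructions="You are an agent", tools=[get_weather])

print(bot.get_response("What is the weather in Bogota?"))
```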
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
  
  setup(
      name="sonika-langchain-bot",
-     version="0.0.10",
+     version="0.0.12",
      description="Agente langchain con LLM",
      author="Erley Blanco Carvajal",
      license="MIT License",
@@ -11,23 +11,23 @@ setup(
      packages=find_packages(where="src"),  # Encuentra los paquetes dentro de "src"
      package_dir={"": "src"},  # Indica que los paquetes están en el directorio "src"
      install_requires=[
-         "langchain>=0.3.0,<1.0.0",  # Permite mayor flexibilidad
-         "langchain-community>=0.3.0,<1.0.0",
-         "langchain-core>=0.3.5,<1.0.0",
-         "langchain-openai>=0.2.0,<1.0.0",
-         "langgraph>=0.2.39,<1.0.0",
-         "langgraph-checkpoint>=2.0.2,<3.0.0",
-         "langgraph-sdk>=0.1.34,<2.0.0",
-         "dataclasses-json>=0.6.7,<1.0.0",
-         "python-dateutil>=2.9.0,<3.0.0",
-         "tiktoken>=0.7.0,<1.0.0",
-         "pydantic>=2.9.2,<3.0.0",
-         "faiss-cpu>=1.8.0,<2.0.0",
-         "pypdf>=5.0.0,<6.0.0",
-         "python-dotenv>=1.0.1,<2.0.0",
-         "typing_extensions>=4.12.0,<5.0.0",
-         "typing-inspect>=0.9.0,<1.0.0",
+         "langchain-mcp-adapters==0.1.9",
+         "langchain-community==0.3.26",
+         "langchain-core==0.3.66",
+         "langchain-openai==0.3.24",
+         "langgraph==0.4.8",
+         "langgraph-checkpoint==2.1.0",
+         "langgraph-sdk==0.1.70",
+         "dataclasses-json==0.6.7",
+         "python-dateutil==2.9.0.post0",
+         "pydantic==2.11.7",
+         "faiss-cpu==1.11.0",
+         "pypdf==5.6.1",
+         "python-dotenv==1.0.1",
+         "typing_extensions==4.14.0",
+         "typing-inspect==0.9.0",
      ],
+
      extras_require={
          "dev": [
              "sphinx>=8.1.3,<9.0.0",