ai-parrot 0.3.4__cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ai-parrot might be problematic. Click here for more details.
- ai_parrot-0.3.4.dist-info/LICENSE +21 -0
- ai_parrot-0.3.4.dist-info/METADATA +319 -0
- ai_parrot-0.3.4.dist-info/RECORD +109 -0
- ai_parrot-0.3.4.dist-info/WHEEL +6 -0
- ai_parrot-0.3.4.dist-info/top_level.txt +3 -0
- parrot/__init__.py +21 -0
- parrot/chatbots/__init__.py +7 -0
- parrot/chatbots/abstract.py +728 -0
- parrot/chatbots/asktroc.py +16 -0
- parrot/chatbots/base.py +366 -0
- parrot/chatbots/basic.py +9 -0
- parrot/chatbots/bose.py +17 -0
- parrot/chatbots/cody.py +17 -0
- parrot/chatbots/copilot.py +83 -0
- parrot/chatbots/dataframe.py +103 -0
- parrot/chatbots/hragents.py +15 -0
- parrot/chatbots/odoo.py +17 -0
- parrot/chatbots/retrievals/__init__.py +578 -0
- parrot/chatbots/retrievals/constitutional.py +19 -0
- parrot/conf.py +110 -0
- parrot/crew/__init__.py +3 -0
- parrot/crew/tools/__init__.py +22 -0
- parrot/crew/tools/bing.py +13 -0
- parrot/crew/tools/config.py +43 -0
- parrot/crew/tools/duckgo.py +62 -0
- parrot/crew/tools/file.py +24 -0
- parrot/crew/tools/google.py +168 -0
- parrot/crew/tools/gtrends.py +16 -0
- parrot/crew/tools/md2pdf.py +25 -0
- parrot/crew/tools/rag.py +42 -0
- parrot/crew/tools/search.py +32 -0
- parrot/crew/tools/url.py +21 -0
- parrot/exceptions.cpython-311-x86_64-linux-gnu.so +0 -0
- parrot/handlers/__init__.py +4 -0
- parrot/handlers/bots.py +196 -0
- parrot/handlers/chat.py +162 -0
- parrot/interfaces/__init__.py +6 -0
- parrot/interfaces/database.py +29 -0
- parrot/llms/__init__.py +137 -0
- parrot/llms/abstract.py +47 -0
- parrot/llms/anthropic.py +42 -0
- parrot/llms/google.py +42 -0
- parrot/llms/groq.py +45 -0
- parrot/llms/hf.py +45 -0
- parrot/llms/openai.py +59 -0
- parrot/llms/pipes.py +114 -0
- parrot/llms/vertex.py +78 -0
- parrot/loaders/__init__.py +20 -0
- parrot/loaders/abstract.py +456 -0
- parrot/loaders/audio.py +106 -0
- parrot/loaders/basepdf.py +102 -0
- parrot/loaders/basevideo.py +280 -0
- parrot/loaders/csv.py +42 -0
- parrot/loaders/dir.py +37 -0
- parrot/loaders/excel.py +349 -0
- parrot/loaders/github.py +65 -0
- parrot/loaders/handlers/__init__.py +5 -0
- parrot/loaders/handlers/data.py +213 -0
- parrot/loaders/image.py +119 -0
- parrot/loaders/json.py +52 -0
- parrot/loaders/pdf.py +437 -0
- parrot/loaders/pdfchapters.py +142 -0
- parrot/loaders/pdffn.py +112 -0
- parrot/loaders/pdfimages.py +207 -0
- parrot/loaders/pdfmark.py +88 -0
- parrot/loaders/pdftables.py +145 -0
- parrot/loaders/ppt.py +30 -0
- parrot/loaders/qa.py +81 -0
- parrot/loaders/repo.py +103 -0
- parrot/loaders/rtd.py +65 -0
- parrot/loaders/txt.py +92 -0
- parrot/loaders/utils/__init__.py +1 -0
- parrot/loaders/utils/models.py +25 -0
- parrot/loaders/video.py +96 -0
- parrot/loaders/videolocal.py +120 -0
- parrot/loaders/vimeo.py +106 -0
- parrot/loaders/web.py +216 -0
- parrot/loaders/web_base.py +112 -0
- parrot/loaders/word.py +125 -0
- parrot/loaders/youtube.py +192 -0
- parrot/manager.py +166 -0
- parrot/models.py +372 -0
- parrot/py.typed +0 -0
- parrot/stores/__init__.py +48 -0
- parrot/stores/abstract.py +171 -0
- parrot/stores/milvus.py +632 -0
- parrot/stores/qdrant.py +153 -0
- parrot/tools/__init__.py +12 -0
- parrot/tools/abstract.py +53 -0
- parrot/tools/asknews.py +32 -0
- parrot/tools/bing.py +13 -0
- parrot/tools/duck.py +62 -0
- parrot/tools/google.py +170 -0
- parrot/tools/stack.py +26 -0
- parrot/tools/weather.py +70 -0
- parrot/tools/wikipedia.py +59 -0
- parrot/tools/zipcode.py +179 -0
- parrot/utils/__init__.py +2 -0
- parrot/utils/parsers/__init__.py +5 -0
- parrot/utils/parsers/toml.cpython-311-x86_64-linux-gnu.so +0 -0
- parrot/utils/toml.py +11 -0
- parrot/utils/types.cpython-311-x86_64-linux-gnu.so +0 -0
- parrot/utils/uv.py +11 -0
- parrot/version.py +10 -0
- resources/users/__init__.py +5 -0
- resources/users/handlers.py +13 -0
- resources/users/models.py +205 -0
- settings/__init__.py +0 -0
- settings/settings.py +51 -0
parrot/handlers/chat.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
from navigator_auth.decorators import (
|
|
2
|
+
is_authenticated,
|
|
3
|
+
user_session
|
|
4
|
+
)
|
|
5
|
+
from navigator.views import BaseView
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@is_authenticated()
@user_session()
class ChatHandler(BaseView):
    """
    ChatHandler.
    description: ChatHandler for Parrot Application.
    """

    async def get(self, **kwargs):
        """
        get.
        description: Get method for ChatHandler.

        Without a ``chatbot_name`` in the route, returns a generic welcome
        message; otherwise returns the named chatbot's metadata (404 when
        the bot is unknown).
        """
        name = self.request.match_info.get('chatbot_name', None)
        if not name:
            return self.json_response({
                "message": "Welcome to Parrot Chatbot Service."
            })
        # retrieve chatbot information:
        manager = self.request.app['chatbot_manager']
        chatbot = manager.get_chatbot(name)
        if not chatbot:
            return self.error(
                f"Chatbot {name} not found.",
                status=404
            )
        return self.json_response({
            "chatbot": chatbot.name,
            "description": chatbot.description,
            "role": chatbot.role,
            "embedding_model_name": chatbot.embedding_model_name,
            "llm": f"{chatbot.get_llm()!r}",
            "temperature": chatbot.get_llm().temperature,
            "args": chatbot.get_llm().args,
            "config_file": chatbot.config_file
        })

    async def post(self, *args, **kwargs):
        """
        post.
        description: Post method for ChatHandler.

        Forwards the JSON ``query`` to the named chatbot's retrieval chain
        and returns the answer with memory/source-document payloads stripped.
        """
        app = self.request.app
        name = self.request.match_info.get('chatbot_name', None)
        qs = self.query_parameters(self.request)
        data = await self.request.json()
        if 'query' not in data:
            return self.json_response(
                {
                    "message": "No query was found."
                },
                status=400
            )
        # Optionally passing another LLM to the Chatbot.
        # TODO(review): `llm` is collected but never forwarded to the
        # retrieval below — confirm whether this is still pending work.
        llm = qs.get('use_llm', None)
        try:
            manager = app['chatbot_manager']
        except KeyError:
            return self.json_response(
                {
                    "message": "Chatbot Manager is not installed."
                },
                status=404
            )
        try:
            chatbot = manager.get_chatbot(name)
            if not chatbot:
                raise KeyError(
                    f"Chatbot {name} not found."
                )
        except (TypeError, KeyError):
            return self.json_response(
                {
                    "message": f"Chatbot {name} not found."
                },
                status=404
            )
        # getting the question:
        question = data.get('query')
        session = self.request.session
        try:
            # NOTE(review): `session_id` (mapping lookup) is unused while the
            # memory key reads the `session.session_id` attribute — these may
            # not be the same value; original behavior kept, confirm intent.
            session_id = session.get('session_id', None)
            memory_key = f'{session.session_id}_{name}_message_store'
            memory = chatbot.get_memory(session_id=memory_key)
            with chatbot.get_retrieval(request=self.request) as retrieval:
                result = await retrieval.question(
                    question=question,
                    memory=memory
                )
                # Drop "memory" information before serializing the response:
                result.chat_history = None
                result.source_documents = None
                return self.json_response(response=result)
        except ValueError as exc:
            return self.error(
                f"{exc}",
                exception=exc,
                status=400
            )
        except Exception as exc:
            return self.error(
                f"Error invoking chatbot {name}: {exc}",
                exception=exc,
                status=400
            )
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
@is_authenticated()
@user_session()
class BotHandler(BaseView):
    """BotHandler.


    Use this handler to interact with a brand new chatbot, consuming a configuration.
    """
    async def put(self):
        """Create a New Bot (passing a configuration).
        """
        application = self.request.app
        try:
            manager = application['chatbot_manager']
        except KeyError:
            # The chatbot extension was never mounted on this app.
            return self.json_response(
                {
                    "message": "Chatbot Manager is not installed."
                },
                status=404
            )
        # TODO: Making a Validation of data
        payload = await self.request.json()
        name = payload.pop('name', None)
        if not name:
            return self.json_response(
                {
                    "message": "Name for Bot Creation is required."
                },
                status=400
            )
        try:
            bot = manager.create_chatbot(name=name, **payload)
            await bot.configure(name=name, app=application)
            return self.json_response(
                {
                    "message": f"Chatbot {name} created successfully."
                }
            )
        except Exception as exc:
            return self.error(
                f"Error creating chatbot {name}: {exc}",
                exception=exc,
                status=400
            )
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""DB (asyncdb) Extension.
|
|
2
|
+
DB connection for any Application.
|
|
3
|
+
"""
|
|
4
|
+
from abc import ABCMeta
|
|
5
|
+
from asyncdb import AsyncDB
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class DBInterface(metaclass=ABCMeta):
    """
    Interface for using database connections in an Application using AsyncDB.
    """

    def get_database(
        self,
        driver: str,
        dsn: str = None,
        params: dict = None,
        timeout: int = 60,
        **kwargs
    ) -> AsyncDB:
        """Get the driver."""
        # Collect connection options, then hand everything to AsyncDB.
        options = dict(kwargs)
        options.update(dsn=dsn, params=params, timeout=timeout)
        return AsyncDB(driver, **options)
|
parrot/llms/__init__.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
# Langchain:
|
|
2
|
+
from langchain import hub
|
|
3
|
+
from ..exceptions import ConfigError # pylint: disable=E0611
|
|
4
|
+
|
|
5
|
+
## LLM configuration
|
|
6
|
+
from .abstract import AbstractLLM
|
|
7
|
+
|
|
8
|
+
# Vertex
|
|
9
|
+
try:
|
|
10
|
+
from .vertex import VertexLLM
|
|
11
|
+
VERTEX_ENABLED = True
|
|
12
|
+
except (ModuleNotFoundError, ImportError):
|
|
13
|
+
VERTEX_ENABLED = False
|
|
14
|
+
|
|
15
|
+
# Anthropic:
|
|
16
|
+
try:
|
|
17
|
+
from .anthropic import Anthropic
|
|
18
|
+
ANTHROPIC_ENABLED = True
|
|
19
|
+
except (ModuleNotFoundError, ImportError):
|
|
20
|
+
ANTHROPIC_ENABLED = False
|
|
21
|
+
|
|
22
|
+
# OpenAI
|
|
23
|
+
try:
|
|
24
|
+
from .openai import OpenAILLM
|
|
25
|
+
OPENAI_ENABLED = True
|
|
26
|
+
except (ModuleNotFoundError, ImportError):
|
|
27
|
+
OPENAI_ENABLED = False
|
|
28
|
+
|
|
29
|
+
# LLM Transformers
|
|
30
|
+
try:
|
|
31
|
+
from .pipes import PipelineLLM
|
|
32
|
+
TRANSFORMERS_ENABLED = True
|
|
33
|
+
except (ModuleNotFoundError, ImportError):
|
|
34
|
+
TRANSFORMERS_ENABLED = False
|
|
35
|
+
|
|
36
|
+
# HuggingFaces Hub:
|
|
37
|
+
try:
|
|
38
|
+
from .hf import HuggingFace
|
|
39
|
+
HF_ENABLED = True
|
|
40
|
+
except (ModuleNotFoundError, ImportError):
|
|
41
|
+
HF_ENABLED = False
|
|
42
|
+
|
|
43
|
+
# GroQ:
|
|
44
|
+
try:
|
|
45
|
+
from .groq import GroqLLM
|
|
46
|
+
GROQ_ENABLED = True
|
|
47
|
+
except (ModuleNotFoundError, ImportError):
|
|
48
|
+
GROQ_ENABLED = False
|
|
49
|
+
|
|
50
|
+
# Mixtral:
|
|
51
|
+
try:
|
|
52
|
+
from .groq import GroqLLM
|
|
53
|
+
MIXTRAL_ENABLED = True
|
|
54
|
+
except (ModuleNotFoundError, ImportError):
|
|
55
|
+
MIXTRAL_ENABLED = False
|
|
56
|
+
|
|
57
|
+
# Google
|
|
58
|
+
try:
|
|
59
|
+
from .google import GoogleGenAI
|
|
60
|
+
GOOGLE_ENABLED = True
|
|
61
|
+
except (ModuleNotFoundError, ImportError):
|
|
62
|
+
GOOGLE_ENABLED = False
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_llm(llm_name: str, model_name: str, **kwargs) -> AbstractLLM:
    """get_llm.

    Load the Language Model for the Chatbot.

    Args:
        llm_name: symbolic provider name ('VertexLLM', 'Anthropic', 'OpenAI',
            'hf', 'pipe', 'Groq'/'llama3'/'mixtral', 'Google'); any other
            value is forwarded to LangChain Hub via ``hub.pull``.
        model_name: concrete model identifier handed to the provider wrapper.
        **kwargs: extra options forwarded to the provider wrapper.

    Returns:
        AbstractLLM: the configured wrapper (or the object pulled from the hub).

    Raises:
        ConfigError: when the requested provider was selected but its
            optional dependency could not be imported.
    """
    if llm_name == 'VertexLLM':
        if not VERTEX_ENABLED:
            raise ConfigError(
                "VertexAI enabled but not installed."
            )
        return VertexLLM(model=model_name, **kwargs)
    if llm_name == 'Anthropic':
        if not ANTHROPIC_ENABLED:
            raise ConfigError(
                "ANTHROPIC is enabled but not installed."
            )
        return Anthropic(model=model_name, **kwargs)
    if llm_name == 'OpenAI':
        if not OPENAI_ENABLED:
            raise ConfigError(
                "OpenAI is enabled but not installed."
            )
        return OpenAILLM(model=model_name, **kwargs)
    if llm_name == 'hf':
        if not HF_ENABLED:
            raise ConfigError(
                "HuggingFace Hub is enabled but not installed."
            )
        return HuggingFace(model=model_name, **kwargs)
    if llm_name == 'pipe':
        if not TRANSFORMERS_ENABLED:
            raise ConfigError(
                "Transformers Pipelines are enabled, but not installed."
            )
        return PipelineLLM(model=model_name, **kwargs)
    if llm_name in ('Groq', 'llama3', 'mixtral'):
        if not GROQ_ENABLED:
            raise ConfigError(
                "Groq is enabled but not installed."
            )
        return GroqLLM(model=model_name, **kwargs)
    if llm_name == 'Google':
        if not GOOGLE_ENABLED:
            raise ConfigError(
                "Google is enabled but not installed."
            )
        return GoogleGenAI(model=model_name, **kwargs)
    # Unknown symbolic name: fall back to LangChain Hub.
    # TODO: Add more LLMs
    return hub.pull(llm_name)
|
|
116
|
+
|
|
117
|
+
def get_llm_list():
    """get_llm_list.

    Get the list of available LLMs.
    """
    # Each provider class only exists when its feature flag is True, so the
    # guards below must stay — a data-driven table would reference undefined
    # names for disabled providers.
    providers = []
    if VERTEX_ENABLED:
        providers.append(VertexLLM)
    if ANTHROPIC_ENABLED:
        providers.append(Anthropic)
    if OPENAI_ENABLED:
        providers.append(OpenAILLM)
    if HF_ENABLED:
        providers.append(HuggingFace)
    if TRANSFORMERS_ENABLED:
        providers.append(PipelineLLM)
    if GROQ_ENABLED:
        providers.append(GroqLLM)
    if GOOGLE_ENABLED:
        providers.append(GoogleGenAI)
    # One entry per enabled provider; each entry is that provider's
    # supported-model list (i.e. a list of lists).
    return [provider.get_supported_models() for provider in providers]
|
parrot/llms/abstract.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from abc import ABC
|
|
2
|
+
from typing import List
|
|
3
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class AbstractLLM(ABC):
    """Abstract Language Model class.

    Common base for the provider wrappers in this package. Subclasses build
    the concrete LangChain client in their ``__init__`` and assign it to
    ``self._llm`` (and, when available, an embeddings client to
    ``self._embed``).
    """

    # Default model identifier; subclasses override with their own default.
    model: str = "databricks/dolly-v2-3b"
    # Model identifiers a subclass supports (overridden per provider).
    supported_models: List[str] = []
    # Default embedding model identifier, when the provider offers one.
    embed_model: str = None
    max_tokens: int = 1024
    max_retries: int = 3

    @classmethod
    def get_supported_models(cls) -> List[str]:
        """Return the model identifiers supported by this wrapper."""
        return cls.supported_models

    def __init__(self, *args, **kwargs):
        """Collect common generation options from keyword arguments.

        Recognized kwargs: ``model``, ``task``, ``temperature``,
        ``max_tokens``, ``top_k``, ``top_p``.
        """
        self.model = kwargs.get("model", "databricks/dolly-v2-3b")
        self.task = kwargs.get("task", "text-generation")
        self.temperature: float = kwargs.get('temperature', 0.6)
        self.max_tokens: int = kwargs.get('max_tokens', 1024)
        # top_k counts candidate tokens — an int (was mis-annotated as float).
        self.top_k: int = kwargs.get('top_k', 10)
        self.top_p: float = kwargs.get('top_p', 0.90)
        # Sampling arguments subclasses splat into their client constructors.
        self.args = {
            "top_p": self.top_p,
            "top_k": self.top_k,
        }
        self._llm = None
        self._embed = None

    def get_llm(self):
        """Return the underlying LangChain LLM instance (None until set)."""
        return self._llm

    def get_embedding(self):
        """Return the underlying embeddings instance (None until set)."""
        return self._embed

    def __call__(self, text: str, **kwargs):
        """Invoke the underlying LLM with *text* and return its response."""
        return self._llm.invoke(text, **kwargs)

    def get_prompt(self, system: str, human: str) -> "ChatPromptTemplate":
        """Get a prompt for the LLM.

        ``system`` and ``human`` are message template strings (``system`` was
        mis-annotated as tuple; it is used as the system-message content).
        The return annotation is a lazy string so the class is importable
        without langchain_core at definition time.
        """
        return ChatPromptTemplate.from_messages(
            [("system", system), ("human", human)]
        )
|
parrot/llms/anthropic.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from navconfig import config
|
|
2
|
+
from navconfig.logging import logging
|
|
3
|
+
from langchain_anthropic import ChatAnthropic # pylint: disable=E0401, E0611
|
|
4
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
5
|
+
from .abstract import AbstractLLM
|
|
6
|
+
|
|
7
|
+
logging.getLogger(name='anthropic').setLevel(logging.WARNING)
|
|
8
|
+
|
|
9
|
+
class Anthropic(AbstractLLM):
    """Anthropic.

    Interact with Anthropic Language Model.

    Returns:
        _type_: an instance of Anthropic (Claude) LLM Model.
    """
    model: str = 'claude-3-opus-20240229'
    embed_model: str = None
    max_tokens: int = 1024
    supported_models: list = [
        'claude-3-opus-20240229',
        'claude-3-sonnet-20240229',
        'claude-3-haiku-20240307',
        'claude-3-5-sonnet-20240620',
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Explicit model selection, defaulting to Claude 3 Opus.
        self.model = kwargs.get("model", 'claude-3-opus-20240229')
        # API key from kwargs, or the ANTHROPIC_API_KEY config entry.
        self._api_key = kwargs.pop('api_key', config.get('ANTHROPIC_API_KEY'))
        # Build the chat client with the shared sampling options inline.
        self._llm = ChatAnthropic(
            model_name=self.model,
            api_key=self._api_key,
            temperature=self.temperature,
            max_retries=4,
            top_p=self.top_p,
            top_k=self.top_k,
            verbose=True,
        )
|
parrot/llms/google.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from langchain_google_genai import (
|
|
2
|
+
GoogleGenerativeAI,
|
|
3
|
+
ChatGoogleGenerativeAI,
|
|
4
|
+
GoogleGenerativeAIEmbeddings
|
|
5
|
+
)
|
|
6
|
+
from navconfig import config
|
|
7
|
+
from .abstract import AbstractLLM
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class GoogleGenAI(AbstractLLM):
    """GoogleGenAI.
    Using Google Generative AI models with Google Cloud AI Platform.
    """
    model: str = "gemini-pro"
    supported_models: list = [
        "models/text-bison-001",
        "models/chat-bison-001",
        "gemini-pro"
    ]

    def __init__(self, *args, **kwargs):
        # 'chat' selects the chat-style client; anything else the plain LLM.
        self.model_type = kwargs.get("model_type", "chat")
        super().__init__(*args, **kwargs)
        self._api_key = kwargs.pop('api_key', config.get('GOOGLE_API_KEY'))
        base_llm = (
            ChatGoogleGenerativeAI
            if self.model_type == 'chat'
            else GoogleGenerativeAI
        )
        self._llm = base_llm(
            model=self.model,
            api_key=self._api_key,
            temperature=self.temperature,
            **self.args
        )
        # Embeddings client.
        # NOTE(review): temperature/top_p/top_k are generation parameters —
        # confirm GoogleGenerativeAIEmbeddings actually accepts them.
        embed_model = kwargs.get("embed_model", "models/embedding-001")
        self._embed = GoogleGenerativeAIEmbeddings(
            model=embed_model,
            google_api_key=self._api_key,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
        )
|
parrot/llms/groq.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
2
|
+
from langchain_groq import ChatGroq
|
|
3
|
+
from navconfig import config
|
|
4
|
+
from .abstract import AbstractLLM
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class GroqLLM(AbstractLLM):
    """GroqLLM.
    Using Groq Open-source models.
    """
    model: str = "mixtral-8x7b-32768"
    supported_models: list = [
        "llama3-70b-8192",
        "llama3-80b-8192",
        "llama-3.1-70b-versatile",
        "llama-3.1-8b-instant",
        "mixtral-8x7b-32768",
        "whisper-large-v3",
        "llama3-groq-70b-8192-tool-use-preview",
        "gemma2-9b-it",
        "gemma-7b-it",
        "llama-guard-3-8b"
    ]

    def __init__(self, *args, **kwargs):
        self.model_type = kwargs.get("model_type", "text")
        # Pull the prompt templates off kwargs before the base class sees them.
        system_tpl = kwargs.pop('system_prompt', "You are a helpful assistant.")
        human_tpl = kwargs.pop('human_prompt', "{question}")
        super().__init__(*args, **kwargs)
        self._api_key = kwargs.pop('api_key', config.get('GROQ_API_KEY'))
        # Only top_p goes through model_kwargs; top_k is not supported here.
        sampling = {"top_p": self.top_p}
        self._llm = ChatGroq(
            model_name=self.model,
            groq_api_key=self._api_key,
            temperature=self.temperature,
            max_retries=self.max_retries,
            max_tokens=self.max_tokens,
            model_kwargs=sampling,
        )
        self._embed = None  # Not supported
        self.prompt = ChatPromptTemplate.from_messages(
            [("system", system_tpl), ("human", human_tpl)]
        )
|
parrot/llms/hf.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from langchain_community.llms import HuggingFacePipeline # pylint: disable=import-error, E0611
|
|
2
|
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
3
|
+
from .abstract import AbstractLLM
|
|
4
|
+
|
|
5
|
+
class HuggingFace(AbstractLLM):
    """HuggingFace.

    Load a LLM (Language Model) from HuggingFace Hub.

    Only supports text-generation, text2text-generation, summarization and translation for now.

    Returns:
        _type_: an instance of HuggingFace LLM Model.
    """
    model: str = "databricks/dolly-v2-3b"
    embed_model: str = None
    max_tokens: int = 1024
    supported_models: list = [
        "databricks/dolly-v2-3b",
        "gpt2",
        "bigscience/bloom-1b7",
        "meta-llama/Llama-2-7b-hf"
    ]

    def __init__(self, *args, **kwargs):
        # batch_size is read (not popped) before the base class consumes kwargs.
        self.batch_size = kwargs.get('batch_size', 4)
        super().__init__(*args, **kwargs)
        # NOTE(review): the tokenizer/model pair below is loaded eagerly but
        # never handed to the pipeline — HuggingFacePipeline.from_model_id
        # re-loads the same model from model_id, so the weights are loaded
        # twice. Confirm whether _tokenizer/_model are used elsewhere before
        # removing either load.
        self._tokenizer = AutoTokenizer.from_pretrained(self.model, chunk_size=self.max_tokens)
        self._model = AutoModelForCausalLM.from_pretrained(self.model, trust_remote_code=True)
        self._llm = HuggingFacePipeline.from_model_id(
            model_id=self.model,
            task=self.task,
            device_map='auto',
            batch_size=self.batch_size,
            model_kwargs={
                "max_length": self.max_tokens,
                "trust_remote_code": True
            },
            # Generation options (temperature, top_p/top_k via self.args) are
            # applied at pipeline level, not model level.
            pipeline_kwargs={
                "temperature": self.temperature,
                "repetition_penalty":1.1,
                "max_new_tokens": self.max_tokens,
                **self.args
            }
        )
|
parrot/llms/openai.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
from langchain_openai import ( # pylint: disable=E0401, E0611
|
|
2
|
+
OpenAI,
|
|
3
|
+
ChatOpenAI,
|
|
4
|
+
OpenAIEmbeddings
|
|
5
|
+
)
|
|
6
|
+
from navconfig import config
|
|
7
|
+
from .abstract import AbstractLLM
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class OpenAILLM(AbstractLLM):
    """OpenAI.
    Interact with OpenAI Language Model.

    Returns:
        _type_: an instance of OpenAI LLM Model.
    """
    model: str = "gpt-4-turbo"
    embed_model: str = "text-embedding-3-large"
    max_tokens: int = 1024
    supported_models: list = [
        'gpt-4o-mini',
        'gpt-4-turbo',
        'gpt-4-turbo-preview',
        'gpt-4o',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-instruct',
        # Fixed: a missing comma here previously concatenated the next two
        # literals into the bogus entry 'dall-e-3tts-1'.
        'dall-e-3',
        'tts-1',
    ]

    def __init__(self, *args, **kwargs):
        """Build the OpenAI chat/completion client plus an embeddings client.

        Recognized kwargs: ``model``, ``model_type`` ('chat' selects
        ChatOpenAI, anything else the completion client), ``embed_model``,
        ``api_key``, plus the AbstractLLM options.
        """
        self.model_type = kwargs.get("model_type", "text")
        super().__init__(*args, **kwargs)
        # TODO(review): the 'davinci' fallback disagrees with the class-level
        # 'gpt-4-turbo' default and names a retired OpenAI model — confirm
        # before changing, as callers may rely on it.
        self.model = kwargs.get("model", "davinci")
        self._api_key = kwargs.pop('api_key', config.get('OPENAI_API_KEY'))
        organization = config.get("OPENAI_ORGANIZATION")
        base_llm = ChatOpenAI if self.model_type == 'chat' else OpenAI
        self._llm = base_llm(
            model_name=self.model,
            api_key=self._api_key,
            organization=organization,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            **self.args
        )
        # Embedding client.
        # NOTE(review): temperature/top_p/top_k are generation parameters —
        # confirm OpenAIEmbeddings actually accepts them; `dimensions` being
        # tied to max_tokens also looks unintended.
        embed_model = kwargs.get("embed_model", "text-embedding-3-large")
        self._embed = OpenAIEmbeddings(
            model=embed_model,
            dimensions=self.max_tokens,
            api_key=self._api_key,
            organization=organization,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
        )
|