LLMFunctionObjects 0.1.7__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/Evaluator.py +1 -2
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/EvaluatorChatGPT.py +11 -2
- llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorChatGemini.py +91 -0
- llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorChatOllama.py +58 -0
- llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorChatPaLM.py +13 -0
- llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorGemini.py +84 -0
- llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorOllama.py +53 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/LLMFunctions.py +94 -55
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/__init__.py +5 -1
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects.egg-info/PKG-INFO +194 -252
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects.egg-info/SOURCES.txt +4 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects.egg-info/requires.txt +1 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/PKG-INFO +194 -252
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/README.md +180 -250
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/setup.py +3 -3
- llmfunctionobjects-0.1.7/LLMFunctionObjects/EvaluatorChatPaLM.py +0 -102
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LICENSE +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/Chat.py +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/Configuration.py +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/EvaluatorChat.py +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/Functor.py +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/SubParser.py +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects.egg-info/dependency_links.txt +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects.egg-info/top_level.txt +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/pyproject.toml +0 -0
- {llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/setup.cfg +0 -0
|
@@ -72,8 +72,7 @@ class Evaluator:
|
|
|
72
72
|
return resLocal
|
|
73
73
|
|
|
74
74
|
# This is a generic LLM evaluator method
|
|
75
|
-
# that works for OpenAI's text completions
|
|
76
|
-
# PaLM's text generation.
|
|
75
|
+
# that works for OpenAI's text completions.
|
|
77
76
|
# The children classes override this method completely.
|
|
78
77
|
# (instead of reusing it because of logging, etc.)
|
|
79
78
|
def eval(self, texts, **args):
|
{llmfunctionobjects-0.1.7 → llmfunctionobjects-0.2.0}/LLMFunctionObjects/EvaluatorChatGPT.py
RENAMED
|
@@ -20,5 +20,14 @@ class EvaluatorChatGPT(EvaluatorChat):
|
|
|
20
20
|
return res_messages
|
|
21
21
|
|
|
22
22
|
def result_values(self, res):
    """Extract the generated text from an OpenAI API result.

    Handles both the Responses API shape (``output_text``) and the Chat
    Completions shape (``choices[0].message.content``), for SDK response
    objects as well as plain dictionaries.  Anything unrecognized is
    returned unchanged.
    """
    # Responses API object: the full text is exposed directly.
    output_text = getattr(res, "output_text", None)
    if output_text is not None:
        return output_text
    # Chat Completions object: take the first choice's message content.
    if hasattr(res, "choices"):
        return res.choices[0].message.content
    # Dictionary payloads (e.g. already-serialized responses).
    if isinstance(res, dict):
        text = res.get("output_text")
        if text is not None:
            return text
        if res.get("choices"):
            return res["choices"][0]["message"]["content"]
    # Unknown shape: hand it back untouched.
    return res
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""Chat-style LLM evaluator for Google Gemini models (google.generativeai SDK)."""
import google.generativeai as genai

from LLMFunctionObjects.EvaluatorChat import EvaluatorChat
from LLMFunctionObjects.EvaluatorGemini import _extract_gemini_text


class EvaluatorChatGemini(EvaluatorChat):
    """Evaluator that sends chat-message sequences to a Gemini model."""

    def eval(self, texts, **args):
        """Evaluate *texts* against the configured Gemini chat model.

        Builds the message list from the configuration's prompts, the
        evaluator context, and *texts*; maps roles to Gemini's
        ``user``/``model`` convention; invokes ``generate_content`` and
        returns the post-processed text.  ``echo=True`` prints the
        intermediate configuration, context, messages, and raw result.
        """
        confDict = self.conf.to_dict()

        echo = args.get("echo", False)
        if echo:
            print(f"Configuration: {self.conf}")

        # Call-time arguments override configuration values.
        args2 = {**self.conf.to_dict(), **args}

        # Handle argument renames
        for k, v in confDict["argument_renames"].items():
            args2[v] = args2.get(v, args2.get(k, None))

        # Build context
        delim = confDict["prompt_delimiter"] or "\n"
        context = self.context or ""
        fullPrompt = delim.join(confDict["prompts"])
        if fullPrompt and context:
            context = delim.join([fullPrompt, context])
        elif fullPrompt:
            context = fullPrompt

        if echo:
            print(f"Context: {context}")

        # Form messages
        messages = self.prompt_texts_combiner(prompt="", texts=texts, context=context)

        if echo:
            print(f"Messages: {messages}")

        # Extract system instruction if present
        # (Gemini takes the system message at model-construction time,
        # not as an ordinary chat message.)
        system_instruction = None
        if messages and list(messages[0].keys())[0] == self.system_role:
            system_instruction = list(messages[0].values())[0]
            messages = messages[1:]

        # Map roles to Gemini format
        res_messages = []
        for d in messages:
            role, content = list(d.items())[0]
            if role == self.assistant_role:
                role = "model"
            elif role == self.user_role:
                role = "user"
            res_messages.append({"role": role, "parts": [content]})

        model_name = args2.get("model", self.conf.model)
        tools = args2.get("tools", None)
        tool_config = args2.get("tool_config", None)
        model_init_args = {}
        if system_instruction:
            model_init_args["system_instruction"] = system_instruction
        if tools is not None:
            model_init_args["tools"] = tools
        if tool_config is not None:
            model_init_args["tool_config"] = tool_config

        try:
            model = genai.GenerativeModel(model_name, **model_init_args)
        except TypeError:
            # Fallback for older google-generativeai versions
            model = genai.GenerativeModel(model_name)
            if system_instruction:
                # Old SDKs lack system_instruction: prepend it as a user turn.
                res_messages.insert(0, {"role": "user", "parts": [system_instruction]})

        # NOTE: tools/tool_config are passed at model construction above;
        # the None entries here are always dropped by the filter below.
        model_args = {
            "generation_config": args2.get("generation_config", None),
            "safety_settings": args2.get("safety_settings", None),
            "tools": None,
            "tool_config": None,
            "stream": args2.get("stream", None),
            "request_options": args2.get("request_options", None),
        }
        model_args = {k: v for k, v in model_args.items() if v is not None}

        # Keep the raw SDK result for inspection, clearing any stale one first.
        self.llm_result = None
        res = model.generate_content(res_messages, **model_args)
        self.llm_result = res

        if echo:
            print(f"LLM result: {res}")

        return self.post_process(_extract_gemini_text(res), form=args.get("form", None))
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
"""Chat-style LLM evaluator for locally hosted models via the ollama client."""
import ollama

from LLMFunctionObjects.EvaluatorChat import EvaluatorChat
from LLMFunctionObjects.EvaluatorOllama import _extract_ollama_text


class EvaluatorChatOllama(EvaluatorChat):
    """Evaluator that sends chat-message sequences to ``ollama.chat``."""

    def _to_ollama_messages(self, messages):
        """Flatten ``[{role: content}, ...]`` dicts into Ollama's
        ``[{"role": ..., "content": ...}, ...]`` message format."""
        res_messages = []
        for d in messages:
            for k, v in d.items():
                res_messages.append({"role": k, "content": v})
        return res_messages

    def eval(self, texts, **args):
        """Evaluate *texts* against the configured Ollama chat model.

        Builds the message list from the configuration's prompts, the
        evaluator context, and *texts*; filters call arguments down to
        the configuration's ``known_params``; invokes ``ollama.chat``
        and returns the post-processed text.  ``echo=True`` prints the
        intermediate configuration, messages, and raw result.
        """
        confDict = self.conf.to_dict()

        echo = args.get("echo", False)
        if echo:
            print(f"Configuration: {self.conf}")

        # Call-time arguments override configuration values.
        args2 = {**self.conf.to_dict(), **args}

        # Handle argument renames
        for k, v in confDict["argument_renames"].items():
            args2[v] = args2.get(v, args2.get(k, None))

        # Build context
        delim = confDict["prompt_delimiter"] or "\n"
        context = self.context or ""
        fullPrompt = delim.join(confDict["prompts"])
        if fullPrompt and context:
            context = delim.join([fullPrompt, context])
        elif fullPrompt:
            context = fullPrompt

        # Form messages
        messages = self.prompt_texts_combiner(prompt="", texts=texts, context=context)
        res_messages = self._to_ollama_messages(messages)

        if echo:
            print(f"Messages: {res_messages}")

        model_name = args2.get("model", self.conf.model)

        # Only forward parameters the ollama.chat API is known to accept.
        known_params = set(self.conf.known_params or [])
        args3 = {k: v for k, v in args2.items() if k in known_params}
        args3["model"] = model_name
        args3["messages"] = res_messages

        # Keep the raw client result for inspection, clearing any stale one first.
        self.llm_result = None
        res = ollama.chat(**args3)
        self.llm_result = res

        if echo:
            print(f"LLM result: {res}")

        return self.post_process(_extract_ollama_text(res), form=args.get("form", None))
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Deprecated PaLM chat evaluator; forwards to the Gemini chat evaluator."""
import warnings

from LLMFunctionObjects.EvaluatorChatGemini import EvaluatorChatGemini


class EvaluatorChatPaLM(EvaluatorChatGemini):
    """Backward-compatibility shim: PaLM was replaced by Gemini.

    Instantiating this class emits a ``DeprecationWarning`` and otherwise
    behaves exactly like :class:`EvaluatorChatGemini`.
    """

    def __init__(self, **kwargs):
        # stacklevel=2 attributes the warning to the caller that
        # instantiated the deprecated class, not to this shim.
        warnings.warn(
            "PaLM is deprecated and has been replaced with Gemini. "
            "Use EvaluatorChatGemini or llm_configuration('ChatGemini').",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(**kwargs)
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import google.generativeai as genai
|
|
2
|
+
|
|
3
|
+
from LLMFunctionObjects.Evaluator import Evaluator
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _extract_gemini_text(res):
|
|
7
|
+
if hasattr(res, "text") and res.text is not None:
|
|
8
|
+
return res.text
|
|
9
|
+
if isinstance(res, dict):
|
|
10
|
+
if "text" in res and res["text"] is not None:
|
|
11
|
+
return res["text"]
|
|
12
|
+
if hasattr(res, "candidates") and res.candidates:
|
|
13
|
+
cand = res.candidates[0]
|
|
14
|
+
if hasattr(cand, "content") and hasattr(cand.content, "parts"):
|
|
15
|
+
parts = cand.content.parts
|
|
16
|
+
if parts:
|
|
17
|
+
if isinstance(parts[0], str):
|
|
18
|
+
return "".join(parts)
|
|
19
|
+
if hasattr(parts[0], "text"):
|
|
20
|
+
return "".join([p.text for p in parts if hasattr(p, "text")])
|
|
21
|
+
return res
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class EvaluatorGemini(Evaluator):
    """Text-completion-style evaluator for Google Gemini models."""

    def eval(self, texts, **args):
        """Evaluate *texts* against the configured Gemini model.

        Joins the configuration's prompts into a single prompt, combines
        it with *texts*, invokes ``GenerativeModel.generate_content`` and
        returns the post-processed text.  ``echo=True`` prints the
        intermediate configuration, prompt, and raw result.
        """
        confDict = self.conf.to_dict()

        echo = args.get("echo", False)
        if echo:
            print(f"Configuration: {self.conf}")

        # Call-time arguments override configuration values.
        args2 = {**self.conf.to_dict(), **args}

        # Handle argument renames
        for k, v in confDict["argument_renames"].items():
            args2[v] = args2.get(v, args2.get(k, None))

        # Build prompt
        fullPrompt = confDict["prompt_delimiter"].join(confDict["prompts"])
        prompt = self.prompt_texts_combiner(fullPrompt, texts)

        if echo:
            print(f"Prompt: {prompt}")

        # Configure model
        model_name = args2.get("model", self.conf.model)
        system_instruction = args2.get("system_instruction", None)
        tools = args2.get("tools", None)
        tool_config = args2.get("tool_config", None)
        model_init_args = {}
        if system_instruction:
            model_init_args["system_instruction"] = system_instruction
        if tools is not None:
            model_init_args["tools"] = tools
        if tool_config is not None:
            model_init_args["tool_config"] = tool_config

        try:
            model = genai.GenerativeModel(model_name, **model_init_args)
        except TypeError:
            # Fallback for older google-generativeai versions
            model = genai.GenerativeModel(model_name)
            if system_instruction:
                # Old SDKs lack system_instruction: fold it into the prompt.
                prompt = f"{system_instruction}\n\n{prompt}"

        # NOTE: tools/tool_config are passed at model construction above;
        # the None entries here are always dropped by the filter below.
        model_args = {
            "generation_config": args2.get("generation_config", None),
            "safety_settings": args2.get("safety_settings", None),
            "tools": None,
            "tool_config": None,
            "stream": args2.get("stream", None),
            "request_options": args2.get("request_options", None),
        }
        model_args = {k: v for k, v in model_args.items() if v is not None}

        # Invoke Gemini
        # Keep the raw SDK result for inspection, clearing any stale one first.
        self.llm_result = None
        res = model.generate_content(prompt, **model_args)
        self.llm_result = res

        if echo:
            print(f"LLM result: {res}")

        return self.post_process(_extract_gemini_text(res), form=args.get("form", None))
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import ollama
|
|
2
|
+
|
|
3
|
+
from LLMFunctionObjects.Evaluator import Evaluator
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _extract_ollama_text(res):
|
|
7
|
+
if isinstance(res, dict):
|
|
8
|
+
if "response" in res:
|
|
9
|
+
return res["response"]
|
|
10
|
+
if "message" in res and isinstance(res["message"], dict):
|
|
11
|
+
return res["message"].get("content", res)
|
|
12
|
+
if hasattr(res, "response"):
|
|
13
|
+
return res.response
|
|
14
|
+
if hasattr(res, "message") and hasattr(res.message, "content"):
|
|
15
|
+
return res.message.content
|
|
16
|
+
return res
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class EvaluatorOllama(Evaluator):
    """Text-completion-style evaluator for local models via ``ollama.generate``."""

    def eval(self, texts, **args):
        """Evaluate *texts* against the configured Ollama model.

        Joins the configuration's prompts into a single prompt, combines
        it with *texts*, filters call arguments down to the
        configuration's ``known_params``, invokes ``ollama.generate``
        and returns the post-processed text.  ``echo=True`` prints the
        intermediate configuration, prompt, and raw result.
        """
        confDict = self.conf.to_dict()

        echo = args.get("echo", False)
        if echo:
            print(f"Configuration: {self.conf}")

        # Call-time arguments override configuration values.
        args2 = {**self.conf.to_dict(), **args}

        # Handle argument renames
        for k, v in confDict["argument_renames"].items():
            args2[v] = args2.get(v, args2.get(k, None))

        fullPrompt = confDict["prompt_delimiter"].join(confDict["prompts"])
        prompt = self.prompt_texts_combiner(fullPrompt, texts)

        if echo:
            print(f"Prompt: {prompt}")

        model_name = args2.get("model", self.conf.model)

        # Only forward parameters the ollama.generate API is known to accept.
        known_params = set(self.conf.known_params or [])
        args3 = {k: v for k, v in args2.items() if k in known_params}
        args3["model"] = model_name
        args3["prompt"] = prompt

        # Keep the raw client result for inspection, clearing any stale one first.
        self.llm_result = None
        res = ollama.generate(**args3)
        self.llm_result = res

        if echo:
            print(f"LLM result: {res}")

        return self.post_process(_extract_ollama_text(res), form=args.get("form", None))
|
|
@@ -5,12 +5,14 @@ from LLMFunctionObjects.Configuration import Configuration
|
|
|
5
5
|
from LLMFunctionObjects.Evaluator import Evaluator
|
|
6
6
|
from LLMFunctionObjects.EvaluatorChat import EvaluatorChat
|
|
7
7
|
from LLMFunctionObjects.EvaluatorChatGPT import EvaluatorChatGPT
|
|
8
|
-
from LLMFunctionObjects.
|
|
8
|
+
from LLMFunctionObjects.EvaluatorChatGemini import EvaluatorChatGemini
|
|
9
|
+
from LLMFunctionObjects.EvaluatorGemini import EvaluatorGemini
|
|
10
|
+
from LLMFunctionObjects.EvaluatorChatOllama import EvaluatorChatOllama
|
|
11
|
+
from LLMFunctionObjects.EvaluatorOllama import EvaluatorOllama
|
|
9
12
|
from LLMFunctionObjects.Functor import Functor
|
|
10
13
|
from LLMFunctionObjects.Chat import Chat
|
|
11
14
|
import openai
|
|
12
|
-
import google.generativeai
|
|
13
|
-
import warnings
|
|
15
|
+
import google.generativeai as genai
|
|
14
16
|
|
|
15
17
|
|
|
16
18
|
# ===========================================================
|
|
@@ -54,15 +56,21 @@ def llm_configuration(spec, **kwargs):
|
|
|
54
56
|
apiKey = kwargs.get("api_key", apiKey)
|
|
55
57
|
client = openai.OpenAI(api_key=apiKey)
|
|
56
58
|
|
|
59
|
+
default_chat_model = os.environ.get("OPENAI_CHAT_MODEL", os.environ.get("OPENAI_MODEL", "gpt-4.1-mini"))
|
|
60
|
+
|
|
57
61
|
confChatGPT = llm_configuration("openai",
|
|
58
62
|
name="chatgpt",
|
|
59
63
|
module='openai',
|
|
60
|
-
model=
|
|
64
|
+
model=default_chat_model,
|
|
61
65
|
function=client.chat.completions.create, # was openai.ChatCompletion.create,
|
|
66
|
+
argument_renames={"max_tokens": "max_completion_tokens"},
|
|
62
67
|
known_params=["model", "messages", "functions", "function_call",
|
|
63
|
-
"
|
|
64
|
-
"
|
|
65
|
-
"
|
|
68
|
+
"tools", "tool_choice", "response_format",
|
|
69
|
+
"temperature", "top_p", "n", "seed",
|
|
70
|
+
"stream", "logprobs", "stop",
|
|
71
|
+
"presence_penalty", "frequency_penalty", "logit_bias",
|
|
72
|
+
"max_completion_tokens",
|
|
73
|
+
"max_tokens",
|
|
66
74
|
"user"],
|
|
67
75
|
response_value_keys=[])
|
|
68
76
|
|
|
@@ -73,78 +81,99 @@ def llm_configuration(spec, **kwargs):
|
|
|
73
81
|
confChatGPT.llm_evaluator = None
|
|
74
82
|
|
|
75
83
|
return confChatGPT
|
|
76
|
-
elif isinstance(spec, str) and spec.lower() == '
|
|
84
|
+
elif isinstance(spec, str) and spec.lower() == 'Gemini'.lower():
|
|
77
85
|
|
|
78
86
|
# Set key
|
|
79
|
-
apiKey = os.environ.get("
|
|
87
|
+
apiKey = os.environ.get("GEMINI_API_KEY", os.environ.get("GOOGLE_API_KEY"))
|
|
80
88
|
apiKey = kwargs.get("api_key", apiKey)
|
|
81
|
-
|
|
89
|
+
genai.configure(api_key=apiKey)
|
|
90
|
+
|
|
91
|
+
default_gemini_model = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
|
|
82
92
|
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
name="palm",
|
|
93
|
+
confGemini = Configuration(
|
|
94
|
+
name="gemini",
|
|
86
95
|
api_key=None,
|
|
87
96
|
api_user_id="user",
|
|
88
97
|
module="google.generativeai",
|
|
89
|
-
model=
|
|
90
|
-
function=
|
|
98
|
+
model=default_gemini_model,
|
|
99
|
+
function=None,
|
|
91
100
|
temperature=0.2,
|
|
92
101
|
max_tokens=300,
|
|
93
102
|
total_probability_cutoff=0.03,
|
|
94
103
|
prompts=None,
|
|
95
104
|
prompt_delimiter=" ",
|
|
96
105
|
stop_tokens=None,
|
|
97
|
-
argument_renames={
|
|
98
|
-
"stop_tokens": "stop_sequences"},
|
|
106
|
+
argument_renames={},
|
|
99
107
|
fmt="values",
|
|
100
108
|
known_params=[
|
|
101
|
-
"model", "
|
|
102
|
-
"
|
|
109
|
+
"model", "generation_config", "safety_settings", "tools", "tool_config",
|
|
110
|
+
"stream", "request_options", "system_instruction"
|
|
103
111
|
],
|
|
104
|
-
response_object_attribute=
|
|
112
|
+
response_object_attribute=None,
|
|
105
113
|
response_value_keys=[],
|
|
106
114
|
llm_evaluator=None)
|
|
107
115
|
|
|
108
|
-
# Modify by additional arguments
|
|
109
116
|
if len(kwargs) > 0:
|
|
110
|
-
|
|
117
|
+
confGemini = confGemini.combine(kwargs)
|
|
118
|
+
return confGemini
|
|
111
119
|
|
|
112
|
-
|
|
113
|
-
return confPaLM
|
|
120
|
+
elif isinstance(spec, str) and spec.lower() == 'ChatGemini'.lower():
|
|
114
121
|
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
# Start as PaLM text completion configuration
|
|
118
|
-
confChatPaLM = llm_configuration("PaLM")
|
|
122
|
+
confChatGemini = llm_configuration("Gemini")
|
|
123
|
+
confChatGemini.name = "chatgemini"
|
|
119
124
|
|
|
120
|
-
|
|
121
|
-
|
|
125
|
+
if len(kwargs) > 0:
|
|
126
|
+
confChatGemini = confChatGemini.combine(kwargs)
|
|
122
127
|
|
|
123
|
-
|
|
124
|
-
confChatPaLM.function = google.generativeai.chat
|
|
128
|
+
return confChatGemini
|
|
125
129
|
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
130
|
+
elif isinstance(spec, str) and spec.lower() == 'PaLM'.lower():
|
|
131
|
+
warnings.warn("PaLM is deprecated; using Gemini instead.", DeprecationWarning)
|
|
132
|
+
return llm_configuration('Gemini', **kwargs)
|
|
129
133
|
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
confChatPaLM.known_params = [
|
|
134
|
-
"model", "context", "examples", "temperature", "candidate_count", "top_p", "top_k", "prompt"
|
|
135
|
-
]
|
|
134
|
+
elif isinstance(spec, str) and spec.lower() == 'ChatPaLM'.lower():
|
|
135
|
+
warnings.warn("ChatPaLM is deprecated; using ChatGemini instead.", DeprecationWarning)
|
|
136
|
+
return llm_configuration('ChatGemini', **kwargs)
|
|
136
137
|
|
|
137
|
-
|
|
138
|
-
|
|
138
|
+
elif isinstance(spec, str) and spec.lower() == 'Ollama'.lower():
|
|
139
|
+
default_ollama_model = os.environ.get("OLLAMA_MODEL", "llama3.2")
|
|
139
140
|
|
|
140
|
-
|
|
141
|
-
|
|
141
|
+
confOllama = Configuration(
|
|
142
|
+
name="ollama",
|
|
143
|
+
api_key=None,
|
|
144
|
+
api_user_id="user",
|
|
145
|
+
module="ollama",
|
|
146
|
+
model=default_ollama_model,
|
|
147
|
+
function=None,
|
|
148
|
+
temperature=0.2,
|
|
149
|
+
max_tokens=300,
|
|
150
|
+
total_probability_cutoff=0.03,
|
|
151
|
+
prompts=None,
|
|
152
|
+
prompt_delimiter=" ",
|
|
153
|
+
stop_tokens=None,
|
|
154
|
+
argument_renames={},
|
|
155
|
+
fmt="values",
|
|
156
|
+
known_params=[
|
|
157
|
+
"model", "prompt", "system", "template", "context", "stream",
|
|
158
|
+
"raw", "format", "options", "keep_alive"
|
|
159
|
+
],
|
|
160
|
+
response_object_attribute=None,
|
|
161
|
+
response_value_keys=[],
|
|
162
|
+
llm_evaluator=None)
|
|
142
163
|
|
|
143
|
-
# Combine with given additional parameters (if any)
|
|
144
164
|
if len(kwargs) > 0:
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
165
|
+
confOllama = confOllama.combine(kwargs)
|
|
166
|
+
return confOllama
|
|
167
|
+
|
|
168
|
+
elif isinstance(spec, str) and spec.lower() == 'ChatOllama'.lower():
|
|
169
|
+
confChatOllama = llm_configuration("Ollama")
|
|
170
|
+
confChatOllama.name = "chatollama"
|
|
171
|
+
confChatOllama.known_params = [
|
|
172
|
+
"model", "messages", "tools", "stream", "format", "options", "keep_alive"
|
|
173
|
+
]
|
|
174
|
+
if len(kwargs) > 0:
|
|
175
|
+
confChatOllama = confChatOllama.combine(kwargs)
|
|
176
|
+
return confChatOllama
|
|
148
177
|
else:
|
|
149
178
|
warnings.warn(f"Do not know what to do with given configuration spec: {spec}. Continuing with \"OpenAI\".")
|
|
150
179
|
return llm_configuration('OpenAI', **kwargs)
|
|
@@ -163,8 +192,14 @@ def llm_evaluator(spec, **args):
|
|
|
163
192
|
evaluator_class = EvaluatorChatGPT
|
|
164
193
|
elif spec.name.lower() == "OpenAI".lower():
|
|
165
194
|
evaluator_class = Evaluator
|
|
166
|
-
elif spec.name.lower()
|
|
167
|
-
evaluator_class =
|
|
195
|
+
elif spec.name.lower() in ["Gemini".lower(), "PaLM".lower()]:
|
|
196
|
+
evaluator_class = EvaluatorGemini
|
|
197
|
+
elif spec.name.lower() in ["ChatGemini".lower(), "ChatPaLM".lower()]:
|
|
198
|
+
evaluator_class = EvaluatorChatGemini
|
|
199
|
+
elif spec.name.lower() == "Ollama".lower():
|
|
200
|
+
evaluator_class = EvaluatorOllama
|
|
201
|
+
elif spec.name.lower() == "ChatOllama".lower():
|
|
202
|
+
evaluator_class = EvaluatorChatOllama
|
|
168
203
|
else:
|
|
169
204
|
raise ValueError(
|
|
170
205
|
'Cannot automatically deduce llm_evaluator_class from the given configuration object.')
|
|
@@ -335,7 +370,7 @@ def llm_synthesize(prompts, prop=None, **kwargs):
|
|
|
335
370
|
# Chat object creation
|
|
336
371
|
# ===========================================================
|
|
337
372
|
|
|
338
|
-
_mustPassConfKeys = ["name", "prompts", "examples", "temperature", "max_tokens",
|
|
373
|
+
_mustPassConfKeys = ["name", "model", "prompts", "examples", "temperature", "max_tokens",
|
|
339
374
|
"stop_tokens", "api_key", "api_user_id"]
|
|
340
375
|
|
|
341
376
|
|
|
@@ -366,10 +401,14 @@ def llm_chat(prompt: str = '', **kwargs):
|
|
|
366
401
|
|
|
367
402
|
# Obtain Evaluator class
|
|
368
403
|
if evaluator_class is None:
|
|
369
|
-
if 'palm' in conf.name.lower():
|
|
370
|
-
conf = llm_configuration('
|
|
404
|
+
if 'gemini' in conf.name.lower() or 'palm' in conf.name.lower():
|
|
405
|
+
conf = llm_configuration('ChatGemini',
|
|
406
|
+
**{k: v for k, v in conf.to_dict().items() if k in _mustPassConfKeys})
|
|
407
|
+
evaluator_class = EvaluatorChatGemini
|
|
408
|
+
elif 'ollama' in conf.name.lower():
|
|
409
|
+
conf = llm_configuration('ChatOllama',
|
|
371
410
|
**{k: v for k, v in conf.to_dict().items() if k in _mustPassConfKeys})
|
|
372
|
-
evaluator_class =
|
|
411
|
+
evaluator_class = EvaluatorChatOllama
|
|
373
412
|
else:
|
|
374
413
|
evaluator_class = EvaluatorChatGPT
|
|
375
414
|
|
|
@@ -3,6 +3,10 @@ from LLMFunctionObjects.Configuration import Configuration
|
|
|
3
3
|
from LLMFunctionObjects.Evaluator import Evaluator
|
|
4
4
|
from LLMFunctionObjects.EvaluatorChat import EvaluatorChat
|
|
5
5
|
from LLMFunctionObjects.EvaluatorChatGPT import EvaluatorChatGPT
|
|
6
|
+
from LLMFunctionObjects.EvaluatorChatGemini import EvaluatorChatGemini
|
|
7
|
+
from LLMFunctionObjects.EvaluatorGemini import EvaluatorGemini
|
|
8
|
+
from LLMFunctionObjects.EvaluatorChatOllama import EvaluatorChatOllama
|
|
9
|
+
from LLMFunctionObjects.EvaluatorOllama import EvaluatorOllama
|
|
6
10
|
from LLMFunctionObjects.EvaluatorChatPaLM import EvaluatorChatPaLM
|
|
7
11
|
from LLMFunctionObjects.Functor import Functor
|
|
8
12
|
from LLMFunctionObjects.Chat import Chat
|
|
@@ -18,4 +22,4 @@ from LLMFunctionObjects.SubParser import exact_parser
|
|
|
18
22
|
from LLMFunctionObjects.SubParser import catch_by_pattern
|
|
19
23
|
from LLMFunctionObjects.SubParser import extract_json_objects
|
|
20
24
|
from LLMFunctionObjects.SubParser import jsonify_text
|
|
21
|
-
from LLMFunctionObjects.SubParser import numify_text
|
|
25
|
+
from LLMFunctionObjects.SubParser import numify_text
|