LLMFunctionObjects 0.2.0__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/Configuration.py +4 -0
  2. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorChatGemini.py +23 -29
  3. llmfunctionobjects-0.2.2/LLMFunctionObjects/EvaluatorGemini.py +189 -0
  4. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/LLMFunctions.py +15 -9
  5. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects.egg-info/PKG-INFO +6 -4
  6. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects.egg-info/requires.txt +1 -1
  7. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/PKG-INFO +6 -4
  8. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/README.md +5 -3
  9. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/setup.py +2 -2
  10. llmfunctionobjects-0.2.0/LLMFunctionObjects/EvaluatorGemini.py +0 -84
  11. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LICENSE +0 -0
  12. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/Chat.py +0 -0
  13. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/Evaluator.py +0 -0
  14. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorChat.py +0 -0
  15. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorChatGPT.py +0 -0
  16. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorChatOllama.py +0 -0
  17. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorChatPaLM.py +0 -0
  18. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/EvaluatorOllama.py +0 -0
  19. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/Functor.py +0 -0
  20. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/SubParser.py +0 -0
  21. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects/__init__.py +0 -0
  22. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects.egg-info/SOURCES.txt +0 -0
  23. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects.egg-info/dependency_links.txt +0 -0
  24. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/LLMFunctionObjects.egg-info/top_level.txt +0 -0
  25. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/pyproject.toml +0 -0
  26. {llmfunctionobjects-0.2.0 → llmfunctionobjects-0.2.2}/setup.cfg +0 -0
@@ -7,6 +7,7 @@ class Configuration:
7
7
  def __init__(self,
8
8
  name: str,
9
9
  api_key: Union[str, None] = None,
10
+ base_url: Union[str, None] = None,
10
11
  api_user_id: Union[str, None] = None,
11
12
  module: str = '',
12
13
  model: str = '',
@@ -41,6 +42,7 @@ class Configuration:
41
42
  # stop_tokens = ['.', '?', '!']
42
43
  self.name = name
43
44
  self.api_key = api_key
45
+ self.base_url = base_url
44
46
  self.api_user_id = api_user_id
45
47
  self.module = module
46
48
  self.model = model
@@ -111,6 +113,7 @@ class Configuration:
111
113
  newObj = type(self)(
112
114
  name=self.name,
113
115
  api_key=self.api_key,
116
+ base_url=self.base_url,
114
117
  api_user_id=self.api_user_id,
115
118
  module=self.module,
116
119
  model=self.model,
@@ -161,6 +164,7 @@ class Configuration:
161
164
  return {
162
165
  'name': self.name,
163
166
  'api_key': self.api_key,
167
+ 'base_url': self.base_url,
164
168
  'api_user_id': self.api_user_id,
165
169
  'module': self.module,
166
170
  'model': self.model,
@@ -1,7 +1,8 @@
1
- import google.generativeai as genai
2
-
3
1
  from LLMFunctionObjects.EvaluatorChat import EvaluatorChat
4
- from LLMFunctionObjects.EvaluatorGemini import _extract_gemini_text
2
+ from LLMFunctionObjects.EvaluatorGemini import (
3
+ _generate_with_google_genai,
4
+ _generate_with_google_generativeai,
5
+ )
5
6
 
6
7
 
7
8
  class EvaluatorChatGemini(EvaluatorChat):
@@ -55,37 +56,30 @@ class EvaluatorChatGemini(EvaluatorChat):
55
56
  model_name = args2.get("model", self.conf.model)
56
57
  tools = args2.get("tools", None)
57
58
  tool_config = args2.get("tool_config", None)
58
- model_init_args = {}
59
- if system_instruction:
60
- model_init_args["system_instruction"] = system_instruction
61
- if tools is not None:
62
- model_init_args["tools"] = tools
63
- if tool_config is not None:
64
- model_init_args["tool_config"] = tool_config
65
59
 
66
- try:
67
- model = genai.GenerativeModel(model_name, **model_init_args)
68
- except TypeError:
69
- # Fallback for older google-generativeai versions
70
- model = genai.GenerativeModel(model_name)
60
+ self.llm_result = None
61
+ res, text = _generate_with_google_genai(
62
+ model_name=model_name,
63
+ contents=res_messages,
64
+ args2=args2,
65
+ system_instruction=system_instruction,
66
+ tools=tools,
67
+ tool_config=tool_config,
68
+ )
69
+ if res is None:
71
70
  if system_instruction:
72
71
  res_messages.insert(0, {"role": "user", "parts": [system_instruction]})
73
-
74
- model_args = {
75
- "generation_config": args2.get("generation_config", None),
76
- "safety_settings": args2.get("safety_settings", None),
77
- "tools": None,
78
- "tool_config": None,
79
- "stream": args2.get("stream", None),
80
- "request_options": args2.get("request_options", None),
81
- }
82
- model_args = {k: v for k, v in model_args.items() if v is not None}
83
-
84
- self.llm_result = None
85
- res = model.generate_content(res_messages, **model_args)
72
+ res, text = _generate_with_google_generativeai(
73
+ model_name=model_name,
74
+ contents=res_messages,
75
+ args2=args2,
76
+ system_instruction=None,
77
+ tools=tools,
78
+ tool_config=tool_config,
79
+ )
86
80
  self.llm_result = res
87
81
 
88
82
  if echo:
89
83
  print(f"LLM result: {res}")
90
84
 
91
- return self.post_process(_extract_gemini_text(res), form=args.get("form", None))
85
+ return self.post_process(text, form=args.get("form", None))
@@ -0,0 +1,189 @@
1
+ import importlib
2
+ import os
3
+ import warnings
4
+
5
+ from LLMFunctionObjects.Evaluator import Evaluator
6
+
7
+
8
+ def _extract_gemini_text(res):
9
+ if hasattr(res, "text") and res.text is not None:
10
+ return res.text
11
+ if isinstance(res, dict):
12
+ if "text" in res and res["text"] is not None:
13
+ return res["text"]
14
+ if hasattr(res, "candidates") and res.candidates:
15
+ cand = res.candidates[0]
16
+ if hasattr(cand, "content") and hasattr(cand.content, "parts"):
17
+ parts = cand.content.parts
18
+ if parts:
19
+ if isinstance(parts[0], str):
20
+ return "".join(parts)
21
+ if hasattr(parts[0], "text"):
22
+ return "".join([p.text for p in parts if hasattr(p, "text")])
23
+ return res
24
+
25
+
26
+ def _import_google_genai():
27
+ try:
28
+ return importlib.import_module("google.genai")
29
+ except ImportError:
30
+ return None
31
+
32
+
33
+ def _import_google_generativeai():
34
+ try:
35
+ return importlib.import_module("google.generativeai")
36
+ except ImportError:
37
+ return None
38
+
39
+
40
+ def _gemini_config_from_args(args2, system_instruction=None, tools=None, tool_config=None):
41
+ config = {}
42
+ generation_config = args2.get("generation_config", None)
43
+ if isinstance(generation_config, dict):
44
+ config.update(generation_config)
45
+
46
+ if args2.get("temperature", None) is not None:
47
+ config["temperature"] = args2.get("temperature")
48
+ if args2.get("max_tokens", None) is not None and "max_output_tokens" not in config:
49
+ config["max_output_tokens"] = args2.get("max_tokens")
50
+ if args2.get("stop_tokens", None) and "stop_sequences" not in config:
51
+ config["stop_sequences"] = args2.get("stop_tokens")
52
+ if args2.get("safety_settings", None) is not None:
53
+ config["safety_settings"] = args2.get("safety_settings")
54
+ if system_instruction:
55
+ config["system_instruction"] = system_instruction
56
+ if tools is not None:
57
+ config["tools"] = tools
58
+ if tool_config is not None:
59
+ config["tool_config"] = tool_config
60
+
61
+ return config if len(config) > 0 else None
62
+
63
+
64
+ def _resolve_gemini_key(args2):
65
+ return args2.get("api_key", None) or os.environ.get("GEMINI_API_KEY", os.environ.get("GOOGLE_API_KEY"))
66
+
67
+
68
+ def _generate_with_google_genai(model_name, contents, args2, system_instruction=None, tools=None, tool_config=None):
69
+ google_genai = _import_google_genai()
70
+ if google_genai is None:
71
+ return None, None
72
+
73
+ api_key = _resolve_gemini_key(args2)
74
+ client = google_genai.Client(api_key=api_key)
75
+ config = _gemini_config_from_args(args2, system_instruction=system_instruction, tools=tools, tool_config=tool_config)
76
+
77
+ if args2.get("stream", False):
78
+ chunks = client.models.generate_content_stream(model=model_name, contents=contents, config=config)
79
+ text_chunks = []
80
+ last_chunk = None
81
+ for chunk in chunks:
82
+ last_chunk = chunk
83
+ chunk_text = _extract_gemini_text(chunk)
84
+ if isinstance(chunk_text, str):
85
+ text_chunks.append(chunk_text)
86
+ return last_chunk, "".join(text_chunks)
87
+
88
+ res = client.models.generate_content(model=model_name, contents=contents, config=config)
89
+ return res, _extract_gemini_text(res)
90
+
91
+
92
+ def _generate_with_google_generativeai(model_name, contents, args2, system_instruction=None, tools=None, tool_config=None):
93
+ genai = _import_google_generativeai()
94
+ if genai is None:
95
+ raise ImportError(
96
+ "Neither 'google.genai' nor 'google.generativeai' is available. "
97
+ "Install 'google-genai' to use Gemini."
98
+ )
99
+
100
+ warnings.warn(
101
+ "Using deprecated 'google.generativeai'. Install 'google-genai' to use the supported Gemini SDK.",
102
+ DeprecationWarning,
103
+ stacklevel=2,
104
+ )
105
+
106
+ api_key = _resolve_gemini_key(args2)
107
+ if api_key is not None:
108
+ genai.configure(api_key=api_key)
109
+
110
+ model_init_args = {}
111
+ if system_instruction:
112
+ model_init_args["system_instruction"] = system_instruction
113
+ if tools is not None:
114
+ model_init_args["tools"] = tools
115
+ if tool_config is not None:
116
+ model_init_args["tool_config"] = tool_config
117
+
118
+ try:
119
+ model = genai.GenerativeModel(model_name, **model_init_args)
120
+ except TypeError:
121
+ model = genai.GenerativeModel(model_name)
122
+ if system_instruction and isinstance(contents, str):
123
+ contents = f"{system_instruction}\n\n{contents}"
124
+
125
+ model_args = {
126
+ "generation_config": args2.get("generation_config", None),
127
+ "safety_settings": args2.get("safety_settings", None),
128
+ "tools": None,
129
+ "tool_config": None,
130
+ "stream": args2.get("stream", None),
131
+ "request_options": args2.get("request_options", None),
132
+ }
133
+ model_args = {k: v for k, v in model_args.items() if v is not None}
134
+ res = model.generate_content(contents, **model_args)
135
+ return res, _extract_gemini_text(res)
136
+
137
+
138
+ class EvaluatorGemini(Evaluator):
139
+ def eval(self, texts, **args):
140
+ confDict = self.conf.to_dict()
141
+
142
+ echo = args.get("echo", False)
143
+ if echo:
144
+ print(f"Configuration: {self.conf}")
145
+
146
+ args2 = {**self.conf.to_dict(), **args}
147
+
148
+ # Handle argument renames
149
+ for k, v in confDict["argument_renames"].items():
150
+ args2[v] = args2.get(v, args2.get(k, None))
151
+
152
+ # Build prompt
153
+ fullPrompt = confDict["prompt_delimiter"].join(confDict["prompts"])
154
+ prompt = self.prompt_texts_combiner(fullPrompt, texts)
155
+
156
+ if echo:
157
+ print(f"Prompt: {prompt}")
158
+
159
+ # Configure model
160
+ model_name = args2.get("model", self.conf.model)
161
+ system_instruction = args2.get("system_instruction", None)
162
+ tools = args2.get("tools", None)
163
+ tool_config = args2.get("tool_config", None)
164
+
165
+ # Invoke Gemini
166
+ self.llm_result = None
167
+ res, text = _generate_with_google_genai(
168
+ model_name=model_name,
169
+ contents=prompt,
170
+ args2=args2,
171
+ system_instruction=system_instruction,
172
+ tools=tools,
173
+ tool_config=tool_config,
174
+ )
175
+ if res is None:
176
+ res, text = _generate_with_google_generativeai(
177
+ model_name=model_name,
178
+ contents=prompt,
179
+ args2=args2,
180
+ system_instruction=system_instruction,
181
+ tools=tools,
182
+ tool_config=tool_config,
183
+ )
184
+ self.llm_result = res
185
+
186
+ if echo:
187
+ print(f"LLM result: {res}")
188
+
189
+ return self.post_process(text, form=args.get("form", None))
@@ -12,7 +12,6 @@ from LLMFunctionObjects.EvaluatorOllama import EvaluatorOllama
12
12
  from LLMFunctionObjects.Functor import Functor
13
13
  from LLMFunctionObjects.Chat import Chat
14
14
  import openai
15
- import google.generativeai as genai
16
15
 
17
16
 
18
17
  # ===========================================================
@@ -29,12 +28,13 @@ def llm_configuration(spec, **kwargs):
29
28
  confOpenAI = Configuration(
30
29
  name="openai",
31
30
  api_key=None,
31
+ base_url=None,
32
32
  api_user_id='user',
33
33
  module='openai',
34
34
  model='gpt-3.5-turbo-instruct', # was 'text-davinci-003'
35
35
  function=openai.completions.create, # was openai.Completion.create
36
36
  temperature=0.2,
37
- max_tokens=300,
37
+ max_tokens=512,
38
38
  total_probability_cutoff=0.03,
39
39
  prompts=None,
40
40
  prompt_delimiter=' ',
@@ -54,7 +54,11 @@ def llm_configuration(spec, **kwargs):
54
54
  # Client and key
55
55
  apiKey = os.environ.get("OPENAI_API_KEY")
56
56
  apiKey = kwargs.get("api_key", apiKey)
57
- client = openai.OpenAI(api_key=apiKey)
57
+ baseUrl = kwargs.get("base_url", os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1"))
58
+ client_kwargs = {"api_key": apiKey}
59
+ if baseUrl is not None:
60
+ client_kwargs["base_url"] = baseUrl
61
+ client = openai.OpenAI(**client_kwargs)
58
62
 
59
63
  default_chat_model = os.environ.get("OPENAI_CHAT_MODEL", os.environ.get("OPENAI_MODEL", "gpt-4.1-mini"))
60
64
 
@@ -63,6 +67,7 @@ def llm_configuration(spec, **kwargs):
63
67
  module='openai',
64
68
  model=default_chat_model,
65
69
  function=client.chat.completions.create, # was openai.ChatCompletion.create,
70
+ max_tokens=8192,
66
71
  argument_renames={"max_tokens": "max_completion_tokens"},
67
72
  known_params=["model", "messages", "functions", "function_call",
68
73
  "tools", "tool_choice", "response_format",
@@ -74,6 +79,8 @@ def llm_configuration(spec, **kwargs):
74
79
  "user"],
75
80
  response_value_keys=[])
76
81
 
82
+ # Apparently, base_url cannot be included in known_params -- it is for the client object only.
83
+
77
84
  if len(kwargs) > 0:
78
85
  confChatGPT = confChatGPT.combine(kwargs)
79
86
 
@@ -83,22 +90,21 @@ def llm_configuration(spec, **kwargs):
83
90
  return confChatGPT
84
91
  elif isinstance(spec, str) and spec.lower() == 'Gemini'.lower():
85
92
 
86
- # Set key
93
+ # Resolve key eagerly but configure clients lazily in evaluators.
87
94
  apiKey = os.environ.get("GEMINI_API_KEY", os.environ.get("GOOGLE_API_KEY"))
88
95
  apiKey = kwargs.get("api_key", apiKey)
89
- genai.configure(api_key=apiKey)
90
96
 
91
97
  default_gemini_model = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
92
98
 
93
99
  confGemini = Configuration(
94
100
  name="gemini",
95
- api_key=None,
101
+ api_key=apiKey,
96
102
  api_user_id="user",
97
- module="google.generativeai",
103
+ module="google.genai",
98
104
  model=default_gemini_model,
99
105
  function=None,
100
106
  temperature=0.2,
101
- max_tokens=300,
107
+ max_tokens=8192,
102
108
  total_probability_cutoff=0.03,
103
109
  prompts=None,
104
110
  prompt_delimiter=" ",
@@ -146,7 +152,7 @@ def llm_configuration(spec, **kwargs):
146
152
  model=default_ollama_model,
147
153
  function=None,
148
154
  temperature=0.2,
149
- max_tokens=300,
155
+ max_tokens=8192,
150
156
  total_probability_cutoff=0.03,
151
157
  prompts=None,
152
158
  prompt_delimiter=" ",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLMFunctionObjects
3
- Version: 0.2.0
3
+ Version: 0.2.2
4
4
  Summary: Large Language Models (LLMs) functions package.
5
5
  Home-page: https://github.com/antononcube/Python-packages/tree/main/LLMFunctionObjects
6
6
  Author: Anton Antonov
@@ -16,7 +16,7 @@ Classifier: Operating System :: OS Independent
16
16
  Requires-Python: >=3.7
17
17
  Description-Content-Type: text/markdown
18
18
  License-File: LICENSE
19
- Requires-Dist: google-generativeai>=0.3.2
19
+ Requires-Dist: google-genai>=1.0.0
20
20
  Requires-Dist: openai>=1.3.0
21
21
  Requires-Dist: ollama>=0.1.7
22
22
  Dynamic: author
@@ -67,7 +67,7 @@ pip install LLMFunctionObjects
67
67
 
68
68
  ## Design
69
69
 
70
- "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-generativeai"](https://pypi.org/project/google-generativeai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
70
+ "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-genai"](https://pypi.org/project/google-genai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
71
71
 
72
72
  Other LLM access packages can be utilized via appropriate LLM configurations.
73
73
 
@@ -161,6 +161,8 @@ for k, v in llm_configuration('ChatGPT').to_dict().items():
161
161
  **Remark:** Both the "OpenAI" and "ChatGPT" configuration use functions of the package "openai", [OAIp1].
162
162
  The "OpenAI" configuration is for text-completions;
163
163
  the "ChatGPT" configuration is for chat-completions.
164
+ To use an OpenAI-compatible local server, pass `base_url` (or set `OPENAI_BASE_URL`), e.g.
165
+ `llm_configuration("ChatGPT", base_url="http://127.0.0.1:8080/v1")`.
164
166
 
165
167
  ### Gemini-based
166
168
 
@@ -562,7 +564,7 @@ Generally, speaking prefer using the "Chat" prefixed methods: "ChatGPT" and "Cha
562
564
  [PyPI.org/antononcube](https://pypi.org/user/antononcube/).
563
565
 
564
566
  [GAIp1] Google AI,
565
- [google-generativeai (Google Generative AI Python Client)](https://pypi.org/project/google-generativeai/),
567
+ [google-genai (Google Gen AI Python SDK)](https://pypi.org/project/google-genai/),
566
568
  (2023),
567
569
  [PyPI.org/google-ai](https://pypi.org/user/google-ai/).
568
570
 
@@ -1,3 +1,3 @@
1
- google-generativeai>=0.3.2
1
+ google-genai>=1.0.0
2
2
  openai>=1.3.0
3
3
  ollama>=0.1.7
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: LLMFunctionObjects
3
- Version: 0.2.0
3
+ Version: 0.2.2
4
4
  Summary: Large Language Models (LLMs) functions package.
5
5
  Home-page: https://github.com/antononcube/Python-packages/tree/main/LLMFunctionObjects
6
6
  Author: Anton Antonov
@@ -16,7 +16,7 @@ Classifier: Operating System :: OS Independent
16
16
  Requires-Python: >=3.7
17
17
  Description-Content-Type: text/markdown
18
18
  License-File: LICENSE
19
- Requires-Dist: google-generativeai>=0.3.2
19
+ Requires-Dist: google-genai>=1.0.0
20
20
  Requires-Dist: openai>=1.3.0
21
21
  Requires-Dist: ollama>=0.1.7
22
22
  Dynamic: author
@@ -67,7 +67,7 @@ pip install LLMFunctionObjects
67
67
 
68
68
  ## Design
69
69
 
70
- "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-generativeai"](https://pypi.org/project/google-generativeai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
70
+ "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-genai"](https://pypi.org/project/google-genai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
71
71
 
72
72
  Other LLM access packages can be utilized via appropriate LLM configurations.
73
73
 
@@ -161,6 +161,8 @@ for k, v in llm_configuration('ChatGPT').to_dict().items():
161
161
  **Remark:** Both the "OpenAI" and "ChatGPT" configuration use functions of the package "openai", [OAIp1].
162
162
  The "OpenAI" configuration is for text-completions;
163
163
  the "ChatGPT" configuration is for chat-completions.
164
+ To use an OpenAI-compatible local server, pass `base_url` (or set `OPENAI_BASE_URL`), e.g.
165
+ `llm_configuration("ChatGPT", base_url="http://127.0.0.1:8080/v1")`.
164
166
 
165
167
  ### Gemini-based
166
168
 
@@ -562,7 +564,7 @@ Generally, speaking prefer using the "Chat" prefixed methods: "ChatGPT" and "Cha
562
564
  [PyPI.org/antononcube](https://pypi.org/user/antononcube/).
563
565
 
564
566
  [GAIp1] Google AI,
565
- [google-generativeai (Google Generative AI Python Client)](https://pypi.org/project/google-generativeai/),
567
+ [google-genai (Google Gen AI Python SDK)](https://pypi.org/project/google-genai/),
566
568
  (2023),
567
569
  [PyPI.org/google-ai](https://pypi.org/user/google-ai/).
568
570
 
@@ -34,7 +34,7 @@ pip install LLMFunctionObjects
34
34
 
35
35
  ## Design
36
36
 
37
- "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-generativeai"](https://pypi.org/project/google-generativeai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
37
+ "Out of the box" ["LLMFunctionObjects"](https://pypi.org/project/LLMFunctionObjects) uses ["openai"](https://pypi.org/project/openai/), [OAIp1], and ["google-genai"](https://pypi.org/project/google-genai/), [GAIp1], and ["ollama"](https://pypi.org/project/ollama/).
38
38
 
39
39
  Other LLM access packages can be utilized via appropriate LLM configurations.
40
40
 
@@ -128,6 +128,8 @@ for k, v in llm_configuration('ChatGPT').to_dict().items():
128
128
  **Remark:** Both the "OpenAI" and "ChatGPT" configuration use functions of the package "openai", [OAIp1].
129
129
  The "OpenAI" configuration is for text-completions;
130
130
  the "ChatGPT" configuration is for chat-completions.
131
+ To use an OpenAI-compatible local server, pass `base_url` (or set `OPENAI_BASE_URL`), e.g.
132
+ `llm_configuration("ChatGPT", base_url="http://127.0.0.1:8080/v1")`.
131
133
 
132
134
  ### Gemini-based
133
135
 
@@ -529,7 +531,7 @@ Generally, speaking prefer using the "Chat" prefixed methods: "ChatGPT" and "Cha
529
531
  [PyPI.org/antononcube](https://pypi.org/user/antononcube/).
530
532
 
531
533
  [GAIp1] Google AI,
532
- [google-generativeai (Google Generative AI Python Client)](https://pypi.org/project/google-generativeai/),
534
+ [google-genai (Google Gen AI Python SDK)](https://pypi.org/project/google-genai/),
533
535
  (2023),
534
536
  [PyPI.org/google-ai](https://pypi.org/user/google-ai/).
535
537
 
@@ -541,4 +543,4 @@ Generally, speaking prefer using the "Chat" prefixed methods: "ChatGPT" and "Cha
541
543
  [WRIp1] Wolfram Research, Inc.
542
544
  [LLMFunctions paclet](https://resources.wolframcloud.com/PacletRepository/resources/Wolfram/LLMFunctionObjects/),
543
545
  (2023),
544
- [Wolfram Language Paclet Repository](https://resources.wolframcloud.com/PacletRepository/).
546
+ [Wolfram Language Paclet Repository](https://resources.wolframcloud.com/PacletRepository/).
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
5
5
 
6
6
  setuptools.setup(
7
7
  name="LLMFunctionObjects",
8
- version="0.2.0",
8
+ version="0.2.2",
9
9
  author="Anton Antonov",
10
10
  author_email="antononcube@posteo.net",
11
11
  description="Large Language Models (LLMs) functions package.",
@@ -13,7 +13,7 @@ setuptools.setup(
13
13
  long_description_content_type="text/markdown",
14
14
  url="https://github.com/antononcube/Python-packages/tree/main/LLMFunctionObjects",
15
15
  packages=setuptools.find_packages(),
16
- install_requires=["google-generativeai>=0.3.2", "openai>=1.3.0", "ollama>=0.1.7"],
16
+ install_requires=["google-genai>=1.0.0", "openai>=1.3.0", "ollama>=0.1.7"],
17
17
  classifiers=[
18
18
  "Intended Audience :: Science/Research",
19
19
  "Intended Audience :: Developers",
@@ -1,84 +0,0 @@
1
- import google.generativeai as genai
2
-
3
- from LLMFunctionObjects.Evaluator import Evaluator
4
-
5
-
6
- def _extract_gemini_text(res):
7
- if hasattr(res, "text") and res.text is not None:
8
- return res.text
9
- if isinstance(res, dict):
10
- if "text" in res and res["text"] is not None:
11
- return res["text"]
12
- if hasattr(res, "candidates") and res.candidates:
13
- cand = res.candidates[0]
14
- if hasattr(cand, "content") and hasattr(cand.content, "parts"):
15
- parts = cand.content.parts
16
- if parts:
17
- if isinstance(parts[0], str):
18
- return "".join(parts)
19
- if hasattr(parts[0], "text"):
20
- return "".join([p.text for p in parts if hasattr(p, "text")])
21
- return res
22
-
23
-
24
- class EvaluatorGemini(Evaluator):
25
- def eval(self, texts, **args):
26
- confDict = self.conf.to_dict()
27
-
28
- echo = args.get("echo", False)
29
- if echo:
30
- print(f"Configuration: {self.conf}")
31
-
32
- args2 = {**self.conf.to_dict(), **args}
33
-
34
- # Handle argument renames
35
- for k, v in confDict["argument_renames"].items():
36
- args2[v] = args2.get(v, args2.get(k, None))
37
-
38
- # Build prompt
39
- fullPrompt = confDict["prompt_delimiter"].join(confDict["prompts"])
40
- prompt = self.prompt_texts_combiner(fullPrompt, texts)
41
-
42
- if echo:
43
- print(f"Prompt: {prompt}")
44
-
45
- # Configure model
46
- model_name = args2.get("model", self.conf.model)
47
- system_instruction = args2.get("system_instruction", None)
48
- tools = args2.get("tools", None)
49
- tool_config = args2.get("tool_config", None)
50
- model_init_args = {}
51
- if system_instruction:
52
- model_init_args["system_instruction"] = system_instruction
53
- if tools is not None:
54
- model_init_args["tools"] = tools
55
- if tool_config is not None:
56
- model_init_args["tool_config"] = tool_config
57
-
58
- try:
59
- model = genai.GenerativeModel(model_name, **model_init_args)
60
- except TypeError:
61
- # Fallback for older google-generativeai versions
62
- model = genai.GenerativeModel(model_name)
63
- if system_instruction:
64
- prompt = f"{system_instruction}\n\n{prompt}"
65
-
66
- model_args = {
67
- "generation_config": args2.get("generation_config", None),
68
- "safety_settings": args2.get("safety_settings", None),
69
- "tools": None,
70
- "tool_config": None,
71
- "stream": args2.get("stream", None),
72
- "request_options": args2.get("request_options", None),
73
- }
74
- model_args = {k: v for k, v in model_args.items() if v is not None}
75
-
76
- # Invoke Gemini
77
- self.llm_result = None
78
- res = model.generate_content(prompt, **model_args)
79
- self.llm_result = res
80
-
81
- if echo:
82
- print(f"LLM result: {res}")
83
-
84
- return self.post_process(_extract_gemini_text(res), form=args.get("form", None))