llm-dialog-manager 0.4.1__tar.gz → 0.4.2__tar.gz
Sign up to get free protection for your applications and to get access to all the features.
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/PKG-INFO +1 -1
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager/__init__.py +1 -1
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager/agent.py +11 -6
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/PKG-INFO +1 -1
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/pyproject.toml +1 -1
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/LICENSE +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/README.md +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager/chat_history.py +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager/key_manager.py +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/SOURCES.txt +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/dependency_links.txt +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/requires.txt +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/top_level.txt +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/setup.cfg +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/tests/test_agent.py +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/tests/test_chat_history.py +0 -0
- {llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/tests/test_key_manager.py +0 -0
@@ -74,7 +74,10 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
74
74
|
"""
|
75
75
|
try:
|
76
76
|
service = ""
|
77
|
-
if "claude" in model:
|
77
|
+
if "openai" in model:
|
78
|
+
service = "openai"
|
79
|
+
model
|
80
|
+
elif "claude" in model:
|
78
81
|
service = "anthropic"
|
79
82
|
elif "gemini" in model:
|
80
83
|
service = "gemini"
|
@@ -92,7 +95,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
92
95
|
|
93
96
|
def format_messages_for_api(model, messages):
|
94
97
|
"""Convert ChatHistory messages to the format required by the specific API."""
|
95
|
-
if "claude" in model:
|
98
|
+
if "claude" in model and "openai" not in model:
|
96
99
|
formatted = []
|
97
100
|
system_msg = ""
|
98
101
|
if messages and messages[0]["role"] == "system":
|
@@ -141,7 +144,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
141
144
|
formatted.append({"role": msg["role"], "content": combined_content})
|
142
145
|
return system_msg, formatted
|
143
146
|
|
144
|
-
elif "gemini" in model or "gpt" in model or "grok" in model:
|
147
|
+
elif ("gemini" in model or "gpt" in model or "grok" in model) and "openai" not in model:
|
145
148
|
formatted = []
|
146
149
|
for msg in messages:
|
147
150
|
content = msg["content"]
|
@@ -191,7 +194,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
191
194
|
|
192
195
|
system_msg, formatted_messages = format_messages_for_api(model, messages.copy())
|
193
196
|
|
194
|
-
if "claude" in model:
|
197
|
+
if "claude" in model and "openai" not in model:
|
195
198
|
# Check for Vertex configuration
|
196
199
|
vertex_project_id = os.getenv('VERTEX_PROJECT_ID')
|
197
200
|
vertex_region = os.getenv('VERTEX_REGION')
|
@@ -232,7 +235,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
232
235
|
|
233
236
|
return response.completion
|
234
237
|
|
235
|
-
elif "gemini" in model:
|
238
|
+
elif "gemini" in model and "openai" not in model:
|
236
239
|
try:
|
237
240
|
# First try OpenAI-style API
|
238
241
|
client = openai.OpenAI(
|
@@ -284,7 +287,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
284
287
|
|
285
288
|
return response.text
|
286
289
|
|
287
|
-
elif "grok" in model:
|
290
|
+
elif "grok" in model and "openai" not in model:
|
288
291
|
# Randomly choose between OpenAI and Anthropic SDK
|
289
292
|
use_anthropic = random.choice([True, False])
|
290
293
|
|
@@ -326,6 +329,8 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
|
|
326
329
|
return response.choices[0].message.content
|
327
330
|
|
328
331
|
else: # OpenAI models
|
332
|
+
if model.endswith("-openai"):
|
333
|
+
model = model[:-7] # Remove last 7 characters ("-openai")
|
329
334
|
client = openai.OpenAI(api_key=api_key, base_url=base_url)
|
330
335
|
# Set response_format based on json_format
|
331
336
|
response_format = {"type": "json_object"} if json_format else {"type": "plain_text"}
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
4
4
|
|
5
5
|
[project]
|
6
6
|
name = "llm_dialog_manager"
|
7
|
-
version = "0.4.1"
|
7
|
+
version = "0.4.2"
|
8
8
|
description = "A Python package for managing LLM chat conversation history"
|
9
9
|
readme = "README.md"
|
10
10
|
classifiers = [ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Scientific/Engineering :: Artificial Intelligence",]
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/SOURCES.txt
RENAMED
File without changes
|
File without changes
|
{llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/requires.txt
RENAMED
File without changes
|
{llm_dialog_manager-0.4.1 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/top_level.txt
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|