Comparing llm_dialog_manager-0.4.1-py3-none-any.whl → llm_dialog_manager-0.4.3-py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
@@ -1,4 +1,4 @@
 from .chat_history import ChatHistory
 from .agent import Agent
 
-__version__ = "0.4.1"
+__version__ = "0.4.3"
@@ -74,7 +74,10 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
     """
     try:
         service = ""
-        if "claude" in model:
+        if "openai" in model:
+            service = "openai"
+            model
+        elif "claude" in model:
             service = "anthropic"
         elif "gemini" in model:
             service = "gemini"
@@ -92,7 +95,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
        def format_messages_for_api(model, messages):
            """Convert ChatHistory messages to the format required by the specific API."""
-           if "claude" in model:
+           if "claude" in model and "openai" not in model:
                formatted = []
                system_msg = ""
                if messages and messages[0]["role"] == "system":
@@ -141,7 +144,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
                    formatted.append({"role": msg["role"], "content": combined_content})
                return system_msg, formatted
 
-           elif "gemini" in model or "gpt" in model or "grok" in model:
+           elif ("gemini" in model or "gpt" in model or "grok" in model) and "openai" not in model:
                formatted = []
                for msg in messages:
                    content = msg["content"]
@@ -191,7 +194,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
        system_msg, formatted_messages = format_messages_for_api(model, messages.copy())
 
-       if "claude" in model:
+       if "claude" in model and "openai" not in model:
            # Check for Vertex configuration
            vertex_project_id = os.getenv('VERTEX_PROJECT_ID')
            vertex_region = os.getenv('VERTEX_REGION')
@@ -232,7 +235,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
            return response.completion
 
-       elif "gemini" in model:
+       elif "gemini" in model and "openai" not in model:
            try:
                # First try OpenAI-style API
                client = openai.OpenAI(
@@ -284,7 +287,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
            return response.text
 
-       elif "grok" in model:
+       elif "grok" in model and "openai" not in model:
            # Randomly choose between OpenAI and Anthropic SDK
            use_anthropic = random.choice([True, False])
 
@@ -326,6 +329,8 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
            return response.choices[0].message.content
 
        else: # OpenAI models
+           if model.endswith("-openai"):
+               model = model[:-7] # Remove last 7 characters ("-openai")
            client = openai.OpenAI(api_key=api_key, base_url=base_url)
            # Set response_format based on json_format
            response_format = {"type": "json_object"} if json_format else {"type": "plain_text"}
@@ -384,7 +389,7 @@ class Agent:
        if image_path:
            if not os.path.exists(image_path):
                raise FileNotFoundError(f"Image file {image_path} does not exist.")
-           if "gemini" in self.model_name:
+           if "gemini" in self.model_name and "openai" not in self.model_name:
                # For Gemini, load as PIL.Image
                image_pil = Image.open(image_path)
                image_block = image_pil
@@ -401,7 +406,7 @@ class Agent:
            }
        else:
            # If image_url is provided
-           if "gemini" in self.model_name:
+           if "gemini" in self.model_name and "openai" not in self.model_name:
                # For Gemini, you can pass image URLs directly
                image_block = {"type": "image_url", "image_url": {"url": image_url}}
            else:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm_dialog_manager
-Version: 0.4.1
+Version: 0.4.3
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -0,0 +1,9 @@
+llm_dialog_manager/__init__.py,sha256=VVVbwGXF9QXgGJSFY23fzwk-WD2zXY1pCIiMQsJ01Q4,86
+llm_dialog_manager/agent.py,sha256=hnMJ0scLZ2QA0wSm_EufID1IHdCo9AyOu1nnWKD9dl4,24333
+llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
+llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+llm_dialog_manager-0.4.3.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+llm_dialog_manager-0.4.3.dist-info/METADATA,sha256=z1Idh3H29Ws4ea2qgiO68VAaGAfCBJcnuXZbZ1RaskA,4194
+llm_dialog_manager-0.4.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+llm_dialog_manager-0.4.3.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+llm_dialog_manager-0.4.3.dist-info/RECORD,,
@@ -1,9 +0,0 @@
-llm_dialog_manager/__init__.py,sha256=hTHvsXzvD5geKgv2XERYcp2f-T3LoVVc3arXfPtNS1k,86
-llm_dialog_manager/agent.py,sha256=ZKO3eKHTKcbmYpVRRIpzDy7Tlp_VgQ90ewr1758Ozgs,23931
-llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
-llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
-llm_dialog_manager-0.4.1.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
-llm_dialog_manager-0.4.1.dist-info/METADATA,sha256=LER5FN6lFQFPs_8A-fIM7VYmqN-fh0nCD6Dt8vslsiY,4194
-llm_dialog_manager-0.4.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-llm_dialog_manager-0.4.1.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
-llm_dialog_manager-0.4.1.dist-info/RECORD,,