llm-dialog-manager 0.1.2480.tar.gz → 0.2.4.tar.gz

Files changed (17)
  1. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/PKG-INFO +1 -1
  2. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager/__init__.py +1 -1
  3. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager/agent.py +51 -25
  4. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager.egg-info/PKG-INFO +1 -1
  5. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/pyproject.toml +1 -1
  6. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/tests/test_chat_history.py +1 -1
  7. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/LICENSE +0 -0
  8. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/README.md +0 -0
  9. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager/chat_history.py +0 -0
  10. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager/key_manager.py +0 -0
  11. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager.egg-info/SOURCES.txt +0 -0
  12. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager.egg-info/dependency_links.txt +0 -0
  13. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager.egg-info/requires.txt +0 -0
  14. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/llm_dialog_manager.egg-info/top_level.txt +0 -0
  15. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/setup.cfg +0 -0
  16. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/tests/test_agent.py +0 -0
  17. {llm_dialog_manager-0.1.2480 → llm_dialog_manager-0.2.4}/tests/test_key_manager.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm_dialog_manager
-Version: 0.1.2480
+Version: 0.2.4
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -1,4 +1,4 @@
 from .chat_history import ChatHistory
 from .agent import Agent
 
-__version__ = "0.1.2480"
+__version__ = "0.2.4"
@@ -113,27 +113,53 @@ def completion(model: str, messages: List[Dict[str, str]], max_tokens: int = 100
         return response.content[0].text
 
     elif "gemini" in model:
-        client = openai.OpenAI(
-            api_key=api_key,
-            base_url="https://generativelanguage.googleapis.com/v1beta/"
-        )
-        print(api_key)
-        # Remove any system message from the beginning if present
-        if messages and messages[0]["role"] == "system":
-            system_msg = messages.pop(0)
-            # Prepend system message to first user message if exists
-            if messages:
-                messages[0]["content"] = f"{system_msg['content']}\n\n{messages[0]['content']}"
-
-        response = client.chat.completions.create(
-            model=model,
-            messages=messages,
-            # max_tokens=max_tokens,
-            temperature=temperature
-        )
-        print(len(response.choices))
-
-        return response.choices[0].message.content
+        try:
+            # First try OpenAI-style API
+            client = openai.OpenAI(
+                api_key=api_key,
+                base_url="https://generativelanguage.googleapis.com/v1beta/"
+            )
+            # Remove any system message from the beginning if present
+            if messages and messages[0]["role"] == "system":
+                system_msg = messages.pop(0)
+                # Prepend system message to first user message if exists
+                if messages:
+                    messages[0]["content"] = f"{system_msg['content']}\n\n{messages[0]['content']}"
+
+            response = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=temperature
+            )
+
+            return response.choices[0].message.content
+
+        except Exception as e:
+            # If OpenAI-style API fails, fall back to Google's genai library
+            logger.info("Falling back to Google's genai library")
+            genai.configure(api_key=api_key)
+
+            # Convert messages to Gemini format
+            gemini_messages = []
+            for msg in messages:
+                if msg["role"] == "system":
+                    # Prepend system message to first user message if exists
+                    if gemini_messages:
+                        gemini_messages[0].parts[0].text = f"{msg['content']}\n\n{gemini_messages[0].parts[0].text}"
+                else:
+                    gemini_messages.append({"role": msg["role"], "parts": [{"text": msg["content"]}]})
+
+            # Create Gemini model and generate response
+            model = genai.GenerativeModel(model_name=model)
+            response = model.generate_content(
+                gemini_messages,
+                generation_config=genai.types.GenerationConfig(
+                    temperature=temperature,
+                    max_output_tokens=max_tokens
+                )
+            )
+
+            return response.text
 
     elif "grok" in model:
         # Randomly choose between OpenAI and Anthropic SDK
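For reference, the rewritten `gemini` branch tries Google's OpenAI-compatible endpoint first and only falls back to the `google-generativeai` SDK when that call raises. Below is a minimal standalone sketch of the same pattern, assuming the `openai` and `google-generativeai` packages are installed and the key is read from a `GEMINI_API_KEY` environment variable (the variable name and the sample prompt are illustrative, not taken from the package):

```python
import os

import openai
import google.generativeai as genai

api_key = os.environ["GEMINI_API_KEY"]  # illustrative env var, not from the package
model = "gemini-1.5-pro-002"
messages = [{"role": "user", "content": "Say hello"}]  # illustrative prompt
temperature = 0.7
max_tokens = 100

try:
    # First attempt: Gemini's OpenAI-compatible endpoint via the openai SDK.
    client = openai.OpenAI(
        api_key=api_key,
        base_url="https://generativelanguage.googleapis.com/v1beta/",
    )
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
    )
    print(response.choices[0].message.content)
except Exception:
    # Fallback: Google's google-generativeai library with Gemini-format messages.
    genai.configure(api_key=api_key)
    gemini_messages = [
        {"role": m["role"], "parts": [{"text": m["content"]}]} for m in messages
    ]
    gemini_model = genai.GenerativeModel(model_name=model)
    response = gemini_model.generate_content(
        gemini_messages,
        generation_config=genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        ),
    )
    print(response.text)
```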
@@ -256,7 +282,7 @@ if __name__ == "__main__":
     # information_detector_agent.add_message("user", text)
     # response = information_detector_agent.generate_response()
     # print(response)
-    agent = Agent("claude-3-5-sonnet-20241022", "you are an assistant", memory_enabled=True)
+    agent = Agent("gemini-1.5-pro-002", "you are an assistant", memory_enabled=True)
 
     # Format the prompt to check if the section is the last one in the outline
     prompt = f"Say: {text}\n"
@@ -265,8 +291,8 @@ if __name__ == "__main__":
     agent.add_message("user", prompt)
     agent.add_message("assistant", "the answer")
 
-    print(agent.generate_response(max_tokens=20, temperature=0.0))
+    print(agent.generate_response())
     print(agent.history[:])
-    a = agent.history.pop()
-    print(a)
+    last_message = agent.history.pop()
+    print(last_message)
     print(agent.history[:])
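The updated `__main__` block exercises the new Gemini path through the package's public `Agent` class. A short usage sketch based only on the calls shown above (the prompt string is illustrative; `generate_response()` is now called with its defaults rather than explicit `max_tokens`/`temperature`):

```python
from llm_dialog_manager import Agent

# Gemini-backed agent with memory enabled, mirroring the updated example.
agent = Agent("gemini-1.5-pro-002", "you are an assistant", memory_enabled=True)
agent.add_message("user", "Say: hello world")  # illustrative prompt

print(agent.generate_response())    # library defaults for sampling parameters
print(agent.history[:])             # full conversation so far

last_message = agent.history.pop()  # remove and inspect the most recent message
print(last_message)
print(agent.history[:])
```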
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm_dialog_manager
-Version: 0.1.2480
+Version: 0.2.4
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llm_dialog_manager"
-version = "0.1.2480"
+version = "0.2.4"
 description = "A Python package for managing LLM chat conversation history"
 readme = "README.md"
 authors = [{ name = "xihajun", email = "work@2333.fun" }]
@@ -1,5 +1,5 @@
 import pytest
-from ai_chat_history import ChatHistory
+from llm_dialog_manager import ChatHistory
 
 def test_chat_history_initialization():
     # Test empty initialization
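The test module now imports `ChatHistory` from the package's actual name, which `llm_dialog_manager/__init__.py` re-exports. A hypothetical smoke test for just the import path (the `test_import_path` function is not part of the repository's suite):

```python
# Hypothetical smoke test (not in the repository's suite): the corrected
# import path re-exports ChatHistory from the package __init__.
from llm_dialog_manager import ChatHistory


def test_import_path():
    assert ChatHistory is not None
```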