llm-dialog-manager 0.2.7__tar.gz → 0.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17)
  1. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/PKG-INFO +23 -67
  2. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/README.md +22 -66
  3. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager/__init__.py +1 -1
  4. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager/agent.py +59 -23
  5. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager.egg-info/PKG-INFO +23 -67
  6. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/pyproject.toml +1 -1
  7. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/tests/test_chat_history.py +1 -1
  8. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/LICENSE +0 -0
  9. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager/chat_history.py +0 -0
  10. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager/key_manager.py +0 -0
  11. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager.egg-info/SOURCES.txt +0 -0
  12. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager.egg-info/dependency_links.txt +0 -0
  13. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager.egg-info/requires.txt +0 -0
  14. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/llm_dialog_manager.egg-info/top_level.txt +0 -0
  15. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/setup.cfg +0 -0
  16. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/tests/test_agent.py +0 -0
  17. {llm_dialog_manager-0.2.7 → llm_dialog_manager-0.3.1}/tests/test_key_manager.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llm_dialog_manager
3
- Version: 0.2.7
3
+ Version: 0.3.1
4
4
  Summary: A Python package for managing LLM chat conversation history
5
5
  Author-email: xihajun <work@2333.fun>
6
6
  License: MIT
@@ -73,23 +73,29 @@ pip install llm-dialog-manager
73
73
 
74
74
  ## Quick Start
75
75
 
76
- ### Basic Usage
77
76
 
78
- ```python
79
- from llm_dialog_manager import ChatHistory
77
+ ### Environment Variables
80
78
 
81
- # Initialize with a system message
82
- history = ChatHistory("You are a helpful assistant")
79
+ Create a `.env` file in your project root:
83
80
 
84
- # Add messages
85
- history.add_user_message("Hello!")
86
- history.add_assistant_message("Hi there! How can I help you today?")
81
+ ```bash
82
+ # OpenAI
83
+ OPENAI_API_KEY_1=your-key-1
84
+ OPENAI_API_BASE_1=https://api.openai.com/v1
87
85
 
88
- # Print conversation
89
- print(history)
86
+ # Anthropic
87
+ ANTHROPIC_API_KEY_1=your-anthropic-key
88
+ ANTHROPIC_API_BASE_1=https://api.anthropic.com
89
+
90
+ # Google
91
+ GEMINI_API_KEY=your-gemini-key
92
+
93
+ # X.AI
94
+ XAI_API_KEY=your-x-key
90
95
  ```
91
96
 
92
- ### Using the AI Agent
97
+ ### Basic Usage
98
+
93
99
 
94
100
  ```python
95
101
  from llm_dialog_manager import Agent
@@ -106,65 +112,15 @@ response = agent.generate_response()
106
112
  agent.save_conversation()
107
113
  ```
108
114
 
109
- ## Advanced Features
110
-
111
- ### Managing Multiple API Keys
112
-
113
- ```python
114
- from llm_dialog_manager import Agent
115
-
116
- # Use specific API key
117
- agent = Agent("gpt-4", api_key="your-api-key")
118
-
119
- # Or use environment variables
120
- # OPENAI_API_KEY_1=key1
121
- # OPENAI_API_KEY_2=key2
122
- # The system will automatically handle load balancing
123
- ```
124
-
125
- ### Conversation Management
126
-
127
- ```python
128
- from llm_dialog_manager import ChatHistory
129
-
130
- history = ChatHistory()
131
-
132
- # Add messages with role validation
133
- history.add_message("Hello system", "system")
134
- history.add_message("Hello user", "user")
135
- history.add_message("Hello assistant", "assistant")
136
-
137
- # Search conversations
138
- results = history.search_for_keyword("hello")
139
-
140
- # Get conversation status
141
- status = history.conversation_status()
142
- history.display_conversation_status()
143
-
144
- # Get conversation snippets
145
- snippet = history.get_conversation_snippet(1)
146
- history.display_snippet(1)
147
- ```
148
-
149
- ## Environment Variables
150
-
151
- Create a `.env` file in your project root:
115
+ ### Setup Debugging Console
152
116
 
153
117
  ```bash
154
- # OpenAI
155
- OPENAI_API_KEY_1=your-key-1
156
- OPENAI_API_BASE_1=https://api.openai.com/v1
157
-
158
- # Anthropic
159
- ANTHROPIC_API_KEY_1=your-anthropic-key
160
- ANTHROPIC_API_BASE_1=https://api.anthropic.com
118
+ python app.py
119
+ # open localhost:8000
120
+ ```
121
+ https://github.com/user-attachments/assets/5f640029-24e6-44ea-a3a3-02eb3de0d4df
161
122
 
162
- # Google
163
- GEMINI_API_KEY=your-gemini-key
164
123
 
165
- # X.AI
166
- XAI_API_KEY=your-x-key
167
- ```
168
124
 
169
125
  ## Development
170
126
 
@@ -26,23 +26,29 @@ pip install llm-dialog-manager
26
26
 
27
27
  ## Quick Start
28
28
 
29
- ### Basic Usage
30
29
 
31
- ```python
32
- from llm_dialog_manager import ChatHistory
30
+ ### Environment Variables
33
31
 
34
- # Initialize with a system message
35
- history = ChatHistory("You are a helpful assistant")
32
+ Create a `.env` file in your project root:
36
33
 
37
- # Add messages
38
- history.add_user_message("Hello!")
39
- history.add_assistant_message("Hi there! How can I help you today?")
34
+ ```bash
35
+ # OpenAI
36
+ OPENAI_API_KEY_1=your-key-1
37
+ OPENAI_API_BASE_1=https://api.openai.com/v1
40
38
 
41
- # Print conversation
42
- print(history)
39
+ # Anthropic
40
+ ANTHROPIC_API_KEY_1=your-anthropic-key
41
+ ANTHROPIC_API_BASE_1=https://api.anthropic.com
42
+
43
+ # Google
44
+ GEMINI_API_KEY=your-gemini-key
45
+
46
+ # X.AI
47
+ XAI_API_KEY=your-x-key
43
48
  ```
44
49
 
45
- ### Using the AI Agent
50
+ ### Basic Usage
51
+
46
52
 
47
53
  ```python
48
54
  from llm_dialog_manager import Agent
@@ -59,65 +65,15 @@ response = agent.generate_response()
59
65
  agent.save_conversation()
60
66
  ```
61
67
 
62
- ## Advanced Features
63
-
64
- ### Managing Multiple API Keys
65
-
66
- ```python
67
- from llm_dialog_manager import Agent
68
-
69
- # Use specific API key
70
- agent = Agent("gpt-4", api_key="your-api-key")
71
-
72
- # Or use environment variables
73
- # OPENAI_API_KEY_1=key1
74
- # OPENAI_API_KEY_2=key2
75
- # The system will automatically handle load balancing
76
- ```
77
-
78
- ### Conversation Management
79
-
80
- ```python
81
- from llm_dialog_manager import ChatHistory
82
-
83
- history = ChatHistory()
84
-
85
- # Add messages with role validation
86
- history.add_message("Hello system", "system")
87
- history.add_message("Hello user", "user")
88
- history.add_message("Hello assistant", "assistant")
89
-
90
- # Search conversations
91
- results = history.search_for_keyword("hello")
92
-
93
- # Get conversation status
94
- status = history.conversation_status()
95
- history.display_conversation_status()
96
-
97
- # Get conversation snippets
98
- snippet = history.get_conversation_snippet(1)
99
- history.display_snippet(1)
100
- ```
101
-
102
- ## Environment Variables
103
-
104
- Create a `.env` file in your project root:
68
+ ### Setup Debugging Console
105
69
 
106
70
  ```bash
107
- # OpenAI
108
- OPENAI_API_KEY_1=your-key-1
109
- OPENAI_API_BASE_1=https://api.openai.com/v1
110
-
111
- # Anthropic
112
- ANTHROPIC_API_KEY_1=your-anthropic-key
113
- ANTHROPIC_API_BASE_1=https://api.anthropic.com
71
+ python app.py
72
+ # open localhost:8000
73
+ ```
74
+ https://github.com/user-attachments/assets/5f640029-24e6-44ea-a3a3-02eb3de0d4df
114
75
 
115
- # Google
116
- GEMINI_API_KEY=your-gemini-key
117
76
 
118
- # X.AI
119
- XAI_API_KEY=your-x-key
120
- ```
121
77
 
122
78
  ## Development
123
79
 
@@ -1,4 +1,4 @@
1
1
  from .chat_history import ChatHistory
2
2
  from .agent import Agent
3
3
 
4
- __version__ = "0.2.7"
4
+ __version__ = "0.3.1"
@@ -113,25 +113,53 @@ def completion(model: str, messages: List[Dict[str, str]], max_tokens: int = 100
113
113
  return response.content[0].text
114
114
 
115
115
  elif "gemini" in model:
116
- client = openai.OpenAI(
117
- api_key=api_key,
118
- base_url="https://generativelanguage.googleapis.com/v1beta/"
119
- )
120
- # Remove any system message from the beginning if present
121
- if messages and messages[0]["role"] == "system":
122
- system_msg = messages.pop(0)
123
- # Prepend system message to first user message if exists
124
- if messages:
125
- messages[0]["content"] = f"{system_msg['content']}\n\n{messages[0]['content']}"
126
-
127
- response = client.chat.completions.create(
128
- model=model,
129
- messages=messages,
130
- # max_tokens=max_tokens,
131
- temperature=temperature
132
- )
133
-
134
- return response.choices[0].message.content
116
+ try:
117
+ # First try OpenAI-style API
118
+ client = openai.OpenAI(
119
+ api_key=api_key,
120
+ base_url="https://generativelanguage.googleapis.com/v1beta/"
121
+ )
122
+ # Remove any system message from the beginning if present
123
+ if messages and messages[0]["role"] == "system":
124
+ system_msg = messages.pop(0)
125
+ # Prepend system message to first user message if exists
126
+ if messages:
127
+ messages[0]["content"] = f"{system_msg['content']}\n\n{messages[0]['content']}"
128
+
129
+ response = client.chat.completions.create(
130
+ model=model,
131
+ messages=messages,
132
+ temperature=temperature
133
+ )
134
+
135
+ return response.choices[0].message.content
136
+
137
+ except Exception as e:
138
+ # If OpenAI-style API fails, fall back to Google's genai library
139
+ logger.info("Falling back to Google's genai library")
140
+ genai.configure(api_key=api_key)
141
+
142
+ # Convert messages to Gemini format
143
+ gemini_messages = []
144
+ for msg in messages:
145
+ if msg["role"] == "system":
146
+ # Prepend system message to first user message if exists
147
+ if gemini_messages:
148
+ gemini_messages[0].parts[0].text = f"{msg['content']}\n\n{gemini_messages[0].parts[0].text}"
149
+ else:
150
+ gemini_messages.append({"role": msg["role"], "parts": [{"text": msg["content"]}]})
151
+
152
+ # Create Gemini model and generate response
153
+ model = genai.GenerativeModel(model_name=model)
154
+ response = model.generate_content(
155
+ gemini_messages,
156
+ generation_config=genai.types.GenerationConfig(
157
+ temperature=temperature,
158
+ max_output_tokens=max_tokens
159
+ )
160
+ )
161
+
162
+ return response.text
135
163
 
136
164
  elif "grok" in model:
137
165
  # Randomly choose between OpenAI and Anthropic SDK
@@ -236,8 +264,9 @@ class Agent:
236
264
  with open(filename, 'w', encoding='utf-8') as file:
237
265
  json.dump(self.history.messages, file, ensure_ascii=False, indent=4)
238
266
 
239
- def load_conversation(self):
240
- filename = f"{self.id}.json"
267
+ def load_conversation(self, filename=None):
268
+ if filename is None:
269
+ filename = f"{self.id}.json"
241
270
  with open(filename, 'r', encoding='utf-8') as file:
242
271
  messages = json.load(file)
243
272
  self.history = ChatHistory(messages)
@@ -247,7 +276,14 @@ if __name__ == "__main__":
247
276
  # write a test for detect finding agent
248
277
  text = "I think the answer is 42"
249
278
 
250
- agent = Agent("claude-3-5-sonnet-20241022", "you are an assistant", memory_enabled=True)
279
+ # from agent.messageloader import information_detector_messages
280
+
281
+ # # Now you can print or use information_detector_messages as needed
282
+ # information_detector_agent = Agent("gemini-1.5-pro", information_detector_messages)
283
+ # information_detector_agent.add_message("user", text)
284
+ # response = information_detector_agent.generate_response()
285
+ # print(response)
286
+ agent = Agent("gemini-1.5-pro-002", "you are an assistant", memory_enabled=True)
251
287
 
252
288
  # Format the prompt to check if the section is the last one in the outline
253
289
  prompt = f"Say: {text}\n"
@@ -256,7 +292,7 @@ if __name__ == "__main__":
256
292
  agent.add_message("user", prompt)
257
293
  agent.add_message("assistant", "the answer")
258
294
 
259
- print(agent.generate_response(max_tokens=20, temperature=0.0))
295
+ print(agent.generate_response())
260
296
  print(agent.history[:])
261
297
  last_message = agent.history.pop()
262
298
  print(last_message)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llm_dialog_manager
3
- Version: 0.2.7
3
+ Version: 0.3.1
4
4
  Summary: A Python package for managing LLM chat conversation history
5
5
  Author-email: xihajun <work@2333.fun>
6
6
  License: MIT
@@ -73,23 +73,29 @@ pip install llm-dialog-manager
73
73
 
74
74
  ## Quick Start
75
75
 
76
- ### Basic Usage
77
76
 
78
- ```python
79
- from llm_dialog_manager import ChatHistory
77
+ ### Environment Variables
80
78
 
81
- # Initialize with a system message
82
- history = ChatHistory("You are a helpful assistant")
79
+ Create a `.env` file in your project root:
83
80
 
84
- # Add messages
85
- history.add_user_message("Hello!")
86
- history.add_assistant_message("Hi there! How can I help you today?")
81
+ ```bash
82
+ # OpenAI
83
+ OPENAI_API_KEY_1=your-key-1
84
+ OPENAI_API_BASE_1=https://api.openai.com/v1
87
85
 
88
- # Print conversation
89
- print(history)
86
+ # Anthropic
87
+ ANTHROPIC_API_KEY_1=your-anthropic-key
88
+ ANTHROPIC_API_BASE_1=https://api.anthropic.com
89
+
90
+ # Google
91
+ GEMINI_API_KEY=your-gemini-key
92
+
93
+ # X.AI
94
+ XAI_API_KEY=your-x-key
90
95
  ```
91
96
 
92
- ### Using the AI Agent
97
+ ### Basic Usage
98
+
93
99
 
94
100
  ```python
95
101
  from llm_dialog_manager import Agent
@@ -106,65 +112,15 @@ response = agent.generate_response()
106
112
  agent.save_conversation()
107
113
  ```
108
114
 
109
- ## Advanced Features
110
-
111
- ### Managing Multiple API Keys
112
-
113
- ```python
114
- from llm_dialog_manager import Agent
115
-
116
- # Use specific API key
117
- agent = Agent("gpt-4", api_key="your-api-key")
118
-
119
- # Or use environment variables
120
- # OPENAI_API_KEY_1=key1
121
- # OPENAI_API_KEY_2=key2
122
- # The system will automatically handle load balancing
123
- ```
124
-
125
- ### Conversation Management
126
-
127
- ```python
128
- from llm_dialog_manager import ChatHistory
129
-
130
- history = ChatHistory()
131
-
132
- # Add messages with role validation
133
- history.add_message("Hello system", "system")
134
- history.add_message("Hello user", "user")
135
- history.add_message("Hello assistant", "assistant")
136
-
137
- # Search conversations
138
- results = history.search_for_keyword("hello")
139
-
140
- # Get conversation status
141
- status = history.conversation_status()
142
- history.display_conversation_status()
143
-
144
- # Get conversation snippets
145
- snippet = history.get_conversation_snippet(1)
146
- history.display_snippet(1)
147
- ```
148
-
149
- ## Environment Variables
150
-
151
- Create a `.env` file in your project root:
115
+ ### Setup Debugging Console
152
116
 
153
117
  ```bash
154
- # OpenAI
155
- OPENAI_API_KEY_1=your-key-1
156
- OPENAI_API_BASE_1=https://api.openai.com/v1
157
-
158
- # Anthropic
159
- ANTHROPIC_API_KEY_1=your-anthropic-key
160
- ANTHROPIC_API_BASE_1=https://api.anthropic.com
118
+ python app.py
119
+ # open localhost:8000
120
+ ```
121
+ https://github.com/user-attachments/assets/5f640029-24e6-44ea-a3a3-02eb3de0d4df
161
122
 
162
- # Google
163
- GEMINI_API_KEY=your-gemini-key
164
123
 
165
- # X.AI
166
- XAI_API_KEY=your-x-key
167
- ```
168
124
 
169
125
  ## Development
170
126
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "llm_dialog_manager"
7
- version = "0.2.7"
7
+ version = "0.3.1"
8
8
  description = "A Python package for managing LLM chat conversation history"
9
9
  readme = "README.md"
10
10
  authors = [{ name = "xihajun", email = "work@2333.fun" }]
@@ -1,5 +1,5 @@
1
1
  import pytest
2
- from ai_chat_history import ChatHistory
2
+ from llm_dialog_manager import ChatHistory
3
3
 
4
4
  def test_chat_history_initialization():
5
5
  # Test empty initialization