llm-dialog-manager 0.2.7__py3-none-any.whl → 0.3.1__py3-none-any.whl
- llm_dialog_manager/__init__.py +1 -1
- llm_dialog_manager/agent.py +59 -23
- {llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/METADATA +32 -76
- llm_dialog_manager-0.3.1.dist-info/RECORD +9 -0
- {llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/WHEEL +1 -1
- llm_dialog_manager-0.2.7.dist-info/RECORD +0 -9
- {llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/LICENSE +0 -0
- {llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/top_level.txt +0 -0
llm_dialog_manager/__init__.py
CHANGED
llm_dialog_manager/agent.py
CHANGED
@@ -113,25 +113,53 @@ def completion(model: str, messages: List[Dict[str, str]], max_tokens: int = 100
         return response.content[0].text
 
     elif "gemini" in model:
-    [previous Gemini implementation: 19 lines not captured in this diff view]
+        try:
+            # First try OpenAI-style API
+            client = openai.OpenAI(
+                api_key=api_key,
+                base_url="https://generativelanguage.googleapis.com/v1beta/"
+            )
+            # Remove any system message from the beginning if present
+            if messages and messages[0]["role"] == "system":
+                system_msg = messages.pop(0)
+                # Prepend system message to first user message if exists
+                if messages:
+                    messages[0]["content"] = f"{system_msg['content']}\n\n{messages[0]['content']}"
+
+            response = client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=temperature
+            )
+
+            return response.choices[0].message.content
+
+        except Exception as e:
+            # If OpenAI-style API fails, fall back to Google's genai library
+            logger.info("Falling back to Google's genai library")
+            genai.configure(api_key=api_key)
+
+            # Convert messages to Gemini format
+            gemini_messages = []
+            for msg in messages:
+                if msg["role"] == "system":
+                    # Prepend system message to first user message if exists
+                    if gemini_messages:
+                        gemini_messages[0].parts[0].text = f"{msg['content']}\n\n{gemini_messages[0].parts[0].text}"
+                else:
+                    gemini_messages.append({"role": msg["role"], "parts": [{"text": msg["content"]}]})
+
+            # Create Gemini model and generate response
+            model = genai.GenerativeModel(model_name=model)
+            response = model.generate_content(
+                gemini_messages,
+                generation_config=genai.types.GenerationConfig(
+                    temperature=temperature,
+                    max_output_tokens=max_tokens
+                )
+            )
+
+            return response.text
 
     elif "grok" in model:
         # Randomly choose between OpenAI and Anthropic SDK
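Net effect of the hunk above: Gemini traffic now goes through Google's OpenAI-compatible endpoint first, and falls back to the native `google-generativeai` SDK only if that call raises. A minimal sketch of exercising the updated helper; the message text is illustrative, and only the parameters visible in the hunk header are assumed:

```python
# Illustrative call into the updated completion() helper. Only the
# model/messages/max_tokens parameters shown in the hunk header are
# assumed here; API-key selection happens inside the package.
from llm_dialog_manager.agent import completion

reply = completion(
    model="gemini-1.5-pro-002",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    max_tokens=100,
)
print(reply)
```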
@@ -236,8 +264,9 @@ class Agent:
         with open(filename, 'w', encoding='utf-8') as file:
             json.dump(self.history.messages, file, ensure_ascii=False, indent=4)
 
-    def load_conversation(self):
-        filename = f"{self.id}.json"
+    def load_conversation(self, filename=None):
+        if filename is None:
+            filename = f"{self.id}.json"
         with open(filename, 'r', encoding='utf-8') as file:
             messages = json.load(file)
         self.history = ChatHistory(messages)
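The hunk above keeps `load_conversation`'s old default of `f"{self.id}.json"` but now also accepts an explicit path. A short sketch of the round trip; the `backup.json` name is hypothetical:

```python
# Round trip with the new optional filename parameter.
from llm_dialog_manager import Agent

agent = Agent("gemini-1.5-pro-002", "you are an assistant", memory_enabled=True)
agent.add_message("user", "hello")
agent.save_conversation()               # writes the agent's default JSON file
agent.load_conversation()               # reads f"{agent.id}.json" by default
agent.load_conversation("backup.json")  # or load any explicit file (hypothetical name)
```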
@@ -247,7 +276,14 @@ if __name__ == "__main__":
     # write a test for detect finding agent
     text = "I think the answer is 42"
 
-
+    # from agent.messageloader import information_detector_messages
+
+    # # Now you can print or use information_detector_messages as needed
+    # information_detector_agent = Agent("gemini-1.5-pro", information_detector_messages)
+    # information_detector_agent.add_message("user", text)
+    # response = information_detector_agent.generate_response()
+    # print(response)
     agent = Agent("gemini-1.5-pro-002", "you are an assistant", memory_enabled=True)
 
     # Format the prompt to check if the section is the last one in the outline
     prompt = f"Say: {text}\n"
@@ -256,7 +292,7 @@ if __name__ == "__main__":
     agent.add_message("user", prompt)
     agent.add_message("assistant", "the answer")
 
-    print(agent.generate_response(
+    print(agent.generate_response())
     print(agent.history[:])
     last_message = agent.history.pop()
     print(last_message)
{llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm_dialog_manager
-Version: 0.2.7
+Version: 0.3.1
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -25,25 +25,25 @@ Requires-Dist: google-generativeai>=0.1.0
 Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: typing-extensions>=4.0.0
 Requires-Dist: uuid>=1.30
-Provides-Extra: all
-Requires-Dist: pytest>=8.0.0; extra == "all"
-Requires-Dist: pytest-asyncio>=0.21.1; extra == "all"
-Requires-Dist: pytest-cov>=4.1.0; extra == "all"
-Requires-Dist: black>=23.9.1; extra == "all"
-Requires-Dist: isort>=5.12.0; extra == "all"
 Provides-Extra: dev
 Requires-Dist: pytest>=8.0.0; extra == "dev"
 Requires-Dist: pytest-asyncio>=0.21.1; extra == "dev"
 Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
 Requires-Dist: black>=23.9.1; extra == "dev"
 Requires-Dist: isort>=5.12.0; extra == "dev"
-Provides-Extra: lint
-Requires-Dist: black>=22.0; extra == "lint"
-Requires-Dist: isort>=5.0; extra == "lint"
 Provides-Extra: test
 Requires-Dist: pytest>=6.0; extra == "test"
 Requires-Dist: pytest-asyncio>=0.14.0; extra == "test"
 Requires-Dist: pytest-cov>=2.0; extra == "test"
+Provides-Extra: lint
+Requires-Dist: black>=22.0; extra == "lint"
+Requires-Dist: isort>=5.0; extra == "lint"
+Provides-Extra: all
+Requires-Dist: pytest>=8.0.0; extra == "all"
+Requires-Dist: pytest-asyncio>=0.21.1; extra == "all"
+Requires-Dist: pytest-cov>=4.1.0; extra == "all"
+Requires-Dist: black>=23.9.1; extra == "all"
+Requires-Dist: isort>=5.12.0; extra == "all"
 
 # LLM Dialog Manager
 
@@ -73,23 +73,29 @@ pip install llm-dialog-manager
 
 ## Quick Start
 
-### Basic Usage
 
-
-from llm_dialog_manager import ChatHistory
+### Environment Variables
 
-
-history = ChatHistory("You are a helpful assistant")
+Create a `.env` file in your project root:
 
-
-
-
+```bash
+# OpenAI
+OPENAI_API_KEY_1=your-key-1
+OPENAI_API_BASE_1=https://api.openai.com/v1
 
-#
-
+# Anthropic
+ANTHROPIC_API_KEY_1=your-anthropic-key
+ANTHROPIC_API_BASE_1=https://api.anthropic.com
+
+# Google
+GEMINI_API_KEY=your-gemini-key
+
+# X.AI
+XAI_API_KEY=your-x-key
 ```
 
-###
+### Basic Usage
+
 
 ```python
 from llm_dialog_manager import Agent
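Aside: the numbered-key convention above (`OPENAI_API_KEY_1`, `OPENAI_API_KEY_2`, …) lends itself to simple key rotation. A minimal sketch of that idea using `python-dotenv`, which the package already declares as a dependency; the `numbered_keys` helper below is hypothetical, not the package's actual `key_manager` code:

```python
# Illustrative only: collecting and rotating numbered keys such as
# OPENAI_API_KEY_1, OPENAI_API_KEY_2, ... This is a sketch, not the
# package's key_manager implementation.
import itertools
import os

from dotenv import load_dotenv

load_dotenv()  # reads the .env file shown above

def numbered_keys(prefix: str) -> list[str]:
    """Collect PREFIX_1, PREFIX_2, ... until the sequence stops."""
    keys, i = [], 1
    while (key := os.getenv(f"{prefix}_{i}")) is not None:
        keys.append(key)
        i += 1
    return keys

# Rotate through however many keys the .env defines
openai_keys = itertools.cycle(numbered_keys("OPENAI_API_KEY"))
# next(openai_keys) yields key 1, key 2, ..., then wraps around
```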
@@ -106,65 +112,15 @@ response = agent.generate_response()
 agent.save_conversation()
 ```
 
-
-
-### Managing Multiple API Keys
-
-```python
-from llm_dialog_manager import Agent
-
-# Use specific API key
-agent = Agent("gpt-4", api_key="your-api-key")
-
-# Or use environment variables
-# OPENAI_API_KEY_1=key1
-# OPENAI_API_KEY_2=key2
-# The system will automatically handle load balancing
-```
-
-### Conversation Management
-
-```python
-from llm_dialog_manager import ChatHistory
-
-history = ChatHistory()
-
-# Add messages with role validation
-history.add_message("Hello system", "system")
-history.add_message("Hello user", "user")
-history.add_message("Hello assistant", "assistant")
-
-# Search conversations
-results = history.search_for_keyword("hello")
-
-# Get conversation status
-status = history.conversation_status()
-history.display_conversation_status()
-
-# Get conversation snippets
-snippet = history.get_conversation_snippet(1)
-history.display_snippet(1)
-```
-
-## Environment Variables
-
-Create a `.env` file in your project root:
+### Setup Debugging Console
 
 ```bash
-# OpenAI
-OPENAI_API_KEY_1=your-key-1
-OPENAI_API_BASE_1=https://api.openai.com/v1
-
-# Anthropic
-ANTHROPIC_API_KEY_1=your-anthropic-key
-ANTHROPIC_API_BASE_1=https://api.anthropic.com
+python app.py
+# open localhost:8000
+```
+https://github.com/user-attachments/assets/5f640029-24e6-44ea-a3a3-02eb3de0d4df
 
-# Google
-GEMINI_API_KEY=your-gemini-key
 
-# X.AI
-XAI_API_KEY=your-x-key
-```
 
 ## Development
 
llm_dialog_manager-0.3.1.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+llm_dialog_manager/__init__.py,sha256=NOEzUBSgPngSq5JIrDD7jEpusmMi4kes3PSc4pLJCnE,86
+llm_dialog_manager/agent.py,sha256=m3LhktFjC8mDNx_TCm31QS09ZcUbKqcpW7QKrdfLsQc,11779
+llm_dialog_manager/chat_history.py,sha256=xKA-oQCv8jv_g8EhXrG9h1S8Icbj2FfqPIhbty5vra4,6033
+llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+llm_dialog_manager-0.3.1.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+llm_dialog_manager-0.3.1.dist-info/METADATA,sha256=hpaL6HcQTUvqlwLLnwtXuj_yzLEkyN-KXeWL3RRld7U,4152
+llm_dialog_manager-0.3.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+llm_dialog_manager-0.3.1.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+llm_dialog_manager-0.3.1.dist-info/RECORD,,
llm_dialog_manager-0.2.7.dist-info/RECORD
DELETED
@@ -1,9 +0,0 @@
-llm_dialog_manager/__init__.py,sha256=bQb_jouDWIWf_n5x2kBA7uc43xkG2IMLeYcV1IevNek,86
-llm_dialog_manager/agent.py,sha256=FF8vs-RyJGKWit5X82eLHyel1sThf76sLEa_1GVLeA0,9868
-llm_dialog_manager/chat_history.py,sha256=xKA-oQCv8jv_g8EhXrG9h1S8Icbj2FfqPIhbty5vra4,6033
-llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
-llm_dialog_manager-0.2.7.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
-llm_dialog_manager-0.2.7.dist-info/METADATA,sha256=kmdHmOHId-OSQxMBeDbtdAn7eWz-VGJfK_cBGs5BjqU,5193
-llm_dialog_manager-0.2.7.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-llm_dialog_manager-0.2.7.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
-llm_dialog_manager-0.2.7.dist-info/RECORD,,
{llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/LICENSE
File without changes
{llm_dialog_manager-0.2.7.dist-info → llm_dialog_manager-0.3.1.dist-info}/top_level.txt
File without changes