llm-dialog-manager 0.4.6__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_dialog_manager/__init__.py +18 -2
- llm_dialog_manager/agent.py +141 -589
- {llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info}/METADATA +4 -3
- llm_dialog_manager-0.5.0.dist-info/RECORD +9 -0
- {llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info}/WHEEL +1 -1
- llm_dialog_manager-0.4.6.dist-info/RECORD +0 -9
- {llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info/licenses}/LICENSE +0 -0
- {llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info}/top_level.txt +0 -0
llm_dialog_manager/__init__.py
CHANGED
@@ -1,4 +1,20 @@
-
+"""
+LLM Dialog Manager
+
+A modular framework for building conversational AI applications with
+support for multiple LLM providers.
+"""
+
+__version__ = "0.5.0"
+
 from .agent import Agent
+from .chat_history import ChatHistory
+from .key_manager import key_manager
+
+# Import factory functions for easy access
+from .clients import get_client
+from .formatters import get_formatter
 
-
+# Setup environment by default
+from .utils.environment import load_env_vars
+load_env_vars()
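For readers skimming the diff: the rewritten __init__.py turns the package root into the public API surface, exposing the version, the Agent facade, ChatHistory, key_manager, the get_client/get_formatter factories, and an environment loader that now runs on import. Below is a minimal sketch of how that surface might be used; the model name and API key are placeholders, and get_formatter's exact signature is an assumption, since only its import appears in the diff:

    from llm_dialog_manager import Agent, get_client, get_formatter, load_env_vars

    load_env_vars()  # optional: 0.5.0 already calls this once on import

    # High-level path: the Agent facade
    agent = Agent("claude-3-5-sonnet", memory_enabled=True)  # placeholder model name

    # Lower-level path: resolve a provider client directly
    client = get_client("claude-3-5-sonnet", api_key="sk-placeholder")  # placeholder key
    formatter = get_formatter("claude-3-5-sonnet")  # assumed to mirror get_client's lookup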
llm_dialog_manager/agent.py
CHANGED
@@ -1,637 +1,189 @@
+"""
+Agent class for managing LLM conversations
+"""
 # Standard library imports
-import json
-import os
 import uuid
-from typing import List, Dict, Union, Optional, Any
 import logging
-from pathlib import Path
-import random
-import requests
-import zipfile
-import io
-import base64
+from typing import List, Dict, Optional, Union
 from PIL import Image
 
-# Third-party imports
-import anthropic
-from anthropic import AnthropicVertex
-import google.generativeai as genai
-import openai
-from dotenv import load_dotenv
-
 # Local imports
 from .chat_history import ChatHistory
-from .
+from .clients import get_client
+from .utils.environment import load_env_vars
+from .utils.image_tools import load_image_from_path, load_image_from_url, create_image_content_block
 
-#
-logging.basicConfig(level=logging.INFO)
+# Setup logging
 logger = logging.getLogger(__name__)
 
 # Load environment variables
-def load_env_vars():
-    """Load environment variables from .env file"""
-    env_path = Path(__file__).parent / '.env'
-    if env_path.exists():
-        load_dotenv(env_path)
-    else:
-        logger.warning(".env file not found. Using system environment variables.")
-
 load_env_vars()
 
-def encode_image(image_path):
-    with open(image_path, "rb") as image_file:
-        return base64.b64encode(image_file.read()).decode("utf-8")
-
-def format_messages_for_gemini(messages):
+class Agent:
     """
-
-
-
+    Agent class for managing conversations with LLMs.
+
+    This class provides a high-level interface for interacting with different
+    LLM providers through a unified API.
     """
-
-
-
-
-
-
-
-
-            continue
-
-        # Handle user/assistant messages
-        # If the content is a single object, convert it to a list
-        if not isinstance(content, list):
-            content = [content]
+
+    def __init__(self, model_name: str,
+                 messages: Optional[Union[str, List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]]] = None,
+                 memory_enabled: bool = False,
+                 api_key: Optional[str] = None,
+                 base_url: Optional[str] = None) -> None:
+        """
+        Initialize an Agent instance.
 
-
-
-
-
-
-
-
-def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]], max_tokens: int = 1000,
-               temperature: float = 0.5, top_p: float = 1.0, top_k: int = 40, api_key: Optional[str] = None,
-               base_url: Optional[str] = None, json_format: bool = False) -> str:
-    """
-    Generate a completion using the specified model and messages.
-    """
-    try:
-        service = ""
-        if "openai" in model:
-            service = "openai"
-            model
-        elif "claude" in model:
-            service = "anthropic"
-        elif "gemini" in model:
-            service = "gemini"
-        elif "grok" in model:
-            service = "x"
-        else:
-            service = "openai"
-
-        # Get API key and base URL from key manager if not provided
-        if not api_key:
-            # api_key, base_url = key_manager.get_config(service)
-            # Placeholder for key_manager
-            api_key = os.getenv(f"{service.upper()}_API_KEY")
-            base_url = os.getenv(f"{service.upper()}_BASE_URL")
-
-        def format_messages_for_api(
-            model: str,
-            messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]
-        ) -> tuple[Optional[str], List[Dict[str, Any]]]:
-            """
-            Convert ChatHistory messages to the format required by the specific API.
-
-            Args:
-                model: The model name (e.g., "claude", "gemini", "gpt")
-                messages: List of message dictionaries with role and content
-
-            Returns:
-                tuple: (system_message, formatted_messages)
-                - system_message is extracted system message for Claude, None for others
-                - formatted_messages is the list of formatted message dictionaries
-            """
-            if "claude" in model and "openai" not in model:
-                formatted = []
-                system_msg = ""
-
-                # Extract system message if present
-                if messages and messages[0]["role"] == "system":
-                    system_msg = messages.pop(0)["content"]
-
-                for msg in messages:
-                    content = msg["content"]
-                    if isinstance(content, str):
-                        formatted.append({"role": msg["role"], "content": content})
-                    elif isinstance(content, list):
-                        # Combine content blocks into a single message
-                        combined_content = []
-                        for block in content:
-                            if isinstance(block, str):
-                                combined_content.append({
-                                    "type": "text",
-                                    "text": block
-                                })
-                            elif isinstance(block, Image.Image):
-                                # Convert PIL.Image to base64
-                                buffered = io.BytesIO()
-                                block.save(buffered, format="PNG")
-                                image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-                                combined_content.append({
-                                    "type": "image",
-                                    "source": {
-                                        "type": "base64",
-                                        "media_type": "image/png",
-                                        "data": image_base64
-                                    }
-                                })
-                            elif isinstance(block, dict):
-                                if block.get("type") == "image_url":
-                                    combined_content.append({
-                                        "type": "image",
-                                        "source": {
-                                            "type": "url",
-                                            "url": block["image_url"]["url"]
-                                        }
-                                    })
-                                elif block.get("type") == "image_base64":
-                                    combined_content.append({
-                                        "type": "image",
-                                        "source": {
-                                            "type": "base64",
-                                            "media_type": block["image_base64"]["media_type"],
-                                            "data": block["image_base64"]["data"]
-                                        }
-                                    })
-                        formatted.append({
-                            "role": msg["role"],
-                            "content": combined_content
-                        })
-                return system_msg, formatted
-
-            elif ("gemini" in model or "gpt" in model or "grok" in model) and "openai" not in model:
-                formatted = []
-                for msg in messages:
-                    content = msg["content"]
-                    if isinstance(content, str):
-                        formatted.append({"role": msg["role"], "parts": [content]})
-                    elif isinstance(content, list):
-                        parts = []
-                        for block in content:
-                            if isinstance(block, str):
-                                parts.append(block)
-                            elif isinstance(block, Image.Image):
-                                # Keep PIL.Image objects as is for Gemini
-                                parts.append(block)
-                            elif isinstance(block, dict):
-                                if block.get("type") == "image_url":
-                                    parts.append({
-                                        "type": "image_url",
-                                        "image_url": {
-                                            "url": block["image_url"]["url"]
-                                        }
-                                    })
-                                elif block.get("type") == "image_base64":
-                                    parts.append({
-                                        "type": "image_base64",
-                                        "image_base64": {
-                                            "data": block["image_base64"]["data"],
-                                            "media_type": block["image_base64"]["media_type"]
-                                        }
-                                    })
-                        formatted.append({
-                            "role": msg["role"],
-                            "parts": parts
-                        })
-                return None, formatted
-
-            else:  # OpenAI models
-                formatted = []
-                for msg in messages:
-                    content = msg["content"]
-                    if isinstance(content, str):
-                        formatted.append({
-                            "role": msg["role"],
-                            "content": content
-                        })
-                    elif isinstance(content, list):
-                        formatted_content = []
-                        for block in content:
-                            if isinstance(block, str):
-                                formatted_content.append({
-                                    "type": "text",
-                                    "text": block
-                                })
-                            elif isinstance(block, Image.Image):
-                                # Convert PIL.Image to base64
-                                buffered = io.BytesIO()
-                                block.save(buffered, format="PNG")
-                                image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-                                formatted_content.append({
-                                    "type": "image_url",
-                                    "image_url": {
-                                        "url": f"data:image/jpeg;base64,{image_base64}"
-                                    }
-                                })
-                            elif isinstance(block, dict):
-                                if block.get("type") == "image_url":
-                                    formatted_content.append({
-                                        "type": "image_url",
-                                        "image_url": block["image_url"]
-                                    })
-                                elif block.get("type") == "image_base64":
-                                    formatted_content.append({
-                                        "type": "image_url",
-                                        "image_url": {
-                                            "url": f"data:image/jpeg;base64,{block['image_base64']['data']}"
-                                        }
-                                    })
-                        formatted.append({
-                            "role": msg["role"],
-                            "content": formatted_content
-                        })
-                return None, formatted
-
-        system_msg, formatted_messages = format_messages_for_api(model, messages.copy())
-
-        if "claude" in model and "openai" not in model:
-            # Check for Vertex configuration
-            vertex_project_id = os.getenv('VERTEX_PROJECT_ID')
-            vertex_region = os.getenv('VERTEX_REGION')
-
-            if vertex_project_id and vertex_region:
-                client = AnthropicVertex(
-                    region=vertex_region,
-                    project_id=vertex_project_id
-                )
-            else:
-                client = anthropic.Anthropic(api_key=api_key, base_url=base_url)
-
-            response = client.messages.create(
-                model=model,
-                max_tokens=max_tokens,
-                temperature=temperature,
-                messages=formatted_messages,
-                system=system_msg
-            )
-
-            while response.stop_reason == "max_tokens":
-                if formatted_messages[-1]['role'] == "user":
-                    formatted_messages.append({"role": "assistant", "content": response.completion})
-                else:
-                    formatted_messages[-1]['content'] += response.completion
-
-                response = client.messages.create(
-                    model=model,
-                    max_tokens=max_tokens,
-                    temperature=temperature,
-                    messages=formatted_messages,
-                    system=system_msg
-                )
-
-            if formatted_messages[-1]['role'] == "assistant" and response.stop_reason == "end_turn":
-                formatted_messages[-1]['content'] += response.completion
-                return formatted_messages[-1]['content']
-
-            return response.completion
-
-        elif "gemini" in model and "openai" not in model:
-            try:
-                # First try OpenAI-style API
-                client = openai.OpenAI(
-                    api_key=api_key,
-                    base_url="https://generativelanguage.googleapis.com/v1beta/"
-                )
-                # Set response_format based on json_format
-                response_format = {"type": "json_object"} if json_format else {"type": "plain_text"}
-
-                response = client.chat.completions.create(
-                    model=model,
-                    max_tokens=max_tokens,
-                    top_p=top_p,
-                    top_k=top_k,
-                    messages=formatted_messages,
-                    temperature=temperature,
-                    response_format=response_format  # Added response_format
-                )
-                return response.choices[0].message.content
-
-            except Exception as e:
-                # If OpenAI-style API fails, fall back to Google's genai library
-                logger.info("Falling back to Google's genai library")
-                genai.configure(api_key=api_key)
-                system_instruction = ""
-                for msg in messages:
-                    if msg["role"] == "system":
-                        system_instruction = msg["content"]
-                        break
-
-                # Convert the remaining messages to the Gemini format
-                gemini_messages = format_messages_for_gemini(messages)
-                mime_type = "application/json" if json_format else "text/plain"
-                generation_config = genai.types.GenerationConfig(
-                    temperature=temperature,
-                    top_p=top_p,
-                    top_k=top_k,
-                    max_output_tokens=max_tokens,
-                    response_mime_type=mime_type
-                )
-
-                model_instance = genai.GenerativeModel(
-                    model_name=model,
-                    system_instruction=system_instruction,  # the system message is passed in here
-                    generation_config=generation_config
-                )
-
-                response = model_instance.generate_content(gemini_messages, generation_config=generation_config)
-
-                return response.text
-
-        elif "grok" in model and "openai" not in model:
-            # Randomly choose between OpenAI and Anthropic SDK
-            use_anthropic = random.choice([True, False])
-
-            if use_anthropic:
-                logger.info("Using Anthropic for Grok model")
-                client = anthropic.Anthropic(
-                    api_key=api_key,
-                    base_url="https://api.x.ai"
-                )
-
-                system_msg = ""
-                if messages and messages[0]["role"] == "system":
-                    system_msg = messages.pop(0)["content"]
-
-                response = client.messages.create(
-                    model=model,
-                    max_tokens=max_tokens,
-                    temperature=temperature,
-                    messages=formatted_messages,
-                    system=system_msg
-                )
-                return response.completion
-            else:
-                logger.info("Using OpenAI for Grok model")
-                client = openai.OpenAI(
-                    api_key=api_key,
-                    base_url="https://api.x.ai/v1"
-                )
-                # Set response_format based on json_format
-                response_format = {"type": "json_object"} if json_format else {"type": "plain_text"}
-
-                response = client.chat.completions.create(
-                    model=model,
-                    messages=formatted_messages,
-                    max_tokens=max_tokens,
-                    temperature=temperature,
-                    response_format=response_format  # Added response_format
-                )
-                return response.choices[0].message.content
-
-        else:  # OpenAI models
-            if model.endswith("-openai"):
-                model = model[:-7]  # Remove last 7 characters ("-openai")
-            client = openai.OpenAI(api_key=api_key, base_url=base_url)
-
-            # Create base parameters
-            params = {
-                "model": model,
-                "messages": formatted_messages,
-            }
-
-            # Add optional parameters
-            if json_format:
-                params["response_format"] = {"type": "json_object"}
-            if not ("o1" in model or "o3" in model):
-                params["max_tokens"] = max_tokens
-                params["temperature"] = temperature
-
-            response = client.chat.completions.create(**params)
-            return response.choices[0].message.content
-
-        # Release the API key after successful use
-        if not api_key:
-            # key_manager.release_config(service, api_key)
-            pass
-
-        return response
-
-    except Exception as e:
-        logger.error(f"Error in completion: {str(e)}")
-        raise
-
-class Agent:
-    def __init__(self, model_name: str, messages: Optional[Union[str, List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]]] = None,
-                 memory_enabled: bool = False, api_key: Optional[str] = None) -> None:
-        """Initialize an Agent instance."""
+        Args:
+            model_name: Name of the LLM model to use
+            messages: Optional initial messages or system prompt
+            memory_enabled: Whether to enable conversation memory
+            api_key: Optional API key to use
+            base_url: Optional base URL for API requests
+        """
         self.id = f"{model_name}-{uuid.uuid4().hex[:8]}"
         self.model_name = model_name
         self.history = ChatHistory(messages) if messages else ChatHistory()
         self.memory_enabled = memory_enabled
-        self.
+        self.client = get_client(model_name, api_key=api_key, base_url=base_url)
         self.repo_content = []
-
+
     def add_message(self, role: str, content: Union[str, List[Union[str, Image.Image, Dict]]]):
-        """
+        """
+        Add a message to the conversation.
+
+        Args:
+            role: Message role ('system', 'user', or 'assistant')
+            content: Message content (text, image, or mixed content)
+        """
         self.history.add_message(content, role)
-
+
     def add_user_message(self, content: Union[str, List[Union[str, Image.Image, Dict]]]):
-        """
+        """
+        Add a user message to the conversation.
+
+        Args:
+            content: Message content (text, image, or mixed content)
+        """
        self.history.add_user_message(content)
-
+
     def add_assistant_message(self, content: Union[str, List[Union[str, Image.Image, Dict]]]):
-        """
+        """
+        Add an assistant message to the conversation.
+
+        Args:
+            content: Message content (text, image, or mixed content)
+        """
         self.history.add_assistant_message(content)
-
-    def add_image(self, image_path: Optional[str] = None,
+
+    def add_image(self, image_path: Optional[str] = None,
+                  image_url: Optional[str] = None,
+                  media_type: Optional[str] = "image/jpeg"):
         """
         Add an image to the conversation.
+
         Either image_path or image_url must be provided.
+
+        Args:
+            image_path: Path to a local image file
+            image_url: URL of an image
+            media_type: MIME type of the image
+
+        Returns:
+            The image content block that was added
         """
-        if not image_path
+        if not (image_path or image_url):
             raise ValueError("Either image_path or image_url must be provided.")
-
+
         if image_path:
-
-                raise FileNotFoundError(f"Image file {image_path} does not exist.")
-            if "gemini" in self.model_name and "openai" not in self.model_name:
-                # For Gemini, load as PIL.Image
-                image_pil = Image.open(image_path)
-                image_block = image_pil
-            elif "claude" in self.model_name and "openai" not in self.model_name:
-                # For Claude and others, use base64 encoding
-                with open(image_path, "rb") as img_file:
-                    image_data = base64.standard_b64encode(img_file.read()).decode("utf-8")
-                image_block = {
-                    "type": "image",
-                    "source": {
-                        "type": "base64",
-                        "media_type": media_type,
-                        "data": image_data,
-                    },
-                }
-            else:
-                # openai format
-                base64_image = encode_image(image_path)
-                image_block = {
-                    "type": "image_url",
-                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
-                }
+            image = load_image_from_path(image_path)
         else:
-
-
-
-
-
-
-
-
-
-                    "type": "image",
-                    "source": {
-                        "type": "base64",
-                        "media_type": media_type,
-                        "data": image_data,
-                    },
-                }
-            else:
-                # For Claude and others, use image URLs
-                image_block = {
-                    "type": "image_url",
-                    "image_url": {
-                        "url": image_url
-                    }
-                }
-
-        # Add the image block to the last user message or as a new user message
-        if self.history.last_role == "user":
-            current_content = self.history.messages[-1]["content"]
-            if isinstance(current_content, list):
-                current_content.append(image_block)
-            else:
-                self.history.messages[-1]["content"] = [current_content, image_block]
-        else:
-            # Start a new user message with the image
-            self.history.add_message([image_block], "user")
-
-    def generate_response(self, max_tokens=3585, temperature=0.7, top_p=1.0, top_k=40, json_format: bool = False) -> str:
-        """Generate a response from the agent.
-
+            image = load_image_from_url(image_url)
+
+        return create_image_content_block(image, media_type)
+
+    def generate_response(self, max_tokens=3585, temperature=0.7,
+                          top_p=1.0, top_k=40, json_format=False, **kwargs):
+        """
+        Generate a response from the agent.
+
         Args:
-            max_tokens
-            temperature
-
-
+            max_tokens: Maximum number of tokens to generate
+            temperature: Sampling temperature
+            top_p: Nucleus sampling parameter
+            top_k: Top-k sampling parameter
+            json_format: Whether to enable JSON output format
+            **kwargs: Additional model-specific parameters
+
         Returns:
-
+            The generated response text
         """
-
-
-
-        messages = self.history.messages
-        print(self.model_name)
-        response_text = completion(
-            model=self.model_name,
-            messages=messages,
+        response = self.client.completion(
+            messages=self.history.messages,
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
             top_k=top_k,
-
-
+            json_format=json_format,
+            model=self.model_name,
+            **kwargs
         )
-        if self.model_name.startswith("openai"):
-            # OpenAI does not support images, so responses are simple strings
-            if self.history.messages[-1]["role"] == "assistant":
-                self.history.messages[-1]["content"] = response_text
-            elif self.memory_enabled:
-                self.add_message("assistant", response_text)
-        elif "claude" in self.model_name:
-            if self.history.messages[-1]["role"] == "assistant":
-                self.history.messages[-1]["content"] = response_text
-            elif self.memory_enabled:
-                self.add_message("assistant", response_text)
-        elif "gemini" in self.model_name or "grok" in self.model_name:
-            if self.history.messages[-1]["role"] == "assistant":
-                if isinstance(self.history.messages[-1]["content"], list):
-                    self.history.messages[-1]["content"].append(response_text)
-                else:
-                    self.history.messages[-1]["content"] = [self.history.messages[-1]["content"], response_text]
-            elif self.memory_enabled:
-                self.add_message("assistant", response_text)
-        else:
-            # Handle other models similarly
-            if self.history.messages[-1]["role"] == "assistant":
-                self.history.messages[-1]["content"] = response_text
-            elif self.memory_enabled:
-                self.add_message("assistant", response_text)
 
-
+        # Add the response to history
+        if not json_format:
+            self.add_assistant_message(response)
+
+        return response
 
-    def save_conversation(self):
-
-
-
-
-
+    def save_conversation(self, filename=None):
+        """
+        Save the conversation history to a file.
+
+        Args:
+            filename: Optional filename to save to
+        """
         if filename is None:
-            filename = f"{self.id}.json"
-        with open(filename, 'r', encoding='utf-8') as file:
-            messages = json.load(file)
-        # Handle deserialization of images if necessary
-        self.history = ChatHistory(messages)
-
-    def add_repo(self, repo_url: Optional[str] = None, username: Optional[str] = None, repo_name: Optional[str] = None, commit_hash: Optional[str] = None):
-        if username and repo_name:
-            if commit_hash:
-                repo_url = f"https://github.com/{username}/{repo_name}/archive/{commit_hash}.zip"
-            else:
-                repo_url = f"https://github.com/{username}/{repo_name}/archive/refs/heads/main.zip"
+            filename = f"conversation_{self.id}.json"
 
-
-            raise ValueError("Either repo_url or both username and repo_name must be provided")
+        import json
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Generate response with JSON format enabled
-        try:
-            response = agent.generate_response(json_format=True)  # json_format set to True
-            print("Response:", response)
-        except Exception as e:
-            logger.error(f"Failed to generate response: {e}")
-
-        # Print the entire conversation history
-        print("Conversation History:")
-        print(agent.history)
+        # Convert any PIL.Image objects to base64 for serialization
+        serializable_history = []
+        for msg in self.history.messages:
+            role = msg["role"]
+            content = msg["content"]
+
+            if isinstance(content, str):
+                serializable_history.append({"role": role, "content": content})
+            elif isinstance(content, list):
+                serializable_content = []
+                for item in content:
+                    if isinstance(item, str):
+                        serializable_content.append(item)
+                    elif isinstance(item, Image.Image):
+                        serializable_content.append(create_image_content_block(item))
+                    elif isinstance(item, dict):
+                        serializable_content.append(item)
+                serializable_history.append({"role": role, "content": serializable_content})
+
+        with open(filename, 'w') as f:
+            json.dump(serializable_history, f, indent=2)
+
+        return filename
 
-
-
-
-
-
-
-
+    def load_conversation(self, filename):
+        """
+        Load a conversation from a file.
+
+        Args:
+            filename: Path to the conversation file
+        """
+        import json
+
+        with open(filename, 'r') as f:
+            history = json.load(f)
+
+        self.history = ChatHistory(history)
+
+        return self.history
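Net effect of the rewrite: the module-level completion() dispatcher, with its per-provider message formatting and SDK branching, is gone; each Agent now resolves a provider client once in __init__ via get_client() and delegates to client.completion() in generate_response(). A short sketch of the resulting call flow, using only methods visible in the new code; the model name is a placeholder:

    from llm_dialog_manager import Agent

    agent = Agent("gpt-4o-openai", memory_enabled=True)  # placeholder model name
    agent.add_message("system", "You are a helpful assistant")
    agent.add_user_message("Summarize the 0.5.0 refactor in one sentence.")

    # Delegates to self.client.completion(...); because json_format defaults to
    # False, the reply is also appended to history via add_assistant_message()
    reply = agent.generate_response(max_tokens=200, temperature=0.3)

    path = agent.save_conversation()  # writes conversation_<id>.json
    agent.load_conversation(path)     # round-trips through ChatHistory

Two behavioral details worth noting from the diff: with json_format=True the response is returned but not recorded in history, and add_image() now returns an image content block instead of appending it to the last user message as the 0.4.6 code did.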
{llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: llm_dialog_manager
-Version: 0.4.6
+Version: 0.5.0
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -44,6 +44,7 @@ Requires-Dist: pytest-asyncio>=0.21.1; extra == "all"
 Requires-Dist: pytest-cov>=4.1.0; extra == "all"
 Requires-Dist: black>=23.9.1; extra == "all"
 Requires-Dist: isort>=5.12.0; extra == "all"
+Dynamic: license-file
 
 # LLM Dialog Manager
 
@@ -102,7 +103,7 @@ XAI_API_KEY=your-x-key
 from llm_dialog_manager import Agent
 
 # Initialize an agent with a specific model
-agent = Agent("
+agent = Agent("ep-20250319212209-j6tfj-openai", memory_enabled=True)
 
 # Add messages and generate responses
 agent.add_message("system", "You are a helpful assistant")
llm_dialog_manager-0.5.0.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+llm_dialog_manager/__init__.py,sha256=npNlH7E4TPUhiX5WNGa-OGwXx38OJ3ofZ6lh5f399Kk,463
+llm_dialog_manager/agent.py,sha256=Am2p9fClcHC75_Yjz6b1bof_KeUYmJocATQIQ-KKcr0,6472
+llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
+llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+llm_dialog_manager-0.5.0.dist-info/licenses/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+llm_dialog_manager-0.5.0.dist-info/METADATA,sha256=L-bvieVTyqHeCtxtKeQtHUZ-5vwT8_4XAVsAorNLxLA,4236
+llm_dialog_manager-0.5.0.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+llm_dialog_manager-0.5.0.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+llm_dialog_manager-0.5.0.dist-info/RECORD,,
llm_dialog_manager-0.4.6.dist-info/RECORD
REMOVED
@@ -1,9 +0,0 @@
-llm_dialog_manager/__init__.py,sha256=klLFvHayR7ew1Oh9xyhAXdXnfs82YnFUEFzw0YvxKJI,86
-llm_dialog_manager/agent.py,sha256=NVQKIMebl4cYkqMaBceZ3qs1vYhq1bum9okAn8VcfCg,27680
-llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
-llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
-llm_dialog_manager-0.4.6.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
-llm_dialog_manager-0.4.6.dist-info/METADATA,sha256=-qTRYkfAJMJCQTkRqNrtHjUuN-xGLhLR4CJvSJURgeg,4194
-llm_dialog_manager-0.4.6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-llm_dialog_manager-0.4.6.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
-llm_dialog_manager-0.4.6.dist-info/RECORD,,
{llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info/licenses}/LICENSE
File without changes

{llm_dialog_manager-0.4.6.dist-info → llm_dialog_manager-0.5.0.dist-info}/top_level.txt
File without changes