llm-dialog-manager 0.4.7__py3-none-any.whl → 0.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_dialog_manager/__init__.py +18 -2
- llm_dialog_manager/agent.py +141 -594
- llm_dialog_manager/clients/__init__.py +31 -0
- llm_dialog_manager/clients/anthropic_client.py +143 -0
- llm_dialog_manager/clients/base.py +65 -0
- llm_dialog_manager/clients/gemini_client.py +78 -0
- llm_dialog_manager/clients/openai_client.py +97 -0
- llm_dialog_manager/clients/x_client.py +83 -0
- llm_dialog_manager/formatters/__init__.py +27 -0
- llm_dialog_manager/formatters/anthropic.py +76 -0
- llm_dialog_manager/formatters/base.py +23 -0
- llm_dialog_manager/formatters/gemini.py +59 -0
- llm_dialog_manager/formatters/openai.py +67 -0
- llm_dialog_manager/formatters/x.py +77 -0
- llm_dialog_manager/utils/__init__.py +3 -0
- llm_dialog_manager/utils/environment.py +66 -0
- llm_dialog_manager/utils/image_tools.py +81 -0
- llm_dialog_manager/utils/logging.py +35 -0
- {llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/METADATA +2 -2
- llm_dialog_manager-0.5.3.dist-info/RECORD +25 -0
- {llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/WHEEL +1 -1
- llm_dialog_manager-0.4.7.dist-info/RECORD +0 -9
- {llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/licenses/LICENSE +0 -0
- {llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/top_level.txt +0 -0
llm_dialog_manager/formatters/openai.py (new file)
@@ -0,0 +1,67 @@
+"""
+Message formatter for OpenAI models
+"""
+import io
+import base64
+from typing import List, Dict, Union, Optional
+from PIL import Image
+
+from .base import BaseMessageFormatter
+
+class OpenAIFormatter(BaseMessageFormatter):
+    """Formatter for OpenAI API messages"""
+
+    def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+        """
+        Format messages for the OpenAI API.
+
+        Args:
+            messages: List of message dictionaries in standard format
+
+        Returns:
+            A tuple containing (None, formatted_messages)
+            since OpenAI handles system messages in the message list
+        """
+        formatted = []
+
+        for msg in messages:
+            content = msg["content"]
+            if isinstance(content, str):
+                formatted.append({"role": msg["role"], "content": content})
+            elif isinstance(content, list):
+                # For OpenAI with multimodal models like GPT-4V
+                formatted_content = []
+
+                for block in content:
+                    if isinstance(block, str):
+                        formatted_content.append({"type": "text", "text": block})
+                    elif isinstance(block, Image.Image):
+                        # Convert PIL.Image to base64
+                        buffered = io.BytesIO()
+                        block.save(buffered, format="PNG")
+                        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+                        formatted_content.append({
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/png;base64,{image_base64}"
+                            }
+                        })
+                    elif isinstance(block, dict):
+                        if block.get("type") == "image_url":
+                            formatted_content.append({
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": block["image_url"]["url"]
+                                }
+                            })
+                        elif block.get("type") == "image_base64":
+                            formatted_content.append({
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:{block['image_base64']['media_type']};base64,{block['image_base64']['data']}"
+                                }
+                            })
+
+                formatted.append({"role": msg["role"], "content": formatted_content})
+
+        return None, formatted
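A minimal usage sketch for the formatter above, assuming the import path matches the wheel layout recorded in this diff (the image file name is hypothetical):

from PIL import Image
from llm_dialog_manager.formatters.openai import OpenAIFormatter

messages = [
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": ["Describe this image", Image.open("photo.png")]},  # hypothetical file
]

# OpenAI keeps system messages inline, so the first element of the returned tuple is None;
# the PIL image is converted into a base64 data URL inside an "image_url" block.
system_msg, formatted = OpenAIFormatter().format_messages(messages)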
llm_dialog_manager/formatters/x.py (new file)
@@ -0,0 +1,77 @@
+"""
+Message formatter for X.AI (Grok) models
+"""
+from typing import List, Dict, Union, Optional
+from PIL import Image
+import io
+import base64
+
+from .base import BaseMessageFormatter
+
+class XFormatter(BaseMessageFormatter):
+    """Formatter for X.AI (Grok) API messages"""
+
+    def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+        """
+        Format messages for the X.AI (Grok) API.
+
+        Args:
+            messages: List of message dictionaries in standard format
+
+        Returns:
+            A tuple containing (system_message, formatted_messages)
+        """
+        system_msg = None
+        formatted = []
+
+        for msg in messages:
+            # Extract system message if present, similar to many other APIs
+            if msg["role"] == "system":
+                system_msg = msg["content"] if isinstance(msg["content"], str) else str(msg["content"])
+                continue
+
+            content = msg["content"]
+            if isinstance(content, str):
+                formatted.append({"role": msg["role"], "content": content})
+            elif isinstance(content, list):
+                # Grok API format may need adjustments as it evolves
+                combined_content = []
+
+                for block in content:
+                    if isinstance(block, str):
+                        combined_content.append({"type": "text", "text": block})
+                    elif isinstance(block, Image.Image):
+                        # Convert PIL.Image to base64
+                        buffered = io.BytesIO()
+                        block.save(buffered, format="PNG")
+                        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+                        combined_content.append({
+                            "type": "image",
+                            "image": {
+                                "type": "base64",
+                                "media_type": "image/png",
+                                "data": image_base64
+                            }
+                        })
+                    elif isinstance(block, dict):
+                        if block.get("type") == "image_url":
+                            combined_content.append({
+                                "type": "image",
+                                "image": {
+                                    "type": "url",
+                                    "url": block["image_url"]["url"]
+                                }
+                            })
+                        elif block.get("type") == "image_base64":
+                            combined_content.append({
+                                "type": "image",
+                                "image": {
+                                    "type": "base64",
+                                    "media_type": block["image_base64"]["media_type"],
+                                    "data": block["image_base64"]["data"]
+                                }
+                            })
+
+                formatted.append({"role": msg["role"], "content": combined_content})
+
+        return system_msg, formatted
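Unlike the OpenAI formatter, XFormatter extracts the system turn and returns it separately; a short sketch under the same assumptions:

from llm_dialog_manager.formatters.x import XFormatter

system_msg, formatted = XFormatter().format_messages([
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "Hello Grok"},
])
# system_msg == "You are a helpful assistant"; formatted contains only the user turn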
llm_dialog_manager/utils/environment.py (new file)
@@ -0,0 +1,66 @@
+"""
+Environment utilities for LLM Dialog Manager
+"""
+import os
+import logging
+from pathlib import Path
+from dotenv import load_dotenv
+
+logger = logging.getLogger(__name__)
+
+def load_env_vars(env_path=None):
+    """
+    Load environment variables from .env file.
+
+    Args:
+        env_path: Optional path to .env file
+
+    Returns:
+        True if env vars were loaded, False otherwise
+    """
+    try:
+        # Default to .env in the current directory or parent directory
+        if not env_path:
+            if os.path.exists(".env"):
+                env_path = ".env"
+            elif os.path.exists("../.env"):
+                env_path = "../.env"
+            else:
+                # Try to find .env in parent directories
+                current_dir = Path.cwd()
+                for parent in current_dir.parents:
+                    potential_path = parent / ".env"
+                    if potential_path.exists():
+                        env_path = str(potential_path)
+                        break
+
+        if env_path and os.path.exists(env_path):
+            load_dotenv(env_path)
+            logger.info(f"Loaded environment variables from {env_path}")
+
+            # Log detected providers without showing sensitive data
+            providers = []
+            if os.getenv("OPENAI_API_KEY"):
+                providers.append("OpenAI")
+            if os.getenv("ANTHROPIC_API_KEY"):
+                providers.append("Anthropic")
+            if os.getenv("AWS_ACCESS_KEY_ID") and os.getenv("AWS_SECRET_ACCESS_KEY"):
+                providers.append("AWS Bedrock (for anthropic.* models)")
+            if os.getenv("VERTEX_PROJECT_ID") and os.getenv("VERTEX_REGION"):
+                providers.append("Anthropic Vertex")
+            if os.getenv("GEMINI_API_KEY"):
+                providers.append("Google Gemini")
+            if os.getenv("XAI_API_KEY"):
+                providers.append("X.AI Grok")
+
+            if providers:
+                logger.info(f"Detected LLM providers: {', '.join(providers)}")
+
+            return True
+        else:
+            logger.warning(f"Environment file not found: {env_path}")
+            return False
+
+    except Exception as e:
+        logger.error(f"Error loading environment variables: {e}")
+        return False
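A sketch of calling the loader at startup, assuming the import path matches the wheel layout above. Per the code, it tries ./.env, then ../.env, then each parent directory, and logs only which providers were detected, never the key values:

from llm_dialog_manager.utils.environment import load_env_vars

# Returns True once a .env file is found and loaded; an explicit path can also be
# passed, e.g. load_env_vars("/path/to/custom.env") (hypothetical path).
if not load_env_vars():
    raise RuntimeError("No .env file found; export provider API keys directly instead")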
llm_dialog_manager/utils/image_tools.py (new file)
@@ -0,0 +1,81 @@
+"""
+Image handling utilities for LLM dialog manager
+"""
+import base64
+import io
+from typing import Optional
+import requests
+from PIL import Image
+
+def load_image_from_path(image_path: str) -> Image.Image:
+    """
+    Load an image from a local file path.
+
+    Args:
+        image_path: Path to the local image file
+
+    Returns:
+        PIL.Image object
+    """
+    try:
+        image = Image.open(image_path)
+        # Store the filename for reference
+        image.filename = image_path
+        return image
+    except Exception as e:
+        raise ValueError(f"Failed to load image from {image_path}: {e}")
+
+def load_image_from_url(image_url: str) -> Image.Image:
+    """
+    Load an image from a URL.
+
+    Args:
+        image_url: URL to the image
+
+    Returns:
+        PIL.Image object
+    """
+    try:
+        response = requests.get(image_url, stream=True)
+        response.raise_for_status()
+        image = Image.open(io.BytesIO(response.content))
+        # Store the URL for reference
+        image.filename = image_url
+        return image
+    except Exception as e:
+        raise ValueError(f"Failed to load image from {image_url}: {e}")
+
+def encode_image_to_base64(image: Image.Image, format: str = "PNG") -> str:
+    """
+    Convert a PIL.Image to base64 string.
+
+    Args:
+        image: PIL.Image object
+        format: Image format (default: PNG)
+
+    Returns:
+        Base64-encoded string
+    """
+    buffered = io.BytesIO()
+    image.save(buffered, format=format)
+    return base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+def create_image_content_block(image: Image.Image, media_type: str = "image/png") -> dict:
+    """
+    Create a standard image content block for messages.
+
+    Args:
+        image: PIL.Image object
+        media_type: MIME type of the image
+
+    Returns:
+        Dictionary with image information
+    """
+    image_base64 = encode_image_to_base64(image)
+    return {
+        "type": "image_base64",
+        "image_base64": {
+            "media_type": media_type,
+            "data": image_base64
+        }
+    }
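These helpers produce the provider-neutral "image_base64" block that the formatters shown earlier translate into each API's native shape. A sketch with a hypothetical image URL:

from llm_dialog_manager.utils.image_tools import load_image_from_url, create_image_content_block

image = load_image_from_url("https://example.com/cat.png")  # hypothetical URL
block = create_image_content_block(image, media_type="image/png")
# block == {"type": "image_base64", "image_base64": {"media_type": "image/png", "data": "..."}}
messages = [{"role": "user", "content": ["What animal is this?", block]}]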
llm_dialog_manager/utils/logging.py (new file)
@@ -0,0 +1,35 @@
+"""
+Logging configuration utilities
+"""
+import logging
+import sys
+
+def setup_logging(level=logging.INFO, log_file=None):
+    """
+    Configure logging for the application.
+
+    Args:
+        level: Logging level (default: INFO)
+        log_file: Optional file path to write logs to
+    """
+    # Create logger
+    logger = logging.getLogger('llm_dialog_manager')
+    logger.setLevel(level)
+
+    # Create formatter
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    # Create console handler
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setLevel(level)
+    console_handler.setFormatter(formatter)
+    logger.addHandler(console_handler)
+
+    # Create file handler if log_file specified
+    if log_file:
+        file_handler = logging.FileHandler(log_file)
+        file_handler.setLevel(level)
+        file_handler.setFormatter(formatter)
+        logger.addHandler(file_handler)
+
+    return logger
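A sketch of enabling package logging, optionally mirrored to a file; the log file name is illustrative:

import logging
from llm_dialog_manager.utils.logging import setup_logging

logger = setup_logging(level=logging.DEBUG, log_file="dialog_manager.log")
logger.debug("llm_dialog_manager logging configured")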
{llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llm_dialog_manager
-Version: 0.4.7
+Version: 0.5.3
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -103,7 +103,7 @@ XAI_API_KEY=your-x-key
 from llm_dialog_manager import Agent
 
 # Initialize an agent with a specific model
-agent = Agent("
+agent = Agent("ep-20250319212209-j6tfj-openai", memory_enabled=True)
 
 # Add messages and generate responses
 agent.add_message("system", "You are a helpful assistant")
llm_dialog_manager-0.5.3.dist-info/RECORD (new file)
@@ -0,0 +1,25 @@
+llm_dialog_manager/__init__.py,sha256=T4hzIO_4IANFIcjmF0iEJ-_h7LR1_AT13kj5dc3hH-I,463
+llm_dialog_manager/agent.py,sha256=Am2p9fClcHC75_Yjz6b1bof_KeUYmJocATQIQ-KKcr0,6472
+llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
+llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+llm_dialog_manager/clients/__init__.py,sha256=f8NFm-9bXUJUrFy37FxpJlyyCqVwHPu3VUtfNdKUVmE,1114
+llm_dialog_manager/clients/anthropic_client.py,sha256=5Zqyosoy60JkWI0cjQAxJ9Uk4bcqHrVEXkauEfC8Egs,5404
+llm_dialog_manager/clients/base.py,sha256=hSkuLLKrdyoKMCRpPav5qbsKLaJLQ9gtMDSxxdAHdII,2107
+llm_dialog_manager/clients/gemini_client.py,sha256=AwIVJubdawrNa4DobCKE0KdtwuLU5XzmOtN_I-SzStY,2540
+llm_dialog_manager/clients/openai_client.py,sha256=8glZbLaUQw2gftOgMeV0TN6aTJ--2LuRxn7wiCK8K4Q,3266
+llm_dialog_manager/clients/x_client.py,sha256=rTaEU9NWTws2XIqfNQt1vysT_fdhWCMAGpfF1onMU6c,2688
+llm_dialog_manager/formatters/__init__.py,sha256=LdvQOhkGbF5_iT7Lu6kTssXKRXtuQTq9LluK4CSplws,787
+llm_dialog_manager/formatters/anthropic.py,sha256=0xFDooeEYq4DO1RWiGxbhuiL_DwtDdUNLEuiJyXbQnQ,3247
+llm_dialog_manager/formatters/base.py,sha256=YBpnfR4Pc7-P4TbCVNe4Q6pOUq9mXwQQR1U9X1NSmwE,711
+llm_dialog_manager/formatters/gemini.py,sha256=im-ZPGOkwDiWcgDFC2d0owKmpdKJ9wiLh5treoV2E-4,2425
+llm_dialog_manager/formatters/openai.py,sha256=i6AAORyEkjRQwXAf9QkKBIxZGtD7a2k0CvcPbilSjBQ,2807
+llm_dialog_manager/formatters/x.py,sha256=qxZdidngZUDp0DazYbolye9BnjsL8MMniLVdlaJ_uik,3248
+llm_dialog_manager/utils/__init__.py,sha256=ta2BdLF2CSzlbk5ZwArzar4E7eip4reQjMAncvY7qhQ,52
+llm_dialog_manager/utils/environment.py,sha256=8jRTe1_JL24Wz4SdX968XKx8vJMA4-NrZtHhXv-9PJM,2322
+llm_dialog_manager/utils/image_tools.py,sha256=Q23gpY-JY0MIGOVWnLl-QS2IrpCMBiyzn2Zu0dWov1c,2110
+llm_dialog_manager/utils/logging.py,sha256=38E_k3OSz7CNFg3ZsildR7IMtn3CzWro7IRY-Sj7dcc,989
+llm_dialog_manager-0.5.3.dist-info/licenses/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+llm_dialog_manager-0.5.3.dist-info/METADATA,sha256=sUFwt6A0CwO5hGGf6iAgctBJwm2T6-CVG2hhWHBU2hk,4236
+llm_dialog_manager-0.5.3.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+llm_dialog_manager-0.5.3.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+llm_dialog_manager-0.5.3.dist-info/RECORD,,
llm_dialog_manager-0.4.7.dist-info/RECORD (removed)
@@ -1,9 +0,0 @@
-llm_dialog_manager/__init__.py,sha256=uWTAXzyON6m5vqj_MS8_Pe7hga5TZ2YDhkMh0Z-FDew,86
-llm_dialog_manager/agent.py,sha256=LGoX6erGcn9eaM4wRGfs0N5zS8Lop_57nPKzzBWrtk8,27871
-llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
-llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
-llm_dialog_manager-0.4.7.dist-info/licenses/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
-llm_dialog_manager-0.4.7.dist-info/METADATA,sha256=mRNR-f-rRqDR_IFoFJe2oNcTVE27Ah6AN8ENGQwfzVg,4216
-llm_dialog_manager-0.4.7.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
-llm_dialog_manager-0.4.7.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
-llm_dialog_manager-0.4.7.dist-info/RECORD,,
{llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/licenses/LICENSE: file without changes
{llm_dialog_manager-0.4.7.dist-info → llm_dialog_manager-0.5.3.dist-info}/top_level.txt: file without changes