llm-dialog-manager 0.5.0__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
llm_dialog_manager/__init__.py
@@ -5,7 +5,7 @@ A modular framework for building conversational AI applications with
  support for multiple LLM providers.
  """

- __version__ = "0.5.0"
+ __version__ = "0.5.3"

  from .agent import Agent
  from .chat_history import ChatHistory
llm_dialog_manager/clients/__init__.py (new file)
@@ -0,0 +1,31 @@
+ """
+ LLM API client modules for different services.
+ """
+
+ from .base import BaseClient
+ from .anthropic_client import AnthropicClient
+ from .gemini_client import GeminiClient
+ from .openai_client import OpenAIClient
+ from .x_client import XClient
+
+ def get_client(model_name, api_key=None, base_url=None):
+     """Factory method to get the appropriate client for a given model.
+
+     Args:
+         model_name: Name of the model (e.g., "claude-3-opus", "gemini-1.5-pro")
+         api_key: Optional API key to use
+         base_url: Optional base URL to use
+
+     Returns:
+         An instance of the appropriate client class
+     """
+     if "-openai" in model_name:
+         return OpenAIClient(api_key=api_key, base_url=base_url)
+     if "claude" in model_name:
+         return AnthropicClient(api_key=api_key, base_url=base_url)
+     elif "gemini" in model_name:
+         return GeminiClient(api_key=api_key, base_url=base_url)
+     elif "grok" in model_name:
+         return XClient(api_key=api_key, base_url=base_url)
+     else:  # Default to OpenAI client
+         return OpenAIClient(api_key=api_key, base_url=base_url)
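For orientation, a short usage sketch of this factory (model names are illustrative): routing is by substring match, and anything unrecognized falls through to the OpenAI client.

    from llm_dialog_manager.clients import get_client

    client = get_client("claude-3-opus")     # AnthropicClient
    client = get_client("gemini-1.5-pro")    # GeminiClient
    client = get_client("grok-3-beta")       # XClient
    client = get_client("gpt-4o-openai")     # OpenAIClient, forced by the "-openai" marker
    client = get_client("unknown-model")     # OpenAIClient, the default fallback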
llm_dialog_manager/clients/anthropic_client.py (new file)
@@ -0,0 +1,143 @@
+ """
+ Client implementation for Anthropic Claude models
+ """
+ import os
+ import logging
+ import httpx
+ from typing import List, Dict, Optional, Union
+
+ import anthropic
+ from anthropic import AnthropicVertex, AnthropicBedrock
+
+ from ..formatters import AnthropicFormatter
+ from .base import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+ class AnthropicClient(BaseClient):
+     """Client for Anthropic Claude API"""
+
+     def _get_service_name(self) -> str:
+         return "anthropic"
+
+     def completion(self, messages, max_tokens=1000, temperature=0.5,
+                    top_p=1.0, top_k=40, json_format=False, **kwargs):
+         """
+         Generate a completion using Anthropic Claude.
+
+         Args:
+             messages: List of message dictionaries
+             max_tokens: Maximum number of tokens to generate
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             top_k: Top-k sampling parameter
+             json_format: Whether to return JSON
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             Generated text response
+         """
+         try:
+             # Get API credentials if not set
+             self.get_credentials()
+
+             # Format messages for Anthropic API
+             formatter = AnthropicFormatter()
+             system_message, formatted_messages = formatter.format_messages(messages)
+
+             # Get the model name from kwargs or use default
+             model_name = kwargs.get("model", "claude-3-opus")
+
+             # Check for Vertex configuration
+             vertex_project_id = os.getenv('VERTEX_PROJECT_ID')
+             vertex_region = os.getenv('VERTEX_REGION')
+
+             # Check for AWS Bedrock configuration
+             aws_region = os.getenv('AWS_REGION', 'us-east-1')
+             aws_access_key = os.getenv('AWS_ACCESS_KEY_ID')
+             aws_secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
+             aws_session_token = os.getenv('AWS_SESSION_TOKEN')
+
+             # Get proxy configuration from environment or default to None
+             http_proxy = os.getenv("HTTP_PROXY")
+             https_proxy = os.getenv("HTTPS_PROXY")
+
+             # Determine if we should use Bedrock based on model name prefix
+             use_bedrock = "anthropic." in model_name
+
+             if use_bedrock:
+                 logger.info(f"Using AWS Bedrock for model: {model_name}")
+                 # Use AWS Bedrock for Claude
+                 bedrock_kwargs = {
+                     "aws_region": aws_region
+                 }
+
+                 # Only add credentials if explicitly provided
+                 if aws_access_key and aws_secret_key:
+                     bedrock_kwargs["aws_access_key"] = aws_access_key
+                     bedrock_kwargs["aws_secret_key"] = aws_secret_key
+
+                 client = AnthropicBedrock(**bedrock_kwargs)
+
+                 response = client.messages.create(
+                     model=model_name,
+                     max_tokens=max_tokens,
+                     temperature=temperature,
+                     system=system_message,
+                     messages=formatted_messages,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+             elif vertex_project_id and vertex_region:
+                 # Use Vertex AI for Claude
+                 client = AnthropicVertex(
+                     region=vertex_region,
+                     project_id=vertex_project_id
+                 )
+
+                 response = client.messages.create(
+                     model=model_name,
+                     max_tokens=max_tokens,
+                     temperature=temperature,
+                     system=system_message,
+                     messages=formatted_messages,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+             else:
+                 # Create httpx client with proxy settings if needed
+                 http_options = {}
+                 if http_proxy or https_proxy:
+                     proxies = {}
+                     if http_proxy:
+                         proxies["http://"] = http_proxy
+                     if https_proxy:
+                         proxies["https://"] = https_proxy
+                     http_options["proxies"] = proxies
+
+                 # Use direct Anthropic API with proper http client
+                 client = anthropic.Anthropic(
+                     api_key=self.api_key,
+                     base_url=self.base_url,
+                     http_client=httpx.Client(**http_options) if http_options else None
+                 )
+
+                 response = client.messages.create(
+                     model=model_name,
+                     max_tokens=max_tokens,
+                     temperature=temperature,
+                     system=system_message,
+                     messages=formatted_messages,
+                     top_p=top_p,
+                     top_k=top_k
+                 )
+
+             # Release API credentials
+             self.release_credentials()
+
+             return response.content[0].text
+
+         except Exception as e:
+             logger.error(f"Anthropic API error: {e}")
+             self.report_error()
+             raise
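The backend is chosen in a fixed priority order: AWS Bedrock when the model name contains "anthropic." (a Bedrock-style model ID), Anthropic Vertex when both VERTEX_PROJECT_ID and VERTEX_REGION are set, and the direct Anthropic API otherwise. A minimal call sketch under those assumptions (model IDs are illustrative):

    client = AnthropicClient()

    # Direct Anthropic API; credentials come from key_manager if not passed in
    text = client.completion(
        [{"role": "user", "content": "Hello"}],
        model="claude-3-opus",
    )

    # Routed to AWS Bedrock because the name contains "anthropic."
    text = client.completion(
        [{"role": "user", "content": "Hello"}],
        model="anthropic.claude-3-opus-20240229-v1:0",
    )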
llm_dialog_manager/clients/base.py (new file)
@@ -0,0 +1,65 @@
+ """
+ Base client interface for LLM APIs
+ """
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Optional, Union
+ import logging
+
+ from ..key_manager import key_manager
+
+ logger = logging.getLogger(__name__)
+
+ class BaseClient(ABC):
+     """Base class for LLM API clients"""
+
+     def __init__(self, api_key=None, base_url=None):
+         """
+         Initialize client with optional API key and base URL.
+
+         Args:
+             api_key: Optional API key to use
+             base_url: Optional base URL for API requests
+         """
+         self.api_key = api_key
+         self.base_url = base_url
+         self.service_name = self._get_service_name()
+
+     @abstractmethod
+     def _get_service_name(self) -> str:
+         """Return the service name for this client (e.g., 'openai', 'anthropic')"""
+         pass
+
+     def get_credentials(self):
+         """Get API credentials from key manager if not set"""
+         if not self.api_key:
+             self.api_key, self.base_url = key_manager.get_config(self.service_name)
+
+     def release_credentials(self):
+         """Release API credentials in key manager"""
+         if self.api_key:
+             key_manager.release_config(self.service_name, self.api_key)
+
+     def report_error(self):
+         """Report API error to key manager"""
+         if self.api_key:
+             key_manager.report_error(self.service_name, self.api_key)
+
+     @abstractmethod
+     def completion(self, messages, max_tokens=1000, temperature=0.5,
+                    top_p=1.0, top_k=40, json_format=False, **kwargs):
+         """
+         Generate a completion for the given messages.
+
+         Args:
+             messages: List of message dictionaries
+             max_tokens: Maximum number of tokens to generate
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             top_k: Top-k sampling parameter
+             json_format: Whether to return JSON
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             Generated text response
+         """
+         pass
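A concrete client therefore only implements _get_service_name and completion; key pooling, release, and error reporting are inherited. A minimal sketch of a hypothetical provider (the "echo" service name and behavior are invented for illustration):

    class EchoClient(BaseClient):
        """Hypothetical client illustrating the BaseClient contract."""

        def _get_service_name(self) -> str:
            return "echo"  # key_manager would need keys registered under this name

        def completion(self, messages, max_tokens=1000, temperature=0.5,
                       top_p=1.0, top_k=40, json_format=False, **kwargs):
            self.get_credentials()
            try:
                return messages[-1]["content"]  # echo the last message back
            finally:
                self.release_credentials()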
llm_dialog_manager/clients/gemini_client.py (new file)
@@ -0,0 +1,78 @@
+ """
+ Client implementation for Google Gemini models
+ """
+ import os
+ import logging
+ from typing import List, Dict, Optional, Union
+
+ import google.generativeai as genai
+
+ from ..formatters import GeminiFormatter
+ from .base import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+ class GeminiClient(BaseClient):
+     """Client for Google Gemini API"""
+
+     def _get_service_name(self) -> str:
+         return "gemini"
+
+     def completion(self, messages, max_tokens=1000, temperature=0.5,
+                    top_p=1.0, top_k=40, json_format=False, **kwargs):
+         """
+         Generate a completion using Google Gemini.
+
+         Args:
+             messages: List of message dictionaries
+             max_tokens: Maximum number of tokens to generate
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             top_k: Top-k sampling parameter
+             json_format: Whether to return JSON
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             Generated text response
+         """
+         try:
+             # Get API credentials if not set
+             self.get_credentials()
+
+             # Configure Google API
+             genai.configure(api_key=self.api_key)
+
+             # Format messages for Gemini API
+             formatter = GeminiFormatter()
+             system_message, formatted_messages = formatter.format_messages(messages)
+
+             # Create model configuration
+             model = genai.GenerativeModel(
+                 model_name=kwargs.get("model", "gemini-1.5-pro"),
+                 generation_config={
+                     "max_output_tokens": max_tokens,
+                     "temperature": temperature,
+                     "top_p": top_p,
+                     "top_k": top_k
+                 },
+                 system_instruction=system_message
+             )
+
+             # Generate response
+             if json_format:
+                 response = model.generate_content(
+                     formatted_messages,
+                     generation_config={"response_mime_type": "application/json"}
+                 )
+             else:
+                 response = model.generate_content(formatted_messages)
+
+             # Release API credentials
+             self.release_credentials()
+
+             return response.text
+
+         except Exception as e:
+             logger.error(f"Gemini API error: {e}")
+             self.report_error()
+             raise
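A call sketch; note that when json_format is set, the generation_config passed to generate_content may replace rather than merge with the sampling settings given to the GenerativeModel constructor, depending on the google-generativeai version:

    client = GeminiClient()
    text = client.completion(
        [{"role": "system", "content": "Reply in JSON."},
         {"role": "user", "content": "List three colors."}],
        model="gemini-1.5-pro",
        json_format=True,
    )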
llm_dialog_manager/clients/openai_client.py (new file)
@@ -0,0 +1,97 @@
+ """
+ Client implementation for OpenAI models
+ """
+ import os
+ import logging
+ import httpx
+ from typing import List, Dict, Optional, Union
+
+ import openai
+
+ from ..formatters import OpenAIFormatter
+ from .base import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+ class OpenAIClient(BaseClient):
+     """Client for OpenAI API"""
+
+     def _get_service_name(self) -> str:
+         return "openai"
+
+     def completion(self, messages, max_tokens=1000, temperature=0.5,
+                    top_p=1.0, top_k=40, json_format=False, **kwargs):
+         """
+         Generate a completion using OpenAI API.
+
+         Args:
+             messages: List of message dictionaries
+             max_tokens: Maximum number of tokens to generate
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             top_k: Top-k sampling parameter (not used for OpenAI)
+             json_format: Whether to return JSON
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             Generated text response
+         """
+         try:
+             # Get API credentials if not set
+             self.get_credentials()
+
+             # Format messages for OpenAI API
+             formatter = OpenAIFormatter()
+             _, formatted_messages = formatter.format_messages(messages)
+
+             # Get proxy configuration from environment or default to None
+             http_proxy = os.getenv("HTTP_PROXY")
+             https_proxy = os.getenv("HTTPS_PROXY")
+
+             # Create httpx client with proxy settings if needed
+             http_options = {}
+             if http_proxy or https_proxy:
+                 proxies = {}
+                 if http_proxy:
+                     proxies["http://"] = http_proxy
+                 if https_proxy:
+                     proxies["https://"] = https_proxy
+                 http_options["proxies"] = proxies
+
+             # Create OpenAI client with proper configuration
+             client = openai.OpenAI(
+                 api_key=self.api_key,
+                 base_url=self.base_url,
+                 http_client=httpx.Client(**http_options) if http_options else None
+             )
+
+             # Process model name
+             model = kwargs.get("model", "gpt-4")
+             if model.endswith("-openai"):
+                 model = model[:-7]  # Remove last 7 characters ("-openai")
+
+             # Create base parameters
+             params = {
+                 "model": model,
+                 "messages": formatted_messages,
+                 "max_tokens": max_tokens,
+                 "temperature": temperature,
+                 "top_p": top_p
+             }
+
+             # Add optional parameters
+             if json_format:
+                 params["response_format"] = {"type": "json_object"}
+
+             # Generate completion
+             response = client.chat.completions.create(**params)
+
+             # Release API credentials
+             self.release_credentials()
+
+             return response.choices[0].message.content
+
+         except Exception as e:
+             logger.error(f"OpenAI API error: {e}")
+             self.report_error()
+             raise
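The "-openai" suffix is how a caller forces factory routing to this client while still sending a clean model name to the API; a sketch (model name illustrative):

    client = OpenAIClient()
    text = client.completion(
        [{"role": "user", "content": "Hello"}],
        model="gpt-4o-openai",  # sent to the API as "gpt-4o"
        json_format=True,       # adds response_format={"type": "json_object"}
    )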
llm_dialog_manager/clients/x_client.py (new file)
@@ -0,0 +1,83 @@
+ """
+ Client implementation for X.AI (Grok) models
+ """
+ import os
+ import logging
+ import openai
+ from typing import List, Dict, Optional, Union
+
+ from ..formatters import OpenAIFormatter
+ from .base import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+ class XClient(BaseClient):
+     """Client for X.AI (Grok) API"""
+
+     def _get_service_name(self) -> str:
+         return "x"
+
+     def completion(self, messages, max_tokens=1000, temperature=0.5,
+                    top_p=1.0, top_k=40, json_format=False, **kwargs):
+         """
+         Generate a completion using X.AI (Grok) API.
+
+         Args:
+             messages: List of message dictionaries
+             max_tokens: Maximum number of tokens to generate
+             temperature: Sampling temperature
+             top_p: Nucleus sampling parameter
+             top_k: Top-k sampling parameter
+             json_format: Whether to return JSON
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             Generated text response
+         """
+         try:
+             # Get API credentials if not set
+             self.get_credentials()
+
+             # Format messages for X.AI API using OpenAI formatter
+             # (X.AI uses the same message format as OpenAI)
+             formatter = OpenAIFormatter()
+             _, formatted_messages = formatter.format_messages(messages)
+
+             # Set default base URL if not already set
+             if not self.base_url:
+                 self.base_url = "https://api.x.ai/v1"
+
+             # Initialize OpenAI client
+             client = openai.OpenAI(
+                 api_key=self.api_key,
+                 base_url=self.base_url
+             )
+
+             # Process model name
+             model = kwargs.get("model", "grok-3-beta")
+
+             # Create base parameters
+             params = {
+                 "model": model,
+                 "messages": formatted_messages,
+                 "max_tokens": max_tokens,
+                 "temperature": temperature,
+                 "top_p": top_p,
+             }
+
+             # Add optional parameters
+             if json_format:
+                 params["response_format"] = {"type": "json_object"}
+
+             # Generate completion using OpenAI client
+             response = client.chat.completions.create(**params)
+
+             # Release API credentials
+             self.release_credentials()
+
+             return response.choices[0].message.content
+
+         except Exception as e:
+             logger.error(f"X.AI API error: {e}")
+             self.report_error()
+             raise
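This client is effectively the OpenAI client pointed at X.AI's endpoint: if no base_url arrives from the constructor or key_manager, it falls back to https://api.x.ai/v1. A sketch (the key value is a placeholder):

    client = XClient(api_key="<your-xai-key>")
    text = client.completion(
        [{"role": "user", "content": "Hello"}],
        model="grok-3-beta",
    )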
llm_dialog_manager/formatters/__init__.py (new file)
@@ -0,0 +1,27 @@
+ """
+ Message formatters for different LLM services.
+ """
+
+ from .base import BaseMessageFormatter
+ from .anthropic import AnthropicFormatter
+ from .gemini import GeminiFormatter
+ from .openai import OpenAIFormatter
+ from .x import XFormatter
+
+ def get_formatter(model_name):
+     """Factory method to get the appropriate formatter for a given model.
+
+     Args:
+         model_name: Name of the model (e.g., "claude-3-opus", "gemini-1.5-pro")
+
+     Returns:
+         An instance of the appropriate formatter class
+     """
+     if "claude" in model_name:
+         return AnthropicFormatter()
+     elif "gemini" in model_name:
+         return GeminiFormatter()
+     elif "grok" in model_name:
+         return XFormatter()
+     else:  # Default to OpenAI formatter
+         return OpenAIFormatter()
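A sketch of how one standard message list fans out per provider; Anthropic and Gemini pull the system message out, while OpenAI returns None and leaves it inline:

    from llm_dialog_manager.formatters import get_formatter

    messages = [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Hi"},
    ]

    system, formatted = get_formatter("claude-3-opus").format_messages(messages)
    # system == "You are terse."; formatted == [{"role": "user", "content": "Hi"}]

    system, formatted = get_formatter("gpt-4").format_messages(messages)
    # system is None; the system message stays in formatted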
llm_dialog_manager/formatters/anthropic.py (new file)
@@ -0,0 +1,76 @@
+ """
+ Message formatter for Anthropic Claude models
+ """
+ import io
+ import base64
+ from typing import List, Dict, Union, Optional
+ from PIL import Image
+
+ from .base import BaseMessageFormatter
+
+ class AnthropicFormatter(BaseMessageFormatter):
+     """Formatter for Anthropic Claude API messages"""
+
+     def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+         """
+         Format messages for the Anthropic Claude API.
+
+         Args:
+             messages: List of message dictionaries in standard format
+
+         Returns:
+             A tuple containing (system_message, formatted_messages)
+             where system_message is extracted as a separate string
+         """
+         formatted = []
+         system_msg = ""
+
+         # Extract system message if present
+         if messages and messages[0]["role"] == "system":
+             system_msg = messages[0]["content"]
+             messages = messages[1:]
+
+         for msg in messages:
+             content = msg["content"]
+             if isinstance(content, str):
+                 formatted.append({"role": msg["role"], "content": content})
+             elif isinstance(content, list):
+                 # Combine content blocks into a single message
+                 combined_content = []
+                 for block in content:
+                     if isinstance(block, str):
+                         combined_content.append({"type": "text", "text": block})
+                     elif isinstance(block, Image.Image):
+                         # For Claude, convert PIL.Image to base64
+                         buffered = io.BytesIO()
+                         block.save(buffered, format="PNG")
+                         image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+                         combined_content.append({
+                             "type": "image",
+                             "source": {
+                                 "type": "base64",
+                                 "media_type": "image/png",
+                                 "data": image_base64
+                             }
+                         })
+                     elif isinstance(block, dict):
+                         if block.get("type") == "image_url":
+                             combined_content.append({
+                                 "type": "image",
+                                 "source": {
+                                     "type": "url",
+                                     "url": block["image_url"]["url"]
+                                 }
+                             })
+                         elif block.get("type") == "image_base64":
+                             combined_content.append({
+                                 "type": "image",
+                                 "source": {
+                                     "type": "base64",
+                                     "media_type": block["image_base64"]["media_type"],
+                                     "data": block["image_base64"]["data"]
+                                 }
+                             })
+                 formatted.append({"role": msg["role"], "content": combined_content})
+
+         return system_msg, formatted
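A sketch of the translation for one mixed text-and-image message (URL is illustrative):

    messages = [
        {"role": "system", "content": "Describe images."},
        {"role": "user", "content": [
            "What is this?",
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ]},
    ]
    system, formatted = AnthropicFormatter().format_messages(messages)
    # system == "Describe images."
    # formatted[0]["content"] == [
    #     {"type": "text", "text": "What is this?"},
    #     {"type": "image", "source": {"type": "url", "url": "https://example.com/cat.png"}},
    # ]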
llm_dialog_manager/formatters/base.py (new file)
@@ -0,0 +1,23 @@
+ """
+ Base message formatter interface
+ """
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Union, Optional
+ from PIL import Image
+
+ class BaseMessageFormatter(ABC):
+     """Base class for message formatters"""
+
+     @abstractmethod
+     def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+         """
+         Format messages for the specific LLM API.
+
+         Args:
+             messages: List of message dictionaries in standard format
+
+         Returns:
+             A tuple containing (system_message, formatted_messages)
+             where system_message can be None if not used by the API
+         """
+         pass
llm_dialog_manager/formatters/gemini.py (new file)
@@ -0,0 +1,59 @@
+ """
+ Message formatter for Google Gemini models
+ """
+ from typing import List, Dict, Union, Optional
+ from PIL import Image
+
+ from .base import BaseMessageFormatter
+
+ class GeminiFormatter(BaseMessageFormatter):
+     """Formatter for Google Gemini API messages"""
+
+     def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+         """
+         Format messages for the Google Gemini API.
+
+         Args:
+             messages: List of message dictionaries in standard format
+
+         Returns:
+             A tuple containing (system_message, formatted_messages)
+             where system_message is extracted separately
+         """
+         system_msg = None
+         formatted = []
+
+         for msg in messages:
+             # Extract system message if present
+             if msg["role"] == "system":
+                 system_msg = msg["content"] if isinstance(msg["content"], str) else str(msg["content"])
+                 continue
+
+             content = msg["content"]
+             if isinstance(content, str):
+                 formatted.append({"role": msg["role"], "parts": [content]})
+             elif isinstance(content, list):
+                 parts = []
+                 for block in content:
+                     if isinstance(block, str):
+                         parts.append(block)
+                     elif isinstance(block, Image.Image):
+                         parts.append(block)  # Gemini supports PIL.Image directly
+                     elif isinstance(block, dict):
+                         if block.get("type") == "image_url":
+                             parts.append({
+                                 "inline_data": {
+                                     "mime_type": "image/jpeg",
+                                     "data": block["image_url"]["url"]
+                                 }
+                             })
+                         elif block.get("type") == "image_base64":
+                             parts.append({
+                                 "inline_data": {
+                                     "mime_type": block["image_base64"]["media_type"],
+                                     "data": block["image_base64"]["data"]
+                                 }
+                             })
+                 formatted.append({"role": msg["role"], "parts": parts})
+
+         return system_msg, formatted
llm_dialog_manager/formatters/openai.py (new file)
@@ -0,0 +1,67 @@
+ """
+ Message formatter for OpenAI models
+ """
+ import io
+ import base64
+ from typing import List, Dict, Union, Optional
+ from PIL import Image
+
+ from .base import BaseMessageFormatter
+
+ class OpenAIFormatter(BaseMessageFormatter):
+     """Formatter for OpenAI API messages"""
+
+     def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+         """
+         Format messages for the OpenAI API.
+
+         Args:
+             messages: List of message dictionaries in standard format
+
+         Returns:
+             A tuple containing (None, formatted_messages)
+             since OpenAI handles system messages in the message list
+         """
+         formatted = []
+
+         for msg in messages:
+             content = msg["content"]
+             if isinstance(content, str):
+                 formatted.append({"role": msg["role"], "content": content})
+             elif isinstance(content, list):
+                 # For OpenAI with multimodal models like GPT-4V
+                 formatted_content = []
+
+                 for block in content:
+                     if isinstance(block, str):
+                         formatted_content.append({"type": "text", "text": block})
+                     elif isinstance(block, Image.Image):
+                         # Convert PIL.Image to base64
+                         buffered = io.BytesIO()
+                         block.save(buffered, format="PNG")
+                         image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+                         formatted_content.append({
+                             "type": "image_url",
+                             "image_url": {
+                                 "url": f"data:image/png;base64,{image_base64}"
+                             }
+                         })
+                     elif isinstance(block, dict):
+                         if block.get("type") == "image_url":
+                             formatted_content.append({
+                                 "type": "image_url",
+                                 "image_url": {
+                                     "url": block["image_url"]["url"]
+                                 }
+                             })
+                         elif block.get("type") == "image_base64":
+                             formatted_content.append({
+                                 "type": "image_url",
+                                 "image_url": {
+                                     "url": f"data:{block['image_base64']['media_type']};base64,{block['image_base64']['data']}"
+                                 }
+                             })
+
+                 formatted.append({"role": msg["role"], "content": formatted_content})
+
+         return None, formatted
llm_dialog_manager/formatters/x.py (new file)
@@ -0,0 +1,77 @@
+ """
+ Message formatter for X.AI (Grok) models
+ """
+ from typing import List, Dict, Union, Optional
+ from PIL import Image
+ import io
+ import base64
+
+ from .base import BaseMessageFormatter
+
+ class XFormatter(BaseMessageFormatter):
+     """Formatter for X.AI (Grok) API messages"""
+
+     def format_messages(self, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]]) -> tuple:
+         """
+         Format messages for the X.AI (Grok) API.
+
+         Args:
+             messages: List of message dictionaries in standard format
+
+         Returns:
+             A tuple containing (system_message, formatted_messages)
+         """
+         system_msg = None
+         formatted = []
+
+         for msg in messages:
+             # Extract system message if present, similar to many other APIs
+             if msg["role"] == "system":
+                 system_msg = msg["content"] if isinstance(msg["content"], str) else str(msg["content"])
+                 continue
+
+             content = msg["content"]
+             if isinstance(content, str):
+                 formatted.append({"role": msg["role"], "content": content})
+             elif isinstance(content, list):
+                 # Grok API format may need adjustments as it evolves
+                 combined_content = []
+
+                 for block in content:
+                     if isinstance(block, str):
+                         combined_content.append({"type": "text", "text": block})
+                     elif isinstance(block, Image.Image):
+                         # Convert PIL.Image to base64
+                         buffered = io.BytesIO()
+                         block.save(buffered, format="PNG")
+                         image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+                         combined_content.append({
+                             "type": "image",
+                             "image": {
+                                 "type": "base64",
+                                 "media_type": "image/png",
+                                 "data": image_base64
+                             }
+                         })
+                     elif isinstance(block, dict):
+                         if block.get("type") == "image_url":
+                             combined_content.append({
+                                 "type": "image",
+                                 "image": {
+                                     "type": "url",
+                                     "url": block["image_url"]["url"]
+                                 }
+                             })
+                         elif block.get("type") == "image_base64":
+                             combined_content.append({
+                                 "type": "image",
+                                 "image": {
+                                     "type": "base64",
+                                     "media_type": block["image_base64"]["media_type"],
+                                     "data": block["image_base64"]["data"]
+                                 }
+                             })
+
+                 formatted.append({"role": msg["role"], "content": combined_content})
+
+         return system_msg, formatted
llm_dialog_manager/utils/__init__.py (new file)
@@ -0,0 +1,3 @@
+ """
+ Utility modules for the LLM dialog manager.
+ """
llm_dialog_manager/utils/environment.py (new file)
@@ -0,0 +1,66 @@
+ """
+ Environment utilities for LLM Dialog Manager
+ """
+ import os
+ import logging
+ from pathlib import Path
+ from dotenv import load_dotenv
+
+ logger = logging.getLogger(__name__)
+
+ def load_env_vars(env_path=None):
+     """
+     Load environment variables from .env file.
+
+     Args:
+         env_path: Optional path to .env file
+
+     Returns:
+         True if env vars were loaded, False otherwise
+     """
+     try:
+         # Default to .env in the current directory or parent directory
+         if not env_path:
+             if os.path.exists(".env"):
+                 env_path = ".env"
+             elif os.path.exists("../.env"):
+                 env_path = "../.env"
+             else:
+                 # Try to find .env in parent directories
+                 current_dir = Path.cwd()
+                 for parent in current_dir.parents:
+                     potential_path = parent / ".env"
+                     if potential_path.exists():
+                         env_path = str(potential_path)
+                         break
+
+         if env_path and os.path.exists(env_path):
+             load_dotenv(env_path)
+             logger.info(f"Loaded environment variables from {env_path}")
+
+             # Log detected providers without showing sensitive data
+             providers = []
+             if os.getenv("OPENAI_API_KEY"):
+                 providers.append("OpenAI")
+             if os.getenv("ANTHROPIC_API_KEY"):
+                 providers.append("Anthropic")
+             if os.getenv("AWS_ACCESS_KEY_ID") and os.getenv("AWS_SECRET_ACCESS_KEY"):
+                 providers.append("AWS Bedrock (for anthropic.* models)")
+             if os.getenv("VERTEX_PROJECT_ID") and os.getenv("VERTEX_REGION"):
+                 providers.append("Anthropic Vertex")
+             if os.getenv("GEMINI_API_KEY"):
+                 providers.append("Google Gemini")
+             if os.getenv("XAI_API_KEY"):
+                 providers.append("X.AI Grok")
+
+             if providers:
+                 logger.info(f"Detected LLM providers: {', '.join(providers)}")
+
+             return True
+         else:
+             logger.warning(f"Environment file not found: {env_path}")
+             return False
+
+     except Exception as e:
+         logger.error(f"Error loading environment variables: {e}")
+         return False
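Usage sketch; the search order is ./.env, then ../.env, then each ancestor of the working directory:

    from llm_dialog_manager.utils.environment import load_env_vars

    loaded = load_env_vars()  # or load_env_vars("/path/to/.env")
    # On success, keys such as OPENAI_API_KEY are available via os.environ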
llm_dialog_manager/utils/image_tools.py (new file)
@@ -0,0 +1,81 @@
+ """
+ Image handling utilities for LLM dialog manager
+ """
+ import base64
+ import io
+ from typing import Optional
+ import requests
+ from PIL import Image
+
+ def load_image_from_path(image_path: str) -> Image.Image:
+     """
+     Load an image from a local file path.
+
+     Args:
+         image_path: Path to the local image file
+
+     Returns:
+         PIL.Image object
+     """
+     try:
+         image = Image.open(image_path)
+         # Store the filename for reference
+         image.filename = image_path
+         return image
+     except Exception as e:
+         raise ValueError(f"Failed to load image from {image_path}: {e}")
+
+ def load_image_from_url(image_url: str) -> Image.Image:
+     """
+     Load an image from a URL.
+
+     Args:
+         image_url: URL to the image
+
+     Returns:
+         PIL.Image object
+     """
+     try:
+         response = requests.get(image_url, stream=True)
+         response.raise_for_status()
+         image = Image.open(io.BytesIO(response.content))
+         # Store the URL for reference
+         image.filename = image_url
+         return image
+     except Exception as e:
+         raise ValueError(f"Failed to load image from {image_url}: {e}")
+
+ def encode_image_to_base64(image: Image.Image, format: str = "PNG") -> str:
+     """
+     Convert a PIL.Image to base64 string.
+
+     Args:
+         image: PIL.Image object
+         format: Image format (default: PNG)
+
+     Returns:
+         Base64-encoded string
+     """
+     buffered = io.BytesIO()
+     image.save(buffered, format=format)
+     return base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+ def create_image_content_block(image: Image.Image, media_type: str = "image/png") -> dict:
+     """
+     Create a standard image content block for messages.
+
+     Args:
+         image: PIL.Image object
+         media_type: MIME type of the image
+
+     Returns:
+         Dictionary with image information
+     """
+     image_base64 = encode_image_to_base64(image)
+     return {
+         "type": "image_base64",
+         "image_base64": {
+             "media_type": media_type,
+             "data": image_base64
+         }
+     }
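These helpers compose into the provider-neutral "image_base64" block that the formatters above all understand; a sketch (file name illustrative):

    from llm_dialog_manager.utils.image_tools import (
        load_image_from_path, create_image_content_block,
    )

    image = load_image_from_path("photo.png")
    block = create_image_content_block(image)  # {"type": "image_base64", ...}
    message = {"role": "user", "content": ["What is in this image?", block]}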
llm_dialog_manager/utils/logging.py (new file)
@@ -0,0 +1,35 @@
+ """
+ Logging configuration utilities
+ """
+ import logging
+ import sys
+
+ def setup_logging(level=logging.INFO, log_file=None):
+     """
+     Configure logging for the application.
+
+     Args:
+         level: Logging level (default: INFO)
+         log_file: Optional file path to write logs to
+     """
+     # Create logger
+     logger = logging.getLogger('llm_dialog_manager')
+     logger.setLevel(level)
+
+     # Create formatter
+     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+     # Create console handler
+     console_handler = logging.StreamHandler(sys.stdout)
+     console_handler.setLevel(level)
+     console_handler.setFormatter(formatter)
+     logger.addHandler(console_handler)
+
+     # Create file handler if log_file specified
+     if log_file:
+         file_handler = logging.FileHandler(log_file)
+         file_handler.setLevel(level)
+         file_handler.setFormatter(formatter)
+         logger.addHandler(file_handler)
+
+     return logger
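Usage sketch; note that handlers accumulate on the 'llm_dialog_manager' logger, so this is intended to be called once at startup:

    from llm_dialog_manager.utils.logging import setup_logging

    logger = setup_logging(log_file="dialog.log")
    logger.info("ready")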
llm_dialog_manager-0.5.3.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llm_dialog_manager
- Version: 0.5.0
+ Version: 0.5.3
  Summary: A Python package for managing LLM chat conversation history
  Author-email: xihajun <work@2333.fun>
  License: MIT
llm_dialog_manager-0.5.3.dist-info/RECORD (new file)
@@ -0,0 +1,25 @@
+ llm_dialog_manager/__init__.py,sha256=T4hzIO_4IANFIcjmF0iEJ-_h7LR1_AT13kj5dc3hH-I,463
+ llm_dialog_manager/agent.py,sha256=Am2p9fClcHC75_Yjz6b1bof_KeUYmJocATQIQ-KKcr0,6472
+ llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
+ llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+ llm_dialog_manager/clients/__init__.py,sha256=f8NFm-9bXUJUrFy37FxpJlyyCqVwHPu3VUtfNdKUVmE,1114
+ llm_dialog_manager/clients/anthropic_client.py,sha256=5Zqyosoy60JkWI0cjQAxJ9Uk4bcqHrVEXkauEfC8Egs,5404
+ llm_dialog_manager/clients/base.py,sha256=hSkuLLKrdyoKMCRpPav5qbsKLaJLQ9gtMDSxxdAHdII,2107
+ llm_dialog_manager/clients/gemini_client.py,sha256=AwIVJubdawrNa4DobCKE0KdtwuLU5XzmOtN_I-SzStY,2540
+ llm_dialog_manager/clients/openai_client.py,sha256=8glZbLaUQw2gftOgMeV0TN6aTJ--2LuRxn7wiCK8K4Q,3266
+ llm_dialog_manager/clients/x_client.py,sha256=rTaEU9NWTws2XIqfNQt1vysT_fdhWCMAGpfF1onMU6c,2688
+ llm_dialog_manager/formatters/__init__.py,sha256=LdvQOhkGbF5_iT7Lu6kTssXKRXtuQTq9LluK4CSplws,787
+ llm_dialog_manager/formatters/anthropic.py,sha256=0xFDooeEYq4DO1RWiGxbhuiL_DwtDdUNLEuiJyXbQnQ,3247
+ llm_dialog_manager/formatters/base.py,sha256=YBpnfR4Pc7-P4TbCVNe4Q6pOUq9mXwQQR1U9X1NSmwE,711
+ llm_dialog_manager/formatters/gemini.py,sha256=im-ZPGOkwDiWcgDFC2d0owKmpdKJ9wiLh5treoV2E-4,2425
+ llm_dialog_manager/formatters/openai.py,sha256=i6AAORyEkjRQwXAf9QkKBIxZGtD7a2k0CvcPbilSjBQ,2807
+ llm_dialog_manager/formatters/x.py,sha256=qxZdidngZUDp0DazYbolye9BnjsL8MMniLVdlaJ_uik,3248
+ llm_dialog_manager/utils/__init__.py,sha256=ta2BdLF2CSzlbk5ZwArzar4E7eip4reQjMAncvY7qhQ,52
+ llm_dialog_manager/utils/environment.py,sha256=8jRTe1_JL24Wz4SdX968XKx8vJMA4-NrZtHhXv-9PJM,2322
+ llm_dialog_manager/utils/image_tools.py,sha256=Q23gpY-JY0MIGOVWnLl-QS2IrpCMBiyzn2Zu0dWov1c,2110
+ llm_dialog_manager/utils/logging.py,sha256=38E_k3OSz7CNFg3ZsildR7IMtn3CzWro7IRY-Sj7dcc,989
+ llm_dialog_manager-0.5.3.dist-info/licenses/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+ llm_dialog_manager-0.5.3.dist-info/METADATA,sha256=sUFwt6A0CwO5hGGf6iAgctBJwm2T6-CVG2hhWHBU2hk,4236
+ llm_dialog_manager-0.5.3.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ llm_dialog_manager-0.5.3.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+ llm_dialog_manager-0.5.3.dist-info/RECORD,,
llm_dialog_manager-0.5.0.dist-info/RECORD (removed)
@@ -1,9 +0,0 @@
- llm_dialog_manager/__init__.py,sha256=npNlH7E4TPUhiX5WNGa-OGwXx38OJ3ofZ6lh5f399Kk,463
- llm_dialog_manager/agent.py,sha256=Am2p9fClcHC75_Yjz6b1bof_KeUYmJocATQIQ-KKcr0,6472
- llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
- llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
- llm_dialog_manager-0.5.0.dist-info/licenses/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
- llm_dialog_manager-0.5.0.dist-info/METADATA,sha256=L-bvieVTyqHeCtxtKeQtHUZ-5vwT8_4XAVsAorNLxLA,4236
- llm_dialog_manager-0.5.0.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- llm_dialog_manager-0.5.0.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
- llm_dialog_manager-0.5.0.dist-info/RECORD,,