llm-dialog-manager 0.3.5__tar.gz → 0.4.2__tar.gz

Files changed (18)
  1. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/PKG-INFO +2 -1
  2. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/README.md +1 -0
  3. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager/__init__.py +1 -1
  4. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager/agent.py +68 -32
  5. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/PKG-INFO +2 -1
  6. llm_dialog_manager-0.4.2/pyproject.toml +32 -0
  7. llm_dialog_manager-0.3.5/pyproject.toml +0 -64
  8. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/LICENSE +0 -0
  9. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager/chat_history.py +0 -0
  10. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager/key_manager.py +0 -0
  11. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/SOURCES.txt +0 -0
  12. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/dependency_links.txt +0 -0
  13. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/requires.txt +0 -0
  14. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/llm_dialog_manager.egg-info/top_level.txt +0 -0
  15. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/setup.cfg +0 -0
  16. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/tests/test_agent.py +0 -0
  17. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/tests/test_chat_history.py +0 -0
  18. {llm_dialog_manager-0.3.5 → llm_dialog_manager-0.4.2}/tests/test_key_manager.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: llm_dialog_manager
- Version: 0.3.5
+ Version: 0.4.2
  Summary: A Python package for managing LLM chat conversation history
  Author-email: xihajun <work@2333.fun>
  License: MIT
@@ -64,6 +64,7 @@ A Python package for managing AI chat conversation history with support for mult
  - Memory management options
  - Conversation search and indexing
  - Rich conversation display options
+ - Vision & Json Output enabled [20240111]

  ## Installation

@@ -17,6 +17,7 @@ A Python package for managing AI chat conversation history with support for mult
  - Memory management options
  - Conversation search and indexing
  - Rich conversation display options
+ - Vision & Json Output enabled [20240111]

  ## Installation

@@ -1,4 +1,4 @@
  from .chat_history import ChatHistory
  from .agent import Agent

- __version__ = "0.3.5"
+ __version__ = "0.4.2"
@@ -38,15 +38,46 @@ def load_env_vars():

  load_env_vars()

+ def format_messages_for_gemini(messages):
+     """
+     Convert messages from the standardized format into the Gemini format.
+     System messages should be passed in via the GenerativeModel's
+     system_instruction parameter and are not handled in this function.
+     """
+     gemini_messages = []
+
+     for msg in messages:
+         role = msg["role"]
+         content = msg["content"]
+
+         # Skip system messages; they are set via system_instruction
+         if role == "system":
+             continue
+
+         # Handle user/assistant messages:
+         # if content is a single object, convert it to a list
+         if not isinstance(content, list):
+             content = [content]
+
+         gemini_messages.append({
+             "role": role,
+             "parts": content  # content may contain text and FileMedia
+         })
+
+     return gemini_messages
+
  def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]], max_tokens: int = 1000,
-                temperature: float = 0.5, api_key: Optional[str] = None,
+                temperature: float = 0.5, top_p: float = 1.0, top_k: int = 40, api_key: Optional[str] = None,
                 base_url: Optional[str] = None, json_format: bool = False) -> str:
      """
      Generate a completion using the specified model and messages.
      """
      try:
          service = ""
-         if "claude" in model:
+         if "openai" in model:
+             service = "openai"
+             model
+         elif "claude" in model:
              service = "anthropic"
          elif "gemini" in model:
              service = "gemini"
@@ -64,7 +95,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I

      def format_messages_for_api(model, messages):
          """Convert ChatHistory messages to the format required by the specific API."""
-         if "claude" in model:
+         if "claude" in model and "openai" not in model:
              formatted = []
              system_msg = ""
              if messages and messages[0]["role"] == "system":
@@ -113,7 +144,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
                  formatted.append({"role": msg["role"], "content": combined_content})
              return system_msg, formatted

-         elif "gemini" in model or "gpt" in model or "grok" in model:
+         elif ("gemini" in model or "gpt" in model or "grok" in model) and "openai" not in model:
              formatted = []
              for msg in messages:
                  content = msg["content"]
@@ -163,7 +194,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I

          system_msg, formatted_messages = format_messages_for_api(model, messages.copy())

-         if "claude" in model:
+         if "claude" in model and "openai" not in model:
              # Check for Vertex configuration
              vertex_project_id = os.getenv('VERTEX_PROJECT_ID')
              vertex_region = os.getenv('VERTEX_REGION')
@@ -204,7 +235,7 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I

              return response.completion

-         elif "gemini" in model:
+         elif "gemini" in model and "openai" not in model:
              try:
                  # First try OpenAI-style API
                  client = openai.OpenAI(
@@ -216,6 +247,9 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I

                  response = client.chat.completions.create(
                      model=model,
+                     max_tokens=max_tokens,
+                     top_p=top_p,
+                     top_k=top_k,
                      messages=formatted_messages,
                      temperature=temperature,
                      response_format=response_format  # Added response_format
@@ -226,36 +260,34 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
                  # If OpenAI-style API fails, fall back to Google's genai library
                  logger.info("Falling back to Google's genai library")
                  genai.configure(api_key=api_key)
-
-                 # Convert messages to Gemini format
-                 gemini_messages = []
+                 system_instruction = ""
                  for msg in messages:
                      if msg["role"] == "system":
-                         # Prepend system message to first user message if exists
-                         if gemini_messages:
-                             first_msg = gemini_messages[0]
-                             if "parts" in first_msg and len(first_msg["parts"]) > 0:
-                                 first_msg["parts"][0] = f"{msg['content']}\n\n{first_msg['parts'][0]}"
-                     else:
-                         gemini_messages.append({"role": msg["role"], "parts": msg["content"]})
-
-                 # Set response_mime_type based on json_format
+                         system_instruction = msg["content"]
+                         break
+
+                 # Convert the remaining messages to the Gemini format
+                 gemini_messages = format_messages_for_gemini(messages)
                  mime_type = "application/json" if json_format else "text/plain"
+                 generation_config = genai.types.GenerationConfig(
+                     temperature=temperature,
+                     top_p=top_p,
+                     top_k=top_k,
+                     max_output_tokens=max_tokens,
+                     response_mime_type=mime_type
+                 )

-                 # Create Gemini model and generate response
-                 model_instance = genai.GenerativeModel(model_name=model)
-                 response = model_instance.generate_content(
-                     gemini_messages,
-                     generation_config=genai.types.GenerationConfig(
-                         temperature=temperature,
-                         response_mime_type=mime_type,  # Modified based on json_format
-                         max_output_tokens=max_tokens
-                     )
+                 model_instance = genai.GenerativeModel(
+                     model_name=model,
+                     system_instruction=system_instruction,  # the system message is passed in here
+                     generation_config=generation_config
                  )

+                 response = model_instance.generate_content(gemini_messages, generation_config=generation_config)
+
                  return response.text

-         elif "grok" in model:
+         elif "grok" in model and "openai" not in model:
              # Randomly choose between OpenAI and Anthropic SDK
              use_anthropic = random.choice([True, False])
@@ -297,6 +329,8 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I

              return response.choices[0].message.content

          else:  # OpenAI models
+             if model.endswith("-openai"):
+                 model = model[:-7]  # Remove last 7 characters ("-openai")
              client = openai.OpenAI(api_key=api_key, base_url=base_url)
              # Set response_format based on json_format
              response_format = {"type": "json_object"} if json_format else {"type": "plain_text"}
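Combined with the "openai" not in model guards added above, the release appears to adopt a naming convention: suffixing a model name with "-openai" skips the native Claude/Gemini/Grok branches and routes the call through the OpenAI-compatible client, with the suffix stripped just before the request. A tiny illustration (the model name is hypothetical):

    model = "gemini-1.5-pro-openai"  # "openai" in the name selects the OpenAI branch
    if model.endswith("-openai"):
        model = model[:-7]           # len("-openai") == 7, leaving "gemini-1.5-pro"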
@@ -395,7 +429,7 @@ class Agent:
          # Start a new user message with the image
          self.history.add_message([image_block], "user")

-     def generate_response(self, max_tokens=3585, temperature=0.7, json_format: bool = False) -> str:
+     def generate_response(self, max_tokens=3585, temperature=0.7, top_p=1.0, top_k=40, json_format: bool = False) -> str:
          """Generate a response from the agent.

          Args:
@@ -410,12 +444,14 @@ class Agent:
              raise ValueError("No messages in history to generate response from")

          messages = self.history.messages
-
+         print(self.model_name)
          response_text = completion(
              model=self.model_name,
              messages=messages,
              max_tokens=max_tokens,
              temperature=temperature,
+             top_p=top_p,
+             top_k=top_k,
              api_key=self.api_key,
              json_format=json_format  # Pass json_format to completion
          )
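A hedged usage sketch of the widened signature, with the new sampling knobs passed through to completion(); the prompt is made up:

    agent = Agent("gemini-1.5-flash", "you are an assistant", memory_enabled=True)
    agent.add_message("user", "Summarize this conversation.")
    reply = agent.generate_response(max_tokens=512, temperature=0.7, top_p=0.9, top_k=20)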
@@ -486,13 +522,13 @@ class Agent:
  if __name__ == "__main__":
      # Example Usage
      # Create an Agent instance (Gemini model)
-     agent = Agent("gemini-1.5-flash", "you are an assistant", memory_enabled=True)
+     agent = Agent("gemini-1.5-flash", "you are Jack101", memory_enabled=True)

      # Add an image
      agent.add_image(image_path="/Users/junfan/Projects/Personal/oneapi/dialog_manager/example.png")

      # Add a user message
-     agent.add_message("user", "What's in this image?")
+     agent.add_message("user", "Who are you? What's in this image?")

      # Generate response with JSON format enabled
      try:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: llm_dialog_manager
- Version: 0.3.5
+ Version: 0.4.2
  Summary: A Python package for managing LLM chat conversation history
  Author-email: xihajun <work@2333.fun>
  License: MIT
@@ -64,6 +64,7 @@ A Python package for managing AI chat conversation history with support for mult
  - Memory management options
  - Conversation search and indexing
  - Rich conversation display options
+ - Vision & Json Output enabled [20240111]

  ## Installation

@@ -0,0 +1,32 @@
+ [build-system]
+ requires = [ "setuptools>=61.0", "wheel",]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "llm_dialog_manager"
+ version = "0.4.2"
+ description = "A Python package for managing LLM chat conversation history"
+ readme = "README.md"
+ classifiers = [ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Scientific/Engineering :: Artificial Intelligence",]
+ requires-python = ">=3.7"
+ dependencies = [ "openai>=1.54.2", "anthropic>=0.39.0", "google-generativeai>=0.1.0", "python-dotenv>=1.0.0", "typing-extensions>=4.0.0", "uuid>=1.30",]
+ [[project.authors]]
+ name = "xihajun"
+ email = "work@2333.fun"
+
+ [project.license]
+ text = "MIT"
+
+ [project.optional-dependencies]
+ dev = [ "pytest>=8.0.0", "pytest-asyncio>=0.21.1", "pytest-cov>=4.1.0", "black>=23.9.1", "isort>=5.12.0",]
+ test = [ "pytest>=6.0", "pytest-asyncio>=0.14.0", "pytest-cov>=2.0",]
+ lint = [ "black>=22.0", "isort>=5.0",]
+ all = [ "pytest>=8.0.0", "pytest-asyncio>=0.21.1", "pytest-cov>=4.1.0", "black>=23.9.1", "isort>=5.12.0",]
+
+ [project.urls]
+ "Bug Tracker" = "https://github.com/xihajun/llm_dialog_manager/issues"
+ Documentation = "https://github.com/xihajun/llm_dialog_manager#readme"
+ "Source Code" = "https://github.com/xihajun/llm_dialog_manager"
+
+ [tool.setuptools]
+ packages = [ "llm_dialog_manager",]
@@ -1,64 +0,0 @@
- [build-system]
- requires = ["setuptools>=61.0", "wheel"]
- build-backend = "setuptools.build_meta"
-
- [project]
- name = "llm_dialog_manager"
- version = "0.3.5"
- description = "A Python package for managing LLM chat conversation history"
- readme = "README.md"
- authors = [{ name = "xihajun", email = "work@2333.fun" }]
- license = { text = "MIT" }
- classifiers = [
-     "Development Status :: 3 - Alpha",
-     "Intended Audience :: Developers",
-     "License :: OSI Approved :: MIT License",
-     "Operating System :: OS Independent",
-     "Programming Language :: Python :: 3.8",
-     "Programming Language :: Python :: 3.9",
-     "Programming Language :: Python :: 3.10",
-     "Topic :: Software Development :: Libraries :: Python Modules",
-     "Topic :: Scientific/Engineering :: Artificial Intelligence",
- ]
- requires-python = ">=3.7"
- dependencies = [
-     "openai>=1.54.2",
-     "anthropic>=0.39.0",
-     "google-generativeai>=0.1.0",
-     "python-dotenv>=1.0.0",
-     "typing-extensions>=4.0.0",
-     "uuid>=1.30",
- ]
-
- [project.optional-dependencies]
- dev = [
-     "pytest>=8.0.0",
-     "pytest-asyncio>=0.21.1",
-     "pytest-cov>=4.1.0",
-     "black>=23.9.1",
-     "isort>=5.12.0",
- ]
- test = [
-     "pytest>=6.0",
-     "pytest-asyncio>=0.14.0",
-     "pytest-cov>=2.0",
- ]
- lint = [
-     "black>=22.0",
-     "isort>=5.0",
- ]
- all = [
-     "pytest>=8.0.0",
-     "pytest-asyncio>=0.21.1",
-     "pytest-cov>=4.1.0",
-     "black>=23.9.1",
-     "isort>=5.12.0",
- ]
-
- [project.urls]
- "Bug Tracker" = "https://github.com/xihajun/llm_dialog_manager/issues"
- "Documentation" = "https://github.com/xihajun/llm_dialog_manager#readme"
- "Source Code" = "https://github.com/xihajun/llm_dialog_manager"
-
- [tool.setuptools]
- packages = ["llm_dialog_manager"]