llm-dialog-manager 0.3.5__py3-none-any.whl → 0.4.1__py3-none-any.whl

llm_dialog_manager/__init__.py

@@ -1,4 +1,4 @@
 from .chat_history import ChatHistory
 from .agent import Agent
 
-__version__ = "0.3.5"
+__version__ = "0.4.1"
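Since `__init__.py` re-exports `__version__`, the bump is easy to confirm after upgrading; a minimal check:

```python
import llm_dialog_manager

# Expect "0.4.1" once the new wheel is installed
print(llm_dialog_manager.__version__)
```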
llm_dialog_manager/agent.py

@@ -38,8 +38,36 @@ def load_env_vars():
 
 load_env_vars()
 
+def format_messages_for_gemini(messages):
+    """
+    Convert messages from the standardized format to the Gemini format.
+    System messages should be passed via the GenerativeModel's
+    system_instruction parameter and are not handled here.
+    """
+    gemini_messages = []
+
+    for msg in messages:
+        role = msg["role"]
+        content = msg["content"]
+
+        # Skip system messages; they are set via system_instruction
+        if role == "system":
+            continue
+
+        # Handle user/assistant messages:
+        # if content is a single object, wrap it in a list
+        if not isinstance(content, list):
+            content = [content]
+
+        gemini_messages.append({
+            "role": role,
+            "parts": content  # content may contain text and FileMedia
+        })
+
+    return gemini_messages
+
 def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, Image.Image, Dict]]]]], max_tokens: int = 1000,
-               temperature: float = 0.5, api_key: Optional[str] = None,
+               temperature: float = 0.5, top_p: float = 1.0, top_k: int = 40, api_key: Optional[str] = None,
               base_url: Optional[str] = None, json_format: bool = False) -> str:
     """
     Generate a completion using the specified model and messages.
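To see the new helper's contract, here is a small illustrative call (the message list is hypothetical, but the behavior follows the function above: the system message is skipped and bare string content is wrapped in a list):

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe this image."},
]

print(format_messages_for_gemini(messages))
# -> [{'role': 'user', 'parts': ['Describe this image.']}]
```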
@@ -216,6 +244,9 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
         response = client.chat.completions.create(
             model=model,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
             messages=formatted_messages,
             temperature=temperature,
             response_format=response_format  # Added response_format
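The new `top_p`/`top_k` arguments are forwarded verbatim to the client call above. Note that `top_k` is not a parameter of OpenAI's official Chat Completions API, so this presumably targets OpenAI-compatible endpoints (reached via `base_url`) that accept it. A hypothetical call exercising the new knobs (the model name and API key are placeholders):

```python
# Placeholder model name and API key; top_k assumes an
# OpenAI-compatible backend that supports it.
text = completion(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Summarize this diff in one line."}],
    max_tokens=200,
    temperature=0.5,
    top_p=0.9,
    top_k=40,
    api_key="sk-...",
)
```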
@@ -226,33 +257,31 @@ def completion(model: str, messages: List[Dict[str, Union[str, List[Union[str, I
 
             # If OpenAI-style API fails, fall back to Google's genai library
             logger.info("Falling back to Google's genai library")
             genai.configure(api_key=api_key)
-
-            # Convert messages to Gemini format
-            gemini_messages = []
+            system_instruction = ""
             for msg in messages:
                 if msg["role"] == "system":
-                    # Prepend system message to first user message if exists
-                    if gemini_messages:
-                        first_msg = gemini_messages[0]
-                        if "parts" in first_msg and len(first_msg["parts"]) > 0:
-                            first_msg["parts"][0] = f"{msg['content']}\n\n{first_msg['parts'][0]}"
-                else:
-                    gemini_messages.append({"role": msg["role"], "parts": msg["content"]})
-
-            # Set response_mime_type based on json_format
+                    system_instruction = msg["content"]
+                    break
+
+            # Convert the remaining messages to Gemini format
+            gemini_messages = format_messages_for_gemini(messages)
             mime_type = "application/json" if json_format else "text/plain"
+            generation_config = genai.types.GenerationConfig(
+                temperature=temperature,
+                top_p=top_p,
+                top_k=top_k,
+                max_output_tokens=max_tokens,
+                response_mime_type=mime_type
+            )
 
-            # Create Gemini model and generate response
-            model_instance = genai.GenerativeModel(model_name=model)
-            response = model_instance.generate_content(
-                gemini_messages,
-                generation_config=genai.types.GenerationConfig(
-                    temperature=temperature,
-                    response_mime_type=mime_type,  # Modified based on json_format
-                    max_output_tokens=max_tokens
-                )
+            model_instance = genai.GenerativeModel(
+                model_name=model,
+                system_instruction=system_instruction,  # the system message is passed in here
+                generation_config=generation_config
             )
 
+            response = model_instance.generate_content(gemini_messages, generation_config=generation_config)
+
             return response.text
 
     elif "grok" in model:
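The net effect of this refactor: the system prompt now travels through `GenerativeModel(system_instruction=...)` instead of being spliced into the first user message, and all sampling settings live in one `GenerationConfig`. A minimal standalone sketch of the same pattern against `google.generativeai` (model name, key, and prompts are placeholders; passing the config at both construction and call time mirrors the code above, where the per-call value takes precedence):

```python
import google.generativeai as genai

genai.configure(api_key="...")  # placeholder key

generation_config = genai.types.GenerationConfig(
    temperature=0.5,
    top_p=1.0,
    top_k=40,
    max_output_tokens=1000,
    response_mime_type="text/plain",
)

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    system_instruction="You are a helpful assistant.",
    generation_config=generation_config,
)

response = model.generate_content(
    [{"role": "user", "parts": ["Say hello."]}],
    generation_config=generation_config,
)
print(response.text)
```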
@@ -395,7 +424,7 @@ class Agent:
         # Start a new user message with the image
         self.history.add_message([image_block], "user")
 
-    def generate_response(self, max_tokens=3585, temperature=0.7, json_format: bool = False) -> str:
+    def generate_response(self, max_tokens=3585, temperature=0.7, top_p=1.0, top_k=40, json_format: bool = False) -> str:
         """Generate a response from the agent.
 
         Args:
@@ -410,12 +439,14 @@ class Agent:
             raise ValueError("No messages in history to generate response from")
 
         messages = self.history.messages
-
+        print(self.model_name)
         response_text = completion(
             model=self.model_name,
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
             api_key=self.api_key,
             json_format=json_format  # Pass json_format to completion
         )
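Two things worth flagging in this hunk: the added `print(self.model_name)` reads like leftover debug output that shipped in the release, and the new sampling arguments are simply threaded through to `completion()`. A hypothetical call with the extended signature:

```python
agent = Agent("gemini-1.5-flash", "you are an assistant", memory_enabled=True)
agent.add_message("user", "Name three uses of a Python wheel.")

# top_p/top_k now reach the underlying completion() call
reply = agent.generate_response(max_tokens=512, temperature=0.7, top_p=0.9, top_k=40)
```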
@@ -486,13 +517,13 @@ class Agent:
 if __name__ == "__main__":
     # Example Usage
     # Create an Agent instance (Gemini model)
-    agent = Agent("gemini-1.5-flash", "you are an assistant", memory_enabled=True)
+    agent = Agent("gemini-1.5-flash", "you are Jack101", memory_enabled=True)
 
     # Add an image
     agent.add_image(image_path="/Users/junfan/Projects/Personal/oneapi/dialog_manager/example.png")
 
     # Add a user message
-    agent.add_message("user", "What's in this image?")
+    agent.add_message("user", "Who are you? What's in this image?")
 
     # Generate response with JSON format enabled
     try:
llm_dialog_manager-0.4.1.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm_dialog_manager
-Version: 0.3.5
+Version: 0.4.1
 Summary: A Python package for managing LLM chat conversation history
 Author-email: xihajun <work@2333.fun>
 License: MIT
@@ -64,6 +64,7 @@ A Python package for managing AI chat conversation history with support for mult
 - Memory management options
 - Conversation search and indexing
 - Rich conversation display options
+- Vision & JSON output enabled [20240111]
 
 ## Installation
 
llm_dialog_manager-0.4.1.dist-info/RECORD (added)

@@ -0,0 +1,9 @@
+llm_dialog_manager/__init__.py,sha256=hTHvsXzvD5geKgv2XERYcp2f-T3LoVVc3arXfPtNS1k,86
+llm_dialog_manager/agent.py,sha256=ZKO3eKHTKcbmYpVRRIpzDy7Tlp_VgQ90ewr1758Ozgs,23931
+llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
+llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
+llm_dialog_manager-0.4.1.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
+llm_dialog_manager-0.4.1.dist-info/METADATA,sha256=LER5FN6lFQFPs_8A-fIM7VYmqN-fh0nCD6Dt8vslsiY,4194
+llm_dialog_manager-0.4.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+llm_dialog_manager-0.4.1.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
+llm_dialog_manager-0.4.1.dist-info/RECORD,,

llm_dialog_manager-0.3.5.dist-info/RECORD (removed)

@@ -1,9 +0,0 @@
-llm_dialog_manager/__init__.py,sha256=J7L76hDTCNM56mprhbMclqCG04IacKiIgaHm8Ty7shQ,86
-llm_dialog_manager/agent.py,sha256=aMeSL7rV7sSGgJzCkXp_ahiq569eTy-9Jfepam4pKUU,23064
-llm_dialog_manager/chat_history.py,sha256=DKKRnj_M6h-4JncnH6KekMTghX7vMgdN3J9uOwXKzMU,10347
-llm_dialog_manager/key_manager.py,sha256=shvxmn4zUtQx_p-x1EFyOmnk-WlhigbpKtxTKve-zXk,4421
-llm_dialog_manager-0.3.5.dist-info/LICENSE,sha256=vWGbYgGuWpWrXL8-xi6pNcX5UzD6pWoIAZmcetyfbus,1064
-llm_dialog_manager-0.3.5.dist-info/METADATA,sha256=y9rfQ9rcmwrScQJYK-0PjRPCKFWqAKFsm_8NoSwiloI,4152
-llm_dialog_manager-0.3.5.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-llm_dialog_manager-0.3.5.dist-info/top_level.txt,sha256=u2EQEXW0NGAt0AAHT7jx1odXZ4rZfjcgbmJhvKFuMkI,19
-llm_dialog_manager-0.3.5.dist-info/RECORD,,
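Only the `__init__.py` and `agent.py` hashes changed, which matches the code diff above; `chat_history.py` and `key_manager.py` are byte-identical across the two releases. Each RECORD line is `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 SHA-256 of the file with padding stripped (per PEP 376/427). A sketch for recomputing an entry:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: urlsafe-base64 SHA-256 digest
    (padding stripped) plus the file size in bytes."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# e.g. record_entry("llm_dialog_manager/agent.py") should reproduce
# the agent.py line above when run against the 0.4.1 wheel contents
```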