bioguider 0.2.32__py3-none-any.whl → 0.2.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of bioguider has been flagged as potentially problematic.

@@ -67,8 +67,17 @@ def get_llm(
     api_version: str=None,
     azure_deployment: str=None,
     temperature: float = 0.0,
-    max_tokens: int = 4096,
+    max_tokens: int = 16384, # Set high by default - enough for any document type
 ):
+    """
+    Create an LLM instance with appropriate parameters based on model type and API version.
+
+    Handles parameter compatibility across different models and API versions:
+    - DeepSeek models: Use max_tokens parameter
+    - GPT models (newer): Use max_completion_tokens parameter
+    - GPT-5+: Don't support custom temperature (uses default)
+    """
+
     if model_name.startswith("deepseek"):
         chat = ChatDeepSeek(
             api_key=api_key,
@@ -77,23 +86,38 @@ def get_llm(
             max_tokens=max_tokens,
         )
     elif model_name.startswith("gpt"):
-        chat = AzureChatOpenAI(
-            api_key=api_key,
-            azure_endpoint=azure_endpoint,
-            api_version=api_version,
-            azure_deployment=azure_deployment,
-            model=model_name,
-            temperature=temperature,
-            max_tokens=max_tokens,
-        )
+        # Base parameters common to all GPT models
+        llm_params = {
+            "api_key": api_key,
+            "azure_endpoint": azure_endpoint,
+            "api_version": api_version,
+            "azure_deployment": azure_deployment,
+            "model": model_name,
+        }
+
+        # Determine token limit parameter name based on API version
+        # Newer APIs (2024-08+) use max_completion_tokens instead of max_tokens
+        use_completion_tokens = api_version and api_version >= "2024-08-01-preview"
+        token_param = "max_completion_tokens" if use_completion_tokens else "max_tokens"
+        llm_params[token_param] = max_tokens
+
+        # Handle temperature parameter based on model capabilities
+        # GPT-5+ models don't support custom temperature values
+        supports_temperature = not any(restricted in model_name for restricted in ["gpt-5", "o1", "o3"])
+        if supports_temperature:
+            llm_params["temperature"] = temperature
+
+        chat = AzureChatOpenAI(**llm_params)
     else:
-        raise ValueError("Invalid model name")
-    # validate chat
+        raise ValueError(f"Unsupported model type: {model_name}")
+
+    # Validate the LLM instance with a simple test
     try:
         chat.invoke("Hi")
     except Exception as e:
-        print(e)
+        logger.error(f"Failed to initialize LLM {model_name}: {e}")
         return None
+
     return chat
 
 def pretty_print(message, printout = True):
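
The substance of the change is the rewritten gpt branch above: instead of passing a fixed argument list to AzureChatOpenAI, the function now assembles its keyword arguments conditionally, picking the token-limit parameter name from the API version and omitting temperature for models that reject custom values. The following is a minimal, standalone sketch of that selection logic; the helper name select_llm_params and the sample values are illustrative, not part of bioguider:

# Minimal sketch of the conditional-kwargs pattern used in the new "gpt" branch.
# select_llm_params is a hypothetical helper, not part of the package.

def select_llm_params(model_name, api_version, temperature=0.0, max_tokens=16384):
    params = {"model": model_name, "api_version": api_version}

    # Azure API versions are ISO-style date strings, so plain string
    # comparison orders them chronologically; a lexicographic check is
    # enough to gate on the API version.
    if api_version and api_version >= "2024-08-01-preview":
        params["max_completion_tokens"] = max_tokens
    else:
        params["max_tokens"] = max_tokens

    # Models that reject a custom temperature simply omit the key,
    # leaving the service default in place.
    if not any(m in model_name for m in ("gpt-5", "o1", "o3")):
        params["temperature"] = temperature

    return params


print(select_llm_params("gpt-4o", "2024-02-01"))
# {'model': 'gpt-4o', 'api_version': '2024-02-01', 'max_tokens': 16384, 'temperature': 0.0}
print(select_llm_params("gpt-5", "2024-12-01-preview"))
# {'model': 'gpt-5', 'api_version': '2024-12-01-preview', 'max_completion_tokens': 16384}

The dict-then-unpack shape (AzureChatOpenAI(**llm_params) in the diff) is what lets the function leave a parameter out entirely, rather than sending a sentinel value the API might reject.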
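
One thing the diff leaves implicit: the new except branch calls logger.error(...), so a module-level logger must be defined somewhere above the shown hunks. Assuming the standard library logging module (an assumption; the actual setup is not visible in this diff), the conventional pattern would be:

import logging

# Hypothetical module-level logger; the except branch in get_llm assumes a
# name like this already exists. Its definition is not shown in the diff above.
logger = logging.getLogger(__name__)

Note also that the failure behavior is otherwise unchanged: after the chat.invoke("Hi") probe fails, get_llm still returns None rather than raising, so callers must check for None, and every successful construction costs one extra model round trip.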