LLM-Bridge 1.11.10-py3-none-any.whl → 1.12.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -58,16 +58,27 @@ class NonStreamClaudeClient(ClaudeClient):
         try:
             logging.info(f"messages: {self.messages}")
 
-            message = await self.client.beta.messages.create(
-                model=self.model,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature,
-                system=self.system,
-                messages=serialize(self.messages),
-                thinking=self.thinking,
-                betas=self.betas,
-                tools=self.tools,
-            )
+            if self.thinking:
+                message = await self.client.beta.messages.create(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                    thinking=self.thinking,
+                )
+            else:
+                message = await self.client.beta.messages.create(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                )
 
             return await process_claude_non_stream_response(
                 message=message,
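The two branches differ only in whether `thinking` is forwarded, presumably because the beta Messages API does not accept `thinking=None`. A minimal single-call sketch (not the package's code), assuming the Anthropic SDK's `NOT_GIVEN` sentinel, which the generated client treats as an omitted keyword, is acceptable here:

```python
# Sketch only: collapse the duplicated call sites by substituting NOT_GIVEN
# when extended thinking is disabled.
from anthropic import NOT_GIVEN

message = await self.client.beta.messages.create(
    model=self.model,
    max_tokens=self.max_tokens,
    temperature=self.temperature,
    system=self.system,
    messages=serialize(self.messages),
    betas=self.betas,
    tools=self.tools,
    thinking=self.thinking if self.thinking else NOT_GIVEN,  # omitted when None
)
```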
@@ -16,23 +16,41 @@ class StreamClaudeClient(ClaudeClient):
         logging.info(f"messages: {self.messages}")
 
         try:
-            async with self.client.beta.messages.stream(
-                model=self.model,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature,
-                system=self.system,
-                messages=serialize(self.messages),
-                thinking=self.thinking,
-                betas=self.betas,
-                tools=self.tools,
-            ) as stream:
-                async for event in stream:
-                    yield await process_claude_stream_response(
-                        event=event,
-                        input_tokens=self.input_tokens,
-                        client=self.client,
+            if self.thinking:
+                async with self.client.beta.messages.stream(
+                    model=self.model,
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                    thinking=self.thinking,
+                ) as stream:
+                    async for event in stream:
+                        yield await process_claude_stream_response(
+                            event=event,
+                            input_tokens=self.input_tokens,
+                            client=self.client,
+                            model=self.model,
+                        )
+            else:
+                async with self.client.beta.messages.stream(
                     model=self.model,
-                    )
+                    max_tokens=self.max_tokens,
+                    temperature=self.temperature,
+                    system=self.system,
+                    messages=serialize(self.messages),
+                    betas=self.betas,
+                    tools=self.tools,
+                ) as stream:
+                    async for event in stream:
+                        yield await process_claude_stream_response(
+                            event=event,
+                            input_tokens=self.input_tokens,
+                            client=self.client,
+                            model=self.model,
+                        )
 
         except Exception as e:
             logging.exception(e)
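The stream client gets the same two-branch treatment. Consuming it is unchanged; a minimal sketch based on the workflow example in the package README (see the removed METADATA below):

```python
# chat_client is a StreamClaudeClient obtained via create_chat_client(..., stream=True).
text = ""
async for chunk in chat_client.generate_stream_response():
    if chunk.text:
        text += chunk.text                 # incremental text deltas
    if chunk.input_tokens:
        input_tokens = chunk.input_tokens  # reported once known
```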
@@ -18,10 +18,10 @@ class ClaudeClient(ChatClient):
             system: str,
             client: anthropic.AsyncAnthropic,
             max_tokens: int,
-            thinking: ThinkingConfigEnabledParam,
             betas: list[AnthropicBetaParam],
             input_tokens: int,
             tools: list[BetaToolUnionParam],
+            thinking: ThinkingConfigEnabledParam,
     ):
         self.model = model
         self.messages = messages
@@ -29,10 +29,10 @@ class ClaudeClient(ChatClient):
         self.system = system
         self.client = client
         self.max_tokens = max_tokens
-        self.thinking = thinking
         self.betas = betas
         self.input_tokens = input_tokens
         self.tools = tools
+        self.thinking = thinking
 
     async def generate_non_stream_response(self) -> ChatResponse:
         raise NotImplementedError
@@ -8,86 +8,104 @@ from llm_bridge.type.message import Message
 
 
 async def create_chat_client(
+        api_keys: dict,
         messages: list[Message],
         model: str,
         api_type: str,
         temperature: float,
         stream: bool,
-        api_keys: dict
+        thought: bool,
+        code_execution: bool,
 ) -> ChatClient:
     if api_type == 'OpenAI':
         return await create_openai_client(
+            api_keys={"OPENAI_API_KEY": api_keys["OPENAI_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"OPENAI_API_KEY": api_keys["OPENAI_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
        )
     elif api_type == 'OpenAI-Azure':
         return await create_openai_client(
+            api_keys={
+                "AZURE_API_KEY": api_keys["AZURE_API_KEY"],
+                "AZURE_API_BASE": api_keys["AZURE_API_BASE"]
+            },
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={
-                "AZURE_API_KEY": api_keys["AZURE_API_KEY"],
-                "AZURE_API_BASE": api_keys["AZURE_API_BASE"]
-            }
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'OpenAI-GitHub':
         return await create_openai_client(
+            api_keys={"GITHUB_API_KEY": api_keys["GITHUB_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"GITHUB_API_KEY": api_keys["GITHUB_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Grok':
         return await create_openai_client(
+            api_keys={"XAI_API_KEY": api_keys["XAI_API_KEY"]},
             messages=messages,
             model=model,
             api_type=api_type,
             temperature=temperature,
             stream=stream,
-            api_keys={"XAI_API_KEY": api_keys["XAI_API_KEY"]}
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Free':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_FREE_API_KEY"],
+            vertexai=False,
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_FREE_API_KEY"],
-            vertexai=False,
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Paid':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_PAID_API_KEY"],
+            vertexai=False,
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_PAID_API_KEY"],
-            vertexai=False,
+            thought=thought,
+            code_execution=code_execution,
         )
     elif api_type == 'Gemini-Vertex':
         return await create_gemini_client(
+            api_key=api_keys["GEMINI_VERTEX_API_KEY"],
             messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["GEMINI_VERTEX_API_KEY"],
+            thought=thought,
+            code_execution=code_execution,
             vertexai=True,
         )
     elif api_type == 'Claude':
         return await create_claude_client(
+            api_key=api_keys["ANTHROPIC_API_KEY"],
            messages=messages,
             model=model,
             temperature=temperature,
             stream=stream,
-            api_key=api_keys["ANTHROPIC_API_KEY"]
+            thought=thought,
+            code_execution=code_execution,
         )
     else:
         raise HTTPException(status_code=400, detail="Invalid API type")
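Both new flags are threaded through every factory, so callers now opt in to reasoning and code execution per request. A usage sketch (the model name and key are illustrative, not defaults from the package):

```python
chat_client = await create_chat_client(
    api_keys={"ANTHROPIC_API_KEY": "sk-ant-..."},  # illustrative placeholder
    messages=messages,
    model="claude-sonnet-4-0",
    api_type="Claude",
    temperature=0.7,
    stream=True,
    thought=True,          # enable extended thinking / reasoning
    code_execution=False,  # keep the code-execution tool off
)
```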
@@ -11,11 +11,13 @@ from llm_bridge.type.message import Message
 
 
 async def create_claude_client(
+        api_key: str,
         messages: list[Message],
         model: str,
         temperature: float,
         stream: bool,
-        api_key: str,
+        thought: bool,
+        code_execution: bool,
 ):
     client = anthropic.AsyncAnthropic(
         api_key=api_key,
@@ -38,11 +40,13 @@ async def create_claude_client(
         32_000,  # Max output: Claude 4.5 64K; Claude 4.1 32K
         200_000 - input_tokens,  # Context window: Claude Sonnet 4.5 beta: 1M; otherwise 200K
     )
-    thinking = ThinkingConfigEnabledParam(
-        type="enabled",
-        budget_tokens=max(1024, max_tokens // 2),  # Minimum budget tokens: 1024
-    )
-    temperature = 1
+    thinking = None
+    if thought:
+        thinking = ThinkingConfigEnabledParam(
+            type="enabled",
+            budget_tokens=max(1024, max_tokens // 2),  # Minimum budget tokens: 1024
+        )
+        temperature = 1
     betas: list[AnthropicBetaParam] = [
         "context-1m-2025-08-07",
         "output-128k-2025-02-19",
@@ -53,11 +57,14 @@ async def create_claude_client(
             type="web_search_20250305",
             name="web_search",
         ),
-        BetaCodeExecutionTool20250825Param(
-            type="code_execution_20250825",
-            name="code_execution",
-        )
     ]
+    if code_execution:
+        tools.append(
+            BetaCodeExecutionTool20250825Param(
+                type="code_execution_20250825",
+                name="code_execution",
+            )
+        )
 
     if stream:
         return StreamClaudeClient(
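Taken together, the factory now derives `thinking` and `tools` from the two flags. `ThinkingConfigEnabledParam` is a TypedDict in the anthropic SDK, and the Anthropic API requires `temperature=1` whenever extended thinking is enabled, which is presumably why the override moved inside the `if thought:` branch. The net behavior, sketched with placeholder tool names:

```python
# Sketch of the flag semantics after this change; web_search_tool and
# code_execution_tool stand in for the params built above.
if thought:
    thinking = {"type": "enabled", "budget_tokens": max(1024, max_tokens // 2)}
    temperature = 1  # required by the API when extended thinking is on
else:
    thinking = None  # the clients then omit the argument entirely

tools = [web_search_tool]              # always attached
if code_execution:
    tools.append(code_execution_tool)  # opt-in beta tool
```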
@@ -67,10 +74,10 @@ async def create_claude_client(
             system=system,
             client=client,
             max_tokens=max_tokens,
-            thinking=thinking,
             betas=betas,
             input_tokens=input_tokens,
             tools=tools,
+            thinking=thinking,
         )
     else:
         return NonStreamClaudeClient(
@@ -80,8 +87,8 @@ async def create_claude_client(
             system=system,
             client=client,
             max_tokens=max_tokens,
-            thinking=thinking,
             betas=betas,
             input_tokens=input_tokens,
             tools=tools,
+            thinking=thinking,
         )
@@ -1,6 +1,5 @@
 from google import genai
 from google.genai import types
-from google.genai._api_client import HttpOptions
 from google.genai.types import Modality
 
 from llm_bridge.client.implementations.gemini.non_stream_gemini_client import NonStreamGeminiClient
@@ -11,25 +10,26 @@ from llm_bridge.type.message import Message
 
 
 async def create_gemini_client(
+        api_key: str,
+        vertexai: bool,
         messages: list[Message],
         model: str,
         temperature: float,
         stream: bool,
-        api_key: str,
-        vertexai: bool,
+        thought: bool,
+        code_execution: bool,
 ):
     client = genai.Client(
         vertexai=vertexai,
         api_key=api_key,
     )
 
-    system_instruction = None
+    system_instruction = extract_system_messages(messages) or " "
     tools = []
     thinking_config = None
     response_modalities = [Modality.TEXT]
 
-    system_instruction = extract_system_messages(messages) or " "
-    if "image" not in model and not vertexai:
+    if "image" not in model:
         tools.append(
             types.Tool(
                 google_search=types.GoogleSearch()
@@ -40,27 +40,18 @@ async def create_gemini_client(
                 url_context=types.UrlContext()
             )
         )
-        tools.append(
-            types.Tool(
-                code_execution=types.ToolCodeExecution()
-            )
-        )
-    if "image" not in model and vertexai:
-        tools.append(
-            types.Tool(
-                google_search=types.GoogleSearch()
+        if thought:
+            thinking_config = types.ThinkingConfig(
+                include_thoughts=True,
+                thinking_budget=-1,
             )
-        )
-        tools.append(
-            types.Tool(
-                url_context=types.UrlContext()
-            )
-        )
-    if "image" not in model:
-        thinking_config = types.ThinkingConfig(
-            include_thoughts=True,
-            thinking_budget=-1,
-        )
+        if not vertexai:
+            if code_execution:
+                tools.append(
+                    types.Tool(
+                        code_execution=types.ToolCodeExecution()
+                    )
+                )
     if "image" in model:
         response_modalities = [Modality.TEXT, Modality.IMAGE]
 
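`thinking_budget=-1` asks Gemini for a dynamic thinking budget, and code execution is now attached only off Vertex, replacing the removed Vertex-specific branch. The hunk stops before the config assembly; in google-genai these values are typically passed through a `types.GenerateContentConfig`, roughly:

```python
# Sketch of the downstream assembly (not shown in this diff); the field names
# are the real google-genai ones, the variable wiring is assumed from the hunk.
config = types.GenerateContentConfig(
    system_instruction=system_instruction,
    temperature=temperature,
    tools=tools,
    thinking_config=thinking_config,          # None unless thought=True
    response_modalities=response_modalities,  # TEXT, plus IMAGE for image models
)
```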
@@ -17,12 +17,14 @@ from llm_bridge.type.message import Message
 
 
 async def create_openai_client(
+        api_keys: dict,
         messages: list[Message],
         model: str,
         api_type: str,
         temperature: float,
         stream: bool,
-        api_keys: dict
+        thought: bool,
+        code_execution: bool,
 ):
     if api_type == "OpenAI":
         client = openai.AsyncOpenAI(
@@ -61,12 +63,13 @@ async def create_openai_client(
     reasoning = None
 
     if model not in ["gpt-5-chat-latest", "gpt-5-pro"]:
-        tools.append(
-            CodeInterpreter(
-                type="code_interpreter",
-                container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+        if code_execution:
+            tools.append(
+                CodeInterpreter(
+                    type="code_interpreter",
+                    container=CodeInterpreterContainerCodeInterpreterToolAuto(type="auto")
+                )
             )
-        )
     if model not in ["gpt-5-chat-latest"]:
         tools.append(
             WebSearchToolParam(
@@ -77,10 +80,11 @@ async def create_openai_client(
     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
         temperature = 1
     if re.match(r"gpt-5.*", model) and model != "gpt-5-chat-latest":
-        reasoning = Reasoning(
-            effort="high",
-            summary="auto",
-        )
+        if thought:
+            reasoning = Reasoning(
+                effort="high",
+                summary="auto",
+            )
     tools.append(
         ImageGeneration(
             type="image_generation",
@@ -0,0 +1,88 @@
+Metadata-Version: 2.4
+Name: LLM-Bridge
+Version: 1.12.0
+Summary: A Bridge for LLMs
+Author-email: windsnow1025 <windsnow1025@gmail.com>
+License-Expression: MIT
+Keywords: llm,ai
+Classifier: Framework :: FastAPI
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: fastapi
+Requires-Dist: httpx
+Requires-Dist: tenacity
+Requires-Dist: openai==1.106.1
+Requires-Dist: tiktoken==0.11.0
+Requires-Dist: google-genai==1.46.0
+Requires-Dist: anthropic==0.66.0
+Requires-Dist: PyMuPDF
+Requires-Dist: docxlatex>=1.1.1
+Requires-Dist: openpyxl
+Requires-Dist: python-pptx
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-asyncio; extra == "test"
+Requires-Dist: python-dotenv; extra == "test"
+Requires-Dist: protobuf; extra == "test"
+Dynamic: license-file
+
+# LLM Bridge
+
+LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI (Native / Azure / GitHub), Gemini (AI Studio / Vertex), Claude, and Grok.
+
+GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
+
+PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
+
+## Workflow and Features
+
+1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, code files, PDFs) that are not natively supported by the target model.
+2. **Chat Client Factory**: creates a client for the specific LLM API with model parameters
+   1. **Model Message Converter**: converts general messages to model messages
+   2. **Media Processor**: converts general media (Image, Audio, Video, PDF) to model-compatible formats.
+3. **Chat Client**: generates stream or non-stream responses
+   - **Model Thoughts**: captures and formats the model's thinking process
+   - **Code Execution**: automatically generates and executes Python code
+   - **Web Search + Citations**: extracts and formats citations from search results
+   - **Token Counter**: tracks and reports input and output token usage
+
+### Supported Features for API Types
+
+The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
+
+| API Type | Input Format                   | Capabilities                                     | Output Format     |
+|----------|--------------------------------|--------------------------------------------------|-------------------|
+| OpenAI   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+| Gemini   | Text, Image, Video, Audio, PDF | Thinking, Web Search + Citations, Code Execution | Text, Image, File |
+| Claude   | Text, Image, PDF               | Thinking, Web Search, Code Execution             | Text              |
+| Grok     | Text, Image                    |                                                  | Text              |
+
+#### Planned Features
+
+- Structured Output
+- More features for API Types
+- Native support for Grok
+
+## Installation
+
+```bash
+pip install --upgrade llm_bridge
+```
+
+## Test
+
+```bash
+pytest
+```
+
+## Quick Start
+
+### Setup
+
+1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
+2. Install requirements: `pip install -r requirements.txt`
+3. In PyCharm, add a new Python configuration:
+   - script: `./usage/main.py`
+   - Paths to ".env" files: `./usage/.env`
@@ -6,8 +6,8 @@ llm_bridge/client/implementations/printing_status.py,sha256=ok3ihBRIEan3qMbc62He
 llm_bridge/client/implementations/claude/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/claude/claude_response_handler.py,sha256=d5e1rlxfao_rjhgT1Rky-xlCRJIK2M-e9LKNUATOczc,4143
 llm_bridge/client/implementations/claude/claude_token_counter.py,sha256=m_aoLJkFPJqSBA3Thzv5vg3GnaucZh41SAgT28sLeBA,1324
-llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=1khCk0vJkCQ09Q8wuCqX1ZUV54qcwtTGa21ij8ziyak,2990
-llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=gOvdoSa_pNAbZ882pG4NAOOwNtjth-X4M3Gt34orXww,2005
+llm_bridge/client/implementations/claude/non_stream_claude_client.py,sha256=IunPFhqFoHWlYqoz__fWO0vLTJbbjMVL1FuHGRAQwkg,3469
+llm_bridge/client/implementations/claude/stream_claude_client.py,sha256=cu06_AcBVZJJE_SuSr26jBW8GEdaK4OQN9XA-6p4O0Q,2954
 llm_bridge/client/implementations/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/client/implementations/gemini/gemini_response_handler.py,sha256=_bejFAjo07s4jBpXBGF5djPs3nxZZjHaDkr4w1S8lTs,4321
 llm_bridge/client/implementations/gemini/gemini_token_counter.py,sha256=GdnwJWPhGZMB_xC0fz88zQRparIHzTemkQoqfDcxVEA,687
@@ -20,20 +20,20 @@ llm_bridge/client/implementations/openai/openai_token_couter.py,sha256=ESl3L049N
 llm_bridge/client/implementations/openai/steam_openai_responses_client.py,sha256=HdaIYeJg9o5TjyqMlGUjfsPF2MDoxWF8tOqsqIbNTw8,4100
 llm_bridge/client/implementations/openai/stream_openai_client.py,sha256=Izq4xH9EuLjUCBJsuSr6U4Kj6FN5c7w_oHf9wmQatXE,2988
 llm_bridge/client/model_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/client/model_client/claude_client.py,sha256=cuYORseQY8HVt-COh2J0C_mhqPehDB3A4G4vrunoSFA,1352
+llm_bridge/client/model_client/claude_client.py,sha256=Q-ciptb1cV3BNUOUj3Blwy-zu0loD2glFceRe5YDKKo,1352
 llm_bridge/client/model_client/gemini_client.py,sha256=4dcueIbpLFqkT98WxmeVmW9Vbq7Z5jbYbifAem-NL1E,906
 llm_bridge/client/model_client/openai_client.py,sha256=92nSIrlAhT0u6m8MvT31-VSqrtDUekkRwV3JpTd_WKE,1239
 llm_bridge/logic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/file_fetch.py,sha256=Q8PGNj76E25sKD70TmlnSIdPgAxpNlb89syk87DbAGg,1341
 llm_bridge/logic/model_prices.py,sha256=hiXVbki3004Rrm5LQrmVfdm0lLABeygxtFB-Qn9_mm0,1219
 llm_bridge/logic/chat_generate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=H0rcRHytSfYKz_mwRfJgJYyI-d3S3nxBssREYWPyOWw,3165
+llm_bridge/logic/chat_generate/chat_client_factory.py,sha256=i2Lefytc2Sc6Ownjq7obgCkjdCFe9oFuozf96J9wHzs,3800
 llm_bridge/logic/chat_generate/chat_message_converter.py,sha256=40VTBOPXg_ocrEZMdt1ObYlm-mhRL35zWzzxv8m2xRc,1538
 llm_bridge/logic/chat_generate/media_processor.py,sha256=ZR8G24EHwZZr2T9iFDRmScDGyJ_kvThApABzSzK0CL0,702
 llm_bridge/logic/chat_generate/model_client_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=AxXY-Lr1MAZpV1wCtKXfcnq9B72dnhCAEb9K4_zSOCk,2830
-llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=ms0v1TnVA_JJFKhOkbF_qHeRJEAZ3SH2QOYUi2w_FBI,3614
-llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=i15B02EeSi-KHFKsL9aSJLf4noXGL95EFHQUZNGOMog,4376
+llm_bridge/logic/chat_generate/model_client_factory/claude_client_factory.py,sha256=8qhKE8H6bEUiCDc8ZYaKuzeAe2ne_6oNGR0Y-FQHJTk,3011
+llm_bridge/logic/chat_generate/model_client_factory/gemini_client_factory.py,sha256=j4V3l8I3G4qmJzK1ZVpCKnUxsSEkPA5nL72MCywxHIg,3394
+llm_bridge/logic/chat_generate/model_client_factory/openai_client_factory.py,sha256=EoU5kyccnwOKjGdFi5yTozNVPrq402jRtWPjSmBJs7M,4517
 llm_bridge/logic/chat_generate/model_message_converter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llm_bridge/logic/chat_generate/model_message_converter/claude_message_converter.py,sha256=SfDhQXR7L5nCPHS4MIjwgzK_wER7qOUCc8gh-K77kKY,2441
 llm_bridge/logic/chat_generate/model_message_converter/gemini_message_converter.py,sha256=UjhzRX7sBa3-Zv1flMJd8bc8uRWMMav4UdJFhE6nVq4,1527
@@ -55,8 +55,8 @@ llm_bridge/type/model_message/claude_message.py,sha256=gYJUTbLUeifQMva3Axarc-VFe
 llm_bridge/type/model_message/gemini_message.py,sha256=mh8pf929g7_NkBzSOwnLXyrwSzTT4yt2FmyX7NZn0sM,4302
 llm_bridge/type/model_message/openai_message.py,sha256=xFaLY-cZoSwNd7E9BSWQjBNcRfCVH11X9s2yxXlctR0,453
 llm_bridge/type/model_message/openai_responses_message.py,sha256=be1q2euA0ybjj4NO6NxOGIRB9eJuXSb4ssUm_bM4Ocs,1529
-llm_bridge-1.11.10.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
-llm_bridge-1.11.10.dist-info/METADATA,sha256=42XpCM5vIkUx5dvWNIqNClIJuchj96BJnslswh8U9_M,7850
-llm_bridge-1.11.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llm_bridge-1.11.10.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
-llm_bridge-1.11.10.dist-info/RECORD,,
+llm_bridge-1.12.0.dist-info/licenses/LICENSE,sha256=m6uon-6P_CaiqcBfApMfjG9YRtDxcr40Z52JcqUCEAE,1069
+llm_bridge-1.12.0.dist-info/METADATA,sha256=H-1XWKJ35voqZsQOlo2YVZC-MDq8YI_cT1_QiawJnno,3388
+llm_bridge-1.12.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_bridge-1.12.0.dist-info/top_level.txt,sha256=PtxyrgNX1lSa1Ab_qswg0sekSXejG5zrS6b_v3Po05g,11
+llm_bridge-1.12.0.dist-info/RECORD,,
@@ -1,231 +0,0 @@
-Metadata-Version: 2.4
-Name: LLM-Bridge
-Version: 1.11.10
-Summary: A Bridge for LLMs
-Author-email: windsnow1025 <windsnow1025@gmail.com>
-License-Expression: MIT
-Keywords: llm,ai
-Classifier: Framework :: FastAPI
-Classifier: Programming Language :: Python :: 3
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: fastapi
-Requires-Dist: httpx
-Requires-Dist: tenacity
-Requires-Dist: openai==1.106.1
-Requires-Dist: tiktoken==0.11.0
-Requires-Dist: google-genai==1.46.0
-Requires-Dist: anthropic==0.66.0
-Requires-Dist: PyMuPDF
-Requires-Dist: docxlatex>=1.1.1
-Requires-Dist: openpyxl
-Requires-Dist: python-pptx
-Provides-Extra: test
-Requires-Dist: pytest; extra == "test"
-Requires-Dist: pytest-asyncio; extra == "test"
-Requires-Dist: python-dotenv; extra == "test"
-Requires-Dist: protobuf; extra == "test"
-Dynamic: license-file
-
-# LLM Bridge
-
-LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI, OpenAI-Azure, OpenAI-GitHub, Gemini, Claude, and Grok.
-
-GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)
-
-PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)
-
-## Workflow and Features
-
-1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, Code files, PDFs) which are not natively supported by the target model.
-2. **Chat Client Factory**: create a client for the specific LLM API with model parameters
-   1. **Model Message Converter**: convert general messages to model messages
-   1. **Media Processor**: converts media (Image, Audio, Video, PDF) which are natively supported by the target model into compatible formats.
-3. **Chat Client**: generate stream or non-stream responses
-   1. **Model Thoughts**: captures and formats the model's thinking process
-   2. **Search Citations**: extracts and formats citations from search results
-   3. **Token Counter**: tracks and reports input and output token usage
-
-### Model Features
-
-The features listed represent the maximum capabilities of each API type supported by LLM Bridge.
-
-| API Type | Input Format                   | Capabilities                                                | Output Format |
-|----------|--------------------------------|-------------------------------------------------------------|---------------|
-| OpenAI   | Text, Image                    | Thinking, Web Search, Code Execution                        | Text          |
-| Gemini   | Text, Image, Video, Audio, PDF | Thinking + Thought, Web Search + Citations, Code Execution  | Text, Image   |
-| Claude   | Text, Image, PDF               | Thinking, Web Search                                        | Text          |
-| Grok     | Text, Image                    |                                                             | Text          |
-
-#### Planned Features
-
-- OpenAI: Web Search: Citations, Image Output
-- Gemini: Code Execution: Code, Code Output
-- Claude: Code Execution, File Output
-
-## Installation
-
-```bash
-pip install --upgrade llm_bridge
-```
-
-## Test
-
-```bash
-pytest
-```
-
-## Quick Start
-
-### Setup
-
-1. Copy `./usage/.env.example` and rename it to `./usage/.env`, then fill in the environment variables.
-2. Install requirements: `pip install -r requirements.txt`
-3. In PyCharm, add a new Python configuration:
-   - script: `./usage/main.py`
-   - Paths to ".env" files: `./usage/.env`
-
-## Workflow
-
-```python
-from typing import AsyncGenerator
-
-from llm_bridge import *
-
-
-async def workflow(
-        api_keys: dict[str, str],
-        messages: list[Message],
-        model: str,
-        api_type: str,
-        temperature: float,
-        stream: bool
-) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
-    await preprocess_messages(messages, api_type)
-
-    chat_client = await create_chat_client(
-        messages=messages,
-        model=model,
-        api_type=api_type,
-        temperature=temperature,
-        stream=stream,
-        api_keys=api_keys,
-    )
-
-    if stream:
-        return chat_client.generate_stream_response()
-    else:
-        return await chat_client.generate_non_stream_response()
-```
-
-### Main
-
-```python
-import asyncio
-import logging
-import os
-from pprint import pprint
-
-from dotenv import load_dotenv
-
-from llm_bridge import *
-from usage.workflow import workflow
-
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
-load_dotenv(".env")
-
-api_keys = {
-    "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
-    "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
-    "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
-    "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
-    "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
-    "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
-    "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
-    "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
-}
-
-messages = [
-    Message(
-        role=Role.System,
-        contents=[
-            Content(type=ContentType.Text, data="You are a helpful assistant.")
-        ]
-    ),
-    Message(
-        role=Role.User,
-        contents=[
-            Content(type=ContentType.Text, data="Hello")
-        ]
-    ),
-    Message(
-        role=Role.Assistant,
-        contents=[
-            Content(type=ContentType.Text, data="Hello! How can I assist you today?")
-        ]
-    ),
-    Message(
-        role=Role.User,
-        contents=[
-            Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
-            # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
-            # Content(type=ContentType.Text, data="Please generate an image of a cat."),
-        ]
-    ),
-    # Message(
-    #     role=Role.User,
-    #     contents=[
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
-    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
-    #         Content(type=ContentType.Text, data="What's this?"),
-    #     ]
-    # ),
-]
-# See /llm_bridge/resources/model_prices.json for available models
-# model = "gpt-4.1"
-# model = "gemini-2.5-flash-preview-native-audio-dialog"
-# model = "gemini-2.5-pro-exp-03-25"
-model = "gemini-2.5-pro-preview-05-06"
-# model = "claude-sonnet-4-0"
-# api_type = "OpenAI"
-# api_type = "Gemini-Free"
-api_type = "Gemini-Paid"
-# api_type = "Claude"
-temperature = 0
-stream = True
-
-
-async def main():
-    model_prices = get_model_prices()
-    pprint(model_prices)
-
-    input_tokens = 0
-    output_tokens = 0
-    response = await workflow(api_keys, messages, model, api_type, temperature, stream)
-    text = ""
-    if stream:
-        async for chunk in response:
-            pprint(chunk)
-            if chunk.text:
-                text += chunk.text
-            if chunk.input_tokens:
-                input_tokens = chunk.input_tokens
-            if chunk.output_tokens:
-                output_tokens += chunk.output_tokens
-    else:
-        pprint(response)
-        text = response.text
-        input_tokens = response.input_tokens
-        output_tokens = response.output_tokens
-    total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
-    print(text)
-    print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
-```