multi-llm-consensus 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,4 @@
1
+ from .run_llm import RunLLM
2
+ from .consensus import Consensus
3
+
4
+ __all__ = ["RunLLM", "Consensus"]
@@ -0,0 +1,165 @@
1
+ from langchain.agents import create_agent
2
+ from langchain.agents.middleware import TodoListMiddleware, SummarizationMiddleware, ToolCallLimitMiddleware
3
+ from deepagents.middleware.filesystem import FilesystemMiddleware
4
+ from deepagents.backends import StateBackend
5
+ from langchain.chat_models import init_chat_model
6
+ from langchain.tools import tool
7
+ from .run_llm import RunLLM
8
+ from pathlib import Path
9
+ from typing import Type
10
+
11
+
12
+ class Consensus:
13
+ """
14
+ Consensus class that uses a configurable judge model to orchestrate
15
+ multiple RunLLM invocations until consensus is reached among LLMs.
16
+ """
17
+
18
+ def __init__(
19
+ self,
20
+ models: list[str],
21
+ judge_model: str = "anthropic:claude-opus-4-5-20251101",
22
+ summarization_model: str = "anthropic:claude-sonnet-4-5-20250929",
23
+ summarization_trigger_tokens: int = 200_000,
24
+ summarization_keep_messages: int = 5,
25
+ run_limit: int = 20,
26
+ response_schema: Type | None = None
27
+ ):
28
+ """
29
+ Initialize the Consensus class.
30
+
31
+ Args:
32
+ models: List of model strings in format "provider:model-name".
33
+ judge_model: Model string for the judge coordinator in format "provider:model-name".
34
+ Defaults to "anthropic:claude-opus-4-5-20251101".
35
+ summarization_model: Model string for summarization middleware in format "provider:model-name".
36
+ Defaults to "anthropic:claude-sonnet-4-5-20250929".
37
+ summarization_trigger_tokens: Token count to trigger summarization middleware.
38
+ summarization_keep_messages: Number of messages to keep after summarization.
39
+ run_limit: Maximum number of calls to run_llms tool per invocation.
40
+ response_schema: Optional schema for structured output (TypedDict or Pydantic model).
41
+ If None, returns full agent result without structured output.
42
+
43
+ Raises:
44
+ ValueError: If models list is empty or contains only one model.
45
+ """
46
+ if not models:
47
+ raise ValueError("models list cannot be empty")
48
+ if len(models) < 2:
49
+ raise ValueError("models list must contain at least 2 models for consensus")
50
+
51
+ # Store as instance variables for use in tool creation
52
+ self.models = models
53
+ self.system_message = "You are a helpful AI assistant."
54
+
55
+ # Load judge prompt
56
+ judge_prompt_path = Path(__file__).parent / "prompts" / "judge.prompt"
57
+ judge_prompt = judge_prompt_path.read_text()
58
+
59
+ # Create the run_llms tool
60
+ run_llms = self._create_run_llms_tool()
61
+
62
+ # Create judge LLM using init_chat_model
63
+ llm = init_chat_model(judge_model)
64
+
65
+ # Create middleware
66
+ middleware = [
67
+ TodoListMiddleware(),
68
+ FilesystemMiddleware(backend=lambda rt: StateBackend(rt)),
69
+ SummarizationMiddleware(
70
+ model=summarization_model,
71
+ trigger=("tokens", summarization_trigger_tokens),
72
+ keep=("messages", summarization_keep_messages)
73
+ ),
74
+ ToolCallLimitMiddleware(
75
+ tool_name="run_llms",
76
+ run_limit=run_limit,
77
+ exit_behavior="error"
78
+ )
79
+ ]
80
+
81
+ # Create agent with optional structured output
82
+ if response_schema is not None:
83
+ self._agent = create_agent(
84
+ model=llm,
85
+ tools=[run_llms],
86
+ system_prompt=judge_prompt,
87
+ middleware=middleware,
88
+ response_format=response_schema
89
+ )
90
+ else:
91
+ self._agent = create_agent(
92
+ model=llm,
93
+ tools=[run_llms],
94
+ system_prompt=judge_prompt,
95
+ middleware=middleware
96
+ )
97
+
98
+ self._response_schema = response_schema
99
+
100
+ def _create_run_llms_tool(self):
101
+ """
102
+ Creates the run_llms tool with access to instance variables.
103
+
104
+ Returns:
105
+ A LangChain tool that runs multiple LLMs in parallel.
106
+ """
107
+ @tool
108
+ def run_llms(query: str) -> str:
109
+ """
110
+ Runs multiple LLMs in parallel on the same query.
111
+
112
+ Args:
113
+ query: The prompt/question to send to all LLMs. Include full context and
114
+ any specific instructions (e.g., "use search_the_web for web search",
115
+ "use add/multiply/subtract/divide tools for calculations").
116
+
117
+ Returns:
118
+ Aggregated responses from all LLMs. Each response is prefixed with the
119
+ exact model identifier (e.g., "openai:gpt-5-mini:", "google_genai:gemini-3-flash-preview:").
120
+ Always refer to models by these exact identifiers in your analysis.
121
+ """
122
+ run_llm = RunLLM(models=self.models, system_message=self.system_message)
123
+ return run_llm.invoke(query)
124
+
125
+ return run_llms
126
+
127
+ def invoke(self, prompt: str):
128
+ """
129
+ Invoke the consensus process with a user query.
130
+
131
+ Args:
132
+ prompt: The user's initial query
133
+
134
+ Returns:
135
+ If response_schema was provided: structured response dict (or default values on error)
136
+ Otherwise: full agent result (or None on error)
137
+ """
138
+ try:
139
+ result = self._agent.invoke({
140
+ "messages": [{"role": "user", "content": prompt}]
141
+ })
142
+ if self._response_schema is not None:
143
+ return result["structured_response"]
144
+ else:
145
+ return result
146
+ except Exception as e:
147
+ # Tool call limit reached or other error
148
+ print(f"Error during consensus: {str(e)}")
149
+ if self._response_schema is not None:
150
+ # Return default values - create a dict with all schema keys set to defaults
151
+ # This tries to match common schema patterns
152
+ default_dict = {}
153
+ if hasattr(self._response_schema, '__annotations__'):
154
+ for key, type_hint in self._response_schema.__annotations__.items():
155
+ if key in ['consensus', 'consensus_reached']:
156
+ default_dict[key] = False
157
+ elif type_hint == bool:
158
+ default_dict[key] = False
159
+ elif type_hint == str:
160
+ default_dict[key] = f"Error occurred: {str(e)}"
161
+ else:
162
+ default_dict[key] = None
163
+ return default_dict if default_dict else None
164
+ else:
165
+ return None
@@ -0,0 +1,38 @@
1
+ You are a consensus coordinator for multiple AI language models. Your role is to facilitate agreement among different LLMs by iteratively querying them and refining your questions based on their responses.
2
+
3
+ ## Your Tools (Judge)
4
+
5
+ **run_llms**: Runs multiple LLMs in parallel. Takes a `query` parameter where you craft the prompt to send to all LLMs.
6
+
7
+ **TODO list**: Track agreements reached, disagreements to resolve, and clarifications needed across iterations.
8
+
9
+ **Filesystem** (`write_file`, `read_file`, `ls`): After the first run_llms call, save the exact LLM model identifiers to a file and add a TODO reminder to read this file before providing the final answer to ensure model names are accurate.
10
+
11
+ ## Your Process
12
+
13
+ 1. **Initial Query**: Call `run_llms` with the user's question
14
+ 2. **Analyze Responses**: Identify agreements, disagreements, and areas needing clarification
15
+ 3. **Iterative Refinement**: Call `run_llms` again with updated queries that summarize the objective/context and clarify specific points where models diverged
16
+ 4. **Continue Until Consensus**: Iterate until LLMs substantially agree or you can draw a reasonable conclusion
17
+
18
+ ## Context Management - CRITICAL
19
+
20
+ **LLMs ARE STATELESS.** They have NO memory of previous iterations. In every query after the first, you MUST include:
21
+
22
+ 1. **Original Question**: Restate the user's question or provide a summary
23
+ 2. **Previous Iterations**: Summarize what each model said in prior rounds. **IMPORTANT: Always refer to models by their exact identifiers as shown in the run_llms output** (e.g., "openai:gpt-5-mini said X", "google_genai:gemini-3-flash-preview said Y"). Never use shortened or friendly names.
24
+ 3. **Current Status**: Explain where you are now, what's agreed upon, and what needs resolution
25
+ 4. **Complete Context**: Treat each query as if the LLMs are starting fresh—provide everything they need to answer meaningfully
26
+
27
+ ## LLM Tools (Instruct LLMs to Use)
28
+
29
+ When crafting queries for `run_llms`, instruct the LLMs to use their available tools:
30
+
31
+ - **search_the_web**: For current events, recent information, factual verification, or when the user requests web search. If it was used in iteration 1, tell the LLMs in subsequent iterations to use it only if needed
32
+ - **Calculation tools** (`add`, `subtract`, `multiply`, `divide`): If the user query implies calculations, tell LLMs to use these tools
33
+
34
+ ## Critical Guidelines
35
+
36
+ - **Use exact model identifiers**: Always refer to models by their full identifiers as they appear in run_llms
37
+ - Base consensus ONLY on what the LLMs tell you, not your own knowledge
38
+ - Be completely unbiased—evaluate all responses objectively and equally
llm_ensemble/py.typed ADDED
File without changes
@@ -0,0 +1,151 @@
1
+ from dotenv import load_dotenv
2
+ from langgraph.graph import StateGraph, START, END
3
+ from langgraph.prebuilt import create_react_agent
4
+ from langchain_core.messages import SystemMessage, HumanMessage
5
+ from .utils import add, subtract, multiply, divide, search_the_web
6
+ from .schemas import InputState, OutputState, RunLLMState
7
+
8
+ # Load environment variables at module level
9
+ load_dotenv()
10
+
11
+
12
+ class RunLLM:
13
+ """
14
+ RunLLM class that runs multiple LLM agents in parallel on the same query,
15
+ then aggregates their responses.
16
+ """
17
+
18
+ def __init__(self, models: list[str], system_message: str):
19
+ """
20
+ Initialize the RunLLM class.
21
+
22
+ Args:
23
+ models: List of model strings in format "provider:model-name"
24
+ system_message: System message to include in every agent invocation
25
+ """
26
+ self._models = models
27
+ self._system_message = system_message
28
+
29
+ # Create tool list
30
+ tools = [
31
+ search_the_web,
32
+ add,
33
+ subtract,
34
+ multiply,
35
+ divide
36
+ ]
37
+
38
+ # Initialize react agents for each model
39
+ self._agents = {}
40
+ for model_string in models:
41
+ # Pass model string directly to create_react_agent
42
+ # LangChain's init_chat_model handles provider parsing automatically
43
+ agent = create_react_agent(
44
+ model=model_string,
45
+ tools=tools
46
+ )
47
+ self._agents[model_string] = agent
48
+
49
+ # Build and compile the StateGraph
50
+ self._graph = self._build_graph()
51
+
52
+ def _build_graph(self):
53
+ """
54
+ Build and compile the StateGraph.
55
+
56
+ Returns:
57
+ Compiled graph ready for invocation
58
+ """
59
+ # Initialize StateGraph with input/output schemas
60
+ graph = StateGraph(
61
+ state_schema=RunLLMState,
62
+ input=InputState,
63
+ output=OutputState
64
+ )
65
+
66
+ # Factory function to create model nodes with proper closure
67
+ def make_model_node(model_name: str, agent, system_message: str):
68
+ """Factory function to create a node function with proper closure."""
69
+ def node_function(state: RunLLMState) -> dict:
70
+ prompt = state["prompt"]
71
+ # Invoke react agent with system message and user prompt
72
+ result = agent.invoke({
73
+ "messages": [
74
+ SystemMessage(content=system_message),
75
+ HumanMessage(content=prompt)
76
+ ]
77
+ })
78
+ # Extract final AI message content
79
+ content = result["messages"][-1].content
80
+
81
+ # Handle different content formats from different providers
82
+ # Gemini returns: [{'type': 'text', 'text': '...', 'extras': {...}}]
83
+ # OpenAI/Anthropic return: plain string
84
+ if isinstance(content, list) and len(content) > 0:
85
+ # Extract text from Gemini's structured format
86
+ output_text = content[0].get('text', str(content))
87
+ else:
88
+ # Use content directly for OpenAI/Anthropic
89
+ output_text = content
90
+
91
+ # Return update to model_outputs dict
92
+ return {"model_outputs": {model_name: output_text}}
93
+ return node_function
94
+
95
+ # Add a node for each model
96
+ # Use sanitized node names (replace : with _) since LangGraph doesn't allow colons
97
+ for model_name in self._models:
98
+ node_fn = make_model_node(
99
+ model_name,
100
+ self._agents[model_name],
101
+ self._system_message
102
+ )
103
+ # Sanitize node name by replacing : with _
104
+ sanitized_node_name = model_name.replace(":", "_")
105
+ graph.add_node(sanitized_node_name, node_fn)
106
+
107
+ # Add process node
108
+ def process_node(state: RunLLMState) -> dict:
109
+ """Aggregate all model outputs in order."""
110
+ outputs = []
111
+ for model_name in self._models:
112
+ output = state["model_outputs"].get(model_name, "")
113
+ outputs.append(f"{model_name}:\n{output}")
114
+
115
+ aggregated = "\n\n".join(outputs)
116
+ return {"result": aggregated}
117
+
118
+ graph.add_node("process", process_node)
119
+
120
+ # Add edges
121
+ # From START to each model node
122
+ for model_name in self._models:
123
+ sanitized_node_name = model_name.replace(":", "_")
124
+ graph.add_edge(START, sanitized_node_name)
125
+
126
+ # From each model node to process
127
+ for model_name in self._models:
128
+ sanitized_node_name = model_name.replace(":", "_")
129
+ graph.add_edge(sanitized_node_name, "process")
130
+
131
+ # From process to END
132
+ graph.add_edge("process", END)
133
+
134
+ # Compile and return
135
+ return graph.compile()
136
+
137
+ def invoke(self, prompt: str) -> str:
138
+ """
139
+ Invoke the graph with a prompt.
140
+
141
+ Args:
142
+ prompt: The user's query
143
+
144
+ Returns:
145
+ Aggregated responses from all models
146
+ """
147
+ # Thanks to input/output schema configuration,
148
+ # we can pass just the prompt string
149
+ result = self._graph.invoke({"prompt": prompt})
150
+ # Result dict contains only the output state fields
151
+ return result["result"]
@@ -0,0 +1,3 @@
1
+ from .schemas import InputState, OutputState, RunLLMState
2
+
3
+ __all__ = ["InputState", "OutputState", "RunLLMState"]
@@ -0,0 +1,20 @@
1
+ from typing import TypedDict, Annotated
2
+ from operator import or_
3
+
4
+
5
+ class InputState(TypedDict):
6
+ """Input schema: what users provide when invoking."""
7
+ prompt: str
8
+
9
+
10
+ class OutputState(TypedDict):
11
+ """Output schema: what the graph returns."""
12
+ result: str
13
+
14
+
15
+ class RunLLMState(TypedDict):
16
+ """Overall state: internal state during execution."""
17
+ prompt: str
18
+ # Use or_ reducer to merge dict updates from parallel nodes
19
+ model_outputs: Annotated[dict[str, str], or_]
20
+ result: str
@@ -0,0 +1,4 @@
1
+ from .utils import add, subtract, multiply, divide
2
+ from .tavily_tool import search_the_web
3
+
4
+ __all__ = ["add", "subtract", "multiply", "divide", "search_the_web"]
@@ -0,0 +1,30 @@
1
+ import os
2
+ from langchain_core.tools import tool
3
+ from tavily import TavilyClient
4
+
5
+
6
+ @tool
7
+ def search_the_web(query: str):
8
+ """
9
+ Search the internet for current events, news, and real-time information.
10
+ Use this for any questions about the world that require up-to-date data.
11
+ """
12
+ # Initialize Tavily Client (lazy initialization to ensure env vars are loaded)
13
+ tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
14
+
15
+ # Execute the search using the raw SDK
16
+ response = tavily.search(
17
+ query=query,
18
+ max_results=10,
19
+ search_depth="basic"
20
+ )
21
+
22
+ # Format the results into a clean string for the LLM
23
+ # This prevents the LLM from getting confused by raw JSON metadata
24
+ formatted_results = []
25
+ for res in response['results']:
26
+ formatted_results.append(
27
+ f"Title: {res['title']}\nURL: {res['url']}\nContent: {res['content']}\n"
28
+ )
29
+
30
+ return "\n---\n".join(formatted_results)
@@ -0,0 +1,27 @@
1
+ from langchain_core.tools import tool
2
+
3
+
4
+ @tool
5
+ def add(a: float, b: float) -> float:
6
+ """Add two numbers together."""
7
+ return a + b
8
+
9
+
10
+ @tool
11
+ def subtract(a: float, b: float) -> float:
12
+ """Subtract b from a."""
13
+ return a - b
14
+
15
+
16
+ @tool
17
+ def multiply(a: float, b: float) -> float:
18
+ """Multiply two numbers together."""
19
+ return a * b
20
+
21
+
22
+ @tool
23
+ def divide(a: float, b: float) -> float:
24
+ """Divide a by b."""
25
+ if b == 0:
26
+ raise ValueError("Cannot divide by zero")
27
+ return a / b
@@ -0,0 +1,171 @@
1
+ Metadata-Version: 2.4
2
+ Name: multi-llm-consensus
3
+ Version: 0.1.3
4
+ Summary: Achieve consensus across multiple LLMs through an AI judge coordinator
5
+ Project-URL: Homepage, https://github.com/zzzrbx/llm-ensemble
6
+ Project-URL: Repository, https://github.com/zzzrbx/llm-ensemble
7
+ Project-URL: Issues, https://github.com/zzzrbx/llm-ensemble/issues
8
+ Author-email: Roberto Pagliari <roberto.pagliari@gmail.com>
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: agent,ai,consensus,langchain,langgraph,llm,multi-model
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Requires-Python: >=3.12
19
+ Requires-Dist: deepagents>=0.1.0
20
+ Requires-Dist: langchain-anthropic>=1.3.1
21
+ Requires-Dist: langchain-community>=0.4.1
22
+ Requires-Dist: langchain-google-genai>=4.1.3
23
+ Requires-Dist: langchain-openai>=1.1.7
24
+ Requires-Dist: langchain-xai>=1.2.1
25
+ Requires-Dist: langchain>=1.2.3
26
+ Requires-Dist: langgraph>=1.0.5
27
+ Requires-Dist: pydantic>=2.12.5
28
+ Requires-Dist: python-dotenv>=1.2.1
29
+ Requires-Dist: rich>=14.2.0
30
+ Requires-Dist: tavily-python>=0.7.17
31
+ Description-Content-Type: text/markdown
32
+
33
+ # LLM Ensemble
34
+
35
+ ![LLM Ensemble Banner](.github/banner.png)
36
+
37
+ A Python library for achieving consensus across multiple LLM agents.
38
+
39
+ ## Features
40
+
41
+ - **Consensus**: Uses a judge model to iteratively coordinate multiple LLMs until consensus is reached
42
+ - Works with any API-based model supported by LangChain/LangGraph, such as OpenAI, Anthropic, Gemini, or Grok
43
+ - Supports web search for real-time data
44
+
45
+ ## Installation
46
+
47
+ ```bash
48
+ uv add multi-llm-consensus
49
+ ```
50
+
51
+ Or install from source:
52
+
53
+ ```bash
54
+ git clone https://github.com/zzzrbx/llm-ensemble.git
55
+ cd llm-ensemble
56
+ uv sync
57
+ ```
58
+
59
+ ## Environment Setup
60
+
61
+ Create a `.env` file with your API keys, for example:
62
+
63
+ ```bash
64
+ OPENAI_API_KEY=your_openai_key
65
+ ANTHROPIC_API_KEY=your_anthropic_key
66
+ GOOGLE_API_KEY=your_google_key
67
+ XAI_API_KEY=your_xai_key
68
+ TAVILY_API_KEY=your_tavily_key # For web search
69
+ ```
70
+
71
+ You must provide API keys for at least two of the ensemble models, plus a key for the judge model (Anthropic by default).
72
+
73
+ ## How It Works
74
+
75
+ ```
76
+ User Query → Judge (configurable, default: Claude Opus 4.5)
77
+
78
+ Judge calls run_llms tool
79
+ ├── Model A (parallel)
80
+ ├── Model B (parallel)
81
+ ├── Model C (parallel)
82
+ └── Model D (parallel)
83
+
84
+ Judge analyzes responses
85
+ ├── Consensus? → Return answer
86
+ └── No consensus? → Refine query and call run_llms again
87
+
88
+ Repeat until consensus or limit reached
89
+ ```
90
+
91
+ **Key Features:**
92
+ - **Dynamic queries** - Judge crafts different prompts each iteration:
93
+ - Iteration 1: Sends initial question with research instructions
94
+ - Iteration 2+: Summarizes agreements, highlights disagreements, requests refinements
95
+ - Final iteration: Presents refined consensus statement for confirmation
96
+ - **Error handling** - Returns default values when a run fails, e.g. on timeout or when the tool-call limit is reached (see the sketch below)
97
+
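+ A minimal sketch of that fallback behavior (illustrative only; field names follow the `UserSchema` from Example 1 below, and the exact error text depends on the exception raised):
+ 
+ ```python
+ # What Consensus.invoke() returns if the run fails and a response_schema was set:
+ # bool fields default to False, str fields carry the error message, other fields become None.
+ fallback = {
+     "consensus": False,
+     "final_answer": "Error occurred: <exception message>",
+     "notes": "Error occurred: <exception message>",
+ }
+ ```
+ 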
98
+ **Tools currently available for LLMs:**
99
+ - `search_the_web` - Tavily web search for current events and factual data
100
+ - `add`, `subtract`, `multiply`, `divide` - Math operations
101
+
102
+
103
+ ## Examples
104
+
105
+ ### Example 1: With structured output
106
+
107
+ ```python
108
+ from typing import TypedDict
109
+ from llm_ensemble import Consensus
110
+
111
+ class UserSchema(TypedDict):
112
+ consensus: bool
113
+ final_answer: str
114
+ notes: str
115
+
116
+ consensus = Consensus(
117
+ models=[
118
+ "openai:gpt-5-mini",
119
+ "google_genai:gemini-3-flash-preview",
120
+ "anthropic:claude-3-5-haiku-20241022",
121
+ "xai:grok-3-mini",
122
+ ],
123
+ response_schema=UserSchema
124
+ )
125
+
126
+ result = consensus.invoke(
127
+ "If survival is arbitrary, is moral judgment arbitrary too?"
128
+ )
129
+
130
+ print(f"Consensus: {result['consensus']}")
131
+ print(f"Answer: {result['final_answer']}")
132
+ print(f"Notes: {result['notes']}")
133
+ ```
134
+
135
+ ### Example 2: No structured output, with web search enabled (just mention it in the prompt)
136
+
137
+ ```python
138
+ from llm_ensemble import Consensus
139
+
140
+ # No response_schema - returns full agent result
141
+ consensus = Consensus(
142
+ models=[
143
+ "openai:gpt-5-mini",
144
+ "google_genai:gemini-3-flash-preview",
145
+ "anthropic:claude-3-5-haiku-20241022",
146
+ "xai:grok-3-mini",
147
+ ]
148
+ )
149
+
150
+ result = consensus.invoke(
151
+ "What are the latest developments in quantum computing?\n\n"
152
+ "Use the web search to research current news and breakthroughs."
153
+ )
154
+
155
+ # Access full agent result
156
+ print(result['messages'][-1].content)
157
+ ```
158
+
159
+ ## Debugging and Observability
160
+
161
+ The library integrates with LangSmith for trace observability. Set `LANGSMITH_API_KEY` and `LANGSMITH_PROJECT` in your `.env` file to enable tracing.
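+ 
+ For example (placeholder values; `LANGSMITH_TRACING` is usually also needed to switch tracing on):
+ 
+ ```bash
+ LANGSMITH_TRACING=true
+ LANGSMITH_API_KEY=your_langsmith_key
+ LANGSMITH_PROJECT=llm-ensemble
+ ```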
162
+
163
+ ## License
164
+
165
+ MIT License
166
+
167
+ ## Contributing
168
+
169
+ Contributions are welcome! Please open an issue or submit a pull request.
170
+
171
+
@@ -0,0 +1,14 @@
1
+ llm_ensemble/__init__.py,sha256=PW0ymnE-IE7oDqLm2XRP7kzPj2CfiWaFFT40HXB_bDs,96
2
+ llm_ensemble/consensus.py,sha256=EEwa4nOeLHiVwD9-VmvKmiL6YcEp5eYRqkEVtyEdapg,6688
3
+ llm_ensemble/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ llm_ensemble/run_llm.py,sha256=fDPIBelDlV8Jcvs-mAHMwMAotp3y60U5zASDlEDba00,5375
5
+ llm_ensemble/prompts/judge.prompt,sha256=xIMhW6pzJC1c3EE-QD4iW9CK7PI-Gv6-NOamxHpQ2DM,2646
6
+ llm_ensemble/schemas/__init__.py,sha256=4m6x_Ao8ZHShyu7U1U237zkDHmXWgqSdHp0-TA1HjoU,114
7
+ llm_ensemble/schemas/schemas.py,sha256=lSYjKAsC_7m3K5AVRgUquYtg2DW3LXBbwAp6OT9ObnU,503
8
+ llm_ensemble/utils/__init__.py,sha256=6NgwRGS0pN9V0kEwU8lLKkkyWgeKurh2F3OIUA6cbkY,162
9
+ llm_ensemble/utils/tavily_tool.py,sha256=Q2S-VwxTJgAqtXUk8_uMo3Fa5MWaZK_MbDri3_OderI,957
10
+ llm_ensemble/utils/utils.py,sha256=57VHE7LnlpHPGcaGsrFDZU978AtEA70VuKVBxVRvHOI,499
11
+ multi_llm_consensus-0.1.3.dist-info/METADATA,sha256=H4feUKu_CohokI17rEbFKVogHHpOQ92FoOSJEzGiUgI,4839
12
+ multi_llm_consensus-0.1.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
13
+ multi_llm_consensus-0.1.3.dist-info/licenses/LICENSE,sha256=89cjtVjCzuOkNauPK_zp0h75f8q3bN-L12lX6MYkjyU,1073
14
+ multi_llm_consensus-0.1.3.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Roberto Pagliari
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.