lollms-client 1.3.1__tar.gz → 1.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

Files changed (77)
  1. {lollms_client-1.3.1/src/lollms_client.egg-info → lollms_client-1.3.2}/PKG-INFO +1 -1
  2. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/__init__.py +1 -1
  3. lollms_client-1.3.2/src/lollms_client/lollms_agentic.py +361 -0
  4. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_core.py +305 -308
  5. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tti_bindings/diffusers/__init__.py +19 -17
  6. {lollms_client-1.3.1 → lollms_client-1.3.2/src/lollms_client.egg-info}/PKG-INFO +1 -1
  7. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client.egg-info/SOURCES.txt +1 -0
  8. {lollms_client-1.3.1 → lollms_client-1.3.2}/LICENSE +0 -0
  9. {lollms_client-1.3.1 → lollms_client-1.3.2}/README.md +0 -0
  10. {lollms_client-1.3.1 → lollms_client-1.3.2}/pyproject.toml +0 -0
  11. {lollms_client-1.3.1 → lollms_client-1.3.2}/setup.cfg +0 -0
  12. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
  13. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/__init__.py +0 -0
  14. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  15. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
  16. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  17. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
  18. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
  19. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  20. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  21. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  22. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  23. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  24. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  25. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  26. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  27. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
  28. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  29. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  30. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  31. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  32. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  33. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_config.py +0 -0
  34. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_discussion.py +0 -0
  35. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_js_analyzer.py +0 -0
  36. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_llm_binding.py +0 -0
  37. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_mcp_binding.py +0 -0
  38. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_mcp_security.py +0 -0
  39. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_personality.py +0 -0
  40. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_python_analyzer.py +0 -0
  41. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_stt_binding.py +0 -0
  42. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_tti_binding.py +0 -0
  43. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_ttm_binding.py +0 -0
  44. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_tts_binding.py +0 -0
  45. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_ttv_binding.py +0 -0
  46. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_types.py +0 -0
  47. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/lollms_utilities.py +0 -0
  48. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  49. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  50. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  51. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  52. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  53. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  54. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  55. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/stt_bindings/__init__.py +0 -0
  56. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  57. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  58. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  59. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tti_bindings/__init__.py +0 -0
  60. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  61. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  62. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
  63. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttm_bindings/__init__.py +0 -0
  64. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  65. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  66. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  67. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tts_bindings/__init__.py +0 -0
  68. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
  69. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  70. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  71. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  72. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttv_bindings/__init__.py +0 -0
  73. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  74. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client.egg-info/dependency_links.txt +0 -0
  75. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client.egg-info/requires.txt +0 -0
  76. {lollms_client-1.3.1 → lollms_client-1.3.2}/src/lollms_client.egg-info/top_level.txt +0 -0
  77. {lollms_client-1.3.1 → lollms_client-1.3.2}/test/test_lollms_discussion.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.3.1
+ Version: 1.3.2
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

- __version__ = "1.3.1" # Updated version
+ __version__ = "1.3.2" # Updated version

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
@@ -0,0 +1,361 @@
+ import json
+ import re
+ import uuid
+ import base64
+ import time
+ import asyncio
+ from typing import Dict, List, Any, Optional, Union, Callable, Tuple
+ from dataclasses import dataclass, field
+ from enum import Enum
+ import threading
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import hashlib
+
+ class TaskStatus(Enum):
+     PENDING = "pending"
+     RUNNING = "running"
+     COMPLETED = "completed"
+     FAILED = "failed"
+     SKIPPED = "skipped"
+
+ class ConfidenceLevel(Enum):
+     LOW = "low"
+     MEDIUM = "medium"
+     HIGH = "high"
+     VERY_HIGH = "very_high"
+
+ @dataclass
+ class SubTask:
+     id: str
+     description: str
+     dependencies: List[str] = field(default_factory=list)
+     status: TaskStatus = TaskStatus.PENDING
+     result: Optional[Dict] = None
+     confidence: float = 0.0
+     tools_required: List[str] = field(default_factory=list)
+     estimated_complexity: int = 1 # 1-5 scale
+
+ @dataclass
+ class ExecutionPlan:
+     tasks: List[SubTask]
+     total_estimated_steps: int
+     execution_order: List[str]
+     fallback_strategies: Dict[str, List[str]] = field(default_factory=dict)
+
+ @dataclass
+ class MemoryEntry:
+     timestamp: float
+     context: str
+     action: str
+     result: Dict
+     confidence: float
+     success: bool
+     user_feedback: Optional[str] = None
+
+ @dataclass
+ class ToolPerformance:
+     success_rate: float = 0.0
+     avg_confidence: float = 0.0
+     total_calls: int = 0
+     avg_response_time: float = 0.0
+     last_used: float = 0.0
+     failure_patterns: List[str] = field(default_factory=list)
+
+ class TaskPlanner:
+     def __init__(self, llm_client):
+         self.llm_client = llm_client
+
+     def decompose_task(self, user_request: str, context: str = "") -> ExecutionPlan:
+         """Break down complex requests into manageable subtasks"""
+         decomposition_prompt = f"""
+         Analyze this user request and break it down into specific, actionable subtasks:
+
+         USER REQUEST: "{user_request}"
+         CONTEXT: {context}
+
+         Create a JSON plan with subtasks that are:
+         1. Specific and actionable
+         2. Have clear success criteria
+         3. Include estimated complexity (1-5 scale)
+         4. List required tool types
+
+         Output format:
+         {{
+             "tasks": [
+                 {{
+                     "id": "task_1",
+                     "description": "specific action to take",
+                     "dependencies": ["task_id"],
+                     "estimated_complexity": 2,
+                     "tools_required": ["tool_type"]
+                 }}
+             ],
+             "execution_strategy": "sequential|parallel|hybrid"
+         }}
+         """
+
+         try:
+             plan_data = self.llm_client.generate_structured_content(
+                 prompt=decomposition_prompt,
+                 schema={"tasks": "array", "execution_strategy": "string"},
+                 temperature=0.3
+             )
+
+             tasks = []
+             for task_data in plan_data.get("tasks", []):
+                 task = SubTask(
+                     id=task_data.get("id", str(uuid.uuid4())),
+                     description=task_data.get("description", ""),
+                     dependencies=task_data.get("dependencies", []),
+                     estimated_complexity=task_data.get("estimated_complexity", 1),
+                     tools_required=task_data.get("tools_required", [])
+                 )
+                 tasks.append(task)
+
+             execution_order = self._calculate_execution_order(tasks)
+             total_steps = sum(task.estimated_complexity for task in tasks)
+
+             return ExecutionPlan(
+                 tasks=tasks,
+                 total_estimated_steps=total_steps,
+                 execution_order=execution_order
+             )
+
+         except Exception as e:
+             # Fallback: create single task
+             single_task = SubTask(
+                 id="fallback_task",
+                 description=user_request,
+                 estimated_complexity=3
+             )
+             return ExecutionPlan(
+                 tasks=[single_task],
+                 total_estimated_steps=3,
+                 execution_order=["fallback_task"]
+             )
+
+     def _calculate_execution_order(self, tasks: List[SubTask]) -> List[str]:
+         """Calculate optimal execution order based on dependencies"""
+         task_map = {task.id: task for task in tasks}
+         executed = set()
+         order = []
+
+         def can_execute(task_id: str) -> bool:
+             task = task_map[task_id]
+             return all(dep in executed for dep in task.dependencies)
+
+         while len(order) < len(tasks):
+             ready_tasks = [tid for tid in task_map.keys()
+                            if tid not in executed and can_execute(tid)]
+
+             if not ready_tasks:
+                 # Handle circular dependencies - execute remaining tasks
+                 remaining = [tid for tid in task_map.keys() if tid not in executed]
+                 ready_tasks = remaining[:1] if remaining else []
+
+             # Sort by complexity (simpler tasks first)
+             ready_tasks.sort(key=lambda tid: task_map[tid].estimated_complexity)
+
+             for task_id in ready_tasks:
+                 order.append(task_id)
+                 executed.add(task_id)
+
+         return order
+
+ class MemoryManager:
+     def __init__(self, max_entries: int = 1000):
+         self.memory: List[MemoryEntry] = []
+         self.max_entries = max_entries
+         self.cache: Dict[str, Any] = {}
+         self.cache_ttl: Dict[str, float] = {}
+
+     def add_memory(self, context: str, action: str, result: Dict,
+                    confidence: float, success: bool, user_feedback: str = None):
+         """Add a new memory entry"""
+         entry = MemoryEntry(
+             timestamp=time.time(),
+             context=context,
+             action=action,
+             result=result,
+             confidence=confidence,
+             success=success,
+             user_feedback=user_feedback
+         )
+
+         self.memory.append(entry)
+
+         # Prune old memories
+         if len(self.memory) > self.max_entries:
+             self.memory = self.memory[-self.max_entries:]
+
+     def get_relevant_patterns(self, current_context: str, limit: int = 5) -> List[MemoryEntry]:
+         """Retrieve relevant past experiences"""
+         # Simple similarity scoring based on context overlap
+         scored_memories = []
+         current_words = set(current_context.lower().split())
+
+         for memory in self.memory:
+             memory_words = set(memory.context.lower().split())
+             overlap = len(current_words & memory_words)
+             if overlap > 0:
+                 score = overlap / max(len(current_words), len(memory_words))
+                 scored_memories.append((score, memory))
+
+         scored_memories.sort(key=lambda x: x[0], reverse=True)
+         return [memory for _, memory in scored_memories[:limit]]
+
+     def compress_scratchpad(self, scratchpad: str, current_goal: str,
+                             max_length: int = 8000) -> str:
+         """Intelligently compress scratchpad while preserving key insights"""
+         if len(scratchpad) <= max_length:
+             return scratchpad
+
+         # Extract key sections
+         sections = re.split(r'\n### ', scratchpad)
+
+         # Prioritize recent steps and successful outcomes
+         important_sections = []
+         for section in sections[-10:]: # Keep last 10 sections
+             if any(keyword in section.lower() for keyword in
+                    ['success', 'completed', 'found', 'generated', current_goal.lower()]):
+                 important_sections.append(section)
+
+         # If still too long, summarize older sections
+         if len('\n### '.join(important_sections)) > max_length:
+             summary = f"### Previous Steps Summary\n- Completed {len(sections)-len(important_sections)} earlier steps\n- Working toward: {current_goal}\n"
+             return summary + '\n### '.join(important_sections[-5:])
+
+         return '\n### '.join(important_sections)
+
+     def cache_result(self, key: str, value: Any, ttl: int = 300):
+         """Cache expensive operation results"""
+         self.cache[key] = value
+         self.cache_ttl[key] = time.time() + ttl
+
+     def get_cached_result(self, key: str) -> Optional[Any]:
+         """Retrieve cached result if still valid"""
+         if key in self.cache:
+             if time.time() < self.cache_ttl.get(key, 0):
+                 return self.cache[key]
+             else:
+                 # Expired - remove
+                 del self.cache[key]
+                 if key in self.cache_ttl:
+                     del self.cache_ttl[key]
+         return None
+
+ class ToolPerformanceTracker:
+     def __init__(self):
+         self.tool_stats: Dict[str, ToolPerformance] = {}
+         self.lock = threading.Lock()
+
+     def record_tool_usage(self, tool_name: str, success: bool,
+                           confidence: float, response_time: float,
+                           error_msg: str = None):
+         """Record tool usage statistics"""
+         with self.lock:
+             if tool_name not in self.tool_stats:
+                 self.tool_stats[tool_name] = ToolPerformance()
+
+             stats = self.tool_stats[tool_name]
+             stats.total_calls += 1
+             stats.last_used = time.time()
+
+             # Update success rate
+             old_successes = stats.success_rate * (stats.total_calls - 1)
+             new_successes = old_successes + (1 if success else 0)
+             stats.success_rate = new_successes / stats.total_calls
+
+             # Update average confidence
+             old_conf_total = stats.avg_confidence * (stats.total_calls - 1)
+             stats.avg_confidence = (old_conf_total + confidence) / stats.total_calls
+
+             # Update response time
+             old_time_total = stats.avg_response_time * (stats.total_calls - 1)
+             stats.avg_response_time = (old_time_total + response_time) / stats.total_calls
+
+             # Record failure patterns
+             if not success and error_msg:
+                 stats.failure_patterns.append(error_msg[:100])
+                 # Keep only last 10 failure patterns
+                 stats.failure_patterns = stats.failure_patterns[-10:]
+
+     def get_tool_reliability_score(self, tool_name: str) -> float:
+         """Calculate overall tool reliability score (0-1)"""
+         if tool_name not in self.tool_stats:
+             return 0.5 # Neutral for unknown tools
+
+         stats = self.tool_stats[tool_name]
+
+         # Weighted combination of success rate and confidence
+         reliability = (stats.success_rate * 0.7) + (stats.avg_confidence * 0.3)
+
+         # Penalty for tools not used recently (older than 1 hour)
+         if time.time() - stats.last_used > 3600:
+             reliability *= 0.8
+
+         return reliability
+
+     def rank_tools_for_task(self, available_tools: List[str],
+                             task_description: str) -> List[Tuple[str, float]]:
+         """Rank tools by suitability for a specific task"""
+         tool_scores = []
+
+         for tool_name in available_tools:
+             base_score = self.get_tool_reliability_score(tool_name)
+
+             # Simple keyword matching bonus
+             task_lower = task_description.lower()
+             if any(keyword in tool_name.lower() for keyword in
+                    ['search', 'research'] if 'find' in task_lower or 'search' in task_lower):
+                 base_score *= 1.2
+             elif 'generate' in tool_name.lower() and 'create' in task_lower:
+                 base_score *= 1.2
+
+             tool_scores.append((tool_name, min(base_score, 1.0)))
+
+         tool_scores.sort(key=lambda x: x[1], reverse=True)
+         return tool_scores
+
+ class UncertaintyManager:
+     @staticmethod
+     def calculate_confidence(reasoning_step: str, tool_results: List[Dict],
+                              memory_patterns: List[MemoryEntry]) -> Tuple[float, ConfidenceLevel]:
+         """Calculate confidence in current reasoning step"""
+         base_confidence = 0.5
+
+         # Boost confidence if similar patterns succeeded before
+         if memory_patterns:
+             successful_patterns = [m for m in memory_patterns if m.success]
+             if successful_patterns:
+                 avg_success_confidence = sum(m.confidence for m in successful_patterns) / len(successful_patterns)
+                 base_confidence = (base_confidence + avg_success_confidence) / 2
+
+         # Adjust based on tool result consistency
+         if tool_results:
+             success_results = [r for r in tool_results if r.get('status') == 'success']
+             if success_results:
+                 base_confidence += 0.2
+
+             # Check for consistent information across tools
+             if len(tool_results) > 1:
+                 base_confidence += 0.1
+
+         # Reasoning quality indicators
+         if len(reasoning_step) > 50 and any(word in reasoning_step.lower()
+                                             for word in ['because', 'therefore', 'analysis', 'evidence']):
+             base_confidence += 0.1
+
+         confidence = max(0.0, min(1.0, base_confidence))
+
+         # Map to confidence levels
+         if confidence >= 0.8:
+             level = ConfidenceLevel.VERY_HIGH
+         elif confidence >= 0.6:
+             level = ConfidenceLevel.HIGH
+         elif confidence >= 0.4:
+             level = ConfidenceLevel.MEDIUM
+         else:
+             level = ConfidenceLevel.LOW
+
+         return confidence, level
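
For orientation, here is a minimal sketch of how the pieces added in lollms_agentic.py might be wired together, based only on the signatures visible in the hunk above: TaskPlanner decomposes a request into SubTasks, ToolPerformanceTracker ranks candidate tools per subtask, UncertaintyManager scores each step, and MemoryManager records the outcome. StubClient, the tool names, and the placeholder tool result are hypothetical stand-ins; how the reworked lollms_core.py actually consumes this module is not shown in this diff.

from lollms_client.lollms_agentic import (
    TaskPlanner, MemoryManager, ToolPerformanceTracker, UncertaintyManager,
)

class StubClient:
    # Hypothetical stand-in for a client exposing generate_structured_content.
    def generate_structured_content(self, prompt, schema, temperature=0.3):
        return {"tasks": [{"id": "task_1",
                           "description": "search for recent LoLLMs releases",
                           "dependencies": [],
                           "estimated_complexity": 2,
                           "tools_required": ["search"]}],
                "execution_strategy": "sequential"}

planner = TaskPlanner(StubClient())
memory = MemoryManager()
tracker = ToolPerformanceTracker()

plan = planner.decompose_task("Summarize recent LoLLMs releases")
for task_id in plan.execution_order:
    task = next(t for t in plan.tasks if t.id == task_id)
    # Rank two hypothetical tools for this subtask by recorded reliability.
    ranked = tracker.rank_tools_for_task(["internet_search", "file_writer"], task.description)
    tool_name = ranked[0][0]
    result = {"status": "success", "output": "placeholder tool output"}  # stand-in for a real tool call
    tracker.record_tool_usage(tool_name, success=True, confidence=0.7, response_time=0.5)
    patterns = memory.get_relevant_patterns(task.description)
    confidence, level = UncertaintyManager.calculate_confidence(task.description, [result], patterns)
    memory.add_memory(task.description, tool_name, result, confidence, True)
    print(task_id, tool_name, round(confidence, 2), level.value)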