mem-llm 1.0.7__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mem-llm has been flagged as possibly problematic; see the registry's advisory page for details.

mem_llm/__init__.py CHANGED
@@ -17,19 +17,25 @@ except ImportError:
 # Pro version imports (optional)
 try:
     from .memory_db import SQLMemoryManager
-    from .prompt_templates import prompt_manager
     from .config_manager import get_config
     from .config_from_docs import create_config_from_document
-    __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config", "create_config_from_document"]
+    from .dynamic_prompt import dynamic_prompt_builder
+    __all_pro__ = ["SQLMemoryManager", "get_config", "create_config_from_document", "dynamic_prompt_builder"]
 except ImportError:
     __all_pro__ = []
 
-__version__ = "1.0.7"
+__version__ = "1.0.11"
 __author__ = "C. Emre Karataş"
 
+# CLI
+try:
+    from .cli import cli
+    __all_cli__ = ["cli"]
+except ImportError:
+    __all_cli__ = []
+
 __all__ = [
     "MemAgent",
-    "MemoryManager",
+    "MemoryManager",
     "OllamaClient",
-] + __all_tools__ + __all_pro__
-
+] + __all_tools__ + __all_pro__ + __all_cli__
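For illustration only (not part of the diff): with these exports in place, downstream code could presumably pick up the new symbols as in the minimal sketch below; the try/except mirrors the package's own guard for the optional pro modules.

# Sketch: importing the symbols added in 1.0.11 (assumes mem-llm >= 1.0.11 is installed)
from mem_llm import MemAgent, __version__

try:
    # dynamic_prompt_builder is only exported when the pro modules import cleanly
    from mem_llm import dynamic_prompt_builder
    system_prompt = dynamic_prompt_builder.build_prompt(usage_mode="personal")
except ImportError:
    system_prompt = None

print(__version__)  # expected to print 1.0.11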
mem_llm/cli.py ADDED
@@ -0,0 +1,254 @@
+"""
+Command Line Interface for Mem-LLM
+Interactive chat, statistics, and data management
+"""
+
+import click
+import json
+import sys
+from typing import Optional
+from .mem_agent import MemAgent
+from . import __version__
+
+
+@click.group()
+@click.version_option(version=__version__, prog_name="mem-llm")
+def cli():
+    """
+    Mem-LLM - Memory-enabled AI Assistant CLI
+
+    A powerful command-line interface for interacting with your AI assistant.
+    """
+    pass
+
+
+@cli.command()
+@click.option('--user', '-u', default='default', help='User ID for the chat session')
+@click.option('--model', '-m', default='granite4:tiny-h', help='LLM model to use')
+@click.option('--sql/--json', default=False, help='Use SQL (default: JSON)')
+@click.option('--config', '-c', type=click.Path(exists=True), help='Config file path')
+def chat(user: str, model: str, sql: bool, config: Optional[str]):
+    """
+    Start an interactive chat session
+
+    Examples:
+        mem-llm chat --user john
+        mem-llm chat --user alice --sql
+        mem-llm chat --config config.yaml
+    """
+    click.echo("🤖 Mem-LLM Interactive Chat")
+    click.echo("=" * 60)
+
+    # Initialize agent
+    try:
+        if config:
+            agent = MemAgent(config_file=config)
+        else:
+            agent = MemAgent(model=model, use_sql=sql)
+
+        # Check setup
+        status = agent.check_setup()
+        if status['status'] != 'ready':
+            click.echo("\n❌ Setup Error!", err=True)
+            if not status['ollama_running']:
+                click.echo(" → Ollama service is not running", err=True)
+                click.echo(" → Start it with: ollama serve", err=True)
+            elif not status['model_ready']:
+                click.echo(f" → Model '{model}' not found", err=True)
+                click.echo(f" → Download it with: ollama pull {model}", err=True)
+            sys.exit(1)
+
+        agent.set_user(user)
+
+        click.echo(f"\n✅ Chat session started")
+        click.echo(f" User: {user}")
+        click.echo(f" Model: {model}")
+        click.echo(f" Memory: {'SQL' if sql else 'JSON'}")
+        click.echo("\nType your message and press Enter. Commands:")
+        click.echo(" /profile - Show user profile")
+        click.echo(" /stats - Show statistics")
+        click.echo(" /history - Show recent conversations")
+        click.echo(" /exit - Exit chat\n")
+
+        # Chat loop
+        while True:
+            try:
+                message = input(f"\n{user}> ").strip()
+
+                if not message:
+                    continue
+
+                # Handle commands
+                if message.lower() in ['/exit', '/quit', 'exit', 'quit']:
+                    click.echo("\n👋 Goodbye!")
+                    break
+
+                elif message.lower() == '/profile':
+                    profile = agent.get_user_profile()
+                    click.echo("\n👤 User Profile:")
+                    click.echo(json.dumps(profile, indent=2, ensure_ascii=False))
+                    continue
+
+                elif message.lower() == '/stats':
+                    stats = agent.get_statistics()
+                    click.echo("\n📊 Statistics:")
+                    click.echo(json.dumps(stats, indent=2, ensure_ascii=False))
+                    continue
+
+                elif message.lower() == '/history':
+                    if hasattr(agent.memory, 'get_recent_conversations'):
+                        convs = agent.memory.get_recent_conversations(user, 5)
+                        click.echo("\n📜 Recent Conversations:")
+                        for i, conv in enumerate(convs, 1):
+                            click.echo(f"\n{i}. [{conv.get('timestamp', 'N/A')}]")
+                            click.echo(f" You: {conv.get('user_message', '')[:100]}")
+                            click.echo(f" Bot: {conv.get('bot_response', '')[:100]}")
+                    continue
+
+                # Regular chat
+                response = agent.chat(message)
+                click.echo(f"\n🤖 Bot> {response}")
+
+            except KeyboardInterrupt:
+                click.echo("\n\n👋 Goodbye!")
+                break
+            except EOFError:
+                click.echo("\n\n👋 Goodbye!")
+                break
+
+    except Exception as e:
+        click.echo(f"\n❌ Error: {str(e)}", err=True)
+        sys.exit(1)
+
+
+@cli.command()
+@click.option('--user', '-u', help='User ID (optional, shows all users if not specified)')
+@click.option('--sql/--json', default=False, help='Use SQL (default: JSON)')
+def stats(user: Optional[str], sql: bool):
+    """
+    Show memory statistics
+
+    Examples:
+        mem-llm stats
+        mem-llm stats --user john
+        mem-llm stats --sql
+    """
+    try:
+        agent = MemAgent(use_sql=sql)
+
+        if user:
+            agent.set_user(user)
+            profile = agent.get_user_profile()
+            click.echo(f"\n👤 User Profile: {user}")
+            click.echo("=" * 60)
+            click.echo(json.dumps(profile, indent=2, ensure_ascii=False))
+        else:
+            stats = agent.get_statistics()
+            click.echo("\n📊 Memory Statistics")
+            click.echo("=" * 60)
+            click.echo(json.dumps(stats, indent=2, ensure_ascii=False))
+
+    except Exception as e:
+        click.echo(f"\n❌ Error: {str(e)}", err=True)
+        sys.exit(1)
+
+
+@cli.command()
+@click.argument('user')
+@click.option('--format', '-f', type=click.Choice(['json', 'txt']), default='json', help='Export format')
+@click.option('--output', '-o', type=click.Path(), help='Output file (default: stdout)')
+@click.option('--sql/--json', default=False, help='Use SQL (default: JSON)')
+def export(user: str, format: str, output: Optional[str], sql: bool):
+    """
+    Export user conversation data
+
+    Examples:
+        mem-llm export john
+        mem-llm export john --format txt
+        mem-llm export john --output john_data.json
+    """
+    try:
+        agent = MemAgent(use_sql=sql)
+        agent.set_user(user)
+
+        data = agent.export_memory(format=format)
+
+        if output:
+            with open(output, 'w', encoding='utf-8') as f:
+                f.write(data)
+            click.echo(f"✅ Exported to: {output}")
+        else:
+            click.echo(data)
+
+    except Exception as e:
+        click.echo(f"\n❌ Error: {str(e)}", err=True)
+        sys.exit(1)
+
+
+@cli.command()
+@click.option('--model', '-m', default='granite4:tiny-h', help='Model to check')
+def check(model: str):
+    """
+    Check if Ollama and model are ready
+
+    Example:
+        mem-llm check
+        mem-llm check --model llama3.2:3b
+    """
+    try:
+        agent = MemAgent(model=model)
+        status = agent.check_setup()
+
+        click.echo("\n🔍 System Check")
+        click.echo("=" * 60)
+        click.echo(f"Ollama Running: {'✅' if status['ollama_running'] else '❌'}")
+        click.echo(f"Target Model: {status['target_model']}")
+        click.echo(f"Model Ready: {'✅' if status['model_ready'] else '❌'}")
+        click.echo(f"Memory Backend: {status['memory_backend']}")
+        click.echo(f"Total Users: {status['total_users']}")
+        click.echo(f"Total Chats: {status['total_interactions']}")
+        click.echo(f"KB Entries: {status['kb_entries']}")
+
+        if status['available_models']:
+            click.echo(f"\nAvailable Models:")
+            for m in status['available_models']:
+                click.echo(f" • {m}")
+
+        click.echo(f"\nStatus: {'✅ READY' if status['status'] == 'ready' else '❌ NOT READY'}")
+
+        if status['status'] != 'ready':
+            sys.exit(1)
+
+    except Exception as e:
+        click.echo(f"\n❌ Error: {str(e)}", err=True)
+        sys.exit(1)
+
+
+@cli.command()
+@click.argument('user')
+@click.confirmation_option(prompt='Are you sure you want to delete all data for this user?')
+@click.option('--sql/--json', default=False, help='Use SQL (default: JSON)')
+def clear(user: str, sql: bool):
+    """
+    Clear all data for a user (requires confirmation)
+
+    Example:
+        mem-llm clear john
+    """
+    try:
+        agent = MemAgent(use_sql=sql)
+        result = agent.clear_user_data(user, confirm=True)
+        click.echo(f"✅ {result}")
+
+    except Exception as e:
+        click.echo(f"\n❌ Error: {str(e)}", err=True)
+        sys.exit(1)
+
+
+def main():
+    """Entry point for the CLI"""
+    cli()
+
+
+if __name__ == '__main__':
+    main()
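As a hedged illustration (not part of the diff), the new click group can be smoke-tested without Ollama or any network access by invoking only its help screens through click's own test runner:

# Sketch: exercising the CLI's help output with click.testing.CliRunner
from click.testing import CliRunner
from mem_llm.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["--help"])          # lists chat, stats, export, check, clear
print(result.output)

result = runner.invoke(cli, ["chat", "--help"])  # shows --user/--model/--sql/--config options
print(result.output)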
mem_llm/dynamic_prompt.py ADDED
@@ -0,0 +1,298 @@
+"""
+Dynamic System Prompt Builder
+=============================
+
+Builds optimized system prompts based on active features:
+- Knowledge Base enabled/disabled
+- Tools enabled/disabled
+- Memory type (JSON/SQL)
+- Usage mode (business/personal)
+- Multi-user support
+- Document processing
+
+This prevents irrelevant/context-broken responses by adapting
+the system prompt to actual capabilities.
+"""
+
+from typing import Dict, List, Optional, Any
+from datetime import datetime
+
+
+class DynamicPromptBuilder:
+    """Builds context-aware system prompts based on active features"""
+
+    def __init__(self):
+        self.base_instructions = {
+            "core": """You are a helpful AI assistant that maintains conversation context and provides accurate, relevant responses.
+
+⚠️ OUTPUT FORMAT:
+- If you're a thinking-enabled model (Qwen, DeepSeek, etc.), DO NOT show your internal reasoning
+- Respond DIRECTLY with the final answer only
+- Suppress any chain-of-thought or thinking process
+- Be concise and natural""",
+
+            "concise": """
+RESPONSE GUIDELINES:
+- Keep responses SHORT and FOCUSED (1-3 sentences for simple questions)
+- Only elaborate when the user asks for details
+- Acknowledge personal information briefly ("Got it!", "Noted!")
+- Be conversational and natural""",
+
+            "memory": """
+MEMORY AWARENESS:
+- You have access to past conversations with this user
+- Reference previous context when relevant
+- Build upon earlier discussions naturally
+- Remember user preferences and details shared""",
+
+            "knowledge_base": """
+KNOWLEDGE BASE PRIORITY (⚠️ CRITICAL):
+1. If KNOWLEDGE BASE information is provided below, USE IT FIRST - it's authoritative!
+2. Knowledge base entries are marked with "📚 RELEVANT KNOWLEDGE"
+3. Answer from knowledge base EXACTLY as provided
+4. DO NOT make up information not in the knowledge base
+5. If knowledge base has no info, then use conversation history or say "I don't have specific information about that"
+
+RESPONSE PRIORITY:
+1️⃣ Knowledge Base (if available) ← ALWAYS FIRST!
+2️⃣ Conversation History
+3️⃣ General knowledge (if appropriate)""",
+
+            "no_knowledge_base": """
+INFORMATION SOURCES:
+- Use conversation history to maintain context
+- Provide helpful general information when appropriate
+- Be honest when you don't have specific information""",
+
+            "tools": """
+AVAILABLE TOOLS:
+{tool_descriptions}
+
+TOOL USAGE:
+- Use tools when user requests actions (calculator, weather, search, etc.)
+- Explain what you're doing when using a tool
+- Present tool results clearly""",
+
+            "multi_user": """
+USER CONTEXT:
+- Each user has separate conversation history
+- Maintain appropriate boundaries between user sessions
+- Current user: {current_user}""",
+
+            "business": """
+BUSINESS CONTEXT:
+- Company: {company_name}
+- Industry: {industry}
+- Founded: {founded_year}
+
+PROFESSIONAL STANDARDS:
+- Maintain professional tone
+- Prioritize customer satisfaction
+- Provide clear, actionable solutions
+- Escalate complex issues appropriately""",
+
+            "personal": """
+PERSONAL ASSISTANT MODE:
+- User: {user_name}
+- Timezone: {timezone}
+
+ASSISTANCE STYLE:
+- Friendly and helpful
+- Proactive suggestions when appropriate
+- Respect user preferences and privacy""",
+        }
+
+    def build_prompt(self,
+                     usage_mode: str = "personal",
+                     has_knowledge_base: bool = False,
+                     has_tools: bool = False,
+                     tool_descriptions: Optional[str] = None,
+                     is_multi_user: bool = False,
+                     current_user: Optional[str] = None,
+                     business_config: Optional[Dict] = None,
+                     personal_config: Optional[Dict] = None,
+                     memory_type: str = "sql",
+                     custom_instructions: Optional[str] = None) -> str:
+        """
+        Build dynamic system prompt based on active features
+
+        Args:
+            usage_mode: 'business' or 'personal'
+            has_knowledge_base: Whether knowledge base is active
+            has_tools: Whether tools are enabled
+            tool_descriptions: Description of available tools
+            is_multi_user: Whether multi-user mode is active
+            current_user: Current user ID
+            business_config: Business mode configuration
+            personal_config: Personal mode configuration
+            memory_type: 'json' or 'sql'
+            custom_instructions: Additional custom instructions
+
+        Returns:
+            Complete system prompt
+        """
+
+        sections = []
+
+        # 1. Core identity
+        sections.append(self.base_instructions["core"])
+
+        # 2. Mode-specific context
+        if usage_mode == "business":
+            business_info = business_config or {}
+            business_prompt = self.base_instructions["business"].format(
+                company_name=business_info.get("company_name", "Our Company"),
+                industry=business_info.get("industry", "Technology"),
+                founded_year=business_info.get("founded_year", "2020")
+            )
+            sections.append(business_prompt)
+        else:  # personal
+            personal_info = personal_config or {}
+            personal_prompt = self.base_instructions["personal"].format(
+                user_name=personal_info.get("user_name", "User"),
+                timezone=personal_info.get("timezone", "UTC")
+            )
+            sections.append(personal_prompt)
+
+        # 3. Memory awareness
+        sections.append(self.base_instructions["memory"])
+
+        # 4. Knowledge base instructions (CRITICAL - only if enabled!)
+        if has_knowledge_base:
+            sections.append(self.base_instructions["knowledge_base"])
+        else:
+            sections.append(self.base_instructions["no_knowledge_base"])
+
+        # 5. Tools instructions (only if enabled)
+        if has_tools and tool_descriptions:
+            tools_prompt = self.base_instructions["tools"].format(
+                tool_descriptions=tool_descriptions
+            )
+            sections.append(tools_prompt)
+
+        # 6. Multi-user context (only if enabled)
+        if is_multi_user and current_user:
+            multi_user_prompt = self.base_instructions["multi_user"].format(
+                current_user=current_user
+            )
+            sections.append(multi_user_prompt)
+
+        # 7. Response guidelines
+        sections.append(self.base_instructions["concise"])
+
+        # 8. Custom instructions (if provided)
+        if custom_instructions:
+            sections.append(f"\nADDITIONAL INSTRUCTIONS:\n{custom_instructions}")
+
+        # 9. Current date
+        current_date = datetime.now().strftime("%Y-%m-%d %H:%M")
+        sections.append(f"\nCurrent Date/Time: {current_date}")
+
+        # Join all sections
+        full_prompt = "\n\n".join(sections)
+
+        return full_prompt
+
+    def get_feature_summary(self,
+                            has_knowledge_base: bool,
+                            has_tools: bool,
+                            is_multi_user: bool,
+                            memory_type: str) -> str:
+        """
+        Get human-readable summary of active features
+
+        Returns:
+            Feature summary string
+        """
+        features = []
+
+        if has_knowledge_base:
+            features.append("✅ Knowledge Base")
+        else:
+            features.append("❌ Knowledge Base")
+
+        if has_tools:
+            features.append("✅ Tools")
+        else:
+            features.append("❌ Tools")
+
+        if is_multi_user:
+            features.append("✅ Multi-user")
+        else:
+            features.append("⚪ Single-user")
+
+        features.append(f"💾 Memory: {memory_type.upper()}")
+
+        return " | ".join(features)
+
+
+# Global instance
+dynamic_prompt_builder = DynamicPromptBuilder()
+
+
+# Example usage
+if __name__ == "__main__":
+    print("=" * 70)
+    print("DYNAMIC PROMPT BUILDER - EXAMPLES")
+    print("=" * 70)
+
+    # Example 1: Simple personal assistant (no KB, no tools)
+    print("\n📱 EXAMPLE 1: Simple Personal Assistant")
+    print("-" * 70)
+    prompt1 = dynamic_prompt_builder.build_prompt(
+        usage_mode="personal",
+        has_knowledge_base=False,
+        has_tools=False,
+        memory_type="json"
+    )
+    print(prompt1[:300] + "...")
+
+    # Example 2: Business with Knowledge Base
+    print("\n\n🏢 EXAMPLE 2: Business with Knowledge Base")
+    print("-" * 70)
+    prompt2 = dynamic_prompt_builder.build_prompt(
+        usage_mode="business",
+        has_knowledge_base=True,
+        has_tools=False,
+        business_config={
+            "company_name": "Acme Corp",
+            "industry": "E-commerce",
+            "founded_year": "2015"
+        },
+        memory_type="sql"
+    )
+    print(prompt2[:300] + "...")
+
+    # Example 3: Full-featured multi-user system
+    print("\n\n⚡ EXAMPLE 3: Full-Featured Multi-User System")
+    print("-" * 70)
+    prompt3 = dynamic_prompt_builder.build_prompt(
+        usage_mode="business",
+        has_knowledge_base=True,
+        has_tools=True,
+        tool_descriptions="- Calculator: Perform math calculations\n- Weather: Get current weather",
+        is_multi_user=True,
+        current_user="customer_12345",
+        business_config={
+            "company_name": "TechSupport Inc",
+            "industry": "Technology",
+            "founded_year": "2010"
+        },
+        memory_type="sql"
+    )
+    print(prompt3[:300] + "...")
+
+    # Feature summaries
+    print("\n\n📊 FEATURE SUMMARIES")
+    print("-" * 70)
+
+    configs = [
+        ("Simple", False, False, False, "json"),
+        ("Basic KB", True, False, False, "json"),
+        ("With Tools", True, True, False, "sql"),
+        ("Full System", True, True, True, "sql"),
+    ]
+
+    for name, kb, tools, multi, mem in configs:
+        summary = dynamic_prompt_builder.get_feature_summary(kb, tools, multi, mem)
+        print(f"{name:15} : {summary}")
mem_llm/llm_client.py CHANGED
@@ -89,9 +89,9 @@ class OllamaClient:
         return f"Connection error: {str(e)}"
 
     def chat(self, messages: List[Dict[str, str]],
-             temperature: float = 0.7, max_tokens: int = 500) -> str:
+             temperature: float = 0.7, max_tokens: int = 2000) -> str:
         """
-        Chat format interaction
+        Chat format interaction - Compatible with ALL Ollama models
 
         Args:
             messages: Message history [{"role": "user/assistant/system", "content": "..."}]
@@ -108,17 +108,57 @@ class OllamaClient:
             "options": {
                 "temperature": temperature,
                 "num_predict": max_tokens,
-                "num_ctx": 2048,  # Context window
+                "num_ctx": 4096,  # Context window
                 "top_k": 40,  # Limit vocab
                 "top_p": 0.9,  # Nucleus sampling
-                "stop": ["\n\n\n", "---"]  # Stop sequences
+                "num_thread": 8  # Parallel processing
             }
         }
 
+        # For thinking-enabled models (like qwen3), disable thinking mode
+        # to get direct answers instead of reasoning process
+        if 'qwen' in self.model.lower() or 'deepseek' in self.model.lower():
+            payload["options"]["enable_thinking"] = False
+
         try:
-            response = requests.post(self.chat_url, json=payload, timeout=60)
+            response = requests.post(self.chat_url, json=payload, timeout=120)
             if response.status_code == 200:
-                return response.json().get('message', {}).get('content', '').strip()
+                response_data = response.json()
+                message = response_data.get('message', {})
+
+                # Get content - primary response field
+                result = message.get('content', '').strip()
+
+                # Fallback: If content is empty but thinking exists
+                # This happens when thinking mode couldn't be disabled
+                if not result and message.get('thinking'):
+                    thinking = message.get('thinking', '')
+
+                    # Try to extract the actual answer from thinking process
+                    # Usually the answer is at the end after reasoning
+                    if thinking:
+                        # Split by common patterns that indicate final answer
+                        for separator in ['\n\nAnswer:', '\n\nFinal answer:',
+                                          '\n\nResponse:', '\n\nSo the answer is:',
+                                          '\n\n---\n', '\n\nOkay,']:
+                            if separator in thinking:
+                                parts = thinking.split(separator)
+                                if len(parts) > 1:
+                                    result = parts[-1].strip()
+                                    break
+
+                        # If no separator found, try to get last meaningful paragraph
+                        if not result:
+                            paragraphs = [p.strip() for p in thinking.split('\n\n') if p.strip()]
+                            if paragraphs:
+                                # Take the last paragraph as likely answer
+                                last_para = paragraphs[-1]
+                                # Avoid meta-commentary like "Wait, let me think..."
+                                if not any(word in last_para.lower() for word in
+                                           ['wait', 'hmm', 'let me', 'thinking', 'okay']):
+                                    result = last_para
+
+                return result
             else:
                 return f"Error: {response.status_code} - {response.text}"
         except Exception as e:
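The fallback branch above is plain string handling, so its behaviour can be sketched in isolation. The snippet below is a rough standalone rendition of the same idea, not a helper exposed by the library:

# Sketch: recovering a final answer from a "thinking" trace when 'content' comes back empty
def extract_answer_from_thinking(thinking: str) -> str:
    separators = ['\n\nAnswer:', '\n\nFinal answer:', '\n\nResponse:',
                  '\n\nSo the answer is:', '\n\n---\n', '\n\nOkay,']
    # Prefer an explicit "answer" marker; the text after the last occurrence is taken as the reply
    for sep in separators:
        if sep in thinking:
            return thinking.split(sep)[-1].strip()
    # Otherwise fall back to the final paragraph, unless it looks like meta-commentary
    paragraphs = [p.strip() for p in thinking.split('\n\n') if p.strip()]
    if paragraphs and not any(w in paragraphs[-1].lower()
                              for w in ['wait', 'hmm', 'let me', 'thinking', 'okay']):
        return paragraphs[-1]
    return ""

print(extract_answer_from_thinking(
    "The user asks for 2+2. Let me add.\n\nAnswer: 2 + 2 = 4"
))  # -> "2 + 2 = 4"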