memra 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. memra/cli.py +322 -51
  2. {memra-0.2.13.dist-info → memra-0.2.15.dist-info}/METADATA +1 -1
  3. {memra-0.2.13.dist-info → memra-0.2.15.dist-info}/RECORD +7 -61
  4. memra-0.2.15.dist-info/top_level.txt +1 -0
  5. memra-0.2.13.dist-info/top_level.txt +0 -4
  6. memra-ops/app.py +0 -808
  7. memra-ops/config/config.py +0 -25
  8. memra-ops/config.py +0 -34
  9. memra-ops/logic/__init__.py +0 -1
  10. memra-ops/logic/file_tools.py +0 -43
  11. memra-ops/logic/invoice_tools.py +0 -668
  12. memra-ops/logic/invoice_tools_fix.py +0 -66
  13. memra-ops/mcp_bridge_server.py +0 -1178
  14. memra-ops/scripts/check_database.py +0 -37
  15. memra-ops/scripts/clear_database.py +0 -48
  16. memra-ops/scripts/monitor_database.py +0 -67
  17. memra-ops/scripts/release.py +0 -133
  18. memra-ops/scripts/reset_database.py +0 -65
  19. memra-ops/scripts/start_memra.py +0 -334
  20. memra-ops/scripts/stop_memra.py +0 -132
  21. memra-ops/server_tool_registry.py +0 -190
  22. memra-ops/tests/test_llm_text_to_sql.py +0 -115
  23. memra-ops/tests/test_llm_vs_pattern.py +0 -130
  24. memra-ops/tests/test_mcp_schema_aware.py +0 -124
  25. memra-ops/tests/test_schema_aware_sql.py +0 -139
  26. memra-ops/tests/test_schema_aware_sql_simple.py +0 -66
  27. memra-ops/tests/test_text_to_sql_demo.py +0 -140
  28. memra-ops/tools/mcp_bridge_server.py +0 -851
  29. memra-sdk/examples/accounts_payable.py +0 -215
  30. memra-sdk/examples/accounts_payable_client.py +0 -217
  31. memra-sdk/examples/accounts_payable_mcp.py +0 -200
  32. memra-sdk/examples/ask_questions.py +0 -123
  33. memra-sdk/examples/invoice_processing.py +0 -116
  34. memra-sdk/examples/propane_delivery.py +0 -87
  35. memra-sdk/examples/simple_text_to_sql.py +0 -158
  36. memra-sdk/memra/__init__.py +0 -31
  37. memra-sdk/memra/discovery.py +0 -15
  38. memra-sdk/memra/discovery_client.py +0 -49
  39. memra-sdk/memra/execution.py +0 -481
  40. memra-sdk/memra/models.py +0 -99
  41. memra-sdk/memra/tool_registry.py +0 -343
  42. memra-sdk/memra/tool_registry_client.py +0 -106
  43. memra-sdk/scripts/release.py +0 -133
  44. memra-sdk/setup.py +0 -52
  45. memra-workflows/accounts_payable/accounts_payable.py +0 -215
  46. memra-workflows/accounts_payable/accounts_payable_client.py +0 -216
  47. memra-workflows/accounts_payable/accounts_payable_mcp.py +0 -200
  48. memra-workflows/accounts_payable/accounts_payable_smart.py +0 -221
  49. memra-workflows/invoice_processing/invoice_processing.py +0 -116
  50. memra-workflows/invoice_processing/smart_invoice_processor.py +0 -220
  51. memra-workflows/logic/__init__.py +0 -1
  52. memra-workflows/logic/file_tools.py +0 -50
  53. memra-workflows/logic/invoice_tools.py +0 -501
  54. memra-workflows/logic/propane_agents.py +0 -52
  55. memra-workflows/mcp_bridge_server.py +0 -230
  56. memra-workflows/propane_delivery/propane_delivery.py +0 -87
  57. memra-workflows/text_to_sql/complete_invoice_workflow_with_queries.py +0 -208
  58. memra-workflows/text_to_sql/complete_text_to_sql_system.py +0 -266
  59. memra-workflows/text_to_sql/file_discovery_demo.py +0 -156
  60. {memra-0.2.13.dist-info → memra-0.2.15.dist-info}/LICENSE +0 -0
  61. {memra-0.2.13.dist-info → memra-0.2.15.dist-info}/WHEEL +0 -0
  62. {memra-0.2.13.dist-info → memra-0.2.15.dist-info}/entry_points.txt +0 -0
@@ -1,1178 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Simple MCP Bridge Server for local tool execution
4
- """
5
-
6
- import os
7
- import json
8
- import hmac
9
- import hashlib
10
- import logging
11
- import asyncio
12
- import psycopg2
13
- import re
14
- from decimal import Decimal
15
- from aiohttp import web, web_request
16
- from typing import Dict, Any, Optional
17
-
18
- # Add Hugging Face imports
19
- try:
20
- from huggingface_hub import InferenceClient
21
- HF_AVAILABLE = True
22
- except ImportError:
23
- HF_AVAILABLE = False
24
- print("Warning: huggingface_hub not available. Install with: pip install huggingface_hub")
25
-
26
- # Add PDF processing imports
27
- try:
28
- import PyPDF2
29
- PDF_AVAILABLE = True
30
- except ImportError:
31
- PDF_AVAILABLE = False
32
- print("Warning: PyPDF2 not available. Install with: pip install PyPDF2")
33
-
34
- logging.basicConfig(level=logging.INFO)
35
- logger = logging.getLogger(__name__)
36
-
37
- class MCPBridgeServer:
38
- def __init__(self, postgres_url: str, bridge_secret: str):
39
- self.postgres_url = postgres_url
40
- self.bridge_secret = bridge_secret
41
-
42
- # Hugging Face configuration
43
- self.hf_api_key = os.getenv("HUGGINGFACE_API_KEY", "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")
44
- self.hf_model = os.getenv("HUGGINGFACE_MODEL", "meta-llama/Llama-3.1-8B-Instruct")
45
- self.hf_client = None
46
-
47
- # Initialize Hugging Face client if available
48
- if HF_AVAILABLE and self.hf_api_key:
49
- try:
50
- self.hf_client = InferenceClient(
51
- model=self.hf_model,
52
- token=self.hf_api_key
53
- )
54
- logger.info(f"Initialized Hugging Face client with model: {self.hf_model}")
55
- except Exception as e:
56
- logger.warning(f"Failed to initialize Hugging Face client: {e}")
57
- self.hf_client = None
58
- else:
59
- logger.warning("Hugging Face client not available - using fallback pattern matching")
60
-
61
- def verify_signature(self, request_body: str, signature: str) -> bool:
62
- """Verify HMAC signature"""
63
- expected = hmac.new(
64
- self.bridge_secret.encode(),
65
- request_body.encode(),
66
- hashlib.sha256
67
- ).hexdigest()
68
- return hmac.compare_digest(expected, signature)
69
-
70
- async def execute_tool(self, request: web_request.Request) -> web.Response:
71
- """Execute MCP tool endpoint"""
72
- try:
73
- # Get request body
74
- body = await request.text()
75
- data = json.loads(body)
76
-
77
- # Verify signature
78
- signature = request.headers.get('X-Bridge-Secret')
79
- if not signature or signature != self.bridge_secret:
80
- logger.warning("Invalid or missing bridge secret")
81
- return web.json_response({
82
- "success": False,
83
- "error": "Invalid authentication"
84
- }, status=401)
85
-
86
- tool_name = data.get('tool_name')
87
- input_data = data.get('input_data', {})
88
-
89
- logger.info(f"Executing MCP tool: {tool_name}")
90
-
91
- if tool_name == "DataValidator":
92
- result = await self.data_validator(input_data)
93
- elif tool_name == "PostgresInsert":
94
- result = await self.postgres_insert(input_data)
95
- elif tool_name == "SQLExecutor":
96
- result = await self.sql_executor(input_data)
97
- elif tool_name == "TextToSQLGenerator":
98
- result = await self.text_to_sql_generator(input_data)
99
- elif tool_name == "PDFProcessor":
100
- result = await self.pdf_processor(input_data)
101
- elif tool_name == "MockInvoiceGenerator":
102
- result = await self.mock_invoice_generator(input_data)
103
- else:
104
- return web.json_response({
105
- "success": False,
106
- "error": f"Unknown tool: {tool_name}"
107
- }, status=400)
108
-
109
- return web.json_response(result)
110
-
111
- except Exception as e:
112
- logger.error(f"Tool execution failed: {str(e)}")
113
- return web.json_response({
114
- "success": False,
115
- "error": str(e)
116
- }, status=500)
117
-
118
- async def data_validator(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
119
- """Validate data against schema"""
120
- try:
121
- logger.info(f"DataValidator received input_data: {input_data}")
122
- invoice_data = input_data.get('invoice_data', {})
123
- logger.info(f"DataValidator extracted invoice_data: {invoice_data}")
124
-
125
- # Perform basic validation
126
- validation_errors = []
127
-
128
- # Check required fields
129
- required_fields = ['headerSection', 'billingDetails', 'chargesSummary']
130
- for field in required_fields:
131
- if field not in invoice_data:
132
- validation_errors.append(f"Missing required field: {field}")
133
-
134
- # Validate header section
135
- if 'headerSection' in invoice_data:
136
- header = invoice_data['headerSection']
137
- if not header.get('vendorName'):
138
- validation_errors.append("Missing vendor name in header")
139
- if not header.get('subtotal'):
140
- validation_errors.append("Missing subtotal in header")
141
-
142
- # Validate billing details
143
- if 'billingDetails' in invoice_data:
144
- billing = invoice_data['billingDetails']
145
- if not billing.get('invoiceNumber'):
146
- validation_errors.append("Missing invoice number")
147
- if not billing.get('invoiceDate'):
148
- validation_errors.append("Missing invoice date")
149
-
150
- is_valid = len(validation_errors) == 0
151
-
152
- logger.info(f"Data validation completed: {'valid' if is_valid else 'invalid'}")
153
-
154
- return {
155
- "success": True,
156
- "data": {
157
- "is_valid": is_valid,
158
- "validation_errors": validation_errors,
159
- "validated_data": invoice_data
160
- }
161
- }
162
-
163
- except Exception as e:
164
- logger.error(f"Data validation failed: {str(e)}")
165
- return {
166
- "success": False,
167
- "error": str(e)
168
- }
169
-
170
- async def postgres_insert(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
171
- """Insert data into PostgreSQL"""
172
- try:
173
- invoice_data = input_data.get('invoice_data', {})
174
- table_name = input_data.get('table_name', 'invoices')
175
-
176
- # Extract key fields from invoice data
177
- header = invoice_data.get('headerSection', {})
178
- billing = invoice_data.get('billingDetails', {})
179
- charges = invoice_data.get('chargesSummary', {})
180
-
181
- # Prepare insert data
182
- insert_data = {
183
- 'invoice_number': billing.get('invoiceNumber', ''),
184
- 'vendor_name': header.get('vendorName', ''),
185
- 'invoice_date': billing.get('invoiceDate', ''),
186
- 'total_amount': charges.get('document_total', 0),
187
- 'tax_amount': charges.get('secondary_tax', 0),
188
- 'line_items': json.dumps(charges.get('lineItemsBreakdown', [])),
189
- 'status': 'processed'
190
- }
191
-
192
- # Connect to database and insert
193
- conn = psycopg2.connect(self.postgres_url)
194
- cursor = conn.cursor()
195
-
196
- # Build insert query
197
- columns = ', '.join(insert_data.keys())
198
- placeholders = ', '.join(['%s'] * len(insert_data))
199
- query = f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders}) RETURNING id"
200
-
201
- cursor.execute(query, list(insert_data.values()))
202
- record_id = cursor.fetchone()[0]
203
-
204
- conn.commit()
205
- cursor.close()
206
- conn.close()
207
-
208
- logger.info(f"Successfully inserted record with ID: {record_id}")
209
-
210
- return {
211
- "success": True,
212
- "data": {
213
- "success": True,
214
- "record_id": record_id,
215
- "database_table": table_name,
216
- "inserted_data": insert_data
217
- }
218
- }
219
-
220
- except Exception as e:
221
- logger.error(f"Database insert failed: {str(e)}")
222
- return {
223
- "success": False,
224
- "error": str(e)
225
- }
226
-
227
- async def pdf_processor(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
228
- """Process PDF file and extract invoice data"""
229
- try:
230
- file_path = input_data.get('file', '')
231
-
232
- if not file_path:
233
- return {
234
- "success": False,
235
- "error": "No file path provided"
236
- }
237
-
238
- if not os.path.exists(file_path):
239
- return {
240
- "success": False,
241
- "error": f"PDF file not found: {file_path}"
242
- }
243
-
244
- if not PDF_AVAILABLE:
245
- return {
246
- "success": False,
247
- "error": "PDF processing not available. Install PyPDF2: pip install PyPDF2"
248
- }
249
-
250
- # Extract text from PDF
251
- text_content = ""
252
- try:
253
- with open(file_path, 'rb') as file:
254
- pdf_reader = PyPDF2.PdfReader(file)
255
- for page in pdf_reader.pages:
256
- text_content += page.extract_text() + "\n"
257
- except Exception as e:
258
- return {
259
- "success": False,
260
- "error": f"Failed to read PDF: {str(e)}"
261
- }
262
-
263
- # Simple pattern matching to extract invoice data
264
- # This is a basic implementation - in production you'd use more sophisticated parsing
265
- invoice_data = {
266
- "headerSection": {
267
- "vendorName": self._extract_vendor_name(text_content),
268
- "subtotal": self._extract_subtotal(text_content)
269
- },
270
- "billingDetails": {
271
- "invoiceNumber": self._extract_invoice_number(text_content),
272
- "invoiceDate": self._extract_invoice_date(text_content)
273
- },
274
- "chargesSummary": {
275
- "document_total": self._extract_total_amount(text_content),
276
- "secondary_tax": self._extract_tax_amount(text_content),
277
- "lineItemsBreakdown": self._extract_line_items(text_content)
278
- }
279
- }
280
-
281
- logger.info(f"PDF processed successfully: {file_path}")
282
-
283
- return {
284
- "success": True,
285
- "data": {
286
- "file_path": file_path,
287
- "text_content": text_content[:1000] + "..." if len(text_content) > 1000 else text_content,
288
- "extracted_data": invoice_data
289
- }
290
- }
291
-
292
- except Exception as e:
293
- logger.error(f"PDF processing failed: {str(e)}")
294
- return {
295
- "success": False,
296
- "error": str(e)
297
- }
298
-
299
- def _extract_vendor_name(self, text: str) -> str:
300
- """Extract vendor name from text"""
301
- # Look for common patterns
302
- lines = text.split('\n')
303
- for line in lines[:10]: # Check first 10 lines
304
- if any(keyword in line.upper() for keyword in ['INC', 'LLC', 'CORP', 'LTD', 'COMPANY']):
305
- return line.strip()
306
- return "Unknown Vendor"
307
-
308
- def _extract_invoice_number(self, text: str) -> str:
309
- """Extract invoice number from text"""
310
- import re
311
- # Look for patterns like "Invoice #12345" or "INV-12345"
312
- patterns = [
313
- r'INVOICE[:\s#]*([A-Z0-9-]+)',
314
- r'INV[:\s#]*([A-Z0-9-]+)',
315
- r'#([A-Z0-9-]{6,})'
316
- ]
317
- for pattern in patterns:
318
- match = re.search(pattern, text.upper())
319
- if match:
320
- return match.group(1)
321
- return "UNKNOWN-001"
322
-
323
- def _extract_invoice_date(self, text: str) -> str:
324
- """Extract invoice date from text"""
325
- import re
326
- # Look for date patterns
327
- patterns = [
328
- r'(\d{1,2}[/-]\d{1,2}[/-]\d{2,4})',
329
- r'(\d{4}-\d{2}-\d{2})'
330
- ]
331
- for pattern in patterns:
332
- match = re.search(pattern, text)
333
- if match:
334
- return match.group(1)
335
- return "2024-01-01"
336
-
337
- def _extract_total_amount(self, text: str) -> float:
338
- """Extract total amount from text"""
339
- import re
340
- # Look for total patterns
341
- patterns = [
342
- r'TOTAL[:\s]*\$?([0-9,]+\.?[0-9]*)',
343
- r'AMOUNT[:\s]*\$?([0-9,]+\.?[0-9]*)',
344
- r'\$([0-9,]+\.?[0-9]*)'
345
- ]
346
- for pattern in patterns:
347
- match = re.search(pattern, text.upper())
348
- if match:
349
- amount_str = match.group(1).replace(',', '')
350
- try:
351
- return float(amount_str)
352
- except ValueError:
353
- continue
354
- return 0.0
355
-
356
- def _extract_subtotal(self, text: str) -> float:
357
- """Extract subtotal from text"""
358
- import re
359
- patterns = [
360
- r'SUBTOTAL[:\s]*\$?([0-9,]+\.?[0-9]*)',
361
- r'SUB TOTAL[:\s]*\$?([0-9,]+\.?[0-9]*)'
362
- ]
363
- for pattern in patterns:
364
- match = re.search(pattern, text.upper())
365
- if match:
366
- amount_str = match.group(1).replace(',', '')
367
- try:
368
- return float(amount_str)
369
- except ValueError:
370
- continue
371
- return 0.0
372
-
373
- def _extract_tax_amount(self, text: str) -> float:
374
- """Extract tax amount from text"""
375
- import re
376
- patterns = [
377
- r'TAX[:\s]*\$?([0-9,]+\.?[0-9]*)',
378
- r'HST[:\s]*\$?([0-9,]+\.?[0-9]*)',
379
- r'GST[:\s]*\$?([0-9,]+\.?[0-9]*)'
380
- ]
381
- for pattern in patterns:
382
- match = re.search(pattern, text.upper())
383
- if match:
384
- amount_str = match.group(1).replace(',', '')
385
- try:
386
- return float(amount_str)
387
- except ValueError:
388
- continue
389
- return 0.0
390
-
391
- def _extract_line_items(self, text: str) -> list:
392
- """Extract line items from text"""
393
- # This is a simplified implementation
394
- # In production, you'd use more sophisticated parsing
395
- items = []
396
- lines = text.split('\n')
397
-
398
- for line in lines:
399
- # Look for lines with quantities and amounts
400
- if any(char.isdigit() for char in line) and '$' in line:
401
- # Simple pattern matching for line items
402
- parts = line.split()
403
- if len(parts) >= 3:
404
- try:
405
- # Try to extract quantity and amount
406
- quantity = 1.0
407
- amount = 0.0
408
- description = "Unknown Item"
409
-
410
- for i, part in enumerate(parts):
411
- if part.startswith('$'):
412
- try:
413
- amount = float(part[1:].replace(',', ''))
414
- except ValueError:
415
- pass
416
- elif part.replace('.', '').isdigit():
417
- try:
418
- quantity = float(part)
419
- except ValueError:
420
- pass
421
- else:
422
- description = part
423
-
424
- if amount > 0:
425
- items.append({
426
- "description": description,
427
- "quantity": quantity,
428
- "unit_price": amount / quantity if quantity > 0 else amount,
429
- "amount": amount,
430
- "main_product": len(items) == 0 # First item is main product
431
- })
432
- except:
433
- continue
434
-
435
- return items[:5] # Return max 5 items
436
-
437
- async def sql_executor(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
438
- """Execute SQL query against PostgreSQL"""
439
- try:
440
- sql_query = input_data.get('sql_query', '')
441
-
442
- if not sql_query:
443
- return {
444
- "success": False,
445
- "error": "No SQL query provided"
446
- }
447
-
448
- # Connect to database and execute query
449
- conn = psycopg2.connect(self.postgres_url)
450
- cursor = conn.cursor()
451
-
452
- # Execute the query
453
- cursor.execute(sql_query)
454
-
455
- # Fetch results if it's a SELECT query
456
- if sql_query.strip().upper().startswith('SELECT'):
457
- results = cursor.fetchall()
458
- column_names = [desc[0] for desc in cursor.description]
459
-
460
- # Convert to list of dictionaries
461
- formatted_results = []
462
- for row in results:
463
- row_dict = dict(zip(column_names, row))
464
- # Convert date/datetime objects to strings for JSON serialization
465
- for key, value in row_dict.items():
466
- if hasattr(value, 'isoformat'): # datetime, date objects
467
- row_dict[key] = value.isoformat()
468
- elif isinstance(value, Decimal): # Decimal objects
469
- row_dict[key] = float(value)
470
- formatted_results.append(row_dict)
471
-
472
- logger.info(f"SQL query executed successfully, returned {len(results)} rows")
473
-
474
- return {
475
- "success": True,
476
- "data": {
477
- "query": sql_query,
478
- "results": formatted_results,
479
- "row_count": len(results),
480
- "columns": column_names
481
- }
482
- }
483
- else:
484
- # For non-SELECT queries (INSERT, UPDATE, DELETE)
485
- conn.commit()
486
- affected_rows = cursor.rowcount
487
-
488
- logger.info(f"SQL query executed successfully, affected {affected_rows} rows")
489
-
490
- return {
491
- "success": True,
492
- "data": {
493
- "query": sql_query,
494
- "affected_rows": affected_rows,
495
- "message": "Query executed successfully"
496
- }
497
- }
498
-
499
- except Exception as e:
500
- logger.error(f"SQL execution failed: {str(e)}")
501
- return {
502
- "success": False,
503
- "error": str(e)
504
- }
505
- finally:
506
- if 'cursor' in locals():
507
- cursor.close()
508
- if 'conn' in locals():
509
- conn.close()
510
-
511
- async def text_to_sql_generator(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
512
- """Generate SQL from natural language using LLM or fallback to pattern matching"""
513
- try:
514
- question = input_data.get('question', '')
515
- schema_info = input_data.get('schema_info', {})
516
-
517
- if not question:
518
- return {
519
- "success": False,
520
- "error": "No question provided"
521
- }
522
-
523
- # If no schema provided or incomplete, fetch it dynamically
524
- if not schema_info or not schema_info.get('schema', {}).get('invoices', {}).get('columns'):
525
- logger.info("No schema provided, fetching dynamically from database")
526
- schema_info = await self.get_table_schema("invoices")
527
-
528
- # Try LLM-based generation first
529
- if self.hf_client:
530
- try:
531
- return await self._llm_text_to_sql(question, schema_info)
532
- except Exception as e:
533
- logger.warning(f"LLM text-to-SQL failed, falling back to pattern matching: {e}")
534
-
535
- # Fallback to pattern matching
536
- return await self._pattern_text_to_sql(question, schema_info)
537
-
538
- except Exception as e:
539
- logger.error(f"Text-to-SQL generation failed: {str(e)}")
540
- return {
541
- "success": False,
542
- "error": str(e)
543
- }
544
-
545
- async def _llm_text_to_sql(self, question: str, schema_info: Dict[str, Any]) -> Dict[str, Any]:
546
- """Generate SQL using Hugging Face LLM"""
547
-
548
- # Extract schema information
549
- tables = schema_info.get('schema', {})
550
- table_name = 'invoices' # Default table
551
- columns = []
552
-
553
- # Get column information from schema
554
- if table_name in tables:
555
- table_info = tables[table_name]
556
- if 'columns' in table_info:
557
- columns = [f"{col['name']} ({col['type']})" for col in table_info['columns']]
558
-
559
- # If no schema info, use default columns
560
- if not columns:
561
- columns = [
562
- 'id (integer)',
563
- 'vendor_name (text)',
564
- 'invoice_number (text)',
565
- 'invoice_date (date)',
566
- 'total_amount (numeric)',
567
- 'tax_amount (numeric)',
568
- 'line_items (jsonb)',
569
- 'status (text)'
570
- ]
571
-
572
- # Create the prompt for the LLM
573
- schema_text = f"Table: {table_name}\nColumns: {', '.join(columns)}"
574
-
575
- # Comprehensive prompt with detailed instructions and examples
576
- prompt = f"""You are a PostgreSQL SQL query generator. Convert natural language questions into valid PostgreSQL queries.
577
-
578
- IMPORTANT RULES:
579
- 1. ALWAYS return a complete, valid SQL query
580
- 2. Use ONLY the table and columns provided in the schema
581
- 3. Use PostgreSQL syntax (ILIKE for case-insensitive matching)
582
- 4. For aggregations with GROUP BY, don't include non-aggregated columns in ORDER BY unless they're in GROUP BY
583
- 5. Use appropriate aliases for calculated columns (as count, as total, as average, etc.)
584
- 6. For date queries, use proper date functions and comparisons
585
-
586
- TABLE SCHEMA:
587
- Table: invoices
588
- Columns: {', '.join(columns)}
589
-
590
- QUERY PATTERNS AND EXAMPLES:
591
-
592
- 1. COUNT QUERIES:
593
- Q: How many invoices are there?
594
- A: SELECT COUNT(*) as count FROM invoices
595
-
596
- Q: How many invoices from Air Liquide?
597
- A: SELECT COUNT(*) as count FROM invoices WHERE vendor_name ILIKE '%air liquide%'
598
-
599
- 2. VENDOR FILTERING:
600
- Q: Show me all invoices from Air Liquide
601
- A: SELECT * FROM invoices WHERE vendor_name ILIKE '%air liquide%'
602
-
603
- Q: Find invoices from Microsoft
604
- A: SELECT * FROM invoices WHERE vendor_name ILIKE '%microsoft%'
605
-
606
- 3. AGGREGATION QUERIES:
607
- Q: What is the total amount of all invoices?
608
- A: SELECT SUM(total_amount) as total FROM invoices
609
-
610
- Q: What is the average invoice amount?
611
- A: SELECT AVG(total_amount) as average FROM invoices
612
-
613
- Q: What is the highest invoice amount?
614
- A: SELECT MAX(total_amount) as max_amount FROM invoices
615
-
616
- 4. GROUPING QUERIES:
617
- Q: Show me invoices grouped by date
618
- A: SELECT invoice_date, COUNT(*) as count FROM invoices GROUP BY invoice_date ORDER BY invoice_date
619
-
620
- Q: Show me invoice counts by vendor
621
- A: SELECT vendor_name, COUNT(*) as count FROM invoices GROUP BY vendor_name ORDER BY count DESC
622
-
623
- Q: Who is the primary vendor?
624
- A: SELECT vendor_name, COUNT(*) as count FROM invoices GROUP BY vendor_name ORDER BY count DESC LIMIT 1
625
-
626
- 5. SORTING AND LIMITING:
627
- Q: Show me the 3 most recent invoices
628
- A: SELECT * FROM invoices ORDER BY invoice_date DESC LIMIT 3
629
-
630
- Q: Show me the oldest invoice
631
- A: SELECT * FROM invoices ORDER BY invoice_date ASC LIMIT 1
632
-
633
- 6. AMOUNT FILTERING:
634
- Q: Find invoices with amounts greater than 1000
635
- A: SELECT * FROM invoices WHERE total_amount > 1000
636
-
637
- Q: Show me invoices under 500
638
- A: SELECT * FROM invoices WHERE total_amount < 500
639
-
640
- 7. DATE QUERIES:
641
- Q: What is the most recent invoice date?
642
- A: SELECT MAX(invoice_date) as latest_date FROM invoices
643
-
644
- Q: Show me invoices from this year
645
- A: SELECT * FROM invoices WHERE EXTRACT(YEAR FROM invoice_date) = EXTRACT(YEAR FROM CURRENT_DATE)
646
-
647
- Q: What are the invoices created this month?
648
- A: SELECT * FROM invoices WHERE EXTRACT(YEAR FROM created_at) = EXTRACT(YEAR FROM CURRENT_DATE) AND EXTRACT(MONTH FROM created_at) = EXTRACT(MONTH FROM CURRENT_DATE)
649
-
650
- Q: Show me invoices from last month
651
- A: SELECT * FROM invoices WHERE EXTRACT(YEAR FROM invoice_date) = EXTRACT(YEAR FROM CURRENT_DATE - INTERVAL '1 month') AND EXTRACT(MONTH FROM invoice_date) = EXTRACT(MONTH FROM CURRENT_DATE - INTERVAL '1 month')
652
-
653
- 8. DISTINCT QUERIES:
654
- Q: Show me all the vendors
655
- A: SELECT DISTINCT vendor_name FROM invoices ORDER BY vendor_name
656
-
657
- Q: What are all the different invoice dates?
658
- A: SELECT DISTINCT invoice_date FROM invoices ORDER BY invoice_date
659
-
660
- 9. COMPLEX VENDOR ANALYSIS:
661
- Q: Which vendor has the highest total invoice amount?
662
- A: SELECT vendor_name, SUM(total_amount) as total FROM invoices GROUP BY vendor_name ORDER BY total DESC LIMIT 1
663
-
664
- Q: Show me vendor totals
665
- A: SELECT vendor_name, SUM(total_amount) as total, COUNT(*) as count FROM invoices GROUP BY vendor_name ORDER BY total DESC
666
-
667
- 10. LINE ITEMS (JSONB):
668
- Q: Show me all the line item costs
669
- A: SELECT vendor_name, invoice_number, line_items FROM invoices WHERE line_items IS NOT NULL
670
-
671
- Q: What are the line item details?
672
- A: SELECT id, vendor_name, line_items FROM invoices WHERE line_items IS NOT NULL AND line_items != '[]'
673
-
674
- Q: Which invoice contains a line item for 'Electricity'?
675
- A: SELECT * FROM invoices WHERE line_items::text ILIKE '%electricity%'
676
-
677
- Q: Find invoices with line items containing 'PROPANE'
678
- A: SELECT * FROM invoices WHERE line_items::text ILIKE '%propane%'
679
-
680
- IMPORTANT:
681
- - Always return a complete SQL query starting with SELECT
682
- - Never return partial queries or just "SELECT"
683
- - Use proper PostgreSQL syntax
684
- - Include appropriate WHERE, GROUP BY, ORDER BY, and LIMIT clauses as needed
685
- - For vendor searches, use ILIKE with % wildcards for partial matching
686
-
687
- Question: {question}
688
- SQL Query:
689
- """
690
-
691
- try:
692
- # Call Hugging Face API with improved parameters
693
- response = self.hf_client.text_generation(
694
- prompt,
695
- max_new_tokens=150, # Increased for complex queries
696
- temperature=0.05, # Lower temperature for more deterministic output
697
- do_sample=True,
698
- stop_sequences=["\n\n", "Q:", "Question:", "Examples:"], # Reduced to 4 sequences
699
- return_full_text=False # Only return the generated part
700
- )
701
-
702
- # Extract SQL from response
703
- sql_query = response.strip()
704
-
705
- # Clean up the response - remove any extra text and extract SQL
706
- if "SELECT" in sql_query.upper():
707
- # Find the SQL query part
708
- lines = sql_query.split('\n')
709
- for line in lines:
710
- line = line.strip()
711
- if line.upper().startswith('SELECT'):
712
- sql_query = line.rstrip(';')
713
- break
714
- else:
715
- # If no line starts with SELECT, try to extract from the whole response
716
- sql_match = re.search(r'(SELECT[^;]+)', sql_query, re.IGNORECASE | re.DOTALL)
717
- if sql_match:
718
- sql_query = sql_match.group(1).strip()
719
-
720
- # Final cleanup
721
- sql_query = sql_query.replace('\n', ' ').strip()
722
-
723
- # Validate the SQL contains basic components
724
- if not sql_query.upper().strip().startswith('SELECT'):
725
- raise ValueError(f"Generated response is not a valid SQL query: '{sql_query}'")
726
-
727
- # Check for incomplete queries
728
- if len(sql_query.strip()) < 15 or sql_query.upper().strip() == 'SELECT':
729
- raise ValueError(f"Generated incomplete SQL query: '{sql_query}'")
730
-
731
- logger.info(f"LLM generated SQL for question: '{question}' -> {sql_query}")
732
-
733
- return {
734
- "success": True,
735
- "data": {
736
- "question": question,
737
- "generated_sql": sql_query,
738
- "explanation": f"Generated using {self.hf_model} LLM with schema context",
739
- "confidence": "high",
740
- "method": "llm",
741
- "schema_used": {
742
- "table": table_name,
743
- "columns": [col.split(' (')[0] for col in columns]
744
- }
745
- }
746
- }
747
-
748
- except Exception as e:
749
- logger.error(f"LLM text-to-SQL generation failed: {str(e)}")
750
- raise e
751
-
752
- async def _pattern_text_to_sql(self, question: str, schema_info: Dict[str, Any]) -> Dict[str, Any]:
753
- """Generate SQL using pattern matching (fallback method)"""
754
-
755
- # Extract schema information for better SQL generation
756
- tables = schema_info.get('schema', {})
757
- table_name = 'invoices' # Default table
758
- columns = []
759
-
760
- # Get column information from schema
761
- if table_name in tables:
762
- table_info = tables[table_name]
763
- if 'columns' in table_info:
764
- columns = [col['name'] for col in table_info['columns']]
765
- column_types = {col['name']: col['type'] for col in table_info['columns']}
766
-
767
- # If no schema info, use default columns
768
- if not columns:
769
- columns = ['id', 'vendor_name', 'invoice_number', 'invoice_date', 'total_amount', 'line_items']
770
- column_types = {
771
- 'id': 'integer',
772
- 'vendor_name': 'text',
773
- 'invoice_number': 'text',
774
- 'invoice_date': 'date',
775
- 'total_amount': 'numeric',
776
- 'line_items': 'jsonb'
777
- }
778
-
779
- # Generate SQL based on question and schema context
780
- question_lower = question.lower()
781
-
782
- # Determine what columns to select based on question
783
- select_clause = "*" # default
784
-
785
- if 'total amount' in question_lower or 'sum' in question_lower:
786
- if 'total_amount' in columns:
787
- select_clause = "SUM(total_amount) as total"
788
- else:
789
- select_clause = "SUM(amount) as total" # fallback
790
- elif 'count' in question_lower or 'how many' in question_lower:
791
- select_clause = "COUNT(*) as count"
792
- elif 'average' in question_lower or 'avg' in question_lower:
793
- if 'total_amount' in columns:
794
- select_clause = "AVG(total_amount) as average"
795
- else:
796
- select_clause = "AVG(amount) as average" # fallback
797
- elif 'vendors' in question_lower or 'companies' in question_lower:
798
- if 'who are' in question_lower or 'all the' in question_lower or 'list' in question_lower:
799
- select_clause = "DISTINCT vendor_name"
800
- elif 'last' in question_lower or 'latest' in question_lower or 'most recent' in question_lower:
801
- if 'date' in question_lower:
802
- select_clause = "MAX(invoice_date) as latest_date"
803
- elif 'invoice' in question_lower:
804
- # For "last invoice" or "latest invoice", show the most recent one
805
- select_clause = "*"
806
- # Will be handled in ORDER BY section
807
- elif 'first' in question_lower or 'earliest' in question_lower:
808
- if 'date' in question_lower:
809
- select_clause = "MIN(invoice_date) as earliest_date"
810
- elif 'invoice' in question_lower:
811
- select_clause = "*"
812
- # Will be handled in ORDER BY section
813
- elif 'max' in question_lower or 'maximum' in question_lower or 'highest' in question_lower:
814
- if 'amount' in question_lower:
815
- select_clause = "MAX(total_amount) as max_amount"
816
- elif 'date' in question_lower:
817
- select_clause = "MAX(invoice_date) as max_date"
818
- elif 'min' in question_lower or 'minimum' in question_lower or 'lowest' in question_lower:
819
- if 'amount' in question_lower:
820
- select_clause = "MIN(total_amount) as min_amount"
821
- elif 'date' in question_lower:
822
- select_clause = "MIN(invoice_date) as min_date"
823
-
824
- # Build WHERE clause based on question
825
- where_clause = ""
826
-
827
- # Look for vendor filtering patterns
828
- vendor_patterns = [
829
- ('from', 'from'), # "invoices from Air Liquide"
830
- ('by', 'by'), # "invoices by Microsoft"
831
- ('for', 'for'), # "invoices for Apple"
832
- ]
833
-
834
- vendor_name = None
835
- for pattern, keyword in vendor_patterns:
836
- if keyword in question_lower:
837
- parts = question_lower.split(keyword)
838
- if len(parts) > 1:
839
- # Extract vendor name after the keyword
840
- vendor_part = parts[1].strip()
841
- # Remove common trailing words
842
- vendor_part = vendor_part.replace(' invoices', '').replace(' invoice', '').strip()
843
- # Take first few words as vendor name
844
- vendor_words = vendor_part.split()[:3] # Max 3 words for vendor name
845
- if vendor_words:
846
- vendor_name = ' '.join(vendor_words).strip('"\'.,?!')
847
- break
848
-
849
- # Also check for direct company name patterns
850
- if not vendor_name:
851
- # Look for patterns like "Air Liquide invoices" or "Microsoft invoices"
852
- # Match capitalized words that might be company names
853
- company_pattern = r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\s+invoices?'
854
- match = re.search(company_pattern, question)
855
- if match:
856
- vendor_name = match.group(1)
857
-
858
- if vendor_name:
859
- if 'vendor_name' in columns:
860
- where_clause = f"WHERE vendor_name ILIKE '%{vendor_name}%'"
861
- elif 'vendor' in columns:
862
- where_clause = f"WHERE vendor ILIKE '%{vendor_name}%'"
863
-
864
- # Build ORDER BY clause
865
- order_clause = ""
866
- limit_clause = ""
867
-
868
- if 'recent' in question_lower or 'latest' in question_lower or 'last' in question_lower:
869
- if 'invoice_date' in columns:
870
- order_clause = "ORDER BY invoice_date DESC"
871
- elif 'date' in columns:
872
- order_clause = "ORDER BY date DESC"
873
- elif 'created_at' in columns:
874
- order_clause = "ORDER BY created_at DESC"
875
-
876
- # Add LIMIT for recent queries if it's asking for specific invoices
877
- if 'invoice' in question_lower and select_clause == "*":
878
- # Extract number if specified
879
- numbers = re.findall(r'\d+', question)
880
- limit = numbers[0] if numbers else "1" # Default to 1 for "last invoice"
881
- limit_clause = f"LIMIT {limit}"
882
-
883
- elif 'first' in question_lower or 'earliest' in question_lower:
884
- if 'invoice_date' in columns:
885
- order_clause = "ORDER BY invoice_date ASC"
886
- elif 'date' in columns:
887
- order_clause = "ORDER BY date ASC"
888
- elif 'created_at' in columns:
889
- order_clause = "ORDER BY created_at ASC"
890
-
891
- # Add LIMIT for earliest queries if it's asking for specific invoices
892
- if 'invoice' in question_lower and select_clause == "*":
893
- numbers = re.findall(r'\d+', question)
894
- limit = numbers[0] if numbers else "1"
895
- limit_clause = f"LIMIT {limit}"
896
-
897
- elif re.search(r'\d+', question) and ('recent' in question_lower or 'latest' in question_lower or 'last' in question_lower):
898
- # Handle "Show me the 5 most recent invoices"
899
- if 'invoice_date' in columns:
900
- order_clause = "ORDER BY invoice_date DESC"
901
- numbers = re.findall(r'\d+', question)
902
- if numbers:
903
- limit_clause = f"LIMIT {numbers[0]}"
904
-
905
- # Construct the final SQL query
906
- sql_parts = [f"SELECT {select_clause}", f"FROM {table_name}"]
907
-
908
- if where_clause:
909
- sql_parts.append(where_clause)
910
-
911
- if order_clause:
912
- sql_parts.append(order_clause)
913
-
914
- if limit_clause:
915
- sql_parts.append(limit_clause)
916
-
917
- sql_query = " ".join(sql_parts)
918
-
919
- # Generate explanation based on schema context
920
- explanation_parts = [f"Generated SQL query for table '{table_name}' using pattern matching"]
921
- if columns:
922
- explanation_parts.append(f"Available columns: {', '.join(columns)}")
923
- if where_clause:
924
- explanation_parts.append("Applied filtering based on question context")
925
- if order_clause:
926
- explanation_parts.append("Added sorting and/or limiting based on question")
927
-
928
- explanation = ". ".join(explanation_parts)
929
-
930
- logger.info(f"Pattern matching generated SQL for question: '{question}' -> {sql_query}")
931
- logger.info(f"Used schema with columns: {columns}")
932
-
933
- return {
934
- "success": True,
935
- "data": {
936
- "question": question,
937
- "generated_sql": sql_query,
938
- "explanation": explanation,
939
- "confidence": "medium", # Lower confidence for pattern matching
940
- "method": "pattern_matching",
941
- "schema_used": {
942
- "table": table_name,
943
- "columns": columns,
944
- "column_types": column_types if 'column_types' in locals() else {}
945
- }
946
- }
947
- }
948
-
949
- async def health_check(self, request: web_request.Request) -> web.Response:
950
- """Health check endpoint"""
951
- return web.json_response({"status": "healthy", "service": "mcp-bridge"})
952
-
953
- async def get_schema(self, request: web_request.Request) -> web.Response:
954
- """Get database schema endpoint"""
955
- try:
956
- table_name = request.query.get('table', 'invoices')
957
- schema = await self.get_table_schema(table_name)
958
- return web.json_response({
959
- "success": True,
960
- "data": schema
961
- })
962
- except Exception as e:
963
- logger.error(f"Schema fetch failed: {str(e)}")
964
- return web.json_response({
965
- "success": False,
966
- "error": str(e)
967
- }, status=500)
968
-
969
- def create_app(self) -> web.Application:
970
- """Create aiohttp application"""
971
- app = web.Application()
972
-
973
- # Add routes
974
- app.router.add_post('/execute_tool', self.execute_tool)
975
- app.router.add_get('/health', self.health_check)
976
- app.router.add_get('/get_schema', self.get_schema)
977
-
978
- return app
979
-
980
- async def start(self, port: int = 8081):
981
- """Start the server"""
982
- app = self.create_app()
983
- runner = web.AppRunner(app)
984
- await runner.setup()
985
-
986
- site = web.TCPSite(runner, 'localhost', port)
987
- await site.start()
988
-
989
- logger.info(f"MCP Bridge Server started on http://localhost:{port}")
990
- logger.info(f"Available endpoints:")
991
- logger.info(f" POST /execute_tool - Execute MCP tools")
992
- logger.info(f" GET /health - Health check")
993
- logger.info(f" GET /get_schema - Get database schema")
994
-
995
- # Keep running
996
- try:
997
- await asyncio.Future() # Run forever
998
- except KeyboardInterrupt:
999
- logger.info("Shutting down server...")
1000
- finally:
1001
- await runner.cleanup()
1002
-
1003
- async def get_table_schema(self, table_name: str = "invoices") -> Dict[str, Any]:
1004
- """Dynamically fetch table schema from database"""
1005
- try:
1006
- conn = psycopg2.connect(self.postgres_url)
1007
- cursor = conn.cursor()
1008
-
1009
- # Get column information
1010
- query = """
1011
- SELECT column_name, data_type, is_nullable, column_default
1012
- FROM information_schema.columns
1013
- WHERE table_name = %s
1014
- ORDER BY ordinal_position
1015
- """
1016
- cursor.execute(query, (table_name,))
1017
- columns = cursor.fetchall()
1018
-
1019
- schema = {
1020
- "schema": {
1021
- table_name: {
1022
- "columns": [
1023
- {
1024
- "name": col[0],
1025
- "type": col[1],
1026
- "nullable": col[2] == "YES",
1027
- "default": col[3]
1028
- }
1029
- for col in columns
1030
- ]
1031
- }
1032
- }
1033
- }
1034
-
1035
- cursor.close()
1036
- conn.close()
1037
-
1038
- logger.info(f"Dynamically fetched schema for table '{table_name}' with {len(columns)} columns")
1039
- return schema
1040
-
1041
- except Exception as e:
1042
- logger.error(f"Failed to fetch schema for table '{table_name}': {str(e)}")
1043
- # Fallback to basic schema
1044
- return {
1045
- "schema": {
1046
- table_name: {
1047
- "columns": [
1048
- {"name": "id", "type": "integer"},
1049
- {"name": "vendor_name", "type": "character varying"},
1050
- {"name": "invoice_date", "type": "date"},
1051
- {"name": "total_amount", "type": "numeric"}
1052
- ]
1053
- }
1054
- }
1055
- }
1056
-
1057
- async def mock_invoice_generator(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
1058
- """Generate mock invoice data for demo purposes"""
1059
- try:
1060
- file_path = input_data.get('file', '')
1061
- schema = input_data.get('schema', {})
1062
-
1063
- # Extract invoice number from filename
1064
- import re
1065
- invoice_match = re.search(r'(\d+)', os.path.basename(file_path))
1066
- invoice_number = invoice_match.group(1) if invoice_match else "DEMO-001"
1067
-
1068
- # Generate realistic mock data
1069
- import random
1070
- vendors = [
1071
- "Air Liquide Canada Inc.",
1072
- "Propane Supply Co.",
1073
- "Gas Distribution Ltd.",
1074
- "Fuel Services Corp.",
1075
- "Energy Solutions Inc."
1076
- ]
1077
-
1078
- vendor = random.choice(vendors)
1079
- subtotal = round(random.uniform(200, 800), 2)
1080
- tax_rate = 0.13 # 13% tax
1081
- tax_amount = round(subtotal * tax_rate, 2)
1082
- total_amount = subtotal + tax_amount
1083
-
1084
- # Generate line items
1085
- line_items = [
1086
- {
1087
- "description": "PROPANE, C3H8, 33 1/3LB, (14KG / 30.8LB)",
1088
- "quantity": random.randint(20, 40),
1089
- "unit_price": round(random.uniform(12, 18), 2),
1090
- "amount": 0,
1091
- "main_product": True
1092
- },
1093
- {
1094
- "description": "EMPTY CYLINDER PROPANE, 30.8LB (14KG)",
1095
- "quantity": random.randint(20, 40),
1096
- "unit_price": 0.0,
1097
- "amount": 0.0,
1098
- "main_product": False
1099
- },
1100
- {
1101
- "description": "CHARGE, FUEL SURCHARGE",
1102
- "quantity": 1.0,
1103
- "unit_price": 0.0,
1104
- "amount": 0.0,
1105
- "main_product": False
1106
- }
1107
- ]
1108
-
1109
- # Calculate amounts
1110
- for item in line_items:
1111
- if item["main_product"]:
1112
- item["amount"] = round(item["quantity"] * item["unit_price"], 2)
1113
- subtotal = item["amount"]
1114
-
1115
- # Add carbon tax
1116
- carbon_tax = round(random.uniform(50, 150), 2)
1117
- line_items.append({
1118
- "description": "CHARGE, CARBON TAX PROPANE, ON, NB, SASK, MANITOBA, 33 1/3LB CYLINDER",
1119
- "quantity": line_items[0]["quantity"],
1120
- "unit_price": round(carbon_tax / line_items[0]["quantity"], 2),
1121
- "amount": carbon_tax,
1122
- "main_product": False
1123
- })
1124
-
1125
- total_amount = subtotal + carbon_tax + tax_amount
1126
-
1127
- invoice_data = {
1128
- "headerSection": {
1129
- "vendorName": vendor,
1130
- "subtotal": subtotal
1131
- },
1132
- "billingDetails": {
1133
- "invoiceNumber": invoice_number,
1134
- "invoiceDate": "2024-09-19"
1135
- },
1136
- "chargesSummary": {
1137
- "document_total": total_amount,
1138
- "secondary_tax": tax_amount,
1139
- "lineItemsBreakdown": line_items
1140
- }
1141
- }
1142
-
1143
- logger.info(f"Generated mock invoice data for: {file_path}")
1144
-
1145
- return {
1146
- "success": True,
1147
- "data": {
1148
- "file_path": file_path,
1149
- "extracted_data": invoice_data
1150
- }
1151
- }
1152
-
1153
- except Exception as e:
1154
- logger.error(f"Mock invoice generation failed: {str(e)}")
1155
- return {
1156
- "success": False,
1157
- "error": str(e)
1158
- }
1159
-
1160
- def main():
1161
- # Get configuration from environment
1162
- postgres_url = os.getenv('MCP_POSTGRES_URL', 'postgresql://memra:memra123@localhost:5432/memra_invoice_db')
1163
- bridge_secret = os.getenv('MCP_BRIDGE_SECRET', 'test-secret-for-development')
1164
- hf_api_key = os.getenv('HUGGINGFACE_API_KEY', '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')
1165
- hf_model = os.getenv('HUGGINGFACE_MODEL', 'meta-llama/Llama-3.1-8B-Instruct')
1166
-
1167
- logger.info(f"Starting MCP Bridge Server...")
1168
- logger.info(f"PostgreSQL URL: {postgres_url}")
1169
- logger.info(f"Bridge Secret: {'*' * len(bridge_secret)}")
1170
- logger.info(f"Hugging Face Model: {hf_model}")
1171
- logger.info(f"Hugging Face API Key: {'*' * (len(hf_api_key) - 8) + hf_api_key[-8:] if hf_api_key else 'Not set'}")
1172
-
1173
- # Create and start server
1174
- server = MCPBridgeServer(postgres_url, bridge_secret)
1175
- asyncio.run(server.start())
1176
-
1177
- if __name__ == '__main__':
1178
- main()