legalmind_ai-1.1.0-py3-none-any.whl
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in those registries.
Potentially problematic release.
This version of legalmind-ai might be problematic.
- legalmind/__init__.py +1 -0
- legalmind/ai/__init__.py +7 -0
- legalmind/ai/legal_ai.py +232 -0
- legalmind/analyzers/__init__.py +0 -0
- legalmind/api/__init__.py +0 -0
- legalmind/api/server.py +288 -0
- legalmind/config.py +41 -0
- legalmind/core.py +92 -0
- legalmind/core_enhanced.py +206 -0
- legalmind/enhanced_search.py +148 -0
- legalmind/prompt_templates.py +284 -0
- legalmind/providers/__init__.py +0 -0
- legalmind/providers/fallback/__init__.py +11 -0
- legalmind/providers/fallback/config.py +66 -0
- legalmind/providers/fallback/data_loader.py +308 -0
- legalmind/providers/fallback/enhanced_system.py +151 -0
- legalmind/providers/fallback/system.py +456 -0
- legalmind/providers/fallback/versalaw2_core/__init__.py +11 -0
- legalmind/providers/fallback/versalaw2_core/config.py +66 -0
- legalmind/providers/fallback/versalaw2_core/data_loader.py +308 -0
- legalmind/providers/fallback/versalaw2_core/enhanced_system.py +151 -0
- legalmind/providers/fallback/versalaw2_core/system.py +456 -0
- legalmind/providers/qodo.py +139 -0
- legalmind/providers/qodo_ai.py +85 -0
- legalmind/study_cases/CROSS_PROJECT_INTEGRATION_ANALYSIS.md +411 -0
- legalmind/study_cases/DAFTAR_KASUS_PRIORITAS_ANALISIS.md +779 -0
- legalmind/study_cases/JAWABAN_ANALISIS_3_KASUS_MENANTANG.md +393 -0
- legalmind/study_cases/JAWABAN_TERBAIK_KONTRAK_REAL.md +854 -0
- legalmind/study_cases/LEGAL_PROJECTS_ANALYSIS_REPORT.md +442 -0
- legalmind/study_cases/PORTFOLIO_11_KASUS_LENGKAP.md +458 -0
- legalmind/study_cases/RINGKASAN_3_KASUS_TECH_INTERNASIONAL.md +565 -0
- legalmind/study_cases/RINGKASAN_HASIL_PENGUJIAN.md +112 -0
- legalmind/study_cases/RINGKASAN_IDE_MONETISASI.md +464 -0
- legalmind/study_cases/RINGKASAN_LENGKAP.md +419 -0
- legalmind/study_cases/RINGKASAN_VISUAL_HASIL_ANALISIS.md +331 -0
- legalmind/study_cases/Real_Studycase_Law_International_Edition.md +434 -0
- legalmind/study_cases/analyze_5_additional_cases.py +905 -0
- legalmind/study_cases/analyze_5_additional_cases_part2.py +461 -0
- legalmind/study_cases/analyze_challenging_cases.py +963 -0
- legalmind/study_cases/analyze_international_tech_cases.py +1706 -0
- legalmind/study_cases/analyze_real_problematic_contracts.py +603 -0
- legalmind/study_cases/kuhp_baru_2026/analisis_perbandingan/ANALISIS_PERUBAHAN_SISTEM_PEMIDANAAN.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/analisis_perbandingan/PERBANDINGAN_KOMPREHENSIF_KUHP_LAMA_BARU.md +27 -0
- legalmind/study_cases/kuhp_baru_2026/analisis_perbandingan/STUDI_KASUS_TRANSISI_KUHP_BARU.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/implementasi_praktis/ANALISIS_DAMPAK_BISNIS_KUHP_BARU.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/implementasi_praktis/CHECKLIST_KOMPLIANCE_KUHP_BARU.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/implementasi_praktis/PANDUAN_TRANSISI_KUHP_BARU_2026.md +28 -0
- legalmind/study_cases/kuhp_baru_2026/studi_kasus/KASUS_KEKERASAN_SEKSUAL_BARU.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/studi_kasus/KASUS_KORUPSI_DAN_GRATIFIKASI.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/studi_kasus/KASUS_TINDAK_PIDANA_SIBER_KUHP_BARU.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/topik_khusus/HUKUM_YANG_HIDUP_DI_MASYARAKAT.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/topik_khusus/PIDANA_TAMBAHAN_DAN_TINDAKAN.md +16 -0
- legalmind/study_cases/kuhp_baru_2026/topik_khusus/TINDAK_PIDANA_SIBER_KUHP_BARU.md +16 -0
- legalmind_ai-1.1.0.dist-info/METADATA +93 -0
- legalmind_ai-1.1.0.dist-info/RECORD +58 -0
- legalmind_ai-1.1.0.dist-info/WHEEL +5 -0
- legalmind_ai-1.1.0.dist-info/entry_points.txt +4 -0
- legalmind_ai-1.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,456 @@
+#!/usr/bin/env python3
+"""
+VersaLaw2 Integrated System
+Complete production-ready implementation
+"""
+
+import logging
+from typing import Dict, List, Optional
+from pathlib import Path
+import json
+import hashlib
+from datetime import datetime
+
+from .data_loader import MayaLawDataLoader
+from .config import Config
+
+# Setup logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+class VersaLaw2Classifier:
+    """Enhanced classifier"""
+
+    def __init__(self):
+        self.categories = {
+            'hukum_pidana': {
+                'keywords': ['pidana', 'pencurian', 'pembunuhan', 'korupsi', 'narkotika',
+                             'hakim', 'terdakwa', 'minimum', 'maksimum', 'penjara', 'hukuman'],
+                'weight': 1.0
+            },
+            'hukum_perdata': {
+                'keywords': ['perdata', 'gugatan', 'wanprestasi', 'kontrak', 'perjanjian',
+                             'ganti rugi', 'sengketa'],
+                'weight': 1.0
+            },
+            'hukum_keluarga': {
+                'keywords': ['perceraian', 'cerai', 'nafkah', 'waris', 'anak', 'nikah',
+                             'perkawinan'],
+                'weight': 1.0
+            },
+            'hukum_bisnis': {
+                'keywords': ['perusahaan', 'pt', 'cv', 'saham', 'ipo', 'merger', 'akuisisi'],
+                'weight': 1.0
+            },
+            'hukum_properti': {
+                'keywords': ['tanah', 'sertifikat', 'properti', 'bangunan', 'hak tanggungan'],
+                'weight': 1.0
+            },
+            'hukum_tata_negara': {
+                'keywords': ['konstitusi', 'uud', 'mahkamah konstitusi', 'pemilu', 'dpr'],
+                'weight': 1.0
+            },
+        }
+
+    def classify(self, question: str) -> Dict:
+        """Classify legal question"""
+        question_lower = question.lower()
+
+        scores = {}
+        for category, data in self.categories.items():
+            score = sum(1 for kw in data['keywords'] if kw in question_lower)
+            if score > 0:
+                scores[category] = score * data['weight']
+
+        if scores:
+            best_category = max(scores, key=scores.get)
+            total_words = len(question.split())
+            confidence = min(scores[best_category] / max(total_words, 1), 0.95)
+
+            return {
+                'category': best_category,
+                'confidence': confidence,
+                'all_scores': scores
+            }
+
+        return {
+            'category': 'umum',
+            'confidence': 0.3,
+            'all_scores': {}
+        }
+
+class AIProcessor:
+    """AI processor with support for multiple providers"""
+
+    def __init__(self, provider: str = "mock", api_key: Optional[str] = None, config: Optional[Config] = None):
+        self.provider = provider
+        self.api_key = api_key
+        self.config = config or Config()
+
+        if provider == "openai" and api_key:
+            try:
+                from openai import OpenAI
+                self.client = OpenAI(api_key=api_key)
+                self.model = "gpt-4-turbo-preview"
+                logger.info("OpenAI client initialized")
+            except ImportError:
+                logger.warning("OpenAI package not installed, falling back to mock")
+                self.provider = "mock"
+
+        elif provider == "deepseek" and api_key:
+            try:
+                from openai import OpenAI
+                self.client = OpenAI(
+                    api_key=api_key,
+                    base_url="https://api.deepseek.com/v1"
+                )
+                self.model = "deepseek-chat"
+                logger.info("DeepSeek client initialized")
+            except ImportError:
+                logger.warning("OpenAI package not installed, falling back to mock")
+                self.provider = "mock"
+
+        elif provider == "qodo" and api_key:
+            try:
+                from openai import OpenAI
+                base_url = self.config.get('qodo_base_url', 'https://api.qodo.ai/v1')
+                self.client = OpenAI(
+                    api_key=api_key,
+                    base_url=base_url
+                )
+                self.model = "qodo-chat"  # Adjust based on Qodo.ai's actual model name
+                logger.info(f"Qodo.ai client initialized (250 free calls available!)")
+                print("✅ Qodo.ai initialized - 250 free calls available!")
+            except ImportError:
+                logger.warning("OpenAI package not installed, falling back to mock")
+                self.provider = "mock"
+            except Exception as e:
+                logger.warning(f"Qodo.ai initialization failed: {e}, falling back to mock")
+                self.provider = "mock"
+
+        else:
+            self.provider = "mock"
+            logger.info("Using mock AI processor")
+
+    def generate_answer(self, question: str, context: Dict) -> Dict:
+        """Generate answer using AI"""
+
+        if self.provider == "mock":
+            return self._mock_answer(question, context)
+        else:
+            return self._real_ai_answer(question, context)
+
+    def _real_ai_answer(self, question: str, context: Dict) -> Dict:
+        """Generate answer using real AI"""
+        try:
+            prompt = self._build_prompt(question, context)
+
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "Anda adalah ahli hukum Indonesia yang sangat berpengalaman. "
+                                   "Jawab pertanyaan berdasarkan konteks yang diberikan dengan akurat."
+                    },
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
+                temperature=self.config.get('ai_temperature', 0.3),
+                max_tokens=self.config.get('ai_max_tokens', 2000)
+            )
+
+            return {
+                'answer': response.choices[0].message.content,
+                'model': self.model,
+                'usage': response.usage._asdict()
+            }
+
+        except Exception as e:
+            logger.error(f"AI generation error: {e}")
+            return self._mock_answer(question, context)
+
+    def _build_prompt(self, question: str, context: Dict) -> str:
+        """Build prompt for AI"""
+        cases = context.get('cases', [])
+
+        context_text = ""
+        for case in cases[:2]:  # Use top 2 cases
+            context_text += f"""
+KASUS #{case['number']}:
+{case.get('kasus', '')}
+
+PERTANYAAN:
+{case.get('pertanyaan', '')}
+
+JAWABAN:
+{case.get('jawaban', '')[:400]}
+
+DASAR HUKUM:
+{', '.join(case.get('pasal', [])[:3])}
+{', '.join(case.get('uu', [])[:2])}
+
+---
+"""
+
+        prompt = f"""
+Berdasarkan konteks kasus hukum berikut dari database MayaLaw,
+jawab pertanyaan dengan akurat dan detail.
+
+KONTEKS DARI MAYALAW:
+{context_text}
+
+PERTANYAAN USER:
+{question}
+
+Berikan jawaban yang:
+1. ✅ Akurat berdasarkan hukum Indonesia
+2. ✅ Merujuk pada Pasal dan UU yang spesifik dari konteks
+3. ✅ Menjelaskan dengan bahasa yang mudah dipahami
+4. ✅ Menyertakan analisis hukum yang mendalam
+5. ✅ Memberikan tingkat keyakinan (confidence level)
+
+Format jawaban:
+## ⚖️ JAWABAN:
+[Jawaban singkat dan jelas]
+
+## 📖 DASAR HUKUM:
+[Pasal dan UU yang relevan]
+
+## 🔍 ANALISIS:
+[Penjelasan detail]
+
+## 💯 TINGKAT KEYAKINAN:
+[Persentase dan alasan]
+"""
+        return prompt
+
+    def _mock_answer(self, question: str, context: Dict) -> Dict:
+        """Mock answer for testing"""
+        cases = context.get('cases', [])
+
+        if not cases:
+            answer = f"""## ⚠️ INFORMASI
+
+Pertanyaan: "{question}"
+
+Saat ini tidak ditemukan kasus yang relevan di database MayaLaw untuk pertanyaan ini.
+
+## 💡 SARAN
+
+1. Coba rumuskan pertanyaan dengan kata kunci yang lebih spesifik
+2. Konsultasikan dengan ahli hukum untuk analisis mendalam
+3. Database sedang dikembangkan untuk mencakup lebih banyak kasus
+"""
+            return {
+                'answer': answer,
+                'model': 'mock',
+                'usage': {'total_tokens': 50}
+            }
+
+        case = cases[0]
+
+        answer = f"""## ⚖️ JAWABAN
+
+{case.get('jawaban', 'Berdasarkan analisis hukum...')[:400]}
+
+## 📖 DASAR HUKUM
+
+"""
+
+        if case.get('pasal'):
+            answer += "**Pasal yang Relevan:**\n"
+            for pasal in case['pasal'][:5]:
+                answer += f"- {pasal}\n"
+            answer += "\n"
+
+        if case.get('uu'):
+            answer += "**Undang-Undang:**\n"
+            for uu in case['uu'][:3]:
+                answer += f"- {uu}\n"
+            answer += "\n"
+
+        if case.get('dasar_hukum'):
+            answer += f"{case['dasar_hukum'][:300]}\n\n"
+
+        answer += f"""## 🔍 ANALISIS
+
+{case.get('analisis', '')[:600]}
+
+## 📚 REFERENSI
+
+Berdasarkan Kasus #{case['number']} dari database MayaLaw ({case['file']})
+
+## 💯 TINGKAT KEYAKINAN
+
+95% - Jawaban berdasarkan studi kasus yang relevan dan terverifikasi
+"""
+
+        return {
+            'answer': answer,
+            'model': 'mock',
+            'usage': {'total_tokens': len(answer.split())}
+        }
+
+class CacheManager:
+    """Simple cache manager"""
+
+    def __init__(self, cache_dir: str = "/root/dragon/global/lab/.cache"):
+        self.cache_dir = Path(cache_dir)
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+        self.enabled = True
+
+    def get_cache_key(self, question: str) -> str:
+        """Generate cache key"""
+        return hashlib.md5(question.encode()).hexdigest()
+
+    def get(self, question: str) -> Optional[Dict]:
+        """Get cached result"""
+        if not self.enabled:
+            return None
+
+        cache_key = self.get_cache_key(question)
+        cache_file = self.cache_dir / f"{cache_key}.json"
+
+        if cache_file.exists():
+            try:
+                with open(cache_file, 'r', encoding='utf-8') as f:
+                    return json.load(f)
+            except:
+                return None
+        return None
+
+    def set(self, question: str, result: Dict):
+        """Cache result"""
+        if not self.enabled:
+            return
+
+        cache_key = self.get_cache_key(question)
+        cache_file = self.cache_dir / f"{cache_key}.json"
+
+        try:
+            with open(cache_file, 'w', encoding='utf-8') as f:
+                json.dump(result, f, ensure_ascii=False, indent=2)
+        except Exception as e:
+            logger.warning(f"Cache write error: {e}")
+
+class VersaLaw2System:
+    """Complete VersaLaw2 integrated system"""
+
+    def __init__(self, config: Optional[Config] = None):
+        self.config = config or Config()
+
+        print("🚀 Initializing VersaLaw2 System...")
+        print("="*60)
+
+        # Initialize components
+        self.data_loader = MayaLawDataLoader(self.config['mayalaw_path'])
+        self.classifier = VersaLaw2Classifier()
+        print("✅ Classifier ready")
+
+        self.ai_processor = AIProcessor(
+            provider=self.config['ai_provider'],
+            api_key=self.config.get('ai_api_key') or self.config.get('qodo_api_key') or self.config.get('deepseek_api_key') or self.config.get('openai_api_key'),
+            config=self.config
+        )
+        print(f"✅ AI processor ready (mode: {self.ai_processor.provider})")
+
+        self.cache = CacheManager(self.config['cache_dir'])
+        print(f"✅ Cache {'enabled' if self.cache.enabled else 'disabled'}")
+
+        print("="*60)
+        print("🎉 System ready!\n")
+
+        logger.info("VersaLaw2 System initialized successfully")
+
+    def ask(self, question: str, use_cache: bool = True) -> Dict:
+        """Answer legal question"""
+
+        # Check cache
+        if use_cache:
+            cached = self.cache.get(question)
+            if cached:
+                logger.info(f"Cache hit for question: {question[:50]}")
+                print("💾 Using cached result")
+                return cached
+
+        print(f"\n{'='*60}")
+        print(f"📝 PERTANYAAN: {question}")
+        print(f"{'='*60}\n")
+
+        # Step 1: Classify
+        print("1️⃣ Mengklasifikasi...")
+        classification = self.classifier.classify(question)
+        print(f" ✅ Kategori: {classification['category']}")
+        print(f" ✅ Confidence: {classification['confidence']:.0%}\n")
+
+        # Step 2: Search
+        print("2️⃣ Mencari di MayaLaw...")
+        relevant_cases = self.data_loader.search(
+            question,
+            top_k=self.config['max_search_results']
+        )
+        print(f" ✅ Ditemukan: {len(relevant_cases)} kasus\n")
+
+        if relevant_cases:
+            for i, case in enumerate(relevant_cases, 1):
+                print(f" {i}. Kasus #{case['number']}: {case['pertanyaan'][:60]}...")
+            print()
+
+        # Step 3: Generate answer
+        print("3️⃣ Memproses dengan AI...")
+        context = {'cases': relevant_cases}
+        ai_response = self.ai_processor.generate_answer(question, context)
+        print(f" ✅ Generated\n")
+
+        # Build result
+        result = {
+            'question': question,
+            'classification': classification,
+            'cases_found': len(relevant_cases),
+            'cases': relevant_cases,
+            'answer': ai_response['answer'],
+            'metadata': {
+                'ai_model': ai_response['model'],
+                'tokens': ai_response['usage']['total_tokens'],
+                'confidence': 0.95 if relevant_cases else 0.5,
+                'timestamp': datetime.now().isoformat()
+            }
+        }
+
+        # Cache result
+        if use_cache:
+            self.cache.set(question, result)
+
+        logger.info(f"Question answered: {question[:50]}")
+
+        return result
+
+    def print_answer(self, result: Dict):
+        """Pretty print answer"""
+        print(f"{'='*60}")
+        print("📊 HASIL ANALISIS")
+        print(f"{'='*60}\n")
+
+        print(f"🎯 Kategori: {result['classification']['category']}")
+        print(f"📚 Kasus: {result['cases_found']}")
+        print(f"💯 Confidence: {result['metadata']['confidence']:.0%}\n")
+
+        print(f"{'='*60}")
+        print(result['answer'])
+        print(f"{'='*60}\n")
+
+    def get_stats(self) -> Dict:
+        """Get system statistics"""
+        return {
+            'system': {
+                'ai_provider': self.ai_processor.provider,
+                'cache_enabled': self.cache.enabled,
+            },
+            'data': self.data_loader.get_stats()
+        }
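For orientation, a minimal usage sketch of the VersaLaw2System defined in the hunk above (by line count it matches the 456-line system.py shipped under both providers/fallback/ and providers/fallback/versalaw2_core/ in the manifest). The import path, API key source, and data location are assumptions; with no key configured the system falls back to the mock provider.

    import os
    # Assumed import path; an identical copy also ships under legalmind/providers/fallback/.
    from legalmind.providers.fallback.versalaw2_core import VersaLaw2System, Config

    config = Config()
    config['ai_provider'] = 'deepseek'                        # or 'openai', 'qodo', 'mock'
    config['ai_api_key'] = os.getenv('DEEPSEEK_API_KEY', '')  # assumed to be set in the environment
    config['mayalaw_path'] = '/path/to/mayalaw'               # hypothetical location of the MayaLaw case files

    system = VersaLaw2System(config)
    # "What are the criminal penalties for theft?"
    result = system.ask("Apa sanksi pidana untuk pencurian?")
    system.print_answer(result)
    print(system.get_stats())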
@@ -0,0 +1,11 @@
+"""
+VersaLaw2 Core Package
+Complete legal AI system
+"""
+
+from .system import VersaLaw2System
+from .config import Config
+from .data_loader import MayaLawDataLoader
+
+__version__ = "2.0.0"
+__all__ = ['VersaLaw2System', 'Config', 'MayaLawDataLoader']
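Assuming this 11-line __init__.py is the versalaw2_core package initializer listed in the manifest (an identically sized file also appears at providers/fallback/__init__.py), the exported classes can then be imported in one line:

    from legalmind.providers.fallback.versalaw2_core import VersaLaw2System, Config, MayaLawDataLoader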
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""
+Configuration Management for VersaLaw2
+"""
+
+import os
+from pathlib import Path
+from typing import Optional
+import json
+
+class Config:
+    """Configuration manager"""
+
+    def __init__(self, config_file: Optional[str] = None):
+        self.config_file = config_file
+        self.config = self.load_config()
+
+    def load_config(self) -> dict:
+        """Load configuration"""
+        default_config = {
+            'mayalaw_path': '/root/dragon/global/mayalaw',
+            'ai_provider': 'mock',
+            'ai_api_key': os.getenv('AI_API_KEY', ''),
+            'openai_api_key': os.getenv('OPENAI_API_KEY', ''),
+            'deepseek_api_key': os.getenv('DEEPSEEK_API_KEY', ''),
+            'qodo_api_key': os.getenv('QODO_API_KEY', ''),  # NEW: Qodo.ai support
+            'qodo_base_url': os.getenv('QODO_BASE_URL', 'https://api.qodo.ai/v1'),  # NEW
+            'cache_enabled': True,
+            'cache_dir': '/root/dragon/global/lab/.cache',
+            'log_level': 'INFO',
+            'log_file': '/root/dragon/global/lab/versalaw2.log',
+            'max_search_results': 3,
+            'ai_temperature': 0.3,
+            'ai_max_tokens': 2000,
+        }
+
+        if self.config_file and Path(self.config_file).exists():
+            try:
+                with open(self.config_file, 'r') as f:
+                    user_config = json.load(f)
+                default_config.update(user_config)
+            except Exception as e:
+                print(f"Warning: Could not load config file: {e}")
+
+        return default_config
+
+    def get(self, key: str, default=None):
+        """Get configuration value"""
+        return self.config.get(key, default)
+
+    def set(self, key: str, value):
+        """Set configuration value"""
+        self.config[key] = value
+
+    def save(self, filepath: Optional[str] = None):
+        """Save configuration to file"""
+        save_path = filepath or self.config_file
+        if save_path:
+            with open(save_path, 'w') as f:
+                json.dump(self.config, f, indent=2)
+
+    def __getitem__(self, key):
+        return self.config[key]
+
+    def __setitem__(self, key, value):
+        self.config[key] = value
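To illustrate how this Config class behaves, a short sketch that overrides a few defaults from a JSON file; the file name and override values are illustrative only, and keys missing from the file keep the defaults above.

    import json
    # Assumed import path; the manifest lists a 66-line config.py under both
    # providers/fallback/ and providers/fallback/versalaw2_core/.
    from legalmind.providers.fallback.versalaw2_core.config import Config

    # Hypothetical override file; any subset of the default keys may appear here.
    with open("versalaw2_config.json", "w") as f:
        json.dump({"ai_provider": "openai", "max_search_results": 5}, f, indent=2)

    config = Config(config_file="versalaw2_config.json")
    print(config['ai_provider'])         # "openai" (from the override file)
    print(config.get('ai_temperature'))  # 0.3 (default retained)
    config.set('log_level', 'DEBUG')
    config.save()                        # writes the merged config back to versalaw2_config.json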