hdsp_jupyter_extension-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/__init__.py +8 -0
- agent_server/core/__init__.py +92 -0
- agent_server/core/api_key_manager.py +427 -0
- agent_server/core/code_validator.py +1238 -0
- agent_server/core/context_condenser.py +308 -0
- agent_server/core/embedding_service.py +254 -0
- agent_server/core/error_classifier.py +577 -0
- agent_server/core/llm_client.py +95 -0
- agent_server/core/llm_service.py +649 -0
- agent_server/core/notebook_generator.py +274 -0
- agent_server/core/prompt_builder.py +35 -0
- agent_server/core/rag_manager.py +742 -0
- agent_server/core/reflection_engine.py +489 -0
- agent_server/core/retriever.py +248 -0
- agent_server/core/state_verifier.py +452 -0
- agent_server/core/summary_generator.py +484 -0
- agent_server/core/task_manager.py +198 -0
- agent_server/knowledge/__init__.py +9 -0
- agent_server/knowledge/watchdog_service.py +352 -0
- agent_server/main.py +160 -0
- agent_server/prompts/__init__.py +60 -0
- agent_server/prompts/file_action_prompts.py +113 -0
- agent_server/routers/__init__.py +9 -0
- agent_server/routers/agent.py +591 -0
- agent_server/routers/chat.py +188 -0
- agent_server/routers/config.py +100 -0
- agent_server/routers/file_resolver.py +293 -0
- agent_server/routers/health.py +42 -0
- agent_server/routers/rag.py +163 -0
- agent_server/schemas/__init__.py +60 -0
- hdsp_agent_core/__init__.py +158 -0
- hdsp_agent_core/factory.py +252 -0
- hdsp_agent_core/interfaces.py +203 -0
- hdsp_agent_core/knowledge/__init__.py +31 -0
- hdsp_agent_core/knowledge/chunking.py +356 -0
- hdsp_agent_core/knowledge/libraries/dask.md +188 -0
- hdsp_agent_core/knowledge/libraries/matplotlib.md +164 -0
- hdsp_agent_core/knowledge/libraries/polars.md +68 -0
- hdsp_agent_core/knowledge/loader.py +337 -0
- hdsp_agent_core/llm/__init__.py +13 -0
- hdsp_agent_core/llm/service.py +556 -0
- hdsp_agent_core/managers/__init__.py +22 -0
- hdsp_agent_core/managers/config_manager.py +133 -0
- hdsp_agent_core/managers/session_manager.py +251 -0
- hdsp_agent_core/models/__init__.py +115 -0
- hdsp_agent_core/models/agent.py +316 -0
- hdsp_agent_core/models/chat.py +41 -0
- hdsp_agent_core/models/common.py +95 -0
- hdsp_agent_core/models/rag.py +368 -0
- hdsp_agent_core/prompts/__init__.py +63 -0
- hdsp_agent_core/prompts/auto_agent_prompts.py +1260 -0
- hdsp_agent_core/prompts/cell_action_prompts.py +98 -0
- hdsp_agent_core/services/__init__.py +18 -0
- hdsp_agent_core/services/agent_service.py +438 -0
- hdsp_agent_core/services/chat_service.py +205 -0
- hdsp_agent_core/services/rag_service.py +262 -0
- hdsp_agent_core/tests/__init__.py +1 -0
- hdsp_agent_core/tests/conftest.py +102 -0
- hdsp_agent_core/tests/test_factory.py +251 -0
- hdsp_agent_core/tests/test_services.py +326 -0
- hdsp_jupyter_extension-2.0.0.data/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +7 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/build_log.json +738 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/install.json +5 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/package.json +134 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js +4369 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js +12496 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +94 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +94 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js +623 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/style.js +4 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +507 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +2071 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +1059 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +376 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +60336 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +7132 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.dist-info/METADATA +152 -0
- hdsp_jupyter_extension-2.0.0.dist-info/RECORD +121 -0
- hdsp_jupyter_extension-2.0.0.dist-info/WHEEL +4 -0
- hdsp_jupyter_extension-2.0.0.dist-info/licenses/LICENSE +21 -0
- jupyter_ext/__init__.py +233 -0
- jupyter_ext/_version.py +4 -0
- jupyter_ext/config.py +111 -0
- jupyter_ext/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +7 -0
- jupyter_ext/handlers.py +632 -0
- jupyter_ext/labextension/build_log.json +738 -0
- jupyter_ext/labextension/package.json +134 -0
- jupyter_ext/labextension/static/frontend_styles_index_js.2607ff74c74acfa83158.js +4369 -0
- jupyter_ext/labextension/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.622c1a5918b3aafb2315.js +12496 -0
- jupyter_ext/labextension/static/lib_index_js.622c1a5918b3aafb2315.js.map +1 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +94 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +1 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +94 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +1 -0
- jupyter_ext/labextension/static/remoteEntry.dae97cde171e13b8c834.js +623 -0
- jupyter_ext/labextension/static/remoteEntry.dae97cde171e13b8c834.js.map +1 -0
- jupyter_ext/labextension/static/style.js +4 -0
- jupyter_ext/labextension/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +507 -0
- jupyter_ext/labextension/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +2071 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +1059 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +376 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +60336 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +7132 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
agent_server/core/error_classifier.py
@@ -0,0 +1,577 @@
"""
Error Classifier - deterministic error classification and replan decisions
Decides refine/insert_steps/replace_step/replan_remaining from the error type, without calling an LLM
Token savings: ~1,000-2,000 tokens per session

LLM fallback conditions:
1. REFINE failed 2 or more times on the same error
2. Unknown error type not covered by the pattern mapping
3. Complex error (2 or more exceptions in the traceback)
"""

import json
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional

from hdsp_agent_core.prompts.auto_agent_prompts import PIP_INDEX_OPTION


class ReplanDecision(Enum):
    """Replan decision type"""

    REFINE = "refine"  # fix only the code, keeping the same approach
    INSERT_STEPS = "insert_steps"  # add prerequisite steps (package installation, etc.)
    REPLACE_STEP = "replace_step"  # replace with a completely different approach
    REPLAN_REMAINING = "replan_remaining"  # replan all remaining steps


@dataclass
class ErrorAnalysis:
    """Error analysis result"""

    decision: ReplanDecision
    root_cause: str
    reasoning: str
    missing_package: Optional[str] = None
    changes: Dict[str, Any] = field(default_factory=dict)
    used_llm: bool = False  # whether the LLM fallback was used
    confidence: float = 1.0  # analysis confidence (pattern matching = 1.0, LLM = 0.0-1.0)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a dictionary for API responses"""
        return {
            "analysis": {
                "root_cause": self.root_cause,
                "is_approach_problem": self.decision
                in (ReplanDecision.REPLACE_STEP, ReplanDecision.REPLAN_REMAINING),
                "missing_prerequisites": [self.missing_package]
                if self.missing_package
                else [],
            },
            "decision": self.decision.value,
            "reasoning": self.reasoning,
            "changes": self.changes,
            "usedLlm": self.used_llm,
            "confidence": self.confidence,
        }


class ErrorClassifier:
    """
    Deterministic error classifier (no LLM calls)
    Makes the replan decision automatically based on the error type

    Rules:
    - ModuleNotFoundError/ImportError → always INSERT_STEPS (pip install)
    - SyntaxError/TypeError/ValueError, etc. → REFINE (fix the code)
    - FileNotFoundError → REFINE (try fixing the path)
    """

    # Package name alias mapping (import name → pip package name)
    PACKAGE_ALIASES: Dict[str, str] = {
        "sklearn": "scikit-learn",
        "cv2": "opencv-python",
        "PIL": "pillow",
        "yaml": "pyyaml",
        "bs4": "beautifulsoup4",
        "skimage": "scikit-image",
        "dotenv": "python-dotenv",
        "dateutil": "python-dateutil",
    }

    # Decision mapping per error type
    ERROR_DECISION_MAP: Dict[str, ReplanDecision] = {
        # INSERT_STEPS: package installation required
        "ModuleNotFoundError": ReplanDecision.INSERT_STEPS,
        "ImportError": ReplanDecision.INSERT_STEPS,
        # REFINE: resolvable by editing the code
        "SyntaxError": ReplanDecision.REFINE,
        "TypeError": ReplanDecision.REFINE,
        "ValueError": ReplanDecision.REFINE,
        "KeyError": ReplanDecision.REFINE,
        "IndexError": ReplanDecision.REFINE,
        "AttributeError": ReplanDecision.REFINE,
        "NameError": ReplanDecision.REFINE,
        "ZeroDivisionError": ReplanDecision.REFINE,
        "FileNotFoundError": ReplanDecision.REFINE,
        "PermissionError": ReplanDecision.REFINE,
        "RuntimeError": ReplanDecision.REFINE,
        "AssertionError": ReplanDecision.REFINE,
        "StopIteration": ReplanDecision.REFINE,
        "RecursionError": ReplanDecision.REFINE,
        "MemoryError": ReplanDecision.REFINE,
        "OverflowError": ReplanDecision.REFINE,
        "FloatingPointError": ReplanDecision.REFINE,
        "UnicodeError": ReplanDecision.REFINE,
        "UnicodeDecodeError": ReplanDecision.REFINE,
        "UnicodeEncodeError": ReplanDecision.REFINE,
        "OSError": ReplanDecision.REFINE,  # default; dlopen errors are handled separately
    }

    # dlopen error patterns (missing system library)
    DLOPEN_ERROR_PATTERNS = [
        r"dlopen\([^)]+\).*Library not loaded.*?(\w+\.dylib)",  # macOS
        r"cannot open shared object file.*?lib(\w+)\.so",  # Linux
        r"DLL load failed.*?(\w+\.dll)",  # Windows
    ]

    # ModuleNotFoundError extraction patterns
    MODULE_ERROR_PATTERNS = [
        r"ModuleNotFoundError: No module named ['\"]([^'\"]+)['\"]",
        r"ImportError: No module named ['\"]([^'\"]+)['\"]",
        r"ImportError: cannot import name ['\"]([^'\"]+)['\"]",
        r"No module named ['\"]([^'\"]+)['\"]",
    ]

    def __init__(self, pip_index_option: str = None):
        """
        Args:
            pip_index_option: index option to use for pip install (environment-specific)
        """
        self.pip_index_option = pip_index_option or PIP_INDEX_OPTION

    def classify(
        self,
        error_type: str,
        error_message: str,
        traceback: str = "",
        installed_packages: List[str] = None,
    ) -> ErrorAnalysis:
        """
        Classify the error and return a replan decision

        Args:
            error_type: error type (e.g. 'ModuleNotFoundError')
            error_message: error message
            traceback: stack trace
            installed_packages: list of installed packages

        Returns:
            ErrorAnalysis: error analysis result and replan decision
        """
        installed_packages = installed_packages or []
        installed_lower = {pkg.lower() for pkg in installed_packages}

        # Step 0: for generic types ('runtime', etc.), extract the actual error from the traceback
        if error_type in ("runtime", "timeout", "safety", "validation", "environment"):
            actual_error_type = self._extract_error_type_from_traceback(
                traceback, error_message
            )
            if actual_error_type:
                error_type = actual_error_type

        # Step 1: normalize the error type
        error_type_normalized = self._normalize_error_type(error_type)

        # Step 2: special handling for ModuleNotFoundError/ImportError
        if error_type_normalized in ("ModuleNotFoundError", "ImportError"):
            return self._handle_module_error(error_message, traceback, installed_lower)

        # Step 2.5: special handling for dlopen errors raised as OSError
        if error_type_normalized == "OSError":
            return self._handle_os_error(error_message, traceback)

        # Step 3: decision based on the error type
        decision = self.ERROR_DECISION_MAP.get(
            error_type_normalized,
            ReplanDecision.REFINE,  # default: REFINE
        )

        return ErrorAnalysis(
            decision=decision,
            root_cause=self._get_error_description(
                error_type_normalized, error_message
            ),
            reasoning=f"{error_type_normalized} can be resolved by fixing the code.",
            changes={"refined_code": None},  # the LLM generates the code
        )

    def _normalize_error_type(self, error_type: str) -> str:
        """Normalize the error type"""
        if not error_type:
            return "RuntimeError"

        # Extract only the type from a 'ModuleNotFoundError: ...' style string
        if ":" in error_type:
            error_type = error_type.split(":")[0].strip()

        # Extract only the class name from a dotted path (e.g. 'builtins.ValueError' → 'ValueError')
        if "." in error_type:
            error_type = error_type.split(".")[-1]

        return error_type

    def _extract_error_type_from_traceback(
        self, traceback: str, error_message: str
    ) -> Optional[str]:
        """
        Extract the actual Python error type from the traceback

        When the frontend sends error.type as 'runtime', look up the actual
        error type (ModuleNotFoundError, ImportError, etc.) in the traceback

        Args:
            traceback: stack trace string
            error_message: error message

        Returns:
            The extracted error type (e.g. 'ModuleNotFoundError'), or None
        """
        if not traceback:
            return None

        # Extract the error type from the last lines of the traceback
        # Format: "ModuleNotFoundError: No module named 'matplotlib'"
        lines = traceback.strip().split("\n")

        # Search for the error-type line from the end
        for line in reversed(lines):
            line = line.strip()

            # Strip ANSI color codes
            line = re.sub(r"\x1b\[[0-9;]*m", "", line)

            # Match the Python error-type pattern
            # e.g. "ModuleNotFoundError: ..." or "ModuleNotFoundError Traceback..."
            error_pattern = r"^([A-Z][a-zA-Z0-9]*Error|[A-Z][a-zA-Z0-9]*Exception)[\s:]"
            match = re.match(error_pattern, line)

            if match:
                return match.group(1)

        return None

    def _handle_module_error(
        self, error_message: str, traceback: str, installed_packages: set
    ) -> ErrorAnalysis:
        """
        Handle ModuleNotFoundError/ImportError

        CRITICAL: extract the package name from the error message (not from user code!)
        """
        full_text = f"{error_message}\n{traceback}"

        # Extract the package name
        missing_pkg = self._extract_missing_package(full_text)

        if not missing_pkg:
            # Fall back to REFINE when no package name could be found
            return ErrorAnalysis(
                decision=ReplanDecision.REFINE,
                root_cause="Import error occurred, failed to extract the package name",
                reasoning="The package cannot be identified, so a code fix is attempted",
                changes={"refined_code": None},
            )

        # Convert to the pip package name
        pip_pkg = self._get_pip_package_name(missing_pkg)

        # Check whether the package is already installed
        if pip_pkg.lower() in installed_packages:
            # The package is installed but the import failed → code problem
            return ErrorAnalysis(
                decision=ReplanDecision.REFINE,
                root_cause=f"'{missing_pkg}' import failed (package is already installed)",
                reasoning="The package is installed, so the import statement or the code needs fixing",
                changes={"refined_code": None},
            )

        # Generate the pip install code
        pip_command = self._generate_pip_install(pip_pkg)

        return ErrorAnalysis(
            decision=ReplanDecision.INSERT_STEPS,
            root_cause=f"The '{missing_pkg}' module is not installed",
            reasoning="ModuleNotFoundError is always resolved by installing the package.",
            missing_package=pip_pkg,
            changes={
                "new_steps": [
                    {
                        "description": f"Install the {pip_pkg} package",
                        "toolCalls": [
                            {
                                "tool": "jupyter_cell",
                                "parameters": {"code": pip_command},
                            }
                        ],
                    }
                ]
            },
        )

    def _handle_os_error(
        self,
        error_message: str,
        traceback: str,
    ) -> ErrorAnalysis:
        """
        Handle OSError - detect dlopen errors
        """
        full_text = f"{error_message}\n{traceback}"

        # Check the dlopen error patterns
        for pattern in self.DLOPEN_ERROR_PATTERNS:
            match = re.search(pattern, full_text, re.IGNORECASE | re.DOTALL)
            if match:
                missing_lib = match.group(1) if match.groups() else "unknown"
                return ErrorAnalysis(
                    decision=ReplanDecision.REPLAN_REMAINING,
                    root_cause=f"Missing system library: {missing_lib}",
                    reasoning="A dlopen error is a system library problem. It cannot be fixed with pip; it must be installed via the system package manager (brew/apt).",
                    changes={"system_dependency": missing_lib},
                )

        # A generic OSError is handled with REFINE
        return ErrorAnalysis(
            decision=ReplanDecision.REFINE,
            root_cause=f"OSError: {error_message[:150]}",
            reasoning="A generic OSError is addressed by attempting a code fix.",
            changes={"refined_code": None},
        )

    def _extract_missing_package(self, text: str) -> Optional[str]:
        """Extract the missing package name from the error message"""
        for pattern in self.MODULE_ERROR_PATTERNS:
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                pkg = match.group(1)
                # Return only the top-level package (e.g. 'pyarrow.lib' → 'pyarrow')
                return pkg.split(".")[0]
        return None

    def _get_pip_package_name(self, import_name: str) -> str:
        """Convert an import name to its pip package name"""
        return self.PACKAGE_ALIASES.get(import_name, import_name)

    def _generate_pip_install(self, package: str) -> str:
        """Generate the pip install command"""
        if self.pip_index_option:
            return f"!pip install {self.pip_index_option} --timeout 180 {package}"
        return f"!pip install --timeout 180 {package}"

    def _get_error_description(self, error_type: str, error_msg: str) -> str:
        """Generate a description for each error type"""
        descriptions = {
            "SyntaxError": "syntax error",
            "TypeError": "type mismatch",
            "ValueError": "invalid value",
            "KeyError": "missing dictionary/DataFrame key",
            "IndexError": "index out of range",
            "AttributeError": "missing attribute/method",
            "NameError": "undefined variable",
            "FileNotFoundError": "file not found",
            "ZeroDivisionError": "division by zero",
            "PermissionError": "permission denied",
            "RuntimeError": "runtime error",
            "MemoryError": "out of memory",
        }
        base = descriptions.get(error_type, error_type)
        # Keep only the key part of the error message (150-character limit)
        msg_preview = error_msg[:150] if error_msg else ""
        return f"{base}: {msg_preview}"

    # =========================================================================
    # LLM fallback methods
    # =========================================================================

    def _count_exceptions_in_traceback(self, traceback: str) -> int:
        """Count the number of exceptions in the traceback"""
        if not traceback:
            return 0
        # Match various exception patterns
        patterns = [
            r"\b\w+Error\b",  # ValueError, TypeError, etc.
            r"\b\w+Exception\b",  # CustomException, etc.
            r"During handling of the above exception",  # chained exceptions
        ]
        count = 0
        for pattern in patterns:
            count += len(re.findall(pattern, traceback))
        return count

    def should_use_llm_fallback(
        self,
        error_type: str,
        traceback: str = "",
        previous_attempts: int = 0,
    ) -> tuple[bool, str]:
        """
        Decide whether the LLM fallback should be used

        Returns:
            (should_use: bool, reason: str)
        """
        # Condition 1: REFINE failed 2 or more times on the same error
        if previous_attempts >= 2:
            return True, f"LLM analysis needed after {previous_attempts} failures on the same error"

        # Condition 2: unknown error type not covered by the pattern mapping
        error_type_normalized = self._normalize_error_type(error_type)
        if error_type_normalized not in self.ERROR_DECISION_MAP:
            return True, f"Unknown error type: {error_type_normalized}"

        # Condition 3: complex error (2 or more exceptions in the traceback)
        exception_count = self._count_exceptions_in_traceback(traceback)
        if exception_count >= 2:
            return True, f"Complex error ({exception_count} exceptions in the traceback)"

        return False, ""

    async def classify_with_fallback(
        self,
        error_type: str,
        error_message: str,
        traceback: str = "",
        installed_packages: List[str] = None,
        previous_attempts: int = 0,
        previous_codes: List[str] = None,
        llm_client=None,
        model: str = "gpt-4o-mini",
    ) -> ErrorAnalysis:
        """
        Pattern matching first; LLM fallback when the conditions are met

        Args:
            error_type: error type
            error_message: error message
            traceback: stack trace
            installed_packages: list of installed packages
            previous_attempts: number of previous attempts
            previous_codes: previously attempted code snippets
            llm_client: LLM client (AsyncOpenAI compatible)
            model: model name to use

        Returns:
            ErrorAnalysis: error analysis result
        """
        # Step 1: check whether the LLM fallback is needed
        should_use_llm, fallback_reason = self.should_use_llm_fallback(
            error_type, traceback, previous_attempts
        )

        # Step 2: try pattern matching first
        if not should_use_llm:
            return self.classify(
                error_type, error_message, traceback, installed_packages
            )

        # Step 3: LLM fallback
        if llm_client is None:
            # No LLM client available, fall back to pattern matching
            print(
                f"[ErrorClassifier] No LLM client, using pattern matching: {fallback_reason}"
            )
            return self.classify(
                error_type, error_message, traceback, installed_packages
            )

        print(f"[ErrorClassifier] Using LLM fallback: {fallback_reason}")
        return await self._classify_with_llm(
            error_type=error_type,
            error_message=error_message,
            traceback=traceback,
            previous_attempts=previous_attempts,
            previous_codes=previous_codes or [],
            llm_client=llm_client,
            model=model,
        )

    async def _classify_with_llm(
        self,
        error_type: str,
        error_message: str,
        traceback: str,
        previous_attempts: int,
        previous_codes: List[str],
        llm_client,
        model: str,
    ) -> ErrorAnalysis:
        """Error analysis using an LLM"""
        from hdsp_agent_core.prompts.auto_agent_prompts import (
            format_error_analysis_prompt,
        )

        prompt = format_error_analysis_prompt(
            error_type=error_type,
            error_message=error_message,
            traceback=traceback,
            previous_attempts=previous_attempts,
            previous_codes=previous_codes,
        )

        try:
            response = await llm_client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=2000,
            )

            content = response.choices[0].message.content
            return self._parse_llm_response(content)

        except Exception as e:
            print(f"[ErrorClassifier] LLM call failed: {e}")
            # Fall back to pattern matching when the LLM call fails
            result = self.classify(error_type, error_message, traceback, [])
            result.reasoning += f" (pattern matching used after LLM failure: {str(e)[:50]})"
            return result

    def _parse_llm_response(self, content: str) -> ErrorAnalysis:
        """Parse the LLM response"""
        try:
            # Extract the JSON block
            json_match = re.search(r"```json\s*([\s\S]*?)\s*```", content)
            if json_match:
                json_str = json_match.group(1)
            else:
                json_str = content

            data = json.loads(json_str)

            # Parse the decision
            decision_str = data.get("decision", "refine")
            decision_map = {
                "refine": ReplanDecision.REFINE,
                "insert_steps": ReplanDecision.INSERT_STEPS,
                "replace_step": ReplanDecision.REPLACE_STEP,
                "replan_remaining": ReplanDecision.REPLAN_REMAINING,
            }
            decision = decision_map.get(decision_str, ReplanDecision.REFINE)

            # Extract the confidence
            confidence = float(data.get("confidence", 0.8))

            return ErrorAnalysis(
                decision=decision,
                root_cause=data.get("analysis", {}).get("root_cause", "LLM analysis result"),
                reasoning=data.get("reasoning", "Decision based on LLM analysis"),
                changes=data.get("changes", {}),
                used_llm=True,
                confidence=confidence,
            )

        except (json.JSONDecodeError, KeyError, ValueError) as e:
            print(f"[ErrorClassifier] Failed to parse the LLM response: {e}")
            return ErrorAnalysis(
                decision=ReplanDecision.REFINE,
                root_cause="Failed to parse the LLM response",
                reasoning=f"Using the default (refine) due to a parsing error: {str(e)[:50]}",
                changes={"refined_code": None},
                used_llm=True,
                confidence=0.3,
            )


# Singleton instance
_error_classifier_instance: Optional[ErrorClassifier] = None


def get_error_classifier() -> ErrorClassifier:
    """Return the singleton ErrorClassifier"""
    global _error_classifier_instance
    if _error_classifier_instance is None:
        _error_classifier_instance = ErrorClassifier()
    return _error_classifier_instance
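For reference, a minimal usage sketch of the classifier above. The error values, installed-package list, and the `agent_server.core.error_classifier` import path are illustrative assumptions (the wheel must be installed for the import to resolve), and the exact pip command emitted depends on the PIP_INDEX_OPTION constant shipped in hdsp_agent_core.

# Usage sketch (hypothetical inputs; assumes the package above is installed and importable).
from agent_server.core.error_classifier import ReplanDecision, get_error_classifier

classifier = get_error_classifier()

# Deterministic path: a missing module becomes an INSERT_STEPS decision with a pip install step.
analysis = classifier.classify(
    error_type="ModuleNotFoundError",
    error_message="No module named 'sklearn'",
    traceback="ModuleNotFoundError: No module named 'sklearn'",
    installed_packages=["numpy", "pandas"],
)
assert analysis.decision is ReplanDecision.INSERT_STEPS
assert analysis.missing_package == "scikit-learn"  # alias mapping: sklearn → scikit-learn
install_step = analysis.to_dict()["changes"]["new_steps"][0]
# e.g. "!pip install <index option> --timeout 180 scikit-learn"
print(install_step["toolCalls"][0]["parameters"]["code"])

# Fallback gate: two prior failures on the same error are enough to ask for LLM analysis.
use_llm, reason = classifier.should_use_llm_fallback(
    error_type="ValueError", traceback="", previous_attempts=2
)
assert use_llm is True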
agent_server/core/llm_client.py
@@ -0,0 +1,95 @@
"""
LLM Client - Interface with language models
"""

import os
import ssl
from typing import Any, Dict

import aiohttp
import certifi

from agent_server.prompts import DEFAULT_SYSTEM_PROMPT


class LLMClient:
    """Client for LLM API communication"""

    _instance = None

    def __init__(self, config: Dict[str, Any]):
        self._update_config(config)

    def _update_config(self, config: Dict[str, Any]):
        """Update client configuration from config dict"""
        self.config = config
        self.api_key = config.get("apiKey") or os.environ.get("OPENAI_API_KEY")
        self.base_url = config.get("baseUrl", "https://api.openai.com/v1")
        self.model = config.get("modelId", "gpt-4")
        self.temperature = config.get("temperature", 0.7)
        self.max_tokens = config.get("maxTokens", 2000)

    @classmethod
    def get_instance(cls, config: Dict[str, Any]):
        """Get singleton instance"""
        if cls._instance is None:
            cls._instance = LLMClient(config)
        else:
            cls._instance._update_config(config)
        return cls._instance

    async def generate(self, prompt: str) -> Dict[str, Any]:
        """Generate response from LLM"""
        if not self.api_key:
            raise ValueError("API key not configured")

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        payload = {
            "model": self.model,
            "messages": [
                {
                    "role": "system",
                    "content": self.config.get("systemPrompt", DEFAULT_SYSTEM_PROMPT),
                },
                {"role": "user", "content": prompt},
            ],
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }

        url = f"{self.base_url}/chat/completions"

        # Create SSL context with certifi certificates
        ssl_context = ssl.create_default_context(cafile=certifi.where())

        try:
            async with aiohttp.ClientSession(
                connector=aiohttp.TCPConnector(ssl=ssl_context)
            ) as session:
                async with session.post(url, headers=headers, json=payload) as response:
                    if response.status != 200:
                        error_text = await response.text()
                        raise RuntimeError(
                            f"API request failed ({response.status}): {error_text}"
                        )

                    data = await response.json()

                    # Extract response
                    content = data["choices"][0]["message"]["content"]
                    tokens = data.get("usage", {}).get("total_tokens", 0)

                    return {
                        "content": content,
                        "model": data.get("model", self.model),
                        "tokens": tokens,
                    }

        except aiohttp.ClientError as e:
            raise RuntimeError(f"Network error: {str(e)}")
        except KeyError as e:
            raise RuntimeError(f"Unexpected API response format: {str(e)}")