IncludeCPP 4.0.2-py3-none-any.whl → 4.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- includecpp/__init__.py +1 -1
- includecpp/__init__.pyi +3 -1
- includecpp/cli/commands.py +3 -3
- includecpp/core/ai_integration.py +46 -13
- includecpp/core/cpp_api_extensions.pyi +622 -0
- includecpp/core/cssl/CSSL_DOCUMENTATION.md +186 -5
- includecpp/core/cssl/cssl_builtins.py +87 -2
- includecpp/core/cssl/cssl_languages.py +836 -0
- includecpp/core/cssl/cssl_parser.py +138 -38
- includecpp/core/cssl/cssl_runtime.py +364 -24
- includecpp/core/cssl/cssl_syntax.py +88 -4
- includecpp/core/cssl/cssl_types.py +172 -1
- includecpp/core/cssl_bridge.py +194 -8
- includecpp/core/cssl_bridge.pyi +148 -10
- includecpp/vscode/cssl/package.json +43 -1
- includecpp/vscode/cssl/syntaxes/cssl.tmLanguage.json +140 -17
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/METADATA +101 -1
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/RECORD +22 -20
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/WHEEL +0 -0
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/entry_points.txt +0 -0
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/licenses/LICENSE +0 -0
- {includecpp-4.0.2.dist-info → includecpp-4.1.0.dist-info}/top_level.txt +0 -0
includecpp/__init__.py
CHANGED
includecpp/__init__.pyi
CHANGED
@@ -147,7 +147,9 @@ __version__: str
 # Dynamic module access via: from includecpp import <module_name>
 # Auto-generated module declarations
 # These allow: from includecpp import <module_name>
-
+combat_sim: Combat_simModuleWrapper
+mcts_engine: Mcts_engineModuleWrapper
+pathfinding: PathfindingModuleWrapper
 
 def __dir__() -> List[str]:
     """List available modules including dynamically loaded C++ modules."""
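The three added stubs declare the dynamically built C++ modules so that type checkers and IDE completion can resolve them. A minimal usage sketch, assuming modules with these names have been built in the consuming project:

import includecpp
from includecpp import pathfinding  # resolved dynamically at import time

# __dir__() now lists the C++ modules alongside the static attributes.
print([name for name in dir(includecpp) if not name.startswith('_')])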
includecpp/cli/commands.py
CHANGED
@@ -5199,7 +5199,7 @@ def ai_ask(question, module_name, files, all_modules, exclude, think_mode, think
     verbose.phase('init', 'Setting up context')
     verbose.detail('Question', question[:50] + '...' if len(question) > 50 else question)
     verbose.detail('Mode', mode)
-    verbose.detail('Model', ai_mgr.config.get('model', 'gpt-
+    verbose.detail('Model', ai_mgr.config.get('model', 'gpt-4o'))
 
     project_root = Path.cwd()
     plugins_dir = project_root / "plugins"
@@ -5220,7 +5220,7 @@ def ai_ask(question, module_name, files, all_modules, exclude, think_mode, think
         files=len(source_files) + len(plugin_files),
         lines=total_lines,
         tokens=total_lines * 4,  # Approximate
-        model=ai_mgr.config.get('model', 'gpt-
+        model=ai_mgr.config.get('model', 'gpt-4o')
     )
 
     if use_websearch:
@@ -6456,7 +6456,7 @@ def _convert_with_ai(content: str, module_name: str, source_file,
     )
 
     if not success:
-        ai_verbose.warning("AI conversion failed
+        ai_verbose.warning(f"AI conversion failed: {response}")
         ai_verbose.phase('fallback', 'Falling back to standard conversion')
         ai_verbose.end(success=True, message="Fallback successful")
     if to_cpp:
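Both call sites now default to 'gpt-4o', a key present in the MODELS registry in ai_integration.py below, and the conversion warning interpolates the failure response rather than a bare message. A reduced sketch of the pattern, reusing the names from the hunks above:

model = ai_mgr.config.get('model', 'gpt-4o')  # default must be a valid MODELS key
if not success:
    ai_verbose.warning(f"AI conversion failed: {response}")  # surface the reason, not just the fact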
includecpp/core/ai_integration.py
CHANGED

@@ -32,8 +32,12 @@ MODELS = {
     'gpt-3.5-turbo': {'context': 16385, 'endpoint': 'gpt-3.5-turbo'},
     'gpt-4-turbo': {'context': 128000, 'endpoint': 'gpt-4-turbo'},
     'gpt-4o': {'context': 128000, 'endpoint': 'gpt-4o'},
+    'gpt-4o-mini': {'context': 128000, 'endpoint': 'gpt-4o-mini'},
     'gpt-5': {'context': 256000, 'endpoint': 'gpt-5'},
     'gpt-5-nano': {'context': 32000, 'endpoint': 'gpt-5-nano'},
+    'o1': {'context': 200000, 'endpoint': 'o1'},
+    'o1-mini': {'context': 128000, 'endpoint': 'o1-mini'},
+    'o1-preview': {'context': 128000, 'endpoint': 'o1-preview'},
 }
 
 DEFAULT_MODEL = 'gpt-5'
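Each entry's context size feeds the per-request token budget computed in the next hunk. A reduced sketch of that lookup; token_budget is a hypothetical helper, the real code inlines the expression:

MODELS = {
    'gpt-3.5-turbo': {'context': 16385, 'endpoint': 'gpt-3.5-turbo'},
    'o1': {'context': 200000, 'endpoint': 'o1'},
}

def token_budget(model: str) -> int:
    # Half the context window, capped at 16,000 completion tokens.
    return min(16000, MODELS[model]['context'] // 2)

assert token_budget('gpt-3.5-turbo') == 8192  # small window halves below the cap
assert token_budget('o1') == 16000            # large window hits the 16k cap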
@@ -981,18 +985,43 @@ class AIManager:
             'Content-Type': 'application/json'
         }
         token_limit = min(16000, model_info['context'] // 2)
-
-
-
-
-
-
-
-
-
+
+        # o1 models use different message format (no system message, combined into user)
+        # gpt-5 models use max_completion_tokens instead of max_tokens
+        is_o1_model = model.startswith('o1')
+        is_gpt5_model = model.startswith('gpt-5')
+
+        if is_o1_model:
+            # o1 models: combine system + user into single user message
+            combined_content = f"{system_prompt}\n\n---\n\n{user_prompt}"
+            data = {
+                'model': model_info['endpoint'],
+                'messages': [
+                    {'role': 'user', 'content': combined_content}
+                ],
+                'max_completion_tokens': token_limit
+            }
+            # o1 models don't support temperature
+        elif is_gpt5_model:
+            # gpt-5 models: use max_completion_tokens, no custom temperature
+            data = {
+                'model': model_info['endpoint'],
+                'messages': [
+                    {'role': 'system', 'content': system_prompt},
+                    {'role': 'user', 'content': user_prompt}
+                ],
+                'max_completion_tokens': token_limit
+            }
         else:
-            data
-
+            data = {
+                'model': model_info['endpoint'],
+                'messages': [
+                    {'role': 'system', 'content': system_prompt},
+                    {'role': 'user', 'content': user_prompt}
+                ],
+                'max_tokens': token_limit,
+                'temperature': temperature
+            }
         try:
             response = requests.post(OPENAI_API_URL, headers=headers, json=data, timeout=timeout)
             if response.status_code == 200:
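The rewritten request builder branches on model family: o1 endpoints take no system message and no temperature and expect max_completion_tokens; gpt-5 keeps the system message but shares max_completion_tokens; everything else uses the classic max_tokens/temperature pair. A self-contained sketch of the same branching (build_payload is a hypothetical helper, not part of the package):

def build_payload(model, endpoint, system_prompt, user_prompt, token_limit, temperature):
    if model.startswith('o1'):
        # o1: fold the system prompt into the single user message.
        messages = [{'role': 'user', 'content': f"{system_prompt}\n\n---\n\n{user_prompt}"}]
        return {'model': endpoint, 'messages': messages,
                'max_completion_tokens': token_limit}
    messages = [{'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_prompt}]
    if model.startswith('gpt-5'):
        # gpt-5: system message allowed, but still max_completion_tokens.
        return {'model': endpoint, 'messages': messages,
                'max_completion_tokens': token_limit}
    # Legacy chat models: max_tokens plus an explicit temperature.
    return {'model': endpoint, 'messages': messages,
            'max_tokens': token_limit, 'temperature': temperature}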
@@ -1312,6 +1341,10 @@ class AIManager:
         import platform
         import subprocess
 
+        # Early fail-fast checks
+        if not self.config.get('api_key'):
+            return False, 'No API key configured. Run: includecpp ai setup', []
+
         # Build tools list for prompt
         tools_list = '\n'.join([
             f"- {name}: {info['desc']}\n  Format:\n  {info['format']}"
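The method reports outcomes through a (success, message, changes) tuple, so validating the key up front avoids a doomed network round-trip while keeping the return shape uniform. A reduced sketch of that contract; the class and method names here are stand-ins for the patched code:

class ManagerSketch:
    def __init__(self, config):
        self.config = config

    def execute(self, prompt):
        # Fail fast before any network work, same tuple shape as a real run.
        if not self.config.get('api_key'):
            return False, 'No API key configured. Run: includecpp ai setup', []
        return True, 'ok', []

assert ManagerSketch({}).execute('hi')[0] is False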
@@ -1371,9 +1404,9 @@ class AIManager:
 
         prompt = self._build_prompt_with_docs(prompt)
 
-        # Temperature and timeout
+        # Temperature and timeout (reduced for faster failure detection)
         temperature = 0.1 if think_three else (0.2 if think_twice else 0.3)
-        timeout =
+        timeout = 180 if think_three else (120 if think_twice else (90 if think else 60))
 
         # Execute with tool loop
         all_changes = []
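Deeper think modes trade latency for determinism: temperature drops as the timeout grows, with the base mode capped at 60 seconds. A sketch of the tiering; request_limits is a hypothetical wrapper around the two expressions above:

def request_limits(think, think_twice, think_three):
    temperature = 0.1 if think_three else (0.2 if think_twice else 0.3)
    timeout = 180 if think_three else (120 if think_twice else (90 if think else 60))
    return temperature, timeout

assert request_limits(False, False, False) == (0.3, 60)
assert request_limits(True, False, True) == (0.1, 180)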