symbolicai 1.0.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. symai/__init__.py +198 -134
  2. symai/backend/base.py +51 -51
  3. symai/backend/engines/drawing/engine_bfl.py +33 -33
  4. symai/backend/engines/drawing/engine_gpt_image.py +4 -10
  5. symai/backend/engines/embedding/engine_llama_cpp.py +50 -35
  6. symai/backend/engines/embedding/engine_openai.py +22 -16
  7. symai/backend/engines/execute/engine_python.py +16 -16
  8. symai/backend/engines/files/engine_io.py +51 -49
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +27 -23
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +53 -46
  11. symai/backend/engines/index/engine_pinecone.py +116 -88
  12. symai/backend/engines/index/engine_qdrant.py +1011 -0
  13. symai/backend/engines/index/engine_vectordb.py +78 -52
  14. symai/backend/engines/lean/engine_lean4.py +65 -25
  15. symai/backend/engines/neurosymbolic/__init__.py +35 -28
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +137 -135
  17. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +145 -152
  18. symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
  19. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +75 -49
  20. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +199 -155
  21. symai/backend/engines/neurosymbolic/engine_groq.py +106 -72
  22. symai/backend/engines/neurosymbolic/engine_huggingface.py +100 -67
  23. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +121 -93
  24. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +213 -132
  25. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +180 -137
  26. symai/backend/engines/ocr/engine_apilayer.py +18 -20
  27. symai/backend/engines/output/engine_stdout.py +9 -9
  28. symai/backend/engines/{webscraping → scrape}/engine_requests.py +25 -11
  29. symai/backend/engines/search/engine_openai.py +95 -83
  30. symai/backend/engines/search/engine_parallel.py +665 -0
  31. symai/backend/engines/search/engine_perplexity.py +40 -41
  32. symai/backend/engines/search/engine_serpapi.py +33 -28
  33. symai/backend/engines/speech_to_text/engine_local_whisper.py +37 -27
  34. symai/backend/engines/symbolic/engine_wolframalpha.py +14 -8
  35. symai/backend/engines/text_to_speech/engine_openai.py +15 -19
  36. symai/backend/engines/text_vision/engine_clip.py +34 -28
  37. symai/backend/engines/userinput/engine_console.py +3 -4
  38. symai/backend/mixin/__init__.py +4 -0
  39. symai/backend/mixin/anthropic.py +48 -40
  40. symai/backend/mixin/cerebras.py +9 -0
  41. symai/backend/mixin/deepseek.py +4 -5
  42. symai/backend/mixin/google.py +5 -4
  43. symai/backend/mixin/groq.py +2 -4
  44. symai/backend/mixin/openai.py +132 -110
  45. symai/backend/settings.py +14 -14
  46. symai/chat.py +164 -94
  47. symai/collect/dynamic.py +13 -11
  48. symai/collect/pipeline.py +39 -31
  49. symai/collect/stats.py +109 -69
  50. symai/components.py +578 -238
  51. symai/constraints.py +14 -5
  52. symai/core.py +1495 -1210
  53. symai/core_ext.py +55 -50
  54. symai/endpoints/api.py +113 -58
  55. symai/extended/api_builder.py +22 -17
  56. symai/extended/arxiv_pdf_parser.py +13 -5
  57. symai/extended/bibtex_parser.py +8 -4
  58. symai/extended/conversation.py +88 -69
  59. symai/extended/document.py +40 -27
  60. symai/extended/file_merger.py +45 -7
  61. symai/extended/graph.py +38 -24
  62. symai/extended/html_style_template.py +17 -11
  63. symai/extended/interfaces/blip_2.py +1 -1
  64. symai/extended/interfaces/clip.py +4 -2
  65. symai/extended/interfaces/console.py +5 -3
  66. symai/extended/interfaces/dall_e.py +3 -1
  67. symai/extended/interfaces/file.py +2 -0
  68. symai/extended/interfaces/flux.py +3 -1
  69. symai/extended/interfaces/gpt_image.py +15 -6
  70. symai/extended/interfaces/input.py +2 -1
  71. symai/extended/interfaces/llava.py +1 -1
  72. symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +3 -2
  73. symai/extended/interfaces/naive_vectordb.py +2 -2
  74. symai/extended/interfaces/ocr.py +4 -2
  75. symai/extended/interfaces/openai_search.py +2 -0
  76. symai/extended/interfaces/parallel.py +30 -0
  77. symai/extended/interfaces/perplexity.py +2 -0
  78. symai/extended/interfaces/pinecone.py +6 -4
  79. symai/extended/interfaces/python.py +2 -0
  80. symai/extended/interfaces/serpapi.py +2 -0
  81. symai/extended/interfaces/terminal.py +0 -1
  82. symai/extended/interfaces/tts.py +2 -1
  83. symai/extended/interfaces/whisper.py +2 -1
  84. symai/extended/interfaces/wolframalpha.py +1 -0
  85. symai/extended/metrics/__init__.py +1 -1
  86. symai/extended/metrics/similarity.py +5 -2
  87. symai/extended/os_command.py +31 -22
  88. symai/extended/packages/symdev.py +39 -34
  89. symai/extended/packages/sympkg.py +30 -27
  90. symai/extended/packages/symrun.py +46 -35
  91. symai/extended/repo_cloner.py +10 -9
  92. symai/extended/seo_query_optimizer.py +15 -12
  93. symai/extended/solver.py +104 -76
  94. symai/extended/summarizer.py +8 -7
  95. symai/extended/taypan_interpreter.py +10 -9
  96. symai/extended/vectordb.py +28 -15
  97. symai/formatter/formatter.py +39 -31
  98. symai/formatter/regex.py +46 -44
  99. symai/functional.py +184 -86
  100. symai/imports.py +85 -51
  101. symai/interfaces.py +1 -1
  102. symai/memory.py +33 -24
  103. symai/menu/screen.py +28 -19
  104. symai/misc/console.py +27 -27
  105. symai/misc/loader.py +4 -3
  106. symai/models/base.py +147 -76
  107. symai/models/errors.py +1 -1
  108. symai/ops/__init__.py +1 -1
  109. symai/ops/measures.py +17 -14
  110. symai/ops/primitives.py +933 -635
  111. symai/post_processors.py +28 -24
  112. symai/pre_processors.py +58 -52
  113. symai/processor.py +15 -9
  114. symai/prompts.py +714 -649
  115. symai/server/huggingface_server.py +115 -32
  116. symai/server/llama_cpp_server.py +14 -6
  117. symai/server/qdrant_server.py +206 -0
  118. symai/shell.py +98 -39
  119. symai/shellsv.py +307 -223
  120. symai/strategy.py +135 -81
  121. symai/symbol.py +276 -225
  122. symai/utils.py +62 -46
  123. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/METADATA +19 -9
  124. symbolicai-1.1.1.dist-info/RECORD +169 -0
  125. symbolicai-1.0.0.dist-info/RECORD +0 -163
  126. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/WHEEL +0 -0
  127. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/entry_points.txt +0 -0
  128. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/licenses/LICENSE +0 -0
  129. {symbolicai-1.0.0.dist-info → symbolicai-1.1.1.dist-info}/top_level.txt +0 -0
@@ -1,58 +1,66 @@
1
1
  # https://docs.anthropic.com/en/docs/about-claude/models
2
2
  SUPPORTED_CHAT_MODELS = [
3
- 'claude-3-5-sonnet-latest',
4
- 'claude-3-5-haiku-latest',
5
- 'claude-3-5-sonnet-20241022',
6
- 'claude-3-5-sonnet-20240620',
7
- 'claude-3-opus-latest',
8
- 'claude-3-opus-20240229',
9
- 'claude-3-sonnet-20240229',
10
- 'claude-3-haiku-20240307',
3
+ "claude-3-5-sonnet-latest",
4
+ "claude-3-5-haiku-latest",
5
+ "claude-3-5-sonnet-20241022",
6
+ "claude-3-5-sonnet-20240620",
7
+ "claude-3-opus-latest",
8
+ "claude-3-opus-20240229",
9
+ "claude-3-sonnet-20240229",
10
+ "claude-3-haiku-20240307",
11
11
  ]
12
12
  SUPPORTED_REASONING_MODELS = [
13
13
  "claude-opus-4-1",
14
14
  "claude-opus-4-0",
15
15
  "claude-sonnet-4-0",
16
- 'claude-3-7-sonnet-latest',
17
- 'claude-haiku-4-5',
18
- 'claude-sonnet-4-5',
16
+ "claude-3-7-sonnet-latest",
17
+ "claude-haiku-4-5",
18
+ "claude-sonnet-4-5",
19
19
  ]
20
20
 
21
+
21
22
  class AnthropicMixin:
22
23
  def api_max_context_tokens(self):
23
- if self.model == 'claude-opus-4-1' or \
24
- self.model == 'claude-opus-4-0' or \
25
- self.model == 'claude-sonnet-4-0' or \
26
- self.model == 'claude-3-7-sonnet-latest' or \
27
- self.model == 'claude-haiku-4-5' or \
28
- self.model == 'claude-sonnet-4-5' or \
29
- self.model == 'claude-3-5-sonnet-latest' or \
30
- self.model == 'claude-3-5-sonnet-20241022' or \
31
- self.model == 'claude-3-5-sonnet-20240620' or \
32
- self.model == 'claude-3-opus-latest' or \
33
- self.model == 'claude-3-opus-20240229' or \
34
- self.model == 'claude-3-sonnet-20240229' or \
35
- self.model == 'claude-3-haiku-20240307':
36
- return 200_000
24
+ if (
25
+ self.model == "claude-opus-4-1"
26
+ or self.model == "claude-opus-4-0"
27
+ or self.model == "claude-sonnet-4-0"
28
+ or self.model == "claude-3-7-sonnet-latest"
29
+ or self.model == "claude-haiku-4-5"
30
+ or self.model == "claude-sonnet-4-5"
31
+ or self.model == "claude-3-5-sonnet-latest"
32
+ or self.model == "claude-3-5-sonnet-20241022"
33
+ or self.model == "claude-3-5-sonnet-20240620"
34
+ or self.model == "claude-3-opus-latest"
35
+ or self.model == "claude-3-opus-20240229"
36
+ or self.model == "claude-3-sonnet-20240229"
37
+ or self.model == "claude-3-haiku-20240307"
38
+ ):
39
+ return 200_000
37
40
  return None
38
41
 
39
42
  def api_max_response_tokens(self):
40
- if self.model == 'claude-sonnet-4-0' or \
41
- self.model == 'claude-3-7-sonnet-latest' or \
42
- self.model == 'claude-haiku-4-5' or \
43
- self.model == 'claude-sonnet-4-5':
43
+ if (
44
+ self.model == "claude-sonnet-4-0"
45
+ or self.model == "claude-3-7-sonnet-latest"
46
+ or self.model == "claude-haiku-4-5"
47
+ or self.model == "claude-sonnet-4-5"
48
+ ):
44
49
  return 64_000
45
- if self.model == 'claude-opus-4-1' or \
46
- self.model == 'claude-opus-4-0':
50
+ if self.model == "claude-opus-4-1" or self.model == "claude-opus-4-0":
47
51
  return 32_000
48
- if self.model == 'claude-3-5-sonnet-latest' or \
49
- self.model == 'claude-3-5-sonnet-20241022' or \
50
- self.model == 'claude-3-5-haiku-latest':
52
+ if (
53
+ self.model == "claude-3-5-sonnet-latest"
54
+ or self.model == "claude-3-5-sonnet-20241022"
55
+ or self.model == "claude-3-5-haiku-latest"
56
+ ):
51
57
  return 8_192
52
- if self.model == 'claude-3-5-sonnet-20240620' or \
53
- self.model == 'claude-3-opus-latest' or \
54
- self.model == 'clade-3-opus-20240229' or \
55
- self.model == 'claude-3-sonnet-20240229' or \
56
- self.model == 'claude-3-haiku-20240307':
57
- return 4_096
58
+ if (
59
+ self.model == "claude-3-5-sonnet-20240620"
60
+ or self.model == "claude-3-opus-latest"
61
+ or self.model == "clade-3-opus-20240229"
62
+ or self.model == "claude-3-sonnet-20240229"
63
+ or self.model == "claude-3-haiku-20240307"
64
+ ):
65
+ return 4_096
58
66
  return None
@@ -0,0 +1,9 @@
1
+ SUPPORTED_CHAT_MODELS: list[str] = [
2
+ "cerebras:qwen-3-235b-a22b-instruct-2507",
3
+ ]
4
+
5
+ SUPPORTED_REASONING_MODELS: list[str] = [
6
+ "cerebras:zai-glm-4.6",
7
+ "cerebras:gpt-oss-120b",
8
+ "cerebras:qwen-3-32b",
9
+ ]
@@ -1,16 +1,15 @@
1
1
  # https://api-docs.deepseek.com/quick_start/pricing
2
2
  SUPPORTED_CHAT_MODELS = []
3
- SUPPORTED_REASONING_MODELS = [
4
- 'deepseek-reasoner'
5
- ]
3
+ SUPPORTED_REASONING_MODELS = ["deepseek-reasoner"]
4
+
6
5
 
7
6
  class DeepSeekMixin:
8
7
  def api_max_context_tokens(self):
9
- if self.model == 'deepseek-reasoner':
8
+ if self.model == "deepseek-reasoner":
10
9
  return 64_000
11
10
  return None
12
11
 
13
12
  def api_max_response_tokens(self):
14
- if self.model == 'deepseek-reasoner':
13
+ if self.model == "deepseek-reasoner":
15
14
  return 8_000
16
15
  return None
@@ -2,17 +2,18 @@
2
2
  SUPPORTED_CHAT_MODELS = []
3
3
  SUPPORTED_REASONING_MODELS = [
4
4
  # Check the latest snapshots; ie. *-06-05, etc
5
- 'gemini-2.5-pro',
6
- 'gemini-2.5-flash',
5
+ "gemini-2.5-pro",
6
+ "gemini-2.5-flash",
7
7
  ]
8
8
 
9
+
9
10
  class GoogleMixin:
10
11
  def api_max_context_tokens(self):
11
- if self.model.startswith('gemini-2.5-'):
12
+ if self.model.startswith("gemini-2.5-"):
12
13
  return 1_048_576
13
14
  return None
14
15
 
15
16
  def api_max_response_tokens(self):
16
- if self.model == 'gemini-2.5-':
17
+ if self.model == "gemini-2.5-":
17
18
  return 65_536
18
19
  return None
@@ -1,10 +1,8 @@
1
- SUPPORTED_CHAT_MODELS = [
2
- "groq:moonshotai/kimi-k2-instruct"
3
- ]
1
+ SUPPORTED_CHAT_MODELS = ["groq:moonshotai/kimi-k2-instruct"]
4
2
 
5
3
  SUPPORTED_REASONING_MODELS = [
6
4
  "groq:openai/gpt-oss-120b",
7
5
  "groq:openai/gpt-oss-20b",
8
6
  "groq:qwen/qwen3-32b",
9
- "groq:deepseek-r1-distill-llama-70b"
7
+ "groq:deepseek-r1-distill-llama-70b",
10
8
  ]
@@ -1,142 +1,164 @@
1
1
  from ...utils import UserMessage
2
2
 
3
3
  SUPPORTED_COMPLETION_MODELS = [
4
- 'davinci-002',
4
+ "davinci-002",
5
5
  ]
6
6
  SUPPORTED_CHAT_MODELS = [
7
- 'gpt-3.5-turbo',
8
- 'gpt-3.5-turbo-16k',
9
- 'gpt-3.5-turbo-1106',
10
- 'gpt-3.5-turbo-0613',
11
- 'gpt-4',
12
- 'gpt-4-0613',
13
- 'gpt-4-1106-preview', # @NOTE: probabily obsolete; same price as 'gpt-4-turbo-2024-04-09' but no vision
14
- 'gpt-4-turbo',
15
- 'gpt-4-turbo-2024-04-09',
16
- 'gpt-4o',
17
- 'gpt-4o-2024-11-20',
18
- 'gpt-4o-mini',
19
- 'chatgpt-4o-latest',
20
- 'gpt-4.1',
21
- 'gpt-4.1-mini',
22
- 'gpt-4.1-nano',
23
- 'gpt-5-chat-latest',
7
+ "gpt-3.5-turbo",
8
+ "gpt-3.5-turbo-16k",
9
+ "gpt-3.5-turbo-1106",
10
+ "gpt-3.5-turbo-0613",
11
+ "gpt-4",
12
+ "gpt-4-0613",
13
+ "gpt-4-1106-preview", # @NOTE: probabily obsolete; same price as 'gpt-4-turbo-2024-04-09' but no vision
14
+ "gpt-4-turbo",
15
+ "gpt-4-turbo-2024-04-09",
16
+ "gpt-4o",
17
+ "gpt-4o-2024-11-20",
18
+ "gpt-4o-mini",
19
+ "chatgpt-4o-latest",
20
+ "gpt-4.1",
21
+ "gpt-4.1-mini",
22
+ "gpt-4.1-nano",
23
+ "gpt-5-chat-latest",
24
+ "gpt-5.1-chat-latest",
24
25
  ]
25
26
  SUPPORTED_REASONING_MODELS = [
26
- 'o3-mini',
27
- 'o4-mini',
28
- 'o1',
29
- 'o3',
30
- 'gpt-5',
31
- 'gpt-5-mini',
32
- 'gpt-5-nano',
27
+ "o3-mini",
28
+ "o4-mini",
29
+ "o1",
30
+ "o3",
31
+ "gpt-5",
32
+ "gpt-5.1",
33
+ "gpt-5-mini",
34
+ "gpt-5-nano",
33
35
  ]
34
36
  SUPPORTED_EMBEDDING_MODELS = [
35
- 'text-embedding-ada-002',
36
- 'text-embedding-3-small',
37
- 'text-embedding-3-large'
37
+ "text-embedding-ada-002",
38
+ "text-embedding-3-small",
39
+ "text-embedding-3-large",
38
40
  ]
39
41
 
40
42
 
41
43
  class OpenAIMixin:
42
44
  def api_max_context_tokens(self):
43
- if self.model == 'text-curie-001' or \
44
- self.model == 'text-babbage-001' or \
45
- self.model == 'text-ada-001' or \
46
- self.model == 'davinci' or \
47
- self.model == 'curie' or \
48
- self.model == 'babbage' or \
49
- self.model == 'ada':
50
- return 2_049
51
- if self.model == 'gpt-3.5-turbo' or \
52
- self.model == 'gpt-3.5-turbo-0613' or \
53
- self.model == 'gpt-3.5-turbo-1106':
54
- return 4_096
55
- if self.model == 'gpt-4' or \
56
- self.model == 'gpt-4-0613' or \
57
- self.model == 'text-embedding-ada-002' or \
58
- self.model == 'text-embedding-3-small' or \
59
- self.model == 'text-embedding-3-large':
60
- return 8_192
61
- if self.model == 'gpt-3.5-turbo-16k' or \
62
- self.model == 'gpt-3.5-turbo-16k-0613' or \
63
- self.model == 'davinci-002':
64
- return 16_384
65
- if self.model == 'gpt-4-32k' or \
66
- self.model == 'gpt-4-32k-0613':
67
- return 32_768
68
- if self.model == 'gpt-4-1106-preview' or \
69
- self.model == 'gpt-4-turbo-2024-04-09' or \
70
- self.model == 'gpt-4-turbo' or \
71
- self.model == 'gpt-4-1106' or \
72
- self.model == 'gpt-4o' or \
73
- self.model == 'gpt-4o-2024-11-20' or \
74
- self.model == 'gpt-4o-mini' or \
75
- self.model == 'chatgpt-4o-latest':
76
- return 128_000
77
- if self.model == 'o1' or \
78
- self.model == 'o3' or \
79
- self.model == 'o3-mini' or \
80
- self.model == 'o4-mini':
81
- return 200_000
82
- if self.model == 'gpt-5' or \
83
- self.model == 'gpt-5-mini' or \
84
- self.model == 'gpt-5-nano' or \
85
- self.model == 'gpt-5-chat-latest':
45
+ if (
46
+ self.model == "text-curie-001"
47
+ or self.model == "text-babbage-001"
48
+ or self.model == "text-ada-001"
49
+ or self.model == "davinci"
50
+ or self.model == "curie"
51
+ or self.model == "babbage"
52
+ or self.model == "ada"
53
+ ):
54
+ return 2_049
55
+ if (
56
+ self.model == "gpt-3.5-turbo"
57
+ or self.model == "gpt-3.5-turbo-0613"
58
+ or self.model == "gpt-3.5-turbo-1106"
59
+ ):
60
+ return 4_096
61
+ if (
62
+ self.model == "gpt-4"
63
+ or self.model == "gpt-4-0613"
64
+ or self.model == "text-embedding-ada-002"
65
+ or self.model == "text-embedding-3-small"
66
+ or self.model == "text-embedding-3-large"
67
+ ):
68
+ return 8_192
69
+ if (
70
+ self.model == "gpt-3.5-turbo-16k"
71
+ or self.model == "gpt-3.5-turbo-16k-0613"
72
+ or self.model == "davinci-002"
73
+ ):
74
+ return 16_384
75
+ if self.model == "gpt-4-32k" or self.model == "gpt-4-32k-0613":
76
+ return 32_768
77
+ if (
78
+ self.model == "gpt-4-1106-preview"
79
+ or self.model == "gpt-4-turbo-2024-04-09"
80
+ or self.model == "gpt-4-turbo"
81
+ or self.model == "gpt-4-1106"
82
+ or self.model == "gpt-4o"
83
+ or self.model == "gpt-4o-2024-11-20"
84
+ or self.model == "gpt-4o-mini"
85
+ or self.model == "chatgpt-4o-latest"
86
+ ):
87
+ return 128_000
88
+ if (
89
+ self.model == "o1"
90
+ or self.model == "o3"
91
+ or self.model == "o3-mini"
92
+ or self.model == "o4-mini"
93
+ or self.model == "gpt-5-chat-latest"
94
+ or self.model == "gpt-5.1-chat-latest"
95
+ ):
96
+ return 200_000
97
+ if (
98
+ self.model == "gpt-5"
99
+ or self.model == "gpt-5.1"
100
+ or self.model == "gpt-5-mini"
101
+ or self.model == "gpt-5-nano"
102
+ ):
86
103
  return 400_000
87
- if self.model == 'gpt-4.1' or \
88
- self.model == 'gpt-4.1-mini' or \
89
- self.model == 'gpt-4.1-nano':
104
+ if self.model == "gpt-4.1" or self.model == "gpt-4.1-mini" or self.model == "gpt-4.1-nano":
90
105
  return 1_047_576
91
- msg = f'Unsupported model: {self.model}'
106
+ msg = f"Unsupported model: {self.model}"
92
107
  UserMessage(msg)
93
108
  raise ValueError(msg)
94
109
 
95
110
  def api_max_response_tokens(self):
96
- if self.model == 'davinci-002':
111
+ if self.model == "davinci-002":
97
112
  return 2_048
98
- if self.model == 'gpt-4-turbo' or \
99
- self.model == 'gpt-4-turbo-2024-04-09' or \
100
- self.model == 'gpt-4-1106-preview' or \
101
- self.model == 'gpt-3.5-turbo-1106' or \
102
- self.model == 'gpt-3.5-turbo-0613' or \
103
- self.model == 'gpt-3.5-turbo':
104
- return 4_096
105
- if self.model == 'gpt-4-0613' or \
106
- self.model == 'gpt-4':
107
- return 8_192
108
- if self.model == 'gpt-3.5-turbo-16k-0613' or \
109
- self.model == 'gpt-3.5-turbo-16k' or \
110
- self.model == 'gpt-4o-mini' or \
111
- self.model == 'gpt-4o' or \
112
- self.model == 'gpt-4o-2024-11-20' or \
113
- self.model == 'chatgpt-4o-latest':
114
- return 16_384
115
- if self.model == 'gpt-4.1' or \
116
- self.model == 'gpt-4.1-mini' or \
117
- self.model == 'gpt-4.1-nano':
113
+ if (
114
+ self.model == "gpt-4-turbo"
115
+ or self.model == "gpt-4-turbo-2024-04-09"
116
+ or self.model == "gpt-4-1106-preview"
117
+ or self.model == "gpt-3.5-turbo-1106"
118
+ or self.model == "gpt-3.5-turbo-0613"
119
+ or self.model == "gpt-3.5-turbo"
120
+ ):
121
+ return 4_096
122
+ if self.model == "gpt-4-0613" or self.model == "gpt-4":
123
+ return 8_192
124
+ if (
125
+ self.model == "gpt-3.5-turbo-16k-0613"
126
+ or self.model == "gpt-3.5-turbo-16k"
127
+ or self.model == "gpt-4o-mini"
128
+ or self.model == "gpt-4o"
129
+ or self.model == "gpt-4o-2024-11-20"
130
+ or self.model == "chatgpt-4o-latest"
131
+ or self.model == "gpt-5-chat-latest"
132
+ or self.model == "gpt-5.1-chat-latest"
133
+ ):
134
+ return 16_384
135
+ if self.model == "gpt-4.1" or self.model == "gpt-4.1-mini" or self.model == "gpt-4.1-nano":
118
136
  return 32_768
119
- if self.model == 'o1' or \
120
- self.model == 'o3' or \
121
- self.model == 'o3-mini' or \
122
- self.model == 'o4-mini':
123
- return 100_000
124
- if self.model == 'gpt-5' or \
125
- self.model == 'gpt-5-mini' or \
126
- self.model == 'gpt-5-nano' or \
127
- self.model == 'gpt-5-chat-latest':
137
+ if (
138
+ self.model == "o1"
139
+ or self.model == "o3"
140
+ or self.model == "o3-mini"
141
+ or self.model == "o4-mini"
142
+ ):
143
+ return 100_000
144
+ if (
145
+ self.model == "gpt-5"
146
+ or self.model == "gpt-5.1"
147
+ or self.model == "gpt-5-mini"
148
+ or self.model == "gpt-5-nano"
149
+ ):
128
150
  return 128_000
129
- msg = f'Unsupported model: {self.model}'
151
+ msg = f"Unsupported model: {self.model}"
130
152
  UserMessage(msg)
131
153
  raise ValueError(msg)
132
154
 
133
155
  def api_embedding_dims(self):
134
- if self.model == 'text-embedding-ada-002':
156
+ if self.model == "text-embedding-ada-002":
135
157
  return 1_536
136
- if self.model == 'text-embedding-3-small':
158
+ if self.model == "text-embedding-3-small":
137
159
  return 1_536
138
- if self.model == 'text-embedding-3-large':
160
+ if self.model == "text-embedding-3-large":
139
161
  return 3_072
140
- msg = f'Unsupported model: {self.model}'
162
+ msg = f"Unsupported model: {self.model}"
141
163
  UserMessage(msg)
142
164
  raise ValueError(msg)
symai/backend/settings.py CHANGED
@@ -14,8 +14,8 @@ class SymAIConfig:
14
14
  def __init__(self):
15
15
  """Initialize configuration paths based on current Python environment."""
16
16
  self._env_path = Path(sys.prefix)
17
- self._env_config_dir = self._env_path / '.symai'
18
- self._home_config_dir = Path.home() / '.symai'
17
+ self._env_config_dir = self._env_path / ".symai"
18
+ self._home_config_dir = Path.home() / ".symai"
19
19
  self._debug_dir = Path.cwd() # Current working directory for debug mode
20
20
  self._active_paths: dict[str, Path] = {}
21
21
 
@@ -31,7 +31,8 @@ class SymAIConfig:
31
31
  target_path = Path(key)
32
32
  target_name = target_path.name or key
33
33
  stale_keys: list[Path] = [
34
- existing_key for existing_key in self._active_paths
34
+ existing_key
35
+ for existing_key in self._active_paths
35
36
  if isinstance(existing_key, Path)
36
37
  and (existing_key.name == target_name or str(existing_key) == key)
37
38
  ]
@@ -42,8 +43,8 @@ class SymAIConfig:
42
43
  def config_dir(self) -> Path:
43
44
  """Returns the active configuration directory based on priority system."""
44
45
  # Debug mode takes precedence
45
- if (self._debug_dir / 'symai.config.json').exists():
46
- return self._debug_dir / '.symai'
46
+ if (self._debug_dir / "symai.config.json").exists():
47
+ return self._debug_dir / ".symai"
47
48
  # Then environment config
48
49
  if self._env_config_dir.exists():
49
50
  return self._env_config_dir
@@ -60,11 +61,11 @@ class SymAIConfig:
60
61
  # Only use the basename for managed directories
61
62
  normalized_filename = Path(normalized_filename).name
62
63
  debug_config = self._debug_dir / normalized_filename
63
- env_config = self._env_config_dir / normalized_filename
64
- home_config = self._home_config_dir / normalized_filename
64
+ env_config = self._env_config_dir / normalized_filename
65
+ home_config = self._home_config_dir / normalized_filename
65
66
 
66
67
  # Check debug first (only valid for symai.config.json)
67
- if normalized_filename == 'symai.config.json' and debug_config.exists():
68
+ if normalized_filename == "symai.config.json" and debug_config.exists():
68
69
  return debug_config
69
70
 
70
71
  # If forced to fallback, return home config if it exists, otherwise environment
@@ -88,7 +89,7 @@ class SymAIConfig:
88
89
  self._remove_legacy_path_keys(key)
89
90
  self._active_paths.pop(key, None)
90
91
  return {}
91
- with config_path.open(encoding='utf-8') as f:
92
+ with config_path.open(encoding="utf-8") as f:
92
93
  config = json.load(f)
93
94
  self._remove_legacy_path_keys(key)
94
95
  self._active_paths[key] = config_path
@@ -99,7 +100,7 @@ class SymAIConfig:
99
100
  config_path = self.get_config_path(filename, fallback_to_home=fallback_to_home)
100
101
  key = self._canonical_key(filename)
101
102
  config_path.parent.mkdir(parents=True, exist_ok=True)
102
- with config_path.open('w', encoding='utf-8') as f:
103
+ with config_path.open("w", encoding="utf-8") as f:
103
104
  json.dump(data, f, indent=4)
104
105
  self._remove_legacy_path_keys(key)
105
106
  self._active_paths[key] = config_path
@@ -117,9 +118,7 @@ class SymAIConfig:
117
118
  if cached is not None:
118
119
  return cached
119
120
  for legacy_key, cached_path in list(self._active_paths.items()):
120
- if isinstance(legacy_key, Path) and (
121
- legacy_key.name == key or str(legacy_key) == key
122
- ):
121
+ if isinstance(legacy_key, Path) and (legacy_key.name == key or str(legacy_key) == key):
123
122
  self._active_paths.pop(legacy_key, None)
124
123
  self._active_paths[key] = cached_path
125
124
  return cached_path
@@ -127,7 +126,7 @@ class SymAIConfig:
127
126
 
128
127
  def get_active_config_dir(self) -> Path:
129
128
  """Returns the directory backing the active symai configuration."""
130
- symai_key = self._canonical_key('symai.config.json')
129
+ symai_key = self._canonical_key("symai.config.json")
131
130
  cached = self._active_paths.get(symai_key)
132
131
  if cached is not None:
133
132
  return cached.parent
@@ -140,6 +139,7 @@ class SymAIConfig:
140
139
  return cached_path.parent
141
140
  return self.config_dir
142
141
 
142
+
143
143
  SYMAI_CONFIG = {}
144
144
  SYMSH_CONFIG = {}
145
145
  SYMSERVER_CONFIG = {}