symbolicai 0.21.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. symai/__init__.py +96 -64
  2. symai/backend/base.py +93 -80
  3. symai/backend/engines/drawing/engine_bfl.py +12 -11
  4. symai/backend/engines/drawing/engine_gpt_image.py +108 -87
  5. symai/backend/engines/embedding/engine_llama_cpp.py +20 -24
  6. symai/backend/engines/embedding/engine_openai.py +3 -5
  7. symai/backend/engines/execute/engine_python.py +6 -5
  8. symai/backend/engines/files/engine_io.py +74 -67
  9. symai/backend/engines/imagecaptioning/engine_blip2.py +3 -3
  10. symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +54 -38
  11. symai/backend/engines/index/engine_pinecone.py +23 -24
  12. symai/backend/engines/index/engine_vectordb.py +16 -14
  13. symai/backend/engines/lean/engine_lean4.py +38 -34
  14. symai/backend/engines/neurosymbolic/__init__.py +41 -13
  15. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +262 -182
  16. symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +263 -191
  17. symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +53 -49
  18. symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +212 -211
  19. symai/backend/engines/neurosymbolic/engine_groq.py +87 -63
  20. symai/backend/engines/neurosymbolic/engine_huggingface.py +21 -24
  21. symai/backend/engines/neurosymbolic/engine_llama_cpp.py +44 -46
  22. symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +256 -229
  23. symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +270 -150
  24. symai/backend/engines/ocr/engine_apilayer.py +6 -8
  25. symai/backend/engines/output/engine_stdout.py +1 -4
  26. symai/backend/engines/search/engine_openai.py +7 -7
  27. symai/backend/engines/search/engine_perplexity.py +5 -5
  28. symai/backend/engines/search/engine_serpapi.py +12 -14
  29. symai/backend/engines/speech_to_text/engine_local_whisper.py +20 -27
  30. symai/backend/engines/symbolic/engine_wolframalpha.py +3 -3
  31. symai/backend/engines/text_to_speech/engine_openai.py +5 -7
  32. symai/backend/engines/text_vision/engine_clip.py +7 -11
  33. symai/backend/engines/userinput/engine_console.py +3 -3
  34. symai/backend/engines/webscraping/engine_requests.py +81 -48
  35. symai/backend/mixin/__init__.py +13 -0
  36. symai/backend/mixin/anthropic.py +4 -2
  37. symai/backend/mixin/deepseek.py +2 -0
  38. symai/backend/mixin/google.py +2 -0
  39. symai/backend/mixin/openai.py +11 -3
  40. symai/backend/settings.py +83 -16
  41. symai/chat.py +101 -78
  42. symai/collect/__init__.py +7 -1
  43. symai/collect/dynamic.py +77 -69
  44. symai/collect/pipeline.py +35 -27
  45. symai/collect/stats.py +75 -63
  46. symai/components.py +198 -169
  47. symai/constraints.py +15 -12
  48. symai/core.py +698 -359
  49. symai/core_ext.py +32 -34
  50. symai/endpoints/api.py +80 -73
  51. symai/extended/.DS_Store +0 -0
  52. symai/extended/__init__.py +46 -12
  53. symai/extended/api_builder.py +11 -8
  54. symai/extended/arxiv_pdf_parser.py +13 -12
  55. symai/extended/bibtex_parser.py +2 -3
  56. symai/extended/conversation.py +101 -90
  57. symai/extended/document.py +17 -10
  58. symai/extended/file_merger.py +18 -13
  59. symai/extended/graph.py +18 -13
  60. symai/extended/html_style_template.py +2 -4
  61. symai/extended/interfaces/blip_2.py +1 -2
  62. symai/extended/interfaces/clip.py +1 -2
  63. symai/extended/interfaces/console.py +7 -1
  64. symai/extended/interfaces/dall_e.py +1 -1
  65. symai/extended/interfaces/flux.py +1 -1
  66. symai/extended/interfaces/gpt_image.py +1 -1
  67. symai/extended/interfaces/input.py +1 -1
  68. symai/extended/interfaces/llava.py +0 -1
  69. symai/extended/interfaces/naive_vectordb.py +7 -8
  70. symai/extended/interfaces/naive_webscraping.py +1 -1
  71. symai/extended/interfaces/ocr.py +1 -1
  72. symai/extended/interfaces/pinecone.py +6 -5
  73. symai/extended/interfaces/serpapi.py +1 -1
  74. symai/extended/interfaces/terminal.py +2 -3
  75. symai/extended/interfaces/tts.py +1 -1
  76. symai/extended/interfaces/whisper.py +1 -1
  77. symai/extended/interfaces/wolframalpha.py +1 -1
  78. symai/extended/metrics/__init__.py +11 -1
  79. symai/extended/metrics/similarity.py +11 -13
  80. symai/extended/os_command.py +17 -16
  81. symai/extended/packages/__init__.py +29 -3
  82. symai/extended/packages/symdev.py +19 -16
  83. symai/extended/packages/sympkg.py +12 -9
  84. symai/extended/packages/symrun.py +21 -19
  85. symai/extended/repo_cloner.py +11 -10
  86. symai/extended/seo_query_optimizer.py +1 -2
  87. symai/extended/solver.py +20 -23
  88. symai/extended/summarizer.py +4 -3
  89. symai/extended/taypan_interpreter.py +10 -12
  90. symai/extended/vectordb.py +99 -82
  91. symai/formatter/__init__.py +9 -1
  92. symai/formatter/formatter.py +12 -16
  93. symai/formatter/regex.py +62 -63
  94. symai/functional.py +173 -122
  95. symai/imports.py +136 -127
  96. symai/interfaces.py +56 -27
  97. symai/memory.py +14 -13
  98. symai/misc/console.py +49 -39
  99. symai/misc/loader.py +5 -3
  100. symai/models/__init__.py +17 -1
  101. symai/models/base.py +269 -181
  102. symai/models/errors.py +0 -1
  103. symai/ops/__init__.py +32 -22
  104. symai/ops/measures.py +11 -15
  105. symai/ops/primitives.py +348 -228
  106. symai/post_processors.py +32 -28
  107. symai/pre_processors.py +39 -41
  108. symai/processor.py +6 -4
  109. symai/prompts.py +59 -45
  110. symai/server/huggingface_server.py +23 -20
  111. symai/server/llama_cpp_server.py +7 -5
  112. symai/shell.py +3 -4
  113. symai/shellsv.py +499 -375
  114. symai/strategy.py +517 -287
  115. symai/symbol.py +111 -116
  116. symai/utils.py +42 -36
  117. {symbolicai-0.21.0.dist-info → symbolicai-1.0.0.dist-info}/METADATA +4 -2
  118. symbolicai-1.0.0.dist-info/RECORD +163 -0
  119. symbolicai-0.21.0.dist-info/RECORD +0 -162
  120. {symbolicai-0.21.0.dist-info → symbolicai-1.0.0.dist-info}/WHEEL +0 -0
  121. {symbolicai-0.21.0.dist-info → symbolicai-1.0.0.dist-info}/entry_points.txt +0 -0
  122. {symbolicai-0.21.0.dist-info → symbolicai-1.0.0.dist-info}/licenses/LICENSE +0 -0
  123. {symbolicai-0.21.0.dist-info → symbolicai-1.0.0.dist-info}/top_level.txt +0 -0
@@ -1,13 +1,14 @@
1
1
  import base64
2
+ import contextlib
2
3
  import logging
3
4
  import tempfile
4
5
  from pathlib import Path
5
- from typing import Optional
6
6
 
7
7
  import openai
8
8
  import requests
9
9
 
10
10
  from ....symbol import Result
11
+ from ....utils import UserMessage
11
12
  from ...base import Engine
12
13
  from ...settings import SYMAI_CONFIG
13
14
 
@@ -31,15 +32,16 @@ class GPTImageResult(Result):
31
32
  for item in value.data:
32
33
  has_url = hasattr(item, "url")
33
34
  has_b64 = hasattr(item, "b64_json")
34
- path = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
35
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
36
+ path = tmp_file.name
35
37
  if has_url and item.url is not None:
36
38
  request = requests.get(item.url, allow_redirects=True)
37
39
  request.raise_for_status()
38
- with open(path, "wb") as f:
40
+ with Path(path).open("wb") as f:
39
41
  f.write(request.content)
40
42
  elif has_b64 and item.b64_json is not None:
41
43
  raw = base64.b64decode(item.b64_json)
42
- with open(path, "wb") as f:
44
+ with Path(path).open("wb") as f:
43
45
  f.write(raw)
44
46
  imgs.append(path)
45
47
  self._value = imgs
@@ -47,14 +49,14 @@ class GPTImageResult(Result):
47
49
 
48
50
  class GPTImageEngine(Engine):
49
51
  """
50
- A dropin engine for OpenAI's unified Images API,
52
+ A drop-in engine for OpenAI's unified Images API,
51
53
  supporting gpt-image-1, dall-e-2, dall-e-3,
52
54
  with all the extra parameters (background, moderation, etc).
53
55
  """
54
56
  def __init__(
55
57
  self,
56
- api_key: Optional[str] = None,
57
- model: Optional[str] = None,
58
+ api_key: str | None = None,
59
+ model: str | None = None,
58
60
  ):
59
61
  super().__init__()
60
62
  self.config = SYMAI_CONFIG
@@ -83,7 +85,7 @@ class GPTImageEngine(Engine):
83
85
 
84
86
  def command(self, *args, **kwargs):
85
87
  """
86
- Allow hotswapping API key or model at runtime.
88
+ Allow hot-swapping API key or model at runtime.
87
89
  """
88
90
  super().command(*args, **kwargs)
89
91
  if "DRAWING_ENGINE_API_KEY" in kwargs:
@@ -105,98 +107,117 @@ class GPTImageEngine(Engine):
105
107
  operation = kwargs.get("operation")
106
108
 
107
109
  if operation is None:
108
- raise ValueError("Operation not specified!")
110
+ UserMessage("Operation not specified!", raise_with=ValueError)
109
111
 
110
112
  n = kwargs.get("n", 1)
111
113
 
112
- if "size" in kwargs:
113
- if isinstance(kwargs["size"], int):
114
- s = kwargs["size"]
115
- kwargs["size"] = f"{s}x{s}"
114
+ self._normalize_size(kwargs)
116
115
 
117
116
  except_remedy = kwargs.get("except_remedy", None)
118
117
 
119
118
  callback = None
120
119
  try:
121
- if operation == "create":
122
- create_kwargs = {
123
- "model": model,
124
- "prompt": prompt,
125
- "n": n,
126
- "size": kwargs.get("size"),
127
- }
128
-
129
- if model == "dall-e-3":
130
- create_kwargs["response_format"] = kwargs.get("response_format", "url")
131
- create_kwargs["quality"] = kwargs.get("quality", "standard")
132
- create_kwargs["style"] = kwargs.get("style", "vivid")
133
-
134
- if model.startswith("gpt-image-"):
135
- create_kwargs["quality"] = kwargs.get("quality", "medium")
136
- create_kwargs["moderation"] = kwargs.get("moderation", "auto")
137
- create_kwargs["background"] = kwargs.get("background", "auto")
138
- create_kwargs["output_format"] = kwargs.get("output_compression", "png")
139
- if create_kwargs["output_format"] == "jpeg" or create_kwargs["output_format"] == "webp":
140
- create_kwargs["output_compression"] = kwargs.get("output_compression", "100")
141
-
142
- callback = openai.images.generate
143
- res = openai.images.generate(**create_kwargs)
144
-
145
- elif operation == "variation":
146
- assert "image_path" in kwargs, "image_path required for variation"
147
- callback = openai.images.create_variation
148
- with open(kwargs["image_path"], "rb") as img:
149
- res = openai.images.create_variation(
150
- model=model,
151
- image=img,
152
- n=n,
153
- size=kwargs.get("size"),
154
- response_format=kwargs.get("response_format", "url"),
155
- )
156
-
157
- elif operation == "edit":
158
- assert "image_path" in kwargs, "image_path required for edit"
159
- # allow either a single path or a list of paths
160
- img_paths = kwargs["image_path"]
161
- if not isinstance(img_paths, (list, tuple)):
162
- img_paths = [img_paths]
163
- # open all images
164
- image_files = [open(p, "rb") for p in img_paths]
165
- # optional mask (only for the first image)
166
- mask_file = None
167
- if "mask_path" in kwargs and kwargs["mask_path"] is not None:
168
- mask_file = open(kwargs["mask_path"], "rb")
169
- # construct API args
170
- edit_kwargs = {
171
- "model": model,
172
- "image": image_files if len(image_files) > 1 else image_files[0],
173
- "prompt": prompt,
174
- "n": n,
175
- "size": kwargs.get("size"),
176
- }
177
-
178
- if model.startswith("gpt-image-"):
179
- edit_kwargs["quality"] = kwargs.get("quality", "auto")
180
-
181
- if mask_file:
182
- edit_kwargs["mask"] = mask_file
183
- callback = openai.images.edit
184
-
185
- res = openai.images.edit(**edit_kwargs)
186
- # clean up file handles
187
- for f in image_files:
188
- f.close()
189
- if mask_file:
190
- mask_file.close()
191
- else:
192
- raise ValueError(f"Unknown image operation: {operation}")
193
-
120
+ callback = self._resolve_callback(operation)
121
+ callback, res = self._dispatch_operation(
122
+ operation=operation,
123
+ prompt=prompt,
124
+ model=model,
125
+ n=n,
126
+ kwargs=kwargs,
127
+ )
194
128
  except Exception as e:
195
129
  if except_remedy is None:
196
130
  raise
197
131
  res = except_remedy(self, e, callback, argument)
198
132
 
199
- # wrap it up
200
133
  metadata = {}
201
134
  result = GPTImageResult(res)
202
135
  return [result], metadata
136
+
137
+ def _normalize_size(self, kwargs):
138
+ if "size" in kwargs and isinstance(kwargs["size"], int):
139
+ s = kwargs["size"]
140
+ kwargs["size"] = f"{s}x{s}"
141
+
142
+ def _resolve_callback(self, operation):
143
+ if operation == "create":
144
+ return openai.images.generate
145
+ if operation == "variation":
146
+ return openai.images.create_variation
147
+ if operation == "edit":
148
+ return openai.images.edit
149
+ UserMessage(f"Unknown image operation: {operation}", raise_with=ValueError)
150
+ return openai.images.generate
151
+
152
+ def _dispatch_operation(self, operation, prompt, model, n, kwargs):
153
+ if operation == "create":
154
+ return self._execute_create(prompt, model, n, kwargs)
155
+ if operation == "variation":
156
+ return self._execute_variation(model, n, kwargs)
157
+ if operation == "edit":
158
+ return self._execute_edit(prompt, model, n, kwargs)
159
+ return UserMessage(f"Unknown image operation: {operation}", raise_with=ValueError)
160
+
161
+ def _execute_create(self, prompt, model, n, kwargs):
162
+ create_kwargs = {
163
+ "model": model,
164
+ "prompt": prompt,
165
+ "n": n,
166
+ "size": kwargs.get("size"),
167
+ }
168
+
169
+ if model == "dall-e-3":
170
+ create_kwargs["response_format"] = kwargs.get("response_format", "url")
171
+ create_kwargs["quality"] = kwargs.get("quality", "standard")
172
+ create_kwargs["style"] = kwargs.get("style", "vivid")
173
+
174
+ if model.startswith("gpt-image-"):
175
+ create_kwargs["quality"] = kwargs.get("quality", "medium")
176
+ create_kwargs["moderation"] = kwargs.get("moderation", "auto")
177
+ create_kwargs["background"] = kwargs.get("background", "auto")
178
+ create_kwargs["output_format"] = kwargs.get("output_compression", "png")
179
+ if create_kwargs["output_format"] == "jpeg" or create_kwargs["output_format"] == "webp":
180
+ create_kwargs["output_compression"] = kwargs.get("output_compression", "100")
181
+
182
+ callback = openai.images.generate
183
+ return callback, callback(**create_kwargs)
184
+
185
+ def _execute_variation(self, model, n, kwargs):
186
+ assert "image_path" in kwargs, "image_path required for variation"
187
+ callback = openai.images.create_variation
188
+ with Path(kwargs["image_path"]).open("rb") as img:
189
+ result = callback(
190
+ model=model,
191
+ image=img,
192
+ n=n,
193
+ size=kwargs.get("size"),
194
+ response_format=kwargs.get("response_format", "url"),
195
+ )
196
+ return callback, result
197
+
198
+ def _execute_edit(self, prompt, model, n, kwargs):
199
+ assert "image_path" in kwargs, "image_path required for edit"
200
+ img_paths = kwargs["image_path"]
201
+ if not isinstance(img_paths, (list, tuple)):
202
+ img_paths = [img_paths]
203
+ with contextlib.ExitStack() as stack:
204
+ image_files = [stack.enter_context(Path(p).open("rb")) for p in img_paths]
205
+ mask_file = None
206
+ if "mask_path" in kwargs and kwargs["mask_path"] is not None:
207
+ mask_file = stack.enter_context(Path(kwargs["mask_path"]).open("rb"))
208
+ edit_kwargs = {
209
+ "model": model,
210
+ "image": image_files if len(image_files) > 1 else image_files[0],
211
+ "prompt": prompt,
212
+ "n": n,
213
+ "size": kwargs.get("size"),
214
+ }
215
+
216
+ if model.startswith("gpt-image-"):
217
+ edit_kwargs["quality"] = kwargs.get("quality", "auto")
218
+
219
+ if mask_file:
220
+ edit_kwargs["mask"] = mask_file
221
+ callback = openai.images.edit
222
+ result = callback(**edit_kwargs)
223
+ return callback, result
@@ -1,14 +1,12 @@
1
1
  import asyncio
2
2
  import logging
3
- from multiprocessing import Value
4
- from typing import Optional
3
+ from typing import Any, ClassVar
5
4
 
6
5
  import aiohttp
7
6
  import nest_asyncio
8
- import numpy as np
9
7
 
10
8
  from ....core_ext import retry
11
- from ....utils import CustomUserWarning
9
+ from ....utils import UserMessage
12
10
  from ...base import Engine
13
11
  from ...settings import SYMAI_CONFIG, SYMSERVER_CONFIG
14
12
 
@@ -18,7 +16,7 @@ logging.getLogger("httpx").setLevel(logging.ERROR)
18
16
  logging.getLogger("httpcore").setLevel(logging.ERROR)
19
17
 
20
18
  class LlamaCppEmbeddingEngine(Engine):
21
- _retry_params = {
19
+ _retry_params: ClassVar[dict[str, Any]] = {
22
20
  'tries': 5,
23
21
  'delay': 2,
24
22
  'max_delay': 60,
@@ -26,7 +24,7 @@ class LlamaCppEmbeddingEngine(Engine):
26
24
  'jitter': (1, 5),
27
25
  'graceful': True
28
26
  }
29
- _timeout_params = {
27
+ _timeout_params: ClassVar[dict[str, Any]] = {
30
28
  'read': None,
31
29
  'connect': None,
32
30
  }
@@ -41,7 +39,7 @@ class LlamaCppEmbeddingEngine(Engine):
41
39
  if self.id() != 'embedding':
42
40
  return
43
41
  if not SYMSERVER_CONFIG.get('online'):
44
- CustomUserWarning('You are using the llama.cpp embedding engine, but the server endpoint is not started. Please start the server with `symserver [--args]`.', raise_with=ValueError)
42
+ UserMessage('You are using the llama.cpp embedding engine, but the server endpoint is not started. Please start the server with `symserver [--args]`.', raise_with=ValueError)
45
43
 
46
44
  self.server_endpoint = f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
47
45
  self.timeout_params = self._validate_timeout_params(timeout_params)
@@ -60,13 +58,13 @@ class LlamaCppEmbeddingEngine(Engine):
60
58
 
61
59
  def _validate_timeout_params(self, timeout_params):
62
60
  if not isinstance(timeout_params, dict):
63
- raise ValueError("timeout_params must be a dictionary")
61
+ UserMessage("timeout_params must be a dictionary", raise_with=ValueError)
64
62
  assert all(key in timeout_params for key in ['read', 'connect']), "Available keys: ['read', 'connect']"
65
63
  return timeout_params
66
64
 
67
65
  def _validate_retry_params(self, retry_params):
68
66
  if not isinstance(retry_params, dict):
69
- raise ValueError("retry_params must be a dictionary")
67
+ UserMessage("retry_params must be a dictionary", raise_with=ValueError)
70
68
  assert all(key in retry_params for key in ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']), \
71
69
  "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
72
70
  return retry_params
@@ -77,7 +75,9 @@ class LlamaCppEmbeddingEngine(Engine):
77
75
  try:
78
76
  current_loop = asyncio.get_event_loop()
79
77
  if current_loop.is_closed():
80
- raise RuntimeError("Event loop is closed.")
78
+ msg = "Event loop is closed."
79
+ UserMessage(msg)
80
+ raise RuntimeError(msg)
81
81
  return current_loop
82
82
  except RuntimeError:
83
83
  new_loop = asyncio.new_event_loop()
@@ -92,14 +92,13 @@ class LlamaCppEmbeddingEngine(Engine):
92
92
  sock_connect=self.timeout_params['connect'],
93
93
  sock_read=self.timeout_params['read']
94
94
  )
95
- async with aiohttp.ClientSession(timeout=timeout) as session:
96
- async with session.post(
97
- f"{self.server_endpoint}/v1/embeddings",
98
- json={"content": text, "embd_normalize": embd_normalize}
99
- ) as res:
100
- if res.status != 200:
101
- raise ValueError(f"Request failed with status code: {res.status}")
102
- return await res.json()
95
+ async with aiohttp.ClientSession(timeout=timeout) as session, session.post(
96
+ f"{self.server_endpoint}/v1/embeddings",
97
+ json={"content": text, "embd_normalize": embd_normalize}
98
+ ) as res:
99
+ if res.status != 200:
100
+ UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
101
+ return await res.json()
103
102
 
104
103
  return await _make_request()
105
104
 
@@ -112,7 +111,7 @@ class LlamaCppEmbeddingEngine(Engine):
112
111
 
113
112
  new_dim = kwargs.get('new_dim')
114
113
  if new_dim:
115
- raise NotImplementedError("new_dim is not yet supported")
114
+ UserMessage("new_dim is not yet supported", raise_with=NotImplementedError)
116
115
 
117
116
  nest_asyncio.apply()
118
117
  loop = self._get_event_loop()
@@ -120,12 +119,9 @@ class LlamaCppEmbeddingEngine(Engine):
120
119
  try:
121
120
  res = loop.run_until_complete(self._arequest(inp, embd_normalize))
122
121
  except Exception as e:
123
- raise ValueError(f"Request failed with error: {str(e)}")
122
+ UserMessage(f"Request failed with error: {e!s}", raise_with=ValueError)
124
123
 
125
- if res is not None:
126
- output = [r["embedding"] for r in res] # B x 1 x D
127
- else:
128
- output = None
124
+ output = [r["embedding"] for r in res] if res is not None else None # B x 1 x D
129
125
  metadata = {'raw_output': res}
130
126
 
131
127
  return [output], metadata
@@ -1,5 +1,4 @@
1
1
  import logging
2
- from typing import Optional
3
2
 
4
3
  import numpy as np
5
4
  import openai
@@ -16,7 +15,7 @@ logging.getLogger("httpcore").setLevel(logging.ERROR)
16
15
 
17
16
 
18
17
  class EmbeddingEngine(Engine, OpenAIMixin):
19
- def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
18
+ def __init__(self, api_key: str | None = None, model: str | None = None):
20
19
  super().__init__()
21
20
  logger = logging.getLogger('openai')
22
21
  logger.setLevel(logging.WARNING)
@@ -79,6 +78,5 @@ class EmbeddingEngine(Engine, OpenAIMixin):
79
78
  if norm == 0:
80
79
  return x.tolist()
81
80
  return (x / norm).tolist()
82
- else:
83
- norm = np.linalg.norm(x, 2, axis=1, keepdims=True)
84
- return np.where(norm == 0, x, x / norm).tolist()
81
+ norm = np.linalg.norm(x, 2, axis=1, keepdims=True)
82
+ return np.where(norm == 0, x, x / norm).tolist()
@@ -1,10 +1,11 @@
1
+ import sys
2
+ import traceback
3
+
1
4
  from ....symbol import Result
2
5
  from ...base import Engine
3
6
 
4
7
 
5
8
  def full_stack():
6
- import sys
7
- import traceback
8
9
  exc = sys.exc_info()[0]
9
10
  stack = traceback.extract_stack()[-10:-1] # last one would be full_stack()
10
11
  if exc is not None: # i.e. an exception is present
@@ -68,9 +69,9 @@ class PythonEngine(Engine):
68
69
  def forward(self, argument):
69
70
  code = argument.prop.prepared_input
70
71
  kwargs = argument.kwargs
71
- globals_ = kwargs['globals'] if 'globals' in kwargs else {}
72
- locals_ = kwargs['locals'] if 'locals' in kwargs else {}
73
- input_handler = kwargs['input_handler'] if 'input_handler' in kwargs else None
72
+ globals_ = kwargs.get('globals', {})
73
+ locals_ = kwargs.get('locals', {})
74
+ input_handler = kwargs.get('input_handler')
74
75
  if input_handler:
75
76
  input_handler((code,))
76
77