graphiti-core 0.5.0rc2__py3-none-any.whl → 0.5.0rc3__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of graphiti-core might be problematic.
- graphiti_core/llm_client/openai_client.py +45 -4
- {graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/METADATA +1 -1
- {graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/RECORD +5 -5
- {graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/LICENSE +0 -0
- {graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/WHEEL +0 -0
graphiti_core/llm_client/openai_client.py

@@ -16,6 +16,7 @@ limitations under the License.
 
 import logging
 import typing
+from typing import ClassVar
 
 import openai
 from openai import AsyncOpenAI
@@ -53,6 +54,9 @@ class OpenAIClient(LLMClient):
             Generates a response from the language model based on the provided messages.
     """
 
+    # Class-level constants
+    MAX_RETRIES: ClassVar[int] = 2
+
     def __init__(
         self, config: LLMConfig | None = None, cache: bool = False, client: typing.Any = None
     ):
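The `ClassVar[int]` annotation marks `MAX_RETRIES` as a class-level constant rather than a per-instance attribute, which is what the new `from typing import ClassVar` import above supports. A minimal sketch of the pattern, independent of graphiti-core:

from typing import ClassVar


class Client:
    # Shared by all instances; type checkers will flag attempts to
    # shadow it with an instance attribute in __init__.
    MAX_RETRIES: ClassVar[int] = 2

    def retry_budget(self) -> int:
        return self.MAX_RETRIES  # resolved via the class


print(Client.MAX_RETRIES, Client().retry_budget())  # -> 2 2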
@@ -104,7 +108,7 @@ class OpenAIClient(LLMClient):
             elif response_object.refusal:
                 raise RefusalError(response_object.refusal)
             else:
-                raise Exception('
+                raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
         except openai.LengthFinishReasonError as e:
             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
         except openai.RateLimitError as e:
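The replacement exception embeds `model_dump()`, Pydantic's dict serialization of the parsed response object, so the raised error carries the full invalid payload rather than a bare message. A minimal sketch of the same idea, with a hypothetical `ParsedResponse` model standing in for the SDK's parsed-response type:

from pydantic import BaseModel


class ParsedResponse(BaseModel):
    content: str | None = None
    refusal: str | None = None


resp = ParsedResponse()  # neither content nor refusal was populated
if resp.content is None and resp.refusal is None:
    # The dict dump makes the failure diagnosable from the log alone
    raise Exception(f'Invalid response from LLM: {resp.model_dump()}')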
@@ -116,6 +120,43 @@
     async def generate_response(
         self, messages: list[Message], response_model: type[BaseModel] | None = None
     ) -> dict[str, typing.Any]:
-
-
-
+        retry_count = 0
+        last_error = None
+
+        while retry_count <= self.MAX_RETRIES:
+            try:
+                response = await self._generate_response(messages, response_model)
+                return response
+            except (RateLimitError, RefusalError):
+                # These errors should not trigger retries
+                raise
+            except (openai.APITimeoutError, openai.APIConnectionError, openai.InternalServerError):
+                # Let OpenAI's client handle these retries
+                raise
+            except Exception as e:
+                last_error = e
+
+                # Don't retry if we've hit the max retries
+                if retry_count >= self.MAX_RETRIES:
+                    logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
+                    raise
+
+                retry_count += 1
+
+                # Construct a detailed error message for the LLM
+                error_context = (
+                    f'The previous response attempt was invalid. '
+                    f'Error type: {e.__class__.__name__}. '
+                    f'Error details: {str(e)}. '
+                    f'Please try again with a valid response, ensuring the output matches '
+                    f'the expected format and constraints.'
+                )
+
+                error_message = Message(role='user', content=error_context)
+                messages.append(error_message)
+                logger.warning(
+                    f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
+                )
+
+        # If we somehow get here, raise the last error
+        raise last_error or Exception('Max retries exceeded with no specific error')
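In short, `generate_response` now wraps `_generate_response` in a bounded retry loop: rate-limit and refusal errors propagate immediately, transport-level errors are left to the OpenAI client's own retry machinery, and any other failure is appended to the conversation as a user message so the model can correct itself on the next attempt. A self-contained sketch of that feedback-retry pattern (the `flaky_llm_call` helper and `ValidationFailure` error are hypothetical stand-ins, not graphiti-core APIs):

import asyncio
import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)

MAX_RETRIES = 2  # mirrors OpenAIClient.MAX_RETRIES in the diff above


@dataclass
class Message:
    role: str
    content: str


class ValidationFailure(Exception):
    """Hypothetical stand-in for an application-level validation error."""


async def flaky_llm_call(messages: list[Message]) -> dict:
    # Hypothetical model call: fails until the conversation contains
    # feedback about a prior invalid attempt.
    if not any('previous response attempt was invalid' in m.content for m in messages):
        raise ValidationFailure('output did not match the expected schema')
    return {'ok': True, 'turns': len(messages)}


async def generate_with_feedback(messages: list[Message]) -> dict:
    retry_count = 0
    while retry_count <= MAX_RETRIES:
        try:
            return await flaky_llm_call(messages)
        except ValidationFailure as e:
            if retry_count >= MAX_RETRIES:
                raise  # budget exhausted; surface the last error
            retry_count += 1
            # Key idea from the diff: feed the error back as a user message.
            messages.append(
                Message(
                    role='user',
                    content=f'The previous response attempt was invalid. '
                    f'Error details: {e}. Please try again with a valid response.',
                )
            )
            logger.warning('Retrying (%d/%d): %s', retry_count, MAX_RETRIES, e)
    raise RuntimeError('unreachable: the loop either returns or re-raises')


print(asyncio.run(generate_with_feedback([Message('user', 'Extract the entities.')])))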
{graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/RECORD

@@ -17,7 +17,7 @@ graphiti_core/llm_client/client.py,sha256=rt0Ic0RiKNbleHCkMYaqI6SUH_K9SkbJtdv7Ty
 graphiti_core/llm_client/config.py,sha256=VwtvD0B7TNqE6Cl-rvH5v-bAfmjMLhEUuFmHSPt10EI,2339
 graphiti_core/llm_client/errors.py,sha256=Vk0mj2SgNDg8E8p7m1UyUaerqLPNLCDKPVsMEnOSBdQ,1028
 graphiti_core/llm_client/groq_client.py,sha256=A4TcbBGXyF5Br5Ggm7qnvso76L1ERO4JoCA2HlzDEyI,2421
-graphiti_core/llm_client/openai_client.py,sha256=
+graphiti_core/llm_client/openai_client.py,sha256=Nz8X-4huWC3nKG9azEOSFsRYpq5bLL3S7YRuvY-hYUU,6502
 graphiti_core/llm_client/utils.py,sha256=zKpxXEbKa369m4W7RDEf-m56kH46V1Mx3RowcWZEWWs,1000
 graphiti_core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/models/edges/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -52,7 +52,7 @@ graphiti_core/utils/maintenance/graph_data_operations.py,sha256=w66_SLlvPapuG91Y
 graphiti_core/utils/maintenance/node_operations.py,sha256=KREaJeMFGE4RfQ0TAOsA7t50rcH8KZQV4GZhVeAyzUM,12031
 graphiti_core/utils/maintenance/temporal_operations.py,sha256=3fVTVRJ75IfsS7j8DkF0yVbtRf0TAaFKgr0KWvkJK9g,3561
 graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core-0.5.
-graphiti_core-0.5.
-graphiti_core-0.5.
-graphiti_core-0.5.
+graphiti_core-0.5.0rc3.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+graphiti_core-0.5.0rc3.dist-info/METADATA,sha256=NEz-6xou_CrN9JoF9VHgIdIt65x-FhNooTcLBrGZXHA,10061
+graphiti_core-0.5.0rc3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+graphiti_core-0.5.0rc3.dist-info/RECORD,,
{graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/LICENSE: file without changes
{graphiti_core-0.5.0rc2.dist-info → graphiti_core-0.5.0rc3.dist-info}/WHEEL: file without changes