osmosis-ai 0.1.8__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of osmosis-ai might be problematic. Click here for more details.
- osmosis_ai/__init__.py +9 -131
- osmosis_ai/consts.py +2 -16
- osmosis_ai/utils.py +46 -104
- osmosis_ai-0.2.0.dist-info/METADATA +143 -0
- osmosis_ai-0.2.0.dist-info/RECORD +8 -0
- {osmosis_ai-0.1.8.dist-info → osmosis_ai-0.2.0.dist-info}/licenses/LICENSE +1 -1
- osmosis_ai/adapters/__init__.py +0 -9
- osmosis_ai/adapters/anthropic.py +0 -502
- osmosis_ai/adapters/langchain.py +0 -674
- osmosis_ai/adapters/langchain_anthropic.py +0 -338
- osmosis_ai/adapters/langchain_openai.py +0 -596
- osmosis_ai/adapters/openai.py +0 -900
- osmosis_ai/logger.py +0 -77
- osmosis_ai-0.1.8.dist-info/METADATA +0 -281
- osmosis_ai-0.1.8.dist-info/RECORD +0 -15
- {osmosis_ai-0.1.8.dist-info → osmosis_ai-0.2.0.dist-info}/WHEEL +0 -0
- {osmosis_ai-0.1.8.dist-info → osmosis_ai-0.2.0.dist-info}/top_level.txt +0 -0
|
@@ -1,596 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Langchain-OpenAI adapter for Osmosis
|
|
3
|
-
|
|
4
|
-
This module provides monkey patching for the langchain-openai package.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
import functools
|
|
8
|
-
import sys
|
|
9
|
-
|
|
10
|
-
from osmosis_ai import utils
|
|
11
|
-
from osmosis_ai.utils import send_to_osmosis
|
|
12
|
-
from osmosis_ai.logger import logger
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
def wrap_langchain_openai() -> None:
    """Monkey patch langchain-openai's models to send all prompts and responses to OSMOSIS.

    Call this before constructing any langchain-openai model objects; patching an
    already-instantiated model still works because methods are patched on the class.
    """
    try:
        import langchain_openai  # noqa: F401 -- presence check only
    except ImportError:
        logger.debug("langchain-openai package is not installed.")
        return

    # Apply both patch passes (chat models, then completion-style LLMs).
    for patcher in (_patch_openai_chat_models, _patch_openai_llm_models):
        patcher()

    logger.info("langchain-openai has been wrapped by osmosis-ai.")
|
-
|
|
32
|
-
|
|
33
|
-
def _patch_openai_chat_models() -> None:
    """Patch langchain-openai chat model classes to send data to OSMOSIS.

    Wraps ChatOpenAI._generate/_agenerate/_call/_acall (whichever exist) so each
    invocation is mirrored to OSMOSIS via ``send_to_osmosis`` when capture is
    enabled. Idempotent: already-wrapped methods are detected via the
    ``_osmosis_aiped`` marker attribute and skipped.
    """
    try:
        # Locate ChatOpenAI, falling back to the legacy langchain location.
        try:
            from langchain_openai import ChatOpenAI

            logger.info("Successfully imported ChatOpenAI from langchain_openai")
        except ImportError:
            try:
                from langchain.chat_models.openai import ChatOpenAI

                logger.info("Found ChatOpenAI in langchain.chat_models.openai")
            except ImportError:
                logger.warning(
                    "Could not find ChatOpenAI class in any expected location."
                )
                return

        # Log available methods on ChatOpenAI for debugging.
        chat_methods = [
            method
            for method in dir(ChatOpenAI)
            if not method.startswith("__")
            or method in ["_generate", "_agenerate", "_call", "_acall"]
        ]
        logger.info(f"Found the following methods on ChatOpenAI: {chat_methods}")

        # The attribute holding the model name differs across versions
        # (usually 'model_name', sometimes 'model').
        model_attr = None
        for attr in ["model_name", "model"]:
            if hasattr(ChatOpenAI, attr):
                model_attr = attr
                logger.info(f"ChatOpenAI uses '{attr}' attribute for model name")
                break

        if not model_attr:
            model_attr = "model_name"  # Default when we can't determine it
            logger.info(
                f"Could not determine model attribute name, defaulting to '{model_attr}'"
            )

        def _report(event_type, self, messages, response, stop, kwargs):
            """Mirror one chat invocation to OSMOSIS; no-op when capture is off.

            Messages and response may not be JSON-serializable, so both are
            stringified before being sent.
            """
            if not utils.enabled:
                return
            model_name = getattr(self, model_attr, "unknown_model")
            payload = {
                "model_type": "ChatOpenAI",
                "model_name": model_name,
                "messages": [str(msg) for msg in messages],
                "response": str(response),
                "kwargs": {"stop": stop, **kwargs},
            }
            send_to_osmosis(
                query={
                    "type": event_type,
                    "messages": [str(msg) for msg in messages],
                    "model": model_name,
                },
                response=payload,
                status=200,
            )

        # Patch the _generate method if it exists.
        if hasattr(ChatOpenAI, "_generate"):
            original_generate = ChatOpenAI._generate

            if not hasattr(original_generate, "_osmosis_aiped"):

                @functools.wraps(original_generate)
                def wrapped_generate(
                    self, messages, stop=None, run_manager=None, **kwargs
                ):
                    response = original_generate(
                        self, messages, stop=stop, run_manager=run_manager, **kwargs
                    )
                    _report(
                        "langchain_openai_generate", self, messages, response, stop, kwargs
                    )
                    return response

                wrapped_generate._osmosis_aiped = True
                ChatOpenAI._generate = wrapped_generate
                logger.info("Successfully wrapped ChatOpenAI._generate method")
            else:
                logger.info("ChatOpenAI._generate already wrapped.")
        else:
            logger.info("ChatOpenAI does not have a _generate method, skipping.")

        # Patch the _agenerate method if it exists.
        if hasattr(ChatOpenAI, "_agenerate"):
            original_agenerate = ChatOpenAI._agenerate

            if not hasattr(original_agenerate, "_osmosis_aiped"):

                @functools.wraps(original_agenerate)
                async def wrapped_agenerate(
                    self, messages, stop=None, run_manager=None, **kwargs
                ):
                    response = await original_agenerate(
                        self, messages, stop=stop, run_manager=run_manager, **kwargs
                    )
                    _report(
                        "langchain_openai_agenerate", self, messages, response, stop, kwargs
                    )
                    return response

                wrapped_agenerate._osmosis_aiped = True
                ChatOpenAI._agenerate = wrapped_agenerate
                logger.info("Successfully wrapped ChatOpenAI._agenerate method")
            else:
                logger.info("ChatOpenAI._agenerate already wrapped.")
        else:
            logger.info("ChatOpenAI does not have a _agenerate method, skipping.")

        # Patch _call method if it exists (used in newer versions).
        if hasattr(ChatOpenAI, "_call"):
            original_call = ChatOpenAI._call

            if not hasattr(original_call, "_osmosis_aiped"):

                @functools.wraps(original_call)
                def wrapped_call(self, messages, stop=None, run_manager=None, **kwargs):
                    # Keep the try narrow: only the model invocation may trigger
                    # the run_manager fallback. (Previously the telemetry call was
                    # inside the try, so a TypeError raised by send_to_osmosis
                    # would re-invoke the model a second time.)
                    try:
                        response = original_call(
                            self, messages, stop=stop, run_manager=run_manager, **kwargs
                        )
                    except TypeError as e:
                        # Handle parameter mismatch gracefully: older versions
                        # don't accept run_manager.
                        logger.warning(
                            f"TypeError in wrapped _call: {e}, trying without run_manager"
                        )
                        response = original_call(self, messages, stop=stop, **kwargs)
                        _report(
                            "langchain_openai_call_fallback",
                            self,
                            messages,
                            response,
                            stop,
                            kwargs,
                        )
                        return response

                    _report("langchain_openai_call", self, messages, response, stop, kwargs)
                    return response

                wrapped_call._osmosis_aiped = True
                ChatOpenAI._call = wrapped_call
                logger.info("Successfully wrapped ChatOpenAI._call method")
            else:
                logger.info("ChatOpenAI._call already wrapped.")
        else:
            logger.info("ChatOpenAI does not have a _call method, skipping.")

        # Patch _acall method if it exists.
        if hasattr(ChatOpenAI, "_acall"):
            original_acall = ChatOpenAI._acall

            if not hasattr(original_acall, "_osmosis_aiped"):

                @functools.wraps(original_acall)
                async def wrapped_acall(
                    self, messages, stop=None, run_manager=None, **kwargs
                ):
                    # Same narrow-try rationale as wrapped_call above.
                    try:
                        response = await original_acall(
                            self, messages, stop=stop, run_manager=run_manager, **kwargs
                        )
                    except TypeError as e:
                        logger.warning(
                            f"TypeError in wrapped _acall: {e}, trying without run_manager"
                        )
                        response = await original_acall(
                            self, messages, stop=stop, **kwargs
                        )
                        _report(
                            "langchain_openai_acall_fallback",
                            self,
                            messages,
                            response,
                            stop,
                            kwargs,
                        )
                        return response

                    _report(
                        "langchain_openai_acall", self, messages, response, stop, kwargs
                    )
                    return response

                wrapped_acall._osmosis_aiped = True
                ChatOpenAI._acall = wrapped_acall
                logger.info("Successfully wrapped ChatOpenAI._acall method")
            else:
                logger.info("ChatOpenAI._acall already wrapped.")
        else:
            logger.info("ChatOpenAI does not have a _acall method, skipping.")

    except Exception as e:
        logger.error(f"Failed to patch langchain-openai chat model classes: {e}")
|
342
|
-
|
|
343
|
-
|
|
344
|
-
def _patch_openai_llm_models() -> None:
    """Patch langchain-openai LLM classes to send data to OSMOSIS.

    Wraps the completion-style classes: OpenAI (and legacy langchain OpenAI),
    AzureOpenAI (_call/_acall), and AzureChatOpenAI (_generate). Idempotent via
    the ``_osmosis_aiped`` marker attribute. The duplicated per-class wrapper
    bodies are factored into one local helper; all log messages and OSMOSIS
    event-type strings are unchanged.
    """
    try:
        # Locate OpenAI, falling back to the legacy langchain location.
        try:
            from langchain_openai import OpenAI

            logger.info("Successfully imported OpenAI from langchain_openai")
        except ImportError:
            try:
                from langchain.llms.openai import OpenAI

                logger.info("Found OpenAI in langchain.llms.openai")
            except ImportError:
                logger.warning("Could not find OpenAI class in any expected location.")
                return

        def _patch_llm_class(llm_cls, label, name_attr, call_event, acall_event):
            """Wrap llm_cls._call/_acall to mirror prompt/response to OSMOSIS.

            label: model_type string and prefix for "already wrapped" logs.
            name_attr: instance attribute holding the model/deployment name.
            call_event/acall_event: OSMOSIS query "type" values.
            """

            def _report(event_type, self, prompt, response, stop, kwargs):
                # No-op unless capture is enabled.
                if not utils.enabled:
                    return
                model_name = getattr(self, name_attr)
                payload = {
                    "model_type": label,
                    "model_name": model_name,
                    "prompt": prompt,
                    "response": response,
                    "kwargs": {"stop": stop, **kwargs},
                }
                send_to_osmosis(
                    query={
                        "type": event_type,
                        "prompt": prompt,
                        "model": model_name,
                    },
                    response=payload,
                    status=200,
                )

            # Patch the _call method if it exists.
            if hasattr(llm_cls, "_call"):
                original_call = llm_cls._call

                if not hasattr(original_call, "_osmosis_aiped"):

                    @functools.wraps(original_call)
                    def wrapped_call(self, prompt, stop=None, run_manager=None, **kwargs):
                        response = original_call(
                            self, prompt, stop=stop, run_manager=run_manager, **kwargs
                        )
                        _report(call_event, self, prompt, response, stop, kwargs)
                        return response

                    wrapped_call._osmosis_aiped = True
                    llm_cls._call = wrapped_call
                else:
                    logger.info(f"{label}._call already wrapped.")

            # Patch the _acall method if it exists.
            if hasattr(llm_cls, "_acall"):
                original_acall = llm_cls._acall

                if not hasattr(original_acall, "_osmosis_aiped"):

                    @functools.wraps(original_acall)
                    async def wrapped_acall(
                        self, prompt, stop=None, run_manager=None, **kwargs
                    ):
                        response = await original_acall(
                            self, prompt, stop=stop, run_manager=run_manager, **kwargs
                        )
                        _report(acall_event, self, prompt, response, stop, kwargs)
                        return response

                    wrapped_acall._osmosis_aiped = True
                    llm_cls._acall = wrapped_acall
                else:
                    logger.info(f"{label}._acall already wrapped.")

        _patch_llm_class(
            OpenAI,
            "OpenAI",
            "model_name",
            "langchain_openai_llm_call",
            "langchain_openai_llm_acall",
        )

        # Also try to patch AzureOpenAI if available.
        try:
            from langchain_openai import AzureOpenAI

            logger.info("Found AzureOpenAI class, patching...")
            _patch_llm_class(
                AzureOpenAI,
                "AzureOpenAI",
                "deployment_name",
                "langchain_azure_openai_llm_call",
                "langchain_azure_openai_llm_acall",
            )
        except ImportError:
            logger.info("AzureOpenAI not found, skipping.")

        # Also try to patch AzureChatOpenAI if available (chat interface:
        # only _generate is wrapped, and messages replace the prompt).
        try:
            from langchain_openai import AzureChatOpenAI

            logger.info("Found AzureChatOpenAI class, patching...")

            if hasattr(AzureChatOpenAI, "_generate"):
                original_generate = AzureChatOpenAI._generate

                if not hasattr(original_generate, "_osmosis_aiped"):

                    @functools.wraps(original_generate)
                    def wrapped_generate(
                        self, messages, stop=None, run_manager=None, **kwargs
                    ):
                        response = original_generate(
                            self, messages, stop=stop, run_manager=run_manager, **kwargs
                        )

                        if utils.enabled:
                            # Messages/response may not be serializable; stringify.
                            payload = {
                                "model_type": "AzureChatOpenAI",
                                "model_name": self.deployment_name,
                                "messages": [str(msg) for msg in messages],
                                "response": str(response),
                                "kwargs": {"stop": stop, **kwargs},
                            }
                            send_to_osmosis(
                                query={
                                    "type": "langchain_azure_chat_openai",
                                    "messages": [str(msg) for msg in messages],
                                    "model": self.deployment_name,
                                },
                                response=payload,
                                status=200,
                            )

                        return response

                    wrapped_generate._osmosis_aiped = True
                    AzureChatOpenAI._generate = wrapped_generate
                else:
                    logger.info("AzureChatOpenAI._generate already wrapped.")
        except ImportError:
            logger.info("AzureChatOpenAI not found, skipping.")

    except Exception as e:
        logger.error(f"Failed to patch langchain-openai LLM classes: {e}")