osmosis-ai 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of osmosis-ai might be problematic; see the advisory in the package registry listing for more details.

@@ -1,5 +1,5 @@
1
1
  """
2
- Langchain-OpenAI adapter for Osmosis Wrap
2
+ Langchain-OpenAI adapter for Osmosis
3
3
 
4
4
  This module provides monkey patching for the langchain-openai package.
5
5
  """
@@ -11,10 +11,11 @@ from osmosis_ai import utils
11
11
  from osmosis_ai.utils import send_to_osmosis
12
12
  from osmosis_ai.logger import logger
13
13
 
14
+
14
15
  def wrap_langchain_openai() -> None:
15
16
  """
16
17
  Monkey patch langchain-openai's models to send all prompts and responses to OSMOSIS.
17
-
18
+
18
19
  This function should be called before using any langchain-openai models.
19
20
  """
20
21
  try:
@@ -22,54 +23,73 @@ def wrap_langchain_openai() -> None:
22
23
  except ImportError:
23
24
  logger.debug("langchain-openai package is not installed.")
24
25
  return
25
-
26
+
26
27
  _patch_openai_chat_models()
27
28
  _patch_openai_llm_models()
28
-
29
+
29
30
  logger.info("langchain-openai has been wrapped by osmosis-ai.")
30
31
 
32
+
31
33
  def _patch_openai_chat_models() -> None:
32
34
  """Patch langchain-openai chat model classes to send data to OSMOSIS."""
33
35
  try:
34
36
  # Try to import ChatOpenAI class
35
37
  try:
36
38
  from langchain_openai import ChatOpenAI
39
+
37
40
  logger.info("Successfully imported ChatOpenAI from langchain_openai")
38
41
  except ImportError:
39
42
  # Handle older versions if needed
40
43
  try:
41
44
  from langchain.chat_models.openai import ChatOpenAI
45
+
42
46
  logger.info("Found ChatOpenAI in langchain.chat_models.openai")
43
47
  except ImportError:
44
- logger.warning("Could not find ChatOpenAI class in any expected location.")
48
+ logger.warning(
49
+ "Could not find ChatOpenAI class in any expected location."
50
+ )
45
51
  return
46
-
52
+
47
53
  # Log available methods on ChatOpenAI for debugging
48
- chat_methods = [method for method in dir(ChatOpenAI) if not method.startswith('__') or method in ['_generate', '_agenerate', '_call', '_acall']]
54
+ chat_methods = [
55
+ method
56
+ for method in dir(ChatOpenAI)
57
+ if not method.startswith("__")
58
+ or method in ["_generate", "_agenerate", "_call", "_acall"]
59
+ ]
49
60
  logger.info(f"Found the following methods on ChatOpenAI: {chat_methods}")
50
-
61
+
51
62
  # Try to get the model attribute name - should be model_name but might differ
52
63
  model_attr = None
53
- for attr in ['model_name', 'model']:
64
+ for attr in ["model_name", "model"]:
54
65
  if hasattr(ChatOpenAI, attr):
55
66
  model_attr = attr
56
67
  logger.info(f"ChatOpenAI uses '{attr}' attribute for model name")
57
68
  break
58
-
69
+
59
70
  if not model_attr:
60
- model_attr = 'model_name' # Default to 'model_name' if we can't determine it
61
- logger.info(f"Could not determine model attribute name, defaulting to '{model_attr}'")
62
-
71
+ model_attr = (
72
+ "model_name" # Default to 'model_name' if we can't determine it
73
+ )
74
+ logger.info(
75
+ f"Could not determine model attribute name, defaulting to '{model_attr}'"
76
+ )
77
+
63
78
  # Patch the _generate method if it exists
64
79
  if hasattr(ChatOpenAI, "_generate"):
65
80
  original_generate = ChatOpenAI._generate
66
-
81
+
67
82
  if not hasattr(original_generate, "_osmosis_aiped"):
83
+
68
84
  @functools.wraps(original_generate)
69
- def wrapped_generate(self, messages, stop=None, run_manager=None, **kwargs):
85
+ def wrapped_generate(
86
+ self, messages, stop=None, run_manager=None, **kwargs
87
+ ):
70
88
  # Get the response
71
- response = original_generate(self, messages, stop=stop, run_manager=run_manager, **kwargs)
72
-
89
+ response = original_generate(
90
+ self, messages, stop=stop, run_manager=run_manager, **kwargs
91
+ )
92
+
73
93
  # Send to OSMOSIS if enabled
74
94
  if utils.enabled:
75
95
  # Create payload
@@ -77,19 +97,27 @@ def _patch_openai_chat_models() -> None:
77
97
  payload = {
78
98
  "model_type": "ChatOpenAI",
79
99
  "model_name": model_name,
80
- "messages": [str(msg) for msg in messages], # Convert to strings for serialization
81
- "response": str(response), # Convert to string since it may not be serializable
82
- "kwargs": {"stop": stop, **kwargs}
100
+ "messages": [
101
+ str(msg) for msg in messages
102
+ ], # Convert to strings for serialization
103
+ "response": str(
104
+ response
105
+ ), # Convert to string since it may not be serializable
106
+ "kwargs": {"stop": stop, **kwargs},
83
107
  }
84
-
108
+
85
109
  send_to_osmosis(
86
- query={"type": "langchain_openai_generate", "messages": [str(msg) for msg in messages], "model": model_name},
110
+ query={
111
+ "type": "langchain_openai_generate",
112
+ "messages": [str(msg) for msg in messages],
113
+ "model": model_name,
114
+ },
87
115
  response=payload,
88
- status=200
116
+ status=200,
89
117
  )
90
-
118
+
91
119
  return response
92
-
120
+
93
121
  wrapped_generate._osmosis_aiped = True
94
122
  ChatOpenAI._generate = wrapped_generate
95
123
  logger.info("Successfully wrapped ChatOpenAI._generate method")
@@ -97,17 +125,22 @@ def _patch_openai_chat_models() -> None:
97
125
  logger.info("ChatOpenAI._generate already wrapped.")
98
126
  else:
99
127
  logger.info("ChatOpenAI does not have a _generate method, skipping.")
100
-
128
+
101
129
  # Patch the _agenerate method if it exists
102
130
  if hasattr(ChatOpenAI, "_agenerate"):
103
131
  original_agenerate = ChatOpenAI._agenerate
104
-
132
+
105
133
  if not hasattr(original_agenerate, "_osmosis_aiped"):
134
+
106
135
  @functools.wraps(original_agenerate)
107
- async def wrapped_agenerate(self, messages, stop=None, run_manager=None, **kwargs):
136
+ async def wrapped_agenerate(
137
+ self, messages, stop=None, run_manager=None, **kwargs
138
+ ):
108
139
  # Get the response
109
- response = await original_agenerate(self, messages, stop=stop, run_manager=run_manager, **kwargs)
110
-
140
+ response = await original_agenerate(
141
+ self, messages, stop=stop, run_manager=run_manager, **kwargs
142
+ )
143
+
111
144
  # Send to OSMOSIS if enabled
112
145
  if utils.enabled:
113
146
  # Create payload
@@ -115,19 +148,27 @@ def _patch_openai_chat_models() -> None:
115
148
  payload = {
116
149
  "model_type": "ChatOpenAI",
117
150
  "model_name": model_name,
118
- "messages": [str(msg) for msg in messages], # Convert to strings for serialization
119
- "response": str(response), # Convert to string since it may not be serializable
120
- "kwargs": {"stop": stop, **kwargs}
151
+ "messages": [
152
+ str(msg) for msg in messages
153
+ ], # Convert to strings for serialization
154
+ "response": str(
155
+ response
156
+ ), # Convert to string since it may not be serializable
157
+ "kwargs": {"stop": stop, **kwargs},
121
158
  }
122
-
159
+
123
160
  send_to_osmosis(
124
- query={"type": "langchain_openai_agenerate", "messages": [str(msg) for msg in messages], "model": model_name},
161
+ query={
162
+ "type": "langchain_openai_agenerate",
163
+ "messages": [str(msg) for msg in messages],
164
+ "model": model_name,
165
+ },
125
166
  response=payload,
126
- status=200
167
+ status=200,
127
168
  )
128
-
169
+
129
170
  return response
130
-
171
+
131
172
  wrapped_agenerate._osmosis_aiped = True
132
173
  ChatOpenAI._agenerate = wrapped_agenerate
133
174
  logger.info("Successfully wrapped ChatOpenAI._agenerate method")
@@ -135,18 +176,21 @@ def _patch_openai_chat_models() -> None:
135
176
  logger.info("ChatOpenAI._agenerate already wrapped.")
136
177
  else:
137
178
  logger.info("ChatOpenAI does not have a _agenerate method, skipping.")
138
-
179
+
139
180
  # Patch _call method if it exists (used in newer versions)
140
181
  if hasattr(ChatOpenAI, "_call"):
141
182
  original_call = ChatOpenAI._call
142
-
183
+
143
184
  if not hasattr(original_call, "_osmosis_aiped"):
185
+
144
186
  @functools.wraps(original_call)
145
187
  def wrapped_call(self, messages, stop=None, run_manager=None, **kwargs):
146
188
  try:
147
189
  # Get the response
148
- response = original_call(self, messages, stop=stop, run_manager=run_manager, **kwargs)
149
-
190
+ response = original_call(
191
+ self, messages, stop=stop, run_manager=run_manager, **kwargs
192
+ )
193
+
150
194
  # Send to OSMOSIS if enabled
151
195
  if utils.enabled:
152
196
  # Create payload
@@ -154,24 +198,32 @@ def _patch_openai_chat_models() -> None:
154
198
  payload = {
155
199
  "model_type": "ChatOpenAI",
156
200
  "model_name": model_name,
157
- "messages": [str(msg) for msg in messages], # Convert to strings for serialization
201
+ "messages": [
202
+ str(msg) for msg in messages
203
+ ], # Convert to strings for serialization
158
204
  "response": str(response),
159
- "kwargs": {"stop": stop, **kwargs}
205
+ "kwargs": {"stop": stop, **kwargs},
160
206
  }
161
-
207
+
162
208
  send_to_osmosis(
163
- query={"type": "langchain_openai_call", "messages": [str(msg) for msg in messages], "model": model_name},
209
+ query={
210
+ "type": "langchain_openai_call",
211
+ "messages": [str(msg) for msg in messages],
212
+ "model": model_name,
213
+ },
164
214
  response=payload,
165
- status=200
215
+ status=200,
166
216
  )
167
-
217
+
168
218
  return response
169
219
  except TypeError as e:
170
220
  # Handle parameter mismatch gracefully
171
- logger.warning(f"TypeError in wrapped _call: {e}, trying without run_manager")
221
+ logger.warning(
222
+ f"TypeError in wrapped _call: {e}, trying without run_manager"
223
+ )
172
224
  # Try calling without run_manager (older versions)
173
225
  response = original_call(self, messages, stop=stop, **kwargs)
174
-
226
+
175
227
  # Send to OSMOSIS if enabled
176
228
  if utils.enabled:
177
229
  model_name = getattr(self, model_attr, "unknown_model")
@@ -180,17 +232,21 @@ def _patch_openai_chat_models() -> None:
180
232
  "model_name": model_name,
181
233
  "messages": [str(msg) for msg in messages],
182
234
  "response": str(response),
183
- "kwargs": {"stop": stop, **kwargs}
235
+ "kwargs": {"stop": stop, **kwargs},
184
236
  }
185
-
237
+
186
238
  send_to_osmosis(
187
- query={"type": "langchain_openai_call_fallback", "messages": [str(msg) for msg in messages], "model": model_name},
239
+ query={
240
+ "type": "langchain_openai_call_fallback",
241
+ "messages": [str(msg) for msg in messages],
242
+ "model": model_name,
243
+ },
188
244
  response=payload,
189
- status=200
245
+ status=200,
190
246
  )
191
-
247
+
192
248
  return response
193
-
249
+
194
250
  wrapped_call._osmosis_aiped = True
195
251
  ChatOpenAI._call = wrapped_call
196
252
  logger.info("Successfully wrapped ChatOpenAI._call method")
@@ -198,18 +254,23 @@ def _patch_openai_chat_models() -> None:
198
254
  logger.info("ChatOpenAI._call already wrapped.")
199
255
  else:
200
256
  logger.info("ChatOpenAI does not have a _call method, skipping.")
201
-
257
+
202
258
  # Patch _acall method if it exists
203
259
  if hasattr(ChatOpenAI, "_acall"):
204
260
  original_acall = ChatOpenAI._acall
205
-
261
+
206
262
  if not hasattr(original_acall, "_osmosis_aiped"):
263
+
207
264
  @functools.wraps(original_acall)
208
- async def wrapped_acall(self, messages, stop=None, run_manager=None, **kwargs):
265
+ async def wrapped_acall(
266
+ self, messages, stop=None, run_manager=None, **kwargs
267
+ ):
209
268
  try:
210
269
  # Get the response
211
- response = await original_acall(self, messages, stop=stop, run_manager=run_manager, **kwargs)
212
-
270
+ response = await original_acall(
271
+ self, messages, stop=stop, run_manager=run_manager, **kwargs
272
+ )
273
+
213
274
  # Send to OSMOSIS if enabled
214
275
  if utils.enabled:
215
276
  # Create payload
@@ -217,24 +278,34 @@ def _patch_openai_chat_models() -> None:
217
278
  payload = {
218
279
  "model_type": "ChatOpenAI",
219
280
  "model_name": model_name,
220
- "messages": [str(msg) for msg in messages], # Convert to strings for serialization
281
+ "messages": [
282
+ str(msg) for msg in messages
283
+ ], # Convert to strings for serialization
221
284
  "response": str(response),
222
- "kwargs": {"stop": stop, **kwargs}
285
+ "kwargs": {"stop": stop, **kwargs},
223
286
  }
224
-
287
+
225
288
  send_to_osmosis(
226
- query={"type": "langchain_openai_acall", "messages": [str(msg) for msg in messages], "model": model_name},
289
+ query={
290
+ "type": "langchain_openai_acall",
291
+ "messages": [str(msg) for msg in messages],
292
+ "model": model_name,
293
+ },
227
294
  response=payload,
228
- status=200
295
+ status=200,
229
296
  )
230
-
297
+
231
298
  return response
232
299
  except TypeError as e:
233
300
  # Handle parameter mismatch gracefully
234
- logger.warning(f"TypeError in wrapped _acall: {e}, trying without run_manager")
301
+ logger.warning(
302
+ f"TypeError in wrapped _acall: {e}, trying without run_manager"
303
+ )
235
304
  # Try calling without run_manager (older versions)
236
- response = await original_acall(self, messages, stop=stop, **kwargs)
237
-
305
+ response = await original_acall(
306
+ self, messages, stop=stop, **kwargs
307
+ )
308
+
238
309
  # Send to OSMOSIS if enabled
239
310
  if utils.enabled:
240
311
  model_name = getattr(self, model_attr, "unknown_model")
@@ -243,17 +314,21 @@ def _patch_openai_chat_models() -> None:
243
314
  "model_name": model_name,
244
315
  "messages": [str(msg) for msg in messages],
245
316
  "response": str(response),
246
- "kwargs": {"stop": stop, **kwargs}
317
+ "kwargs": {"stop": stop, **kwargs},
247
318
  }
248
-
319
+
249
320
  send_to_osmosis(
250
- query={"type": "langchain_openai_acall_fallback", "messages": [str(msg) for msg in messages], "model": model_name},
321
+ query={
322
+ "type": "langchain_openai_acall_fallback",
323
+ "messages": [str(msg) for msg in messages],
324
+ "model": model_name,
325
+ },
251
326
  response=payload,
252
- status=200
327
+ status=200,
253
328
  )
254
-
329
+
255
330
  return response
256
-
331
+
257
332
  wrapped_acall._osmosis_aiped = True
258
333
  ChatOpenAI._acall = wrapped_acall
259
334
  logger.info("Successfully wrapped ChatOpenAI._acall method")
@@ -261,36 +336,42 @@ def _patch_openai_chat_models() -> None:
261
336
  logger.info("ChatOpenAI._acall already wrapped.")
262
337
  else:
263
338
  logger.info("ChatOpenAI does not have a _acall method, skipping.")
264
-
339
+
265
340
  except Exception as e:
266
341
  logger.error(f"Failed to patch langchain-openai chat model classes: {e}")
267
342
 
343
+
268
344
  def _patch_openai_llm_models() -> None:
269
345
  """Patch langchain-openai LLM classes to send data to OSMOSIS."""
270
346
  try:
271
- # Try to import OpenAI class
347
+ # Try to import OpenAI class
272
348
  try:
273
349
  from langchain_openai import OpenAI
350
+
274
351
  logger.info("Successfully imported OpenAI from langchain_openai")
275
352
  except ImportError:
276
353
  # Handle older versions if needed
277
354
  try:
278
355
  from langchain.llms.openai import OpenAI
356
+
279
357
  logger.info("Found OpenAI in langchain.llms.openai")
280
358
  except ImportError:
281
359
  logger.warning("Could not find OpenAI class in any expected location.")
282
360
  return
283
-
361
+
284
362
  # Patch the _call method if it exists
285
363
  if hasattr(OpenAI, "_call"):
286
364
  original_call = OpenAI._call
287
-
365
+
288
366
  if not hasattr(original_call, "_osmosis_aiped"):
367
+
289
368
  @functools.wraps(original_call)
290
369
  def wrapped_call(self, prompt, stop=None, run_manager=None, **kwargs):
291
370
  # Get the response
292
- response = original_call(self, prompt, stop=stop, run_manager=run_manager, **kwargs)
293
-
371
+ response = original_call(
372
+ self, prompt, stop=stop, run_manager=run_manager, **kwargs
373
+ )
374
+
294
375
  # Send to OSMOSIS if enabled
295
376
  if utils.enabled:
296
377
  # Create payload
@@ -299,32 +380,41 @@ def _patch_openai_llm_models() -> None:
299
380
  "model_name": self.model_name,
300
381
  "prompt": prompt,
301
382
  "response": response,
302
- "kwargs": {"stop": stop, **kwargs}
383
+ "kwargs": {"stop": stop, **kwargs},
303
384
  }
304
-
385
+
305
386
  send_to_osmosis(
306
- query={"type": "langchain_openai_llm_call", "prompt": prompt, "model": self.model_name},
387
+ query={
388
+ "type": "langchain_openai_llm_call",
389
+ "prompt": prompt,
390
+ "model": self.model_name,
391
+ },
307
392
  response=payload,
308
- status=200
393
+ status=200,
309
394
  )
310
-
395
+
311
396
  return response
312
-
397
+
313
398
  wrapped_call._osmosis_aiped = True
314
399
  OpenAI._call = wrapped_call
315
400
  else:
316
401
  logger.info("OpenAI._call already wrapped.")
317
-
402
+
318
403
  # Patch the _acall method if it exists
319
404
  if hasattr(OpenAI, "_acall"):
320
405
  original_acall = OpenAI._acall
321
-
406
+
322
407
  if not hasattr(original_acall, "_osmosis_aiped"):
408
+
323
409
  @functools.wraps(original_acall)
324
- async def wrapped_acall(self, prompt, stop=None, run_manager=None, **kwargs):
410
+ async def wrapped_acall(
411
+ self, prompt, stop=None, run_manager=None, **kwargs
412
+ ):
325
413
  # Get the response
326
- response = await original_acall(self, prompt, stop=stop, run_manager=run_manager, **kwargs)
327
-
414
+ response = await original_acall(
415
+ self, prompt, stop=stop, run_manager=run_manager, **kwargs
416
+ )
417
+
328
418
  # Send to OSMOSIS if enabled
329
419
  if utils.enabled:
330
420
  # Create payload
@@ -333,37 +423,47 @@ def _patch_openai_llm_models() -> None:
333
423
  "model_name": self.model_name,
334
424
  "prompt": prompt,
335
425
  "response": response,
336
- "kwargs": {"stop": stop, **kwargs}
426
+ "kwargs": {"stop": stop, **kwargs},
337
427
  }
338
-
428
+
339
429
  send_to_osmosis(
340
- query={"type": "langchain_openai_llm_acall", "prompt": prompt, "model": self.model_name},
430
+ query={
431
+ "type": "langchain_openai_llm_acall",
432
+ "prompt": prompt,
433
+ "model": self.model_name,
434
+ },
341
435
  response=payload,
342
- status=200
436
+ status=200,
343
437
  )
344
-
438
+
345
439
  return response
346
-
440
+
347
441
  wrapped_acall._osmosis_aiped = True
348
442
  OpenAI._acall = wrapped_acall
349
443
  else:
350
444
  logger.info("OpenAI._acall already wrapped.")
351
-
445
+
352
446
  # Also try to patch AzureOpenAI if available
353
447
  try:
354
448
  from langchain_openai import AzureOpenAI
449
+
355
450
  logger.info("Found AzureOpenAI class, patching...")
356
-
451
+
357
452
  # Patch the _call method if it exists
358
453
  if hasattr(AzureOpenAI, "_call"):
359
454
  original_call = AzureOpenAI._call
360
-
455
+
361
456
  if not hasattr(original_call, "_osmosis_aiped"):
457
+
362
458
  @functools.wraps(original_call)
363
- def wrapped_call(self, prompt, stop=None, run_manager=None, **kwargs):
459
+ def wrapped_call(
460
+ self, prompt, stop=None, run_manager=None, **kwargs
461
+ ):
364
462
  # Get the response
365
- response = original_call(self, prompt, stop=stop, run_manager=run_manager, **kwargs)
366
-
463
+ response = original_call(
464
+ self, prompt, stop=stop, run_manager=run_manager, **kwargs
465
+ )
466
+
367
467
  # Send to OSMOSIS if enabled
368
468
  if utils.enabled:
369
469
  # Create payload
@@ -372,98 +472,125 @@ def _patch_openai_llm_models() -> None:
372
472
  "model_name": self.deployment_name,
373
473
  "prompt": prompt,
374
474
  "response": response,
375
- "kwargs": {"stop": stop, **kwargs}
475
+ "kwargs": {"stop": stop, **kwargs},
376
476
  }
377
-
477
+
378
478
  send_to_osmosis(
379
- query={"type": "langchain_azure_openai_llm_call", "prompt": prompt, "model": self.deployment_name},
479
+ query={
480
+ "type": "langchain_azure_openai_llm_call",
481
+ "prompt": prompt,
482
+ "model": self.deployment_name,
483
+ },
380
484
  response=payload,
381
- status=200
485
+ status=200,
382
486
  )
383
-
487
+
384
488
  return response
385
-
489
+
386
490
  wrapped_call._osmosis_aiped = True
387
491
  AzureOpenAI._call = wrapped_call
388
492
  else:
389
493
  logger.info("AzureOpenAI._call already wrapped.")
390
-
494
+
391
495
  # Patch the _acall method if it exists
392
496
  if hasattr(AzureOpenAI, "_acall"):
393
497
  original_acall = AzureOpenAI._acall
394
-
498
+
395
499
  if not hasattr(original_acall, "_osmosis_aiped"):
500
+
396
501
  @functools.wraps(original_acall)
397
- async def wrapped_acall(self, prompt, stop=None, run_manager=None, **kwargs):
502
+ async def wrapped_acall(
503
+ self, prompt, stop=None, run_manager=None, **kwargs
504
+ ):
398
505
  # Get the response
399
- response = await original_acall(self, prompt, stop=stop, run_manager=run_manager, **kwargs)
400
-
506
+ response = await original_acall(
507
+ self, prompt, stop=stop, run_manager=run_manager, **kwargs
508
+ )
509
+
401
510
  # Send to OSMOSIS if enabled
402
511
  if utils.enabled:
403
512
  # Create payload
404
513
  payload = {
405
- "model_type": "AzureOpenAI",
514
+ "model_type": "AzureOpenAI",
406
515
  "model_name": self.deployment_name,
407
516
  "prompt": prompt,
408
517
  "response": response,
409
- "kwargs": {"stop": stop, **kwargs}
518
+ "kwargs": {"stop": stop, **kwargs},
410
519
  }
411
-
520
+
412
521
  send_to_osmosis(
413
- query={"type": "langchain_azure_openai_llm_acall", "prompt": prompt, "model": self.deployment_name},
522
+ query={
523
+ "type": "langchain_azure_openai_llm_acall",
524
+ "prompt": prompt,
525
+ "model": self.deployment_name,
526
+ },
414
527
  response=payload,
415
- status=200
528
+ status=200,
416
529
  )
417
-
530
+
418
531
  return response
419
-
532
+
420
533
  wrapped_acall._osmosis_aiped = True
421
534
  AzureOpenAI._acall = wrapped_acall
422
535
  else:
423
536
  logger.info("AzureOpenAI._acall already wrapped.")
424
537
  except ImportError:
425
538
  logger.info("AzureOpenAI not found, skipping.")
426
-
539
+
427
540
  # Also try to patch AzureChatOpenAI if available
428
541
  try:
429
542
  from langchain_openai import AzureChatOpenAI
543
+
430
544
  logger.info("Found AzureChatOpenAI class, patching...")
431
-
545
+
432
546
  # Patch the _generate method if it exists
433
547
  if hasattr(AzureChatOpenAI, "_generate"):
434
548
  original_generate = AzureChatOpenAI._generate
435
-
549
+
436
550
  if not hasattr(original_generate, "_osmosis_aiped"):
551
+
437
552
  @functools.wraps(original_generate)
438
- def wrapped_generate(self, messages, stop=None, run_manager=None, **kwargs):
553
+ def wrapped_generate(
554
+ self, messages, stop=None, run_manager=None, **kwargs
555
+ ):
439
556
  # Get the response
440
- response = original_generate(self, messages, stop=stop, run_manager=run_manager, **kwargs)
441
-
557
+ response = original_generate(
558
+ self, messages, stop=stop, run_manager=run_manager, **kwargs
559
+ )
560
+
442
561
  # Send to OSMOSIS if enabled
443
562
  if utils.enabled:
444
563
  # Create payload
445
564
  payload = {
446
565
  "model_type": "AzureChatOpenAI",
447
566
  "model_name": self.deployment_name,
448
- "messages": [str(msg) for msg in messages], # Convert to strings for serialization
449
- "response": str(response), # Convert to string since it may not be serializable
450
- "kwargs": {"stop": stop, **kwargs}
567
+ "messages": [
568
+ str(msg) for msg in messages
569
+ ], # Convert to strings for serialization
570
+ "response": str(
571
+ response
572
+ ), # Convert to string since it may not be serializable
573
+ "kwargs": {"stop": stop, **kwargs},
451
574
  }
452
-
575
+
453
576
  send_to_osmosis(
454
- query={"type": "langchain_azure_chat_openai", "messages": [str(msg) for msg in messages], "model": self.deployment_name},
577
+ query={
578
+ "type": "langchain_azure_chat_openai",
579
+ "messages": [str(msg) for msg in messages],
580
+ "model": self.deployment_name,
581
+ },
455
582
  response=payload,
456
- status=200
583
+ status=200,
457
584
  )
458
-
585
+
459
586
  return response
460
-
587
+
461
588
  wrapped_generate._osmosis_aiped = True
462
589
  AzureChatOpenAI._generate = wrapped_generate
463
590
  else:
464
591
  logger.info("AzureChatOpenAI._generate already wrapped.")
465
592
  except ImportError:
466
593
  logger.info("AzureChatOpenAI not found, skipping.")
467
-
594
+
468
595
  except Exception as e:
469
- logger.error(f"Failed to patch langchain-openai LLM classes: {e}")
596
+ logger.error(f"Failed to patch langchain-openai LLM classes: {e}")