osmosis-ai 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff compares the contents of two publicly available versions of the package, as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

Potentially problematic release: this version of osmosis-ai might be problematic.
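
For context, the module's public entry point is wrap_langchain(); everything else in the diff below is internal patching helpers. A minimal usage sketch follows (hedged: the adapter's import path is an assumption; only the function name and the osmosis_ai package are visible in the diff):

    # Hypothetical usage sketch: the exact import path is assumed,
    # since the diff does not show where the adapter module lives.
    from osmosis_ai.adapters.langchain import wrap_langchain

    wrap_langchain()  # must run before any LangChain components are created

    # From here on, the patched LLM/chat-model methods mirror every prompt
    # and response to OSMOSIS via send_to_osmosis().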

@@ -1,5 +1,5 @@
 """
-Langchain adapter for Osmosis Wrap
+Langchain adapter for Osmosis
 
 This module provides monkey patching for the LangChain Python library.
 """
@@ -11,10 +11,11 @@ from osmosis_ai import utils
 from osmosis_ai.utils import send_to_osmosis
 from osmosis_ai.logger import logger
 
+
 def wrap_langchain() -> None:
     """
     Monkey patch LangChain's components to send all prompts and responses to OSMOSIS.
-
+
     This function should be called before using any LangChain components.
     """
     try:
@@ -22,18 +23,19 @@ def wrap_langchain() -> None:
     except ImportError:
         logger.debug("langchain package is not installed.")
         return
-
+
     # Patch LLM classes
     _patch_langchain_llms()
-
+
     # Patch Chat model classes
     _patch_langchain_chat_models()
-
+
     # Patch prompt templates
     _patch_langchain_prompts()
-
+
     logger.info("LangChain has been wrapped by osmosis-ai.")
 
+
 def _patch_langchain_llms() -> None:
     """Patch LangChain LLM classes to send data to OSMOSIS."""
     try:
@@ -41,64 +43,81 @@ def _patch_langchain_llms() -> None:
         # First try langchain_core (newer versions)
         try:
             from langchain_core.language_models.llms import BaseLLM
-            logger.info(f"Successfully imported BaseLLM from langchain_core.language_models.llms")
+
+            logger.info(
+                f"Successfully imported BaseLLM from langchain_core.language_models.llms"
+            )
         except ImportError:
             # Then try other possible locations (older versions)
             try:
                 from langchain.llms.base import BaseLLM
+
                 logger.info(f"Found BaseLLM in langchain.llms.base")
             except ImportError:
                 try:
                     from langchain.llms import BaseLLM
+
                     logger.info(f"Found BaseLLM in langchain.llms")
                 except ImportError:
                     try:
                         from langchain_core.language_models import BaseLLM
+
                         logger.info(f"Found BaseLLM in langchain_core.language_models")
                     except ImportError:
-                        logger.warning("Could not find BaseLLM class in any expected location.")
+                        logger.warning(
+                            "Could not find BaseLLM class in any expected location."
+                        )
                         return
-
+
         logger.info("Starting to wrap LangChain LLM methods...")
-
+
         # Get all available methods to understand which API we're working with
-        llm_methods = [method for method in dir(BaseLLM) if not method.startswith('_') or method in ['_call', '__call__']]
+        llm_methods = [
+            method
+            for method in dir(BaseLLM)
+            if not method.startswith("_") or method in ["_call", "__call__"]
+        ]
         logger.info(f"Found the following methods on BaseLLM: {llm_methods}")
-
+
         # Patch the _call method if it exists
         if hasattr(BaseLLM, "_call"):
             original_call = BaseLLM._call
-
+
             if not hasattr(original_call, "_osmosis_aiped"):
+
                 @functools.wraps(original_call)
                 def wrapped_call(self, prompt, *args, **kwargs):
                     # Get the response
                     response = original_call(self, prompt, *args, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "llm_type": self.__class__.__name__,
                             "model_name": model_name,
                             "prompt": prompt,
                             "response": response,
-                            "kwargs": kwargs
+                            "kwargs": kwargs,
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_llm", "prompt": prompt, "model": model_name},
+                            query={
+                                "type": "langchain_llm",
+                                "prompt": prompt,
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_call._osmosis_aiped = True
                 BaseLLM._call = wrapped_call
                 logger.info("Successfully wrapped BaseLLM._call method")
@@ -106,41 +125,46 @@ def _patch_langchain_llms() -> None:
                 logger.info("LangChain BaseLLM._call already wrapped.")
         else:
             logger.info("LangChain BaseLLM does not have a _call method, skipping.")
-
+
         # Also patch invoke method if it exists
         if hasattr(BaseLLM, "invoke"):
             original_invoke = BaseLLM.invoke
-
+
             if not hasattr(original_invoke, "_osmosis_aiped"):
+
                 @functools.wraps(original_invoke)
                 def wrapped_invoke(self, prompt, *args, **kwargs):
                     # Call original
                     response = original_invoke(self, prompt, *args, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
-                            "llm_type": self.__class__.__name__, 
+                            "llm_type": self.__class__.__name__,
                             "model_name": model_name,
                             "prompt": prompt,
                             "response": response,
-                            "kwargs": kwargs
+                            "kwargs": kwargs,
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_llm_invoke", "prompt": prompt, "model": model_name},
+                            query={
+                                "type": "langchain_llm_invoke",
+                                "prompt": prompt,
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_invoke._osmosis_aiped = True
                 BaseLLM.invoke = wrapped_invoke
                 logger.info("Successfully wrapped BaseLLM.invoke method")
@@ -148,41 +172,48 @@ def _patch_langchain_llms() -> None:
                 logger.info("LangChain BaseLLM.invoke already wrapped.")
         else:
             logger.info("LangChain BaseLLM does not have an invoke method, skipping.")
-
+
         # Patch the generate method if it exists
        if hasattr(BaseLLM, "generate"):
             original_generate = BaseLLM.generate
-
+
             if not hasattr(original_generate, "_osmosis_aiped"):
+
                 @functools.wraps(original_generate)
                 def wrapped_generate(self, prompts, *args, **kwargs):
                     # Get the response
                     response = original_generate(self, prompts, *args, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "llm_type": self.__class__.__name__,
                             "model_name": model_name,
                             "prompts": prompts,
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": kwargs
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": kwargs,
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_llm_generate", "prompts": prompts, "model": model_name},
-                            response=payload,
-                            status=200
+                            query={
+                                "type": "langchain_llm_generate",
+                                "prompts": prompts,
+                                "model": model_name,
+                            },
+                            response=payload,
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_generate._osmosis_aiped = True
                 BaseLLM.generate = wrapped_generate
                 logger.info("Successfully wrapped BaseLLM.generate method")
@@ -190,78 +221,98 @@ def _patch_langchain_llms() -> None:
                 logger.info("LangChain BaseLLM.generate already wrapped.")
         else:
             logger.info("LangChain BaseLLM does not have a generate method, skipping.")
-
+
         # For modern LangChain, patch __call__ which could be the Model.__call__ method
         if hasattr(BaseLLM, "__call__") and callable(getattr(BaseLLM, "__call__")):
             # Get the method, not the descriptor
             original_call_method = BaseLLM.__call__
-
+
             if not hasattr(original_call_method, "_osmosis_aiped"):
+
                 @functools.wraps(original_call_method)
-                def wrapped_call_method(self, prompt, stop=None, run_manager=None, **kwargs):
+                def wrapped_call_method(
+                    self, prompt, stop=None, run_manager=None, **kwargs
+                ):
                     try:
                         # Get the response
-                        response = original_call_method(self, prompt, stop=stop, run_manager=run_manager, **kwargs)
-
+                        response = original_call_method(
+                            self, prompt, stop=stop, run_manager=run_manager, **kwargs
+                        )
+
                         # Send to OSMOSIS if enabled
                         if utils.enabled:
                             # Try to get model name
                             model_name = "unknown_model"
                             if hasattr(self, "model_name"):
                                 model_name = self.model_name
-
+
                             # Create payload
                             payload = {
                                 "llm_type": self.__class__.__name__,
                                 "model_name": model_name,
-                                "prompt": prompt, 
+                                "prompt": prompt,
                                 "response": response,
-                                "kwargs": {"stop": stop, **kwargs}
+                                "kwargs": {"stop": stop, **kwargs},
                             }
-
+
                             send_to_osmosis(
-                                query={"type": "langchain_llm_call", "prompt": prompt, "model": model_name},
+                                query={
+                                    "type": "langchain_llm_call",
+                                    "prompt": prompt,
+                                    "model": model_name,
+                                },
                                 response=payload,
-                                status=200
+                                status=200,
                             )
-
+
                         return response
                     except TypeError as e:
                         # Handle parameter mismatch gracefully
-                        logger.warning(f"TypeError in wrapped __call__: {e}, trying without run_manager")
+                        logger.warning(
+                            f"TypeError in wrapped __call__: {e}, trying without run_manager"
+                        )
                         # Try calling without run_manager (older versions)
-                        response = original_call_method(self, prompt, stop=stop, **kwargs)
-
+                        response = original_call_method(
+                            self, prompt, stop=stop, **kwargs
+                        )
+
                         # Send to OSMOSIS if enabled
                         if utils.enabled:
                             model_name = getattr(self, "model_name", "unknown_model")
                             payload = {
                                 "llm_type": self.__class__.__name__,
                                 "model_name": model_name,
-                                "prompt": prompt, 
+                                "prompt": prompt,
                                 "response": response,
-                                "kwargs": {"stop": stop, **kwargs}
+                                "kwargs": {"stop": stop, **kwargs},
                             }
-
+
                             send_to_osmosis(
-                                query={"type": "langchain_llm_call_fallback", "prompt": prompt, "model": model_name},
+                                query={
+                                    "type": "langchain_llm_call_fallback",
+                                    "prompt": prompt,
+                                    "model": model_name,
+                                },
                                 response=payload,
-                                status=200
+                                status=200,
                             )
-
+
                         return response
-
+
                 wrapped_call_method._osmosis_aiped = True
                 BaseLLM.__call__ = wrapped_call_method
                 logger.info("Successfully wrapped BaseLLM.__call__ method")
             else:
                 logger.info("LangChain BaseLLM.__call__ already wrapped.")
         else:
-            logger.info("LangChain BaseLLM does not have a callable __call__ method, skipping.")
-
+            logger.info(
+                "LangChain BaseLLM does not have a callable __call__ method, skipping."
+            )
+
     except Exception as e:
         logger.error(f"Failed to patch LangChain LLM classes: {e}")
 
+
 def _patch_langchain_chat_models() -> None:
     """Patch LangChain Chat model classes to send data to OSMOSIS."""
     try:
@@ -269,221 +320,274 @@ def _patch_langchain_chat_models() -> None:
         # First try langchain_core (newer versions)
         try:
             from langchain_core.language_models.chat_models import BaseChatModel
+
             logger.info(f"Successfully imported BaseChatModel from langchain_core")
         except ImportError:
             # Then try other possible locations (older versions)
             try:
                 from langchain.chat_models.base import BaseChatModel
+
                 logger.info(f"Found BaseChatModel in langchain.chat_models.base")
             except ImportError:
                 try:
                     from langchain.chat_models import BaseChatModel
+
                     logger.info(f"Found BaseChatModel in langchain.chat_models")
                 except ImportError:
-                    logger.warning("Could not find BaseChatModel class in any expected location.")
+                    logger.warning(
+                        "Could not find BaseChatModel class in any expected location."
+                    )
                     return
-
+
         logger.info("Calling wrap_langchain()...")
-
+
         # Patch the generate method
         if hasattr(BaseChatModel, "generate"):
             original_generate = BaseChatModel.generate
-
+
             if not hasattr(original_generate, "_osmosis_aiped"):
+
                 @functools.wraps(original_generate)
                 def wrapped_generate(self, messages, stop=None, **kwargs):
                     # Get the response
                     response = original_generate(self, messages, stop=stop, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "chat_model_type": self.__class__.__name__,
                             "model_name": model_name,
-                            "messages": [str(msg) for msg in messages], # Convert to strings for serialization
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs}
+                            "messages": [
+                                str(msg) for msg in messages
+                            ], # Convert to strings for serialization
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": {"stop": stop, **kwargs},
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_chat_generate", "messages": [str(msg) for msg in messages], "model": model_name},
+                            query={
+                                "type": "langchain_chat_generate",
+                                "messages": [str(msg) for msg in messages],
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_generate._osmosis_aiped = True
                 BaseChatModel.generate = wrapped_generate
             else:
                 logger.info("LangChain BaseChatModel.generate already wrapped.")
-
+
         # Patch agenerate method if it exists
         if hasattr(BaseChatModel, "agenerate"):
             original_agenerate = BaseChatModel.agenerate
-
+
             if not hasattr(original_agenerate, "_osmosis_aiped"):
+
                 @functools.wraps(original_agenerate)
                 async def wrapped_agenerate(self, messages, stop=None, **kwargs):
                     # Get the response
-                    response = await original_agenerate(self, messages, stop=stop, **kwargs)
-
+                    response = await original_agenerate(
+                        self, messages, stop=stop, **kwargs
+                    )
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "chat_model_type": self.__class__.__name__,
                             "model_name": model_name,
-                            "messages": [str(msg) for msg in messages], # Convert to strings for serialization
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs}
+                            "messages": [
+                                str(msg) for msg in messages
+                            ], # Convert to strings for serialization
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": {"stop": stop, **kwargs},
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_chat_agenerate", "messages": [str(msg) for msg in messages], "model": model_name},
+                            query={
+                                "type": "langchain_chat_agenerate",
+                                "messages": [str(msg) for msg in messages],
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_agenerate._osmosis_aiped = True
                 BaseChatModel.agenerate = wrapped_agenerate
             else:
                 logger.info("LangChain BaseChatModel.agenerate already wrapped.")
-
+
         # Patch the invoke method if it exists
         if hasattr(BaseChatModel, "invoke"):
             original_invoke = BaseChatModel.invoke
-
+
             if not hasattr(original_invoke, "_osmosis_aiped"):
+
                 @functools.wraps(original_invoke)
                 def wrapped_invoke(self, messages, *args, **kwargs):
                     # Call original
                     response = original_invoke(self, messages, *args, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "chat_model_type": self.__class__.__name__,
                             "model_name": model_name,
-                            "messages": [str(msg) for msg in messages], # Convert to strings for serialization
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": kwargs
+                            "messages": [
+                                str(msg) for msg in messages
+                            ], # Convert to strings for serialization
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": kwargs,
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_chat_invoke", "messages": [str(msg) for msg in messages], "model": model_name},
+                            query={
+                                "type": "langchain_chat_invoke",
+                                "messages": [str(msg) for msg in messages],
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_invoke._osmosis_aiped = True
                 BaseChatModel.invoke = wrapped_invoke
             else:
                 logger.info("LangChain BaseChatModel.invoke already wrapped.")
-
+
         # Patch ainvoke method if it exists
         if hasattr(BaseChatModel, "ainvoke"):
             original_ainvoke = BaseChatModel.ainvoke
-
+
             if not hasattr(original_ainvoke, "_osmosis_aiped"):
+
                 @functools.wraps(original_ainvoke)
                 async def wrapped_ainvoke(self, messages, *args, **kwargs):
                     # Call original
                     response = await original_ainvoke(self, messages, *args, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "chat_model_type": self.__class__.__name__,
                             "model_name": model_name,
-                            "messages": [str(msg) for msg in messages], # Convert to strings for serialization
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": kwargs
+                            "messages": [
+                                str(msg) for msg in messages
+                            ], # Convert to strings for serialization
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": kwargs,
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_chat_ainvoke", "messages": [str(msg) for msg in messages], "model": model_name},
+                            query={
+                                "type": "langchain_chat_ainvoke",
+                                "messages": [str(msg) for msg in messages],
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_ainvoke._osmosis_aiped = True
                 BaseChatModel.ainvoke = wrapped_ainvoke
             else:
                 logger.info("LangChain BaseChatModel.ainvoke already wrapped.")
-
+
         # For modern LangChain, patch __call__ which could be the Model.__call__ method
         if hasattr(BaseChatModel, "__call__"):
             # Get the method, not the descriptor
             original_call_method = BaseChatModel.__call__
-
+
             if not hasattr(original_call_method, "_osmosis_aiped"):
+
                 @functools.wraps(original_call_method)
                 def wrapped_call_method(self, messages, stop=None, **kwargs):
                     # Get the response
                     response = original_call_method(self, messages, stop=stop, **kwargs)
-
+
                     # Send to OSMOSIS if enabled
                     if utils.enabled:
                         # Try to get model name
                         model_name = "unknown_model"
                         if hasattr(self, "model_name"):
                             model_name = self.model_name
-
+
                         # Create payload
                         payload = {
                             "chat_model_type": self.__class__.__name__,
                             "model_name": model_name,
-                            "messages": [str(msg) for msg in messages], # Convert to strings for serialization
-                            "response": str(response), # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs}
+                            "messages": [
+                                str(msg) for msg in messages
+                            ], # Convert to strings for serialization
+                            "response": str(
+                                response
+                            ), # Convert to string since it may not be serializable
+                            "kwargs": {"stop": stop, **kwargs},
                         }
-
+
                         send_to_osmosis(
-                            query={"type": "langchain_chat_call", "messages": [str(msg) for msg in messages], "model": model_name},
+                            query={
+                                "type": "langchain_chat_call",
+                                "messages": [str(msg) for msg in messages],
+                                "model": model_name,
+                            },
                             response=payload,
-                            status=200
+                            status=200,
                         )
-
+
                     return response
-
+
                 wrapped_call_method._osmosis_aiped = True
                 BaseChatModel.__call__ = wrapped_call_method
             else:
                 logger.info("LangChain BaseChatModel.__call__ already wrapped.")
-
+
     except Exception as e:
         logger.error(f"Failed to patch LangChain Chat model classes: {e}")
 
+
 def _patch_langchain_prompts() -> None:
     """Patch LangChain prompt templates to send data to OSMOSIS."""
     try:
@@ -491,37 +595,42 @@ def _patch_langchain_prompts() -> None:
         try:
             # Try from langchain_core first (newer versions)
             from langchain_core.prompts import BasePromptTemplate
+
             logger.info(f"Successfully imported from langchain_core.prompts")
             import_path = "langchain_core.prompts"
         except ImportError:
             # Try from langchain for older versions
             try:
                 from langchain.prompts import BasePromptTemplate
+
                 logger.info(f"Found prompt templates via langchain.prompts")
                 import_path = "langchain.prompts"
             except ImportError:
                 # Last attempt
                 try:
                     from langchain.prompts.base import BasePromptTemplate
+
                     logger.info(f"Found prompt templates via langchain.prompts.base")
                     import_path = "langchain.prompts.base"
                 except ImportError:
-                    logger.warning("Could not import BasePromptTemplate from any expected location.")
+                    logger.warning(
+                        "Could not import BasePromptTemplate from any expected location."
+                    )
                     return
-
+
         # Patch the format method
         original_format = BasePromptTemplate.format
         logger.debug(f"Original format method: {original_format}")
-
+
         # Only patch if not already patched
         if not hasattr(original_format, "_osmosis_aiped"):
             logger.info("Calling wrap_langchain()...")
-
+
             @functools.wraps(original_format)
             def wrapped_format(self, **kwargs):
                 # Call the original format method
                 formatted_prompt = original_format(self, **kwargs)
-
+
                 # Send to OSMOSIS if enabled
                 if utils.enabled:
                     # Create payload
531
640
  "input_variables": getattr(self, "input_variables", []),
532
641
  "template_format": getattr(self, "template_format", None),
533
642
  "kwargs": kwargs,
534
- "formatted_prompt": formatted_prompt
643
+ "formatted_prompt": formatted_prompt,
535
644
  }
536
-
645
+
537
646
  send_to_osmosis(
538
- query={"type": "langchain_prompt", "template": getattr(self, "template", str(self))},
647
+ query={
648
+ "type": "langchain_prompt",
649
+ "template": getattr(self, "template", str(self)),
650
+ },
539
651
  response=payload,
540
- status=200
652
+ status=200,
541
653
  )
542
-
654
+
543
655
  return formatted_prompt
544
-
656
+
545
657
  # Mark the method as wrapped to avoid double wrapping
546
658
  wrapped_format._osmosis_aiped = True
547
659
  BasePromptTemplate.format = wrapped_format
548
660
  else:
549
661
  logger.info("LangChain BasePromptTemplate.format already wrapped.")
550
-
662
+
551
663
  except Exception as e:
552
664
  logger.error(f"Failed to patch LangChain prompt templates: {e}")
553
665
  # If format method patching failed but the class exists, try direct patching
554
666
  try:
555
- if 'BasePromptTemplate' in locals():
667
+ if "BasePromptTemplate" in locals():
556
668
  logger.debug("Format method wasn't patched, patching manually...")
557
669
  BasePromptTemplate.format = wrapped_format
558
- logger.debug(f"After manual patch: {BasePromptTemplate.format != original_format}")
670
+ logger.debug(
671
+ f"After manual patch: {BasePromptTemplate.format != original_format}"
672
+ )
559
673
  except Exception as inner_e:
560
- logger.error(f"Manual patching also failed: {inner_e}")
674
+ logger.error(f"Manual patching also failed: {inner_e}")
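
Every hunk above applies the same two kinds of change: Black-style reformatting (multi-line call arguments, trailing commas, double quotes, extra blank lines) and the docstring rename from "Osmosis Wrap" to "Osmosis"; the runtime behavior of the patching appears unchanged. The patching itself always follows one wrap-and-mark pattern, which is why re-running wrap_langchain() is a no-op. A condensed, self-contained sketch of that pattern follows (FakeLLM and the stubbed send_to_osmosis are stand-ins for illustration; the real module wraps BaseLLM and BaseChatModel methods and also checks utils.enabled, omitted here for brevity):

    import functools

    def send_to_osmosis(query, response, status):
        # Stand-in for osmosis_ai.utils.send_to_osmosis.
        print(f"captured {query['type']} with status {status}")

    class FakeLLM:
        model_name = "fake-model"

        def invoke(self, prompt, *args, **kwargs):
            return f"echo: {prompt}"

    original_invoke = FakeLLM.invoke

    if not hasattr(original_invoke, "_osmosis_aiped"):  # idempotency guard

        @functools.wraps(original_invoke)
        def wrapped_invoke(self, prompt, *args, **kwargs):
            # Call through to the original method, then mirror the exchange.
            response = original_invoke(self, prompt, *args, **kwargs)
            send_to_osmosis(
                query={
                    "type": "langchain_llm_invoke",
                    "prompt": prompt,
                    "model": getattr(self, "model_name", "unknown_model"),
                },
                response={"response": response, "kwargs": kwargs},
                status=200,
            )
            return response

        wrapped_invoke._osmosis_aiped = True  # mark so a second wrap skips it
        FakeLLM.invoke = wrapped_invoke

    print(FakeLLM().invoke("hello"))  # prints the capture line, then "echo: hello"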