osmosis-ai 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of osmosis-ai might be problematic.

@@ -1,674 +0,0 @@
-"""
-LangChain adapter for Osmosis
-
-This module provides monkey patching for the LangChain Python library.
-"""
-
-import functools
-import sys
-
-from osmosis_ai import utils
-from osmosis_ai.utils import send_to_osmosis
-from osmosis_ai.logger import logger
-
-
-def wrap_langchain() -> None:
-    """
-    Monkey patch LangChain's components to send all prompts and responses to OSMOSIS.
-
-    This function should be called before using any LangChain components.
-    """
-    try:
-        import langchain
-    except ImportError:
-        logger.debug("langchain package is not installed.")
-        return
-
-    # Patch LLM classes
-    _patch_langchain_llms()
-
-    # Patch Chat model classes
-    _patch_langchain_chat_models()
-
-    # Patch prompt templates
-    _patch_langchain_prompts()
-
-    logger.info("LangChain has been wrapped by osmosis-ai.")
-
-
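For orientation, this is how the adapter is meant to be used; the import path below is an assumption, since the wheel's actual module layout is not shown in this diff:

    # Hypothetical usage sketch; adjust the import to wherever osmosis-ai
    # actually exposes wrap_langchain.
    from osmosis_ai.adapters.langchain import wrap_langchain

    wrap_langchain()  # patch LangChain before constructing any models

    from langchain_openai import OpenAI
    llm = OpenAI()
    llm.invoke("Hello")  # prompt and response are now mirrored to OSMOSIS
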
-def _patch_langchain_llms() -> None:
-    """Patch LangChain LLM classes to send data to OSMOSIS."""
-    try:
-        # Try to import LangChain LLM base classes
-        # First try langchain_core (newer versions)
-        try:
-            from langchain_core.language_models.llms import BaseLLM
-
-            logger.info(
-                "Successfully imported BaseLLM from langchain_core.language_models.llms"
-            )
-        except ImportError:
-            # Then try other possible locations (older versions)
-            try:
-                from langchain.llms.base import BaseLLM
-
-                logger.info("Found BaseLLM in langchain.llms.base")
-            except ImportError:
-                try:
-                    from langchain.llms import BaseLLM
-
-                    logger.info("Found BaseLLM in langchain.llms")
-                except ImportError:
-                    try:
-                        from langchain_core.language_models import BaseLLM
-
-                        logger.info("Found BaseLLM in langchain_core.language_models")
-                    except ImportError:
-                        logger.warning(
-                            "Could not find BaseLLM class in any expected location."
-                        )
-                        return
-
-        logger.info("Starting to wrap LangChain LLM methods...")
-
-        # Get all available methods to understand which API we're working with
-        llm_methods = [
-            method
-            for method in dir(BaseLLM)
-            if not method.startswith("_") or method in ["_call", "__call__"]
-        ]
-        logger.info(f"Found the following methods on BaseLLM: {llm_methods}")
-
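The nested try/except ladder above probes successive historical locations of BaseLLM. The same idea can be expressed once with importlib; this is an illustrative sketch, not code from the package:

    import importlib

    def resolve_class(name, candidate_modules):
        # Return the first matching attribute across candidate module paths,
        # or None if no candidate can be imported.
        for module_path in candidate_modules:
            try:
                module = importlib.import_module(module_path)
                return getattr(module, name)
            except (ImportError, AttributeError):
                continue
        return None

    BaseLLM = resolve_class("BaseLLM", [
        "langchain_core.language_models.llms",  # newer layouts first
        "langchain.llms.base",
        "langchain.llms",
        "langchain_core.language_models",
    ])
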
-        # Patch the _call method if it exists
-        if hasattr(BaseLLM, "_call"):
-            original_call = BaseLLM._call
-
-            if not hasattr(original_call, "_osmosis_wrapped"):
-
-                @functools.wraps(original_call)
-                def wrapped_call(self, prompt, *args, **kwargs):
-                    # Get the response
-                    response = original_call(self, prompt, *args, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "llm_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "prompt": prompt,
-                            "response": response,
-                            "kwargs": kwargs,
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_llm",
-                                "prompt": prompt,
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_call._osmosis_wrapped = True
-                BaseLLM._call = wrapped_call
-                logger.info("Successfully wrapped BaseLLM._call method")
-            else:
-                logger.info("LangChain BaseLLM._call already wrapped.")
-        else:
-            logger.info("LangChain BaseLLM does not have a _call method, skipping.")
-
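Every patch in this file repeats the same idempotent wrap-once pattern seen above. Distilled, under the assumption that the wrapped method only needs to be observed, it looks like this (placeholder names, sketch only):

    import functools

    def wrap_once(cls, method_name, observe):
        # Wrap cls.method_name so observe(self, args, result) runs after
        # each call; the sentinel attribute prevents wrapping twice.
        original = getattr(cls, method_name)
        if getattr(original, "_osmosis_wrapped", False):
            return

        @functools.wraps(original)
        def wrapper(self, *args, **kwargs):
            result = original(self, *args, **kwargs)
            observe(self, args, result)  # report, never alter, the result
            return result

        wrapper._osmosis_wrapped = True
        setattr(cls, method_name, wrapper)
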
-        # Also patch invoke method if it exists
-        if hasattr(BaseLLM, "invoke"):
-            original_invoke = BaseLLM.invoke
-
-            if not hasattr(original_invoke, "_osmosis_wrapped"):
-
-                @functools.wraps(original_invoke)
-                def wrapped_invoke(self, prompt, *args, **kwargs):
-                    # Call original
-                    response = original_invoke(self, prompt, *args, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "llm_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "prompt": prompt,
-                            "response": response,
-                            "kwargs": kwargs,
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_llm_invoke",
-                                "prompt": prompt,
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_invoke._osmosis_wrapped = True
-                BaseLLM.invoke = wrapped_invoke
-                logger.info("Successfully wrapped BaseLLM.invoke method")
-            else:
-                logger.info("LangChain BaseLLM.invoke already wrapped.")
-        else:
-            logger.info("LangChain BaseLLM does not have an invoke method, skipping.")
-
-        # Patch the generate method if it exists
-        if hasattr(BaseLLM, "generate"):
-            original_generate = BaseLLM.generate
-
-            if not hasattr(original_generate, "_osmosis_wrapped"):
-
-                @functools.wraps(original_generate)
-                def wrapped_generate(self, prompts, *args, **kwargs):
-                    # Get the response
-                    response = original_generate(self, prompts, *args, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "llm_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "prompts": prompts,
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": kwargs,
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_llm_generate",
-                                "prompts": prompts,
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_generate._osmosis_wrapped = True
-                BaseLLM.generate = wrapped_generate
-                logger.info("Successfully wrapped BaseLLM.generate method")
-            else:
-                logger.info("LangChain BaseLLM.generate already wrapped.")
-        else:
-            logger.info("LangChain BaseLLM does not have a generate method, skipping.")
-
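The blanket str(response) above keeps the payload serializable at the cost of structure. A gentler fallback, shown here only as a sketch, would attempt real JSON serialization first:

    import json

    def to_serializable(value):
        # Keep the value intact when json can handle it; fall back to repr.
        try:
            json.dumps(value)
            return value
        except (TypeError, ValueError):
            return repr(value)
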
-        # For modern LangChain, patch __call__ which could be the Model.__call__ method
-        if hasattr(BaseLLM, "__call__") and callable(getattr(BaseLLM, "__call__")):
-            # Get the method, not the descriptor
-            original_call_method = BaseLLM.__call__
-
-            if not hasattr(original_call_method, "_osmosis_wrapped"):
-
-                @functools.wraps(original_call_method)
-                def wrapped_call_method(
-                    self, prompt, stop=None, run_manager=None, **kwargs
-                ):
-                    try:
-                        # Get the response
-                        response = original_call_method(
-                            self, prompt, stop=stop, run_manager=run_manager, **kwargs
-                        )
-
-                        # Send to OSMOSIS if enabled
-                        if utils.enabled:
-                            # Try to get model name
-                            model_name = "unknown_model"
-                            if hasattr(self, "model_name"):
-                                model_name = self.model_name
-
-                            # Create payload
-                            payload = {
-                                "llm_type": self.__class__.__name__,
-                                "model_name": model_name,
-                                "prompt": prompt,
-                                "response": response,
-                                "kwargs": {"stop": stop, **kwargs},
-                            }
-
-                            send_to_osmosis(
-                                query={
-                                    "type": "langchain_llm_call",
-                                    "prompt": prompt,
-                                    "model": model_name,
-                                },
-                                response=payload,
-                                status=200,
-                            )
-
-                        return response
-                    except TypeError as e:
-                        # Handle parameter mismatch gracefully
-                        logger.warning(
-                            f"TypeError in wrapped __call__: {e}, trying without run_manager"
-                        )
-                        # Try calling without run_manager (older versions)
-                        response = original_call_method(
-                            self, prompt, stop=stop, **kwargs
-                        )
-
-                        # Send to OSMOSIS if enabled
-                        if utils.enabled:
-                            model_name = getattr(self, "model_name", "unknown_model")
-                            payload = {
-                                "llm_type": self.__class__.__name__,
-                                "model_name": model_name,
-                                "prompt": prompt,
-                                "response": response,
-                                "kwargs": {"stop": stop, **kwargs},
-                            }
-
-                            send_to_osmosis(
-                                query={
-                                    "type": "langchain_llm_call_fallback",
-                                    "prompt": prompt,
-                                    "model": model_name,
-                                },
-                                response=payload,
-                                status=200,
-                            )
-
-                        return response
-
-                wrapped_call_method._osmosis_wrapped = True
-                BaseLLM.__call__ = wrapped_call_method
-                logger.info("Successfully wrapped BaseLLM.__call__ method")
-            else:
-                logger.info("LangChain BaseLLM.__call__ already wrapped.")
-        else:
-            logger.info(
-                "LangChain BaseLLM does not have a callable __call__ method, skipping."
-            )
-
-    except Exception as e:
-        logger.error(f"Failed to patch LangChain LLM classes: {e}")
-
-
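The TypeError fallback in the __call__ wrapper above is a signature-compatibility shim for older LangChain releases that do not accept run_manager. The general idea, written once with inspect, as a sketch that assumes nothing about LangChain's real signatures:

    import inspect

    def call_compatibly(func, *args, **kwargs):
        # Drop keyword arguments the target does not declare, unless it
        # accepts **kwargs, then call it.
        params = inspect.signature(func).parameters
        accepts_var_kw = any(
            p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()
        )
        if not accepts_var_kw:
            kwargs = {k: v for k, v in kwargs.items() if k in params}
        return func(*args, **kwargs)
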
-def _patch_langchain_chat_models() -> None:
-    """Patch LangChain Chat model classes to send data to OSMOSIS."""
-    try:
-        # Try to import BaseChatModel from different possible locations
-        # First try langchain_core (newer versions)
-        try:
-            from langchain_core.language_models.chat_models import BaseChatModel
-
-            logger.info("Successfully imported BaseChatModel from langchain_core")
-        except ImportError:
-            # Then try other possible locations (older versions)
-            try:
-                from langchain.chat_models.base import BaseChatModel
-
-                logger.info("Found BaseChatModel in langchain.chat_models.base")
-            except ImportError:
-                try:
-                    from langchain.chat_models import BaseChatModel
-
-                    logger.info("Found BaseChatModel in langchain.chat_models")
-                except ImportError:
-                    logger.warning(
-                        "Could not find BaseChatModel class in any expected location."
-                    )
-                    return
-
-        logger.info("Starting to wrap LangChain chat model methods...")
-
-        # Patch the generate method
-        if hasattr(BaseChatModel, "generate"):
-            original_generate = BaseChatModel.generate
-
-            if not hasattr(original_generate, "_osmosis_wrapped"):
-
-                @functools.wraps(original_generate)
-                def wrapped_generate(self, messages, stop=None, **kwargs):
-                    # Get the response
-                    response = original_generate(self, messages, stop=stop, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "chat_model_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "messages": [
-                                str(msg) for msg in messages
-                            ],  # Convert to strings for serialization
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs},
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_chat_generate",
-                                "messages": [str(msg) for msg in messages],
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_generate._osmosis_wrapped = True
-                BaseChatModel.generate = wrapped_generate
-            else:
-                logger.info("LangChain BaseChatModel.generate already wrapped.")
-
-        # Patch agenerate method if it exists
-        if hasattr(BaseChatModel, "agenerate"):
-            original_agenerate = BaseChatModel.agenerate
-
-            if not hasattr(original_agenerate, "_osmosis_wrapped"):
-
-                @functools.wraps(original_agenerate)
-                async def wrapped_agenerate(self, messages, stop=None, **kwargs):
-                    # Get the response
-                    response = await original_agenerate(
-                        self, messages, stop=stop, **kwargs
-                    )
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "chat_model_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "messages": [
-                                str(msg) for msg in messages
-                            ],  # Convert to strings for serialization
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs},
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_chat_agenerate",
-                                "messages": [str(msg) for msg in messages],
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_agenerate._osmosis_wrapped = True
-                BaseChatModel.agenerate = wrapped_agenerate
-            else:
-                logger.info("LangChain BaseChatModel.agenerate already wrapped.")
-
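One caveat: send_to_osmosis runs synchronously inside the async wrapped_agenerate, so any network I/O it performs blocks the event loop. A non-blocking variant is sketched below; it assumes send_to_osmosis is safe to call from a worker thread, which this diff does not establish:

    import asyncio

    async def report_without_blocking(query, payload):
        # Python 3.9+: run the synchronous reporter off the event loop.
        await asyncio.to_thread(
            send_to_osmosis, query=query, response=payload, status=200
        )
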
-        # Patch the invoke method if it exists
-        if hasattr(BaseChatModel, "invoke"):
-            original_invoke = BaseChatModel.invoke
-
-            if not hasattr(original_invoke, "_osmosis_wrapped"):
-
-                @functools.wraps(original_invoke)
-                def wrapped_invoke(self, messages, *args, **kwargs):
-                    # Call original
-                    response = original_invoke(self, messages, *args, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "chat_model_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "messages": [
-                                str(msg) for msg in messages
-                            ],  # Convert to strings for serialization
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": kwargs,
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_chat_invoke",
-                                "messages": [str(msg) for msg in messages],
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_invoke._osmosis_wrapped = True
-                BaseChatModel.invoke = wrapped_invoke
-            else:
-                logger.info("LangChain BaseChatModel.invoke already wrapped.")
-
-        # Patch ainvoke method if it exists
-        if hasattr(BaseChatModel, "ainvoke"):
-            original_ainvoke = BaseChatModel.ainvoke
-
-            if not hasattr(original_ainvoke, "_osmosis_wrapped"):
-
-                @functools.wraps(original_ainvoke)
-                async def wrapped_ainvoke(self, messages, *args, **kwargs):
-                    # Call original
-                    response = await original_ainvoke(self, messages, *args, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "chat_model_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "messages": [
-                                str(msg) for msg in messages
-                            ],  # Convert to strings for serialization
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": kwargs,
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_chat_ainvoke",
-                                "messages": [str(msg) for msg in messages],
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_ainvoke._osmosis_wrapped = True
-                BaseChatModel.ainvoke = wrapped_ainvoke
-            else:
-                logger.info("LangChain BaseChatModel.ainvoke already wrapped.")
-
-        # For modern LangChain, patch __call__ which could be the Model.__call__ method
-        if hasattr(BaseChatModel, "__call__"):
-            # Get the method, not the descriptor
-            original_call_method = BaseChatModel.__call__
-
-            if not hasattr(original_call_method, "_osmosis_wrapped"):
-
-                @functools.wraps(original_call_method)
-                def wrapped_call_method(self, messages, stop=None, **kwargs):
-                    # Get the response
-                    response = original_call_method(self, messages, stop=stop, **kwargs)
-
-                    # Send to OSMOSIS if enabled
-                    if utils.enabled:
-                        # Try to get model name
-                        model_name = "unknown_model"
-                        if hasattr(self, "model_name"):
-                            model_name = self.model_name
-
-                        # Create payload
-                        payload = {
-                            "chat_model_type": self.__class__.__name__,
-                            "model_name": model_name,
-                            "messages": [
-                                str(msg) for msg in messages
-                            ],  # Convert to strings for serialization
-                            "response": str(
-                                response
-                            ),  # Convert to string since it may not be serializable
-                            "kwargs": {"stop": stop, **kwargs},
-                        }
-
-                        send_to_osmosis(
-                            query={
-                                "type": "langchain_chat_call",
-                                "messages": [str(msg) for msg in messages],
-                                "model": model_name,
-                            },
-                            response=payload,
-                            status=200,
-                        )
-
-                    return response
-
-                wrapped_call_method._osmosis_wrapped = True
-                BaseChatModel.__call__ = wrapped_call_method
-            else:
-                logger.info("LangChain BaseChatModel.__call__ already wrapped.")
-
-    except Exception as e:
-        logger.error(f"Failed to patch LangChain Chat model classes: {e}")
-
-
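All of these patches target base classes, so concrete models pick them up through ordinary attribute lookup; the flip side is that a subclass defining its own version of a method bypasses the patch entirely. A standalone sketch of the first effect:

    class Base:
        def invoke(self, x):
            return x

    class Concrete(Base):
        pass  # no invoke of its own, so it inherits the patched one

    _original = Base.invoke

    def _observed(self, x):
        result = _original(self, x)
        print("observed:", result)  # stand-in for send_to_osmosis
        return result

    Base.invoke = _observed
    assert Concrete().invoke("hi") == "hi"  # patch fires for the subclass
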
-def _patch_langchain_prompts() -> None:
-    """Patch LangChain prompt templates to send data to OSMOSIS."""
-    try:
-        # Try to import BasePromptTemplate from different possible locations
-        try:
-            # Try from langchain_core first (newer versions)
-            from langchain_core.prompts import BasePromptTemplate
-
-            logger.info("Successfully imported from langchain_core.prompts")
-            import_path = "langchain_core.prompts"
-        except ImportError:
-            # Try from langchain for older versions
-            try:
-                from langchain.prompts import BasePromptTemplate
-
-                logger.info("Found prompt templates via langchain.prompts")
-                import_path = "langchain.prompts"
-            except ImportError:
-                # Last attempt
-                try:
-                    from langchain.prompts.base import BasePromptTemplate
-
-                    logger.info("Found prompt templates via langchain.prompts.base")
-                    import_path = "langchain.prompts.base"
-                except ImportError:
-                    logger.warning(
-                        "Could not import BasePromptTemplate from any expected location."
-                    )
-                    return
-
-        # Patch the format method
-        original_format = BasePromptTemplate.format
-        logger.debug(f"Original format method: {original_format}")
-
-        # Only patch if not already patched
-        if not hasattr(original_format, "_osmosis_wrapped"):
-            logger.info("Wrapping BasePromptTemplate.format...")
-
-            @functools.wraps(original_format)
-            def wrapped_format(self, **kwargs):
-                # Call the original format method
-                formatted_prompt = original_format(self, **kwargs)
-
-                # Send to OSMOSIS if enabled
-                if utils.enabled:
-                    # Create payload
-                    payload = {
-                        "prompt_type": self.__class__.__name__,
-                        "template": getattr(self, "template", None),
-                        "input_variables": getattr(self, "input_variables", []),
-                        "template_format": getattr(self, "template_format", None),
-                        "kwargs": kwargs,
-                        "formatted_prompt": formatted_prompt,
-                    }
-
-                    send_to_osmosis(
-                        query={
-                            "type": "langchain_prompt",
-                            "template": getattr(self, "template", str(self)),
-                        },
-                        response=payload,
-                        status=200,
-                    )
-
-                return formatted_prompt
-
-            # Mark the method as wrapped to avoid double wrapping
-            wrapped_format._osmosis_wrapped = True
-            BasePromptTemplate.format = wrapped_format
-        else:
-            logger.info("LangChain BasePromptTemplate.format already wrapped.")
-
-    except Exception as e:
-        logger.error(f"Failed to patch LangChain prompt templates: {e}")
-        # If format method patching failed but the class exists, try direct patching
-        try:
-            if "BasePromptTemplate" in locals() and "wrapped_format" in locals():
-                logger.debug("Format method wasn't patched, patching manually...")
-                BasePromptTemplate.format = wrapped_format
-                logger.debug(
-                    f"After manual patch: {BasePromptTemplate.format != original_format}"
-                )
-        except Exception as inner_e:
-            logger.error(f"Manual patching also failed: {inner_e}")