agenta 0.14.0__py3-none-any.whl → 0.14.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agenta might be problematic.

agenta/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .sdk.types import (
 )
 from .sdk.tracing.decorators import span
 from .sdk.agenta_init import Config, init, llm_tracing
+from .sdk.tracing.callbacks import agenta_litellm_handler
 from .sdk.utils.helper.openai_cost import calculate_token_usage
 from .sdk.client import Agenta
 
agenta/sdk/__init__.py CHANGED
@@ -17,6 +17,7 @@ from .types import (
 )
 from .tracing.decorators import span
 from .agenta_init import Config, init, llm_tracing
+from .tracing.callbacks import agenta_litellm_handler
 from .utils.helper.openai_cost import calculate_token_usage
 
 
agenta/sdk/agenta_decorator.py CHANGED
@@ -17,6 +17,7 @@ from fastapi import Body, FastAPI, UploadFile, HTTPException
 
 import agenta
 from .context import save_context
+from .tracing.llm_tracing import Tracing
 from .router import router as router
 from .types import (
     Context,
@@ -155,10 +156,7 @@ def entrypoint(func: Callable[..., Any]) -> Callable[..., Any]:
 
     if is_main_script(func):
         handle_terminal_run(
-            func,
-            func_signature.parameters,
-            config_params,
-            ingestible_files,
+            func, func_signature.parameters, config_params, ingestible_files, tracing
         )
         return None
 
@@ -350,6 +348,7 @@ def handle_terminal_run(
     func_params: Dict[str, Any],
     config_params: Dict[str, Any],
     ingestible_files: Dict,
+    tracing: Tracing,
 ) -> None:
     """
     Parses command line arguments and sets configuration when script is run from the terminal.
@@ -357,9 +356,8 @@ def handle_terminal_run(
     Args:
         func_params (dict): A dictionary containing the function parameters and their annotations.
         config_params (dict): A dictionary containing the configuration parameters.
-
-    Example:
-        handle_terminal_run(func_params=inspect.signature(my_function).parameters, config_params=config.all())
+        ingestible_files (dict): A dictionary containing the files that should be ingested.
+        tracing (Tracing): The tracing object
     """
 
     # For required parameters, we add them as arguments
@@ -398,13 +396,29 @@ def handle_terminal_run(
     )
     agenta.config.set(**args_config_params)
 
+    # Start tracing
+    tracing.start_parent_span(
+        name=func.__name__,
+        inputs=args_func_params,
+        config=args_config_params,
+        environment="shell",  # type: ignore
+    )
+
     loop = asyncio.get_event_loop()
     result = loop.run_until_complete(
         execute_function(
             func, **{"params": args_func_params, "config_params": args_config_params}
         )
     )
-    print(result)
+
+    # End trace recording
+    tracing.end_recording(
+        outputs=result.dict(),
+        span=tracing.active_trace,  # type: ignore
+    )
+    print(
+        f"\n========== Result ==========\n\nMessage: {result.message}\nCost: {result.cost}\nToken Usage: {result.usage}"
+    )
 
 
 def override_schema(openapi_schema: dict, func_name: str, endpoint: str, params: dict):
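The change above routes every terminal run through the tracing object: a parent span is opened before the entrypoint function executes, and the recording is closed with the function's outputs once it returns. A minimal sketch of that sequence, assuming an already-configured Tracing instance; the `generate` function and its return shape are hypothetical, only `start_parent_span`, `end_recording`, and `active_trace` come from the diff above:

import asyncio

from agenta.sdk.tracing.llm_tracing import Tracing


async def generate(params, config_params):
    # hypothetical stand-in for the decorated entrypoint function
    return {"message": "hi", "cost": 0.0, "usage": {}}


def run_from_shell(tracing: Tracing):
    inputs = {"prompt": "hello"}
    config = {"temperature": 0.7}

    # open the parent span before the function runs, as handle_terminal_run now does
    tracing.start_parent_span(
        name="generate", inputs=inputs, config=config, environment="shell"
    )

    # execute the traced function
    result = asyncio.get_event_loop().run_until_complete(generate(inputs, config))

    # close the recording with the outputs once the call returns
    tracing.end_recording(outputs=result, span=tracing.active_trace)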
agenta/sdk/tracing/callbacks.py ADDED
@@ -0,0 +1,125 @@
+# Own Imports
+from agenta.sdk import llm_tracing
+
+# Third Party Imports
+from litellm.utils import ModelResponse
+from litellm.integrations.custom_logger import CustomLogger as LitellmCustomLogger
+
+
+class AgentaLiteLLMHandler(LitellmCustomLogger):
+    """This handler is responsible for logging certain events when using litellm to call LLMs.
+
+    Args:
+        LitellmCustomLogger (object): custom logger that allows us to override the events to capture.
+    """
+
+    @property
+    def _trace(self):
+        return llm_tracing()
+
+    def log_pre_api_call(self, model, messages, kwargs):
+        self._trace.start_span(
+            name="pre_api_call",
+            input=(
+                {"messages": messages}
+                if isinstance(messages, list)
+                else {"inputs": messages}
+            ),
+            spankind=(
+                "llm"
+                if kwargs["call_type"] in ["completion", "acompletion"]
+                else "unset"
+            ),
+        )
+        self._trace.set_span_attribute(
+            "model_config",
+            {
+                "model": kwargs.get("model"),
+                "temperature": kwargs["optional_params"]["temperature"],
+            },
+        )
+
+    def log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        self._trace.update_span_status(span=self._trace.active_span, value="OK")
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["complete_streaming_response"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+    def log_success_event(
+        self, kwargs, response_obj: ModelResponse, start_time, end_time
+    ):
+        self._trace.update_span_status(span=self._trace.active_span, value="OK")
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["message"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+    def log_failure_event(
+        self, kwargs, response_obj: ModelResponse, start_time, end_time
+    ):
+        self._trace.update_span_status(span=self._trace.active_span, value="ERROR")
+        self._trace.set_span_attribute(
+            attributes={
+                "traceback_exception": kwargs["traceback_exception"],
+                "call_end_time": kwargs["end_time"],
+            },
+        )
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["exception"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        self._trace.update_span_status(span=self._trace.active_span, value="OK")
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["complete_streaming_response"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        self._trace.update_span_status(span=self._trace.active_span, value="OK")
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["message"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        self._trace.update_span_status(span=self._trace.active_span, value="ERROR")
+        self._trace.set_span_attribute(
+            attributes={
+                "traceback_exception": kwargs["traceback_exception"],
+                "call_end_time": kwargs["end_time"],
+            },
+        )
+        self._trace.end_span(
+            outputs={
+                "message": kwargs["exception"],
+                "usage": kwargs["usage"],
+                "cost": kwargs.get("response_cost"),
+            },
+            span=self._trace.active_span,
+        )
+
+
+agenta_litellm_handler = AgentaLiteLLMHandler()
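For context, this is roughly how the exported handler is meant to be wired into LiteLLM. The sketch below assumes agenta has already been initialized so that `llm_tracing()` returns a configured `Tracing` instance; the `init` arguments are placeholders, and registering a `CustomLogger` instance through `litellm.callbacks` follows LiteLLM's custom-callback pattern:

import agenta as ag
import litellm

# initialization arguments are placeholders; consult agenta_init for the actual signature
ag.init(app_name="my-app", base_name="app")

# register the handler defined above so that LiteLLM calls are traced
litellm.callbacks = [ag.agenta_litellm_handler]

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)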
agenta/sdk/tracing/llm_tracing.py CHANGED
@@ -1,4 +1,5 @@
 # Stdlib Imports
+from threading import Lock
 from datetime import datetime, timezone
 from typing import Optional, Dict, Any, List, Union
 
@@ -13,10 +14,43 @@ from agenta.client.backend.types.create_span import CreateSpan, SpanKind, SpanSt
 from bson.objectid import ObjectId
 
 
-class Tracing(object):
-    """Agenta llm tracing object.
+class SingletonMeta(type):
+    """
+    Thread-safe implementation of Singleton.
+    """
+
+    _instances = {}
+
+    # We need the lock mechanism to synchronize threads \
+    # during the initial access to the Singleton object.
+    _lock: Lock = Lock()
 
-    Args:
+    def __call__(cls, *args, **kwargs):
+        """
+        Possible changes to the value of the `__init__` argument do not affect
+        the returned instance.
+        """
+        # Now, imagine that the program has just been launched. Since there's no
+        # Singleton instance yet, multiple threads can simultaneously pass the
+        # previous conditional and reach this point almost at the same time. The
+        # first of them will acquire lock and will proceed further, while the
+        # rest will wait here.
+        with cls._lock:
+            # The first thread to acquire the lock, reaches this conditional,
+            # goes inside and creates the Singleton instance. Once it leaves the
+            # lock block, a thread that might have been waiting for the lock
+            # release may then enter this section. But since the Singleton field
+            # is already initialized, the thread won't create a new object.
+            if cls not in cls._instances:
+                instance = super().__call__(*args, **kwargs)
+                cls._instances[cls] = instance
+        return cls._instances[cls]
+
+
+class Tracing(metaclass=SingletonMeta):
+    """The `Tracing` class is an agent for LLM tracing with specific initialization arguments.
+
+    __init__ args:
         base_url (str): The URL of the backend host
         api_key (str): The API Key of the backend host
         tasks_manager (TaskQueue): The tasks manager dedicated to handling asynchronous tasks
@@ -24,13 +58,6 @@ class Tracing(object):
         max_workers (int): The maximum number of workers to run tracing
     """
 
-    _instance = None
-
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super().__new__(cls)
-        return cls._instance
-
     def __init__(
         self,
         base_url: str,
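A minimal, self-contained illustration of the metaclass behavior introduced above: any class using `SingletonMeta` is constructed once, and later constructor arguments are ignored. The `Settings` class is a hypothetical stand-in (unlike `Tracing`, it needs no backend arguments); the metaclass body mirrors the diff:

from threading import Lock


class SingletonMeta(type):
    """Thread-safe Singleton: the first call creates the instance, later calls reuse it."""

    _instances = {}
    _lock: Lock = Lock()

    def __call__(cls, *args, **kwargs):
        with cls._lock:
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Settings(metaclass=SingletonMeta):
    # hypothetical stand-in for Tracing, which requires base_url, api_key, etc.
    def __init__(self, value):
        self.value = value


a = Settings("first")
b = Settings("second")  # the second call returns the first instance; "second" is ignored
assert a is b and a.value == "first"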
agenta-0.14.0.dist-info/METADATA → agenta-0.14.1a0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: agenta
-Version: 0.14.0
+Version: 0.14.1a0
 Summary: The SDK for agenta is an open-source LLMOps platform.
 Home-page: https://agenta.ai
 Keywords: LLMOps,LLM,evaluation,prompt engineering
@@ -22,6 +22,7 @@ Requires-Dist: fastapi (>=0.96.1)
 Requires-Dist: httpx (>=0.27.0,<0.28.0)
 Requires-Dist: importlib-metadata (>=6.7.0,<7.0.0)
 Requires-Dist: ipdb (>=0.13)
+Requires-Dist: litellm (>=1.35.33,<2.0.0)
 Requires-Dist: posthog (>=3.1.0,<4.0.0)
 Requires-Dist: pydantic (==1.10.13)
 Requires-Dist: pymongo (>=4.6.3,<5.0.0)
agenta-0.14.0.dist-info/RECORD → agenta-0.14.1a0.dist-info/RECORD
@@ -1,4 +1,4 @@
-agenta/__init__.py,sha256=rCZ-mUOGnyKQpfWQFzXIEFX7KBlGC3qFMighdIVJgDc,610
+agenta/__init__.py,sha256=ZJZlhn56j2rjl-QKTuNU6HHlJLVlmd_NcU67VIBvGGI,668
 agenta/cli/evaluation_commands.py,sha256=fs6492tprPId9p8eGO02Xy-NCBm2RZNJLZWcUxugwd8,474
 agenta/cli/helper.py,sha256=vRxHyeNaltzNIGrfU2vO0H28_rXDzx9QqIZ_S-W6zL4,6212
 agenta/cli/main.py,sha256=GgYu6UsrnHbqPV7zPlO14b61IyaDiTIjGMYQS9DlqC4,9551
@@ -126,15 +126,16 @@ agenta/docker/docker-assets/entrypoint.sh,sha256=29XK8VQjQsx4hN2j-4JDy-6kQb5y4LC
 agenta/docker/docker-assets/lambda_function.py,sha256=h4UZSSfqwpfsCgERv6frqwm_4JrYu9rLz3I-LxCfeEg,83
 agenta/docker/docker-assets/main.py,sha256=7MI-21n81U7N7A0GxebNi0cmGWtJKcR2sPB6FcH2QfA,251
 agenta/docker/docker_utils.py,sha256=5uHMCzXkCvIsDdEiwbnnn97KkzsFbBvyMwogCsv_Z5U,3509
-agenta/sdk/__init__.py,sha256=jmeLRuXrew02ZruODZYIu4kpw0S8vV6JhMPQWFGtj30,648
-agenta/sdk/agenta_decorator.py,sha256=6vz0G3YCRKRzK8JrQFyy8c2RIXy2kVMwyxTS093_8vQ,17296
+agenta/sdk/__init__.py,sha256=3At8FvbuYFoP5IIYs4FZWYQK8l4XlVD1zv4E6QsgQv0,702
+agenta/sdk/agenta_decorator.py,sha256=ucoGffoIefsYaxniMfMgsepKDo8jrlVMA7gGoTYyQUc,17805
 agenta/sdk/agenta_init.py,sha256=wDfStpe8_3ZXRLtikarwDKI_VpA1YW4eIz_3fXq39is,9044
 agenta/sdk/client.py,sha256=trKyBOYFZRk0v5Eptxvh87yPf50Y9CqY6Qgv4Fy-VH4,2142
 agenta/sdk/context.py,sha256=q-PxL05-I84puunUAs9LGsffEXcYhDxhQxjuOz2vK90,901
 agenta/sdk/router.py,sha256=0sbajvn5C7t18anH6yNo7-oYxldHnYfwcbmQnIXBePw,269
+agenta/sdk/tracing/callbacks.py,sha256=hwOHgu1pGp5C7KTs0Tm07y7Uv_0gCo2YoyckKfnjmqw,4342
 agenta/sdk/tracing/context_manager.py,sha256=HskDaiORoOhjeN375gm05wYnieQzh5UnoIsnSAHkAyc,252
 agenta/sdk/tracing/decorators.py,sha256=ujtU8gf3GDoHYuLTfEYK_2eIYZ-1oX5dpv02Mf4l_II,1191
-agenta/sdk/tracing/llm_tracing.py,sha256=UiotJ56EFA3VPt7LREkcK2w51D9-0T1QNvBy4zNWEdY,7348
+agenta/sdk/tracing/llm_tracing.py,sha256=f69FtUIA187gNob8N6IZ-eiF6QQyBVl2JUMJ4Nnnp7M,8671
 agenta/sdk/tracing/logger.py,sha256=4zG9c51p8xPdKA5SL8MOgBfkpCnBSuV6JfWiXO0A7oc,473
 agenta/sdk/tracing/tasks_manager.py,sha256=XVGBEOwmHa6KcCC0PApk0_bZ0Ilk2ESuduNObB1rw2s,3792
 agenta/sdk/types.py,sha256=Mn0yBlHh_Yr_5oQXUfsYI3V7sJAVWkJgkxEOBDOOMS0,5852
@@ -156,7 +157,7 @@ agenta/templates/simple_prompt/app.py,sha256=kODgF6lhzsaJPdgL5b21bUki6jkvqjWZzWR
 agenta/templates/simple_prompt/env.example,sha256=g9AE5bYcGPpxawXMJ96gh8oenEPCHTabsiOnfQo3c5k,70
 agenta/templates/simple_prompt/requirements.txt,sha256=ywRglRy7pPkw8bljmMEJJ4aOOQKrt9FGKULZ-DGkoBU,23
 agenta/templates/simple_prompt/template.toml,sha256=DQBtRrF4GU8LBEXOZ-GGuINXMQDKGTEG5y37tnvIUIE,60
-agenta-0.14.0.dist-info/METADATA,sha256=pY6HvnMLhCHryK46gnKyCLI2WTg7wwv1Q4nCa6558oY,26474
-agenta-0.14.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-agenta-0.14.0.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
-agenta-0.14.0.dist-info/RECORD,,
+agenta-0.14.1a0.dist-info/METADATA,sha256=Z4cOQ0WJuec014xZcsQcU5CtcaKIKsAOMlLVux4rnJ4,26518
+agenta-0.14.1a0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+agenta-0.14.1a0.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
+agenta-0.14.1a0.dist-info/RECORD,,