agenta 0.19.8a0__py3-none-any.whl → 0.20.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agenta might be problematic.

agenta/__init__.py CHANGED
@@ -17,7 +17,7 @@ from .sdk.types import (
  from .sdk.tracing.logger import llm_logger as logging
  from .sdk.tracing.llm_tracing import Tracing
  from .sdk.decorators.tracing import instrument
- from .sdk.decorators.llm_entrypoint import entrypoint, app
+ from .sdk.decorators.llm_entrypoint import entrypoint, app, route
  from .sdk.agenta_init import Config, AgentaSingleton, init
  from .sdk.utils.helper.openai_cost import calculate_token_usage
  from .sdk.client import Agenta
agenta/client/backend/types/create_span.py CHANGED
@@ -20,7 +20,10 @@ class CreateSpan(pydantic.BaseModel):
  variant_id: typing.Optional[str]
  variant_name: typing.Optional[str]
  inputs: typing.Optional[typing.Dict[str, typing.Any]]
- outputs: typing.Optional[typing.List[str]]
+ internals: typing.Optional[typing.Dict[str, typing.Any]]
+ outputs: typing.Optional[
+ typing.Union[typing.Dict[str, typing.Any], typing.List[str]]
+ ]
  config: typing.Optional[typing.Dict[str, typing.Any]]
  environment: typing.Optional[str]
  tags: typing.Optional[typing.List[str]]
agenta/client/backend/types/llm_tokens.py CHANGED
@@ -12,9 +12,9 @@ except ImportError:


  class LlmTokens(pydantic.BaseModel):
- prompt_tokens: typing.Optional[int]
- completion_tokens: typing.Optional[int]
- total_tokens: typing.Optional[int]
+ prompt_tokens: typing.Optional[int] = 0
+ completion_tokens: typing.Optional[int] = 0
+ total_tokens: typing.Optional[int] = 0

  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {
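The zero defaults here line up with the tracer-side change further down (`_update_span_tokens` now guards against `None`). A minimal standalone sketch of the effect, using plain pydantic rather than the generated client:

```python
import typing

import pydantic


class LlmTokens(pydantic.BaseModel):
    # Same field defaults as the new generated model above.
    prompt_tokens: typing.Optional[int] = 0
    completion_tokens: typing.Optional[int] = 0
    total_tokens: typing.Optional[int] = 0


# A fresh object starts at zero, so usage can be accumulated without None checks.
totals = LlmTokens()
totals.prompt_tokens += 12
totals.completion_tokens += 3
totals.total_tokens += 15
print(totals)  # prompt_tokens=12 completion_tokens=3 total_tokens=15
```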
agenta/sdk/__init__.py CHANGED
@@ -16,7 +16,7 @@ from .types import (

  from .tracing.llm_tracing import Tracing
  from .decorators.tracing import instrument
- from .decorators.llm_entrypoint import entrypoint, app
+ from .decorators.llm_entrypoint import entrypoint, app, route
  from .agenta_init import Config, AgentaSingleton, init
  from .utils.helper.openai_cost import calculate_token_usage

agenta/sdk/decorators/llm_entrypoint.py CHANGED
@@ -3,6 +3,7 @@
  import os
  import sys
  import time
+ import json
  import inspect
  import argparse
  import asyncio
@@ -15,11 +16,11 @@ from typing import Any, Callable, Dict, Optional, Tuple, List
  from fastapi.middleware.cors import CORSMiddleware
  from fastapi import Body, FastAPI, UploadFile, HTTPException

- import agenta
+ import agenta as ag
  from agenta.sdk.context import save_context
  from agenta.sdk.router import router as router
  from agenta.sdk.tracing.logger import llm_logger as logging
- from agenta.sdk.tracing.llm_tracing import Tracing
+ from agenta.sdk.tracing.tracing_context import tracing_context, TracingContext
  from agenta.sdk.decorators.base import BaseDecorator
  from agenta.sdk.types import (
  Context,
@@ -32,10 +33,12 @@ from agenta.sdk.types import (
  TextParam,
  MessagesInput,
  FileInputURL,
- FuncResponse,
+ BaseResponse,
  BinaryParam,
  )

+ from pydantic import BaseModel, HttpUrl
+
  app = FastAPI()

  origins = [
@@ -52,16 +55,56 @@ app.add_middleware(

  app.include_router(router, prefix="")

-
  from agenta.sdk.utils.debug import debug, DEBUG, SHIFT


  logging.setLevel("DEBUG")


+ class PathValidator(BaseModel):
+ url: HttpUrl
+
+
+ class route(BaseDecorator):
+ # This decorator is used to expose specific stages of a workflow (embedding, retrieval, summarization, etc.)
+ # as independent endpoints. It is designed for backward compatibility with existing code that uses
+ # the @entrypoint decorator, which has certain limitations. By using @route(), we can create new
+ # routes without altering the main workflow entrypoint. This helps in modularizing the services
+ # and provides flexibility in how we expose different functionalities as APIs.
+ def __init__(self, path):
+ path = "/" + path.strip("/").strip()
+ path = "" if path == "/" else path
+
+ PathValidator(url=f"http://example.com{path}")
+
+ self.route_path = path
+
+ def __call__(self, f):
+ self.e = entrypoint(f, route_path=self.route_path)
+
+ return f
+
+
  class entrypoint(BaseDecorator):
- """Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing.
+ """
+ Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing.
+
+ This decorator generates the following endpoints:
+
+ Playground Endpoints
+ - /generate with @entrypoint, @route("/"), @route(path="") # LEGACY
+ - /playground/run with @entrypoint, @route("/"), @route(path="")
+ - /playground/run/{route} with @route({route}), @route(path={route})

+ Deployed Endpoints:
+ - /generate_deployed with @entrypoint, @route("/"), @route(path="") # LEGACY
+ - /run with @entrypoint, @route("/"), @route(path="")
+ - /run/{route} with @route({route}), @route(path={route})
+
+ The rationale is:
+ - There may be multiple endpoints, based on the different routes.
+ - It's better to make it explicit that an endpoint is for the playground.
+ - Prefixing the routes with /run is more futureproof in case we add more endpoints.

  Example:
  ```python
@@ -73,31 +116,64 @@ class entrypoint(BaseDecorator):
  ```
  """

- def __init__(self, func: Callable[..., Any]):
- endpoint_name = "generate"
+ routes = list()
+
+ def __init__(self, func: Callable[..., Any], route_path=""):
+ DEFAULT_PATH = "generate"
+ PLAYGROUND_PATH = "/playground"
+ RUN_PATH = "/run"
+
  func_signature = inspect.signature(func)
- config_params = agenta.config.all()
+ config_params = ag.config.all()
  ingestible_files = self.extract_ingestible_files(func_signature)

+ ### --- Playground --- #
  @debug()
  @functools.wraps(func)
  async def wrapper(*args, **kwargs) -> Any:
  func_params, api_config_params = self.split_kwargs(kwargs, config_params)
  self.ingest_files(func_params, ingestible_files)
- agenta.config.set(**api_config_params)
+ ag.config.set(**api_config_params)

  # Set the configuration and environment of the LLM app parent span at run-time
- agenta.tracing.update_baggage(
+ ag.tracing.update_baggage(
  {"config": config_params, "environment": "playground"}
  )

- # Exceptions are all handled inside self.execute_function()
- llm_result = await self.execute_function(
+ entrypoint_result = await self.execute_function(
  func, *args, params=func_params, config_params=config_params
  )

- return llm_result
+ return entrypoint_result
+
+ self.update_function_signature(
+ wrapper, func_signature, config_params, ingestible_files
+ )

+ #
+ if route_path == "":
+ route = f"/{DEFAULT_PATH}"
+ app.post(route, response_model=BaseResponse)(wrapper)
+ entrypoint.routes.append(
+ {
+ "func": func.__name__,
+ "endpoint": DEFAULT_PATH,
+ "params": {**config_params, **func_signature.parameters},
+ }
+ )
+
+ route = f"{PLAYGROUND_PATH}{RUN_PATH}{route_path}"
+ app.post(route, response_model=BaseResponse)(wrapper)
+ entrypoint.routes.append(
+ {
+ "func": func.__name__,
+ "endpoint": route[1:].replace("/", "_"),
+ "params": {**config_params, **func_signature.parameters},
+ }
+ )
+ ### ---------------------------- #
+
+ ### --- Deployed / Published --- #
  @debug()
  @functools.wraps(func)
  async def wrapper_deployed(*args, **kwargs) -> Any:
@@ -106,44 +182,51 @@ class entrypoint(BaseDecorator):
  }

  if "environment" in kwargs and kwargs["environment"] is not None:
- agenta.config.pull(environment_name=kwargs["environment"])
+ ag.config.pull(environment_name=kwargs["environment"])
  elif "config" in kwargs and kwargs["config"] is not None:
- agenta.config.pull(config_name=kwargs["config"])
+ ag.config.pull(config_name=kwargs["config"])
  else:
- agenta.config.pull(config_name="default")
+ ag.config.pull(config_name="default")

  # Set the configuration and environment of the LLM app parent span at run-time
- agenta.tracing.update_baggage(
+ ag.tracing.update_baggage(
  {"config": config_params, "environment": kwargs["environment"]}
  )

- llm_result = await self.execute_function(
+ entrypoint_result = await self.execute_function(
  func, *args, params=func_params, config_params=config_params
  )

- return llm_result
-
- self.update_function_signature(
- wrapper, func_signature, config_params, ingestible_files
- )
- route = f"/{endpoint_name}"
- app.post(route, response_model=FuncResponse)(wrapper)
+ return entrypoint_result

  self.update_deployed_function_signature(
  wrapper_deployed,
  func_signature,
  ingestible_files,
  )
- route_deployed = f"/{endpoint_name}_deployed"
- app.post(route_deployed, response_model=FuncResponse)(wrapper_deployed)
- self.override_schema(
- openapi_schema=app.openapi(),
- func_name=func.__name__,
- endpoint=endpoint_name,
- params={**config_params, **func_signature.parameters},
- )

- if self.is_main_script(func):
+ if route_path == "/":
+ route_deployed = f"/{DEFAULT_PATH}_deployed"
+ app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed)
+
+ route_deployed = f"{RUN_PATH}{route_path}"
+ app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed)
+ ### ---------------------------- #
+
+ ### --- Update OpenAPI --- #
+ app.openapi_schema = None # Forces FastAPI to re-generate the schema
+ openapi_schema = app.openapi()
+
+ for route in entrypoint.routes:
+ self.override_schema(
+ openapi_schema=openapi_schema,
+ func=route["func"],
+ endpoint=route["endpoint"],
+ params=route["params"],
+ )
+ ### ---------------------- #
+
+ if self.is_main_script(func) and route_path == "":
  self.handle_terminal_run(
  func,
  func_signature.parameters, # type: ignore
@@ -198,51 +281,55 @@ class entrypoint(BaseDecorator):
  For synchronous functions, it calls them directly, while for asynchronous functions,
  it awaits their execution.
  """
+ data = None
+ trace = None
+
+ token = None
+ if tracing_context.get() is None:
+ token = tracing_context.set(TracingContext())
+
  is_coroutine_function = inspect.iscoroutinefunction(func)
- start_time = time.perf_counter()

  if is_coroutine_function:
  result = await func(*args, **func_params["params"])
  else:
  result = func(*args, **func_params["params"])

- end_time = time.perf_counter()
- latency = end_time - start_time
+ if token is not None:
+ trace = ag.tracing.dump_trace()
+ tracing_context.reset(token)

  if isinstance(result, Context):
  save_context(result)
+
  if isinstance(result, Dict):
- return FuncResponse(**result, latency=round(latency, 4))
- if isinstance(result, str):
- return FuncResponse(
- message=result, usage=None, cost=None, latency=round(latency, 4)
- )
- if isinstance(result, int) or isinstance(result, float):
- return FuncResponse(
- message=str(result),
- usage=None,
- cost=None,
- latency=round(latency, 4),
- )
- if result is None:
- return FuncResponse(
- message="Function executed successfully, but did return None. \n Are you sure you did not forget to return a value?",
- usage=None,
- cost=None,
- latency=round(latency, 4),
+ data = result
+ elif isinstance(result, str):
+ data = {"message": result}
+ elif isinstance(result, int) or isinstance(result, float):
+ data = {"message": str(result)}
+
+ if data is None:
+ warning = (
+ "Function executed successfully, but did return None. \n Are you sure you did not forget to return a value?",
  )
+
+ data = {"message": warning}
+
+ return BaseResponse(data=data, trace=trace)
+
  except Exception as e:
  self.handle_exception(e)
- return FuncResponse(message="Unexpected error occurred when calling the @entrypoint decorated function", latency=0) # type: ignore

  def handle_exception(self, e: Exception):
- """Handle exceptions."""
+ status_code = e.status_code if hasattr(e, "status_code") else 500
+ message = str(e)
+ stacktrace = traceback.format_exception(e, value=e, tb=e.__traceback__) # type: ignore
+ detail = {"message": message, "stacktrace": stacktrace}

- status_code: int = e.status_code if hasattr(e, "status_code") else 500
- traceback_str = traceback.format_exception(e, value=e, tb=e.__traceback__) # type: ignore
  raise HTTPException(
  status_code=status_code,
- detail={"error": str(e), "traceback": "".join(traceback_str)},
+ detail=detail,
  )

  def update_wrapper_signature(
@@ -418,26 +505,29 @@ class entrypoint(BaseDecorator):
  file_path=args_func_params[name],
  )

- agenta.config.set(**args_config_params)
+ ag.config.set(**args_config_params)

  # Set the configuration and environment of the LLM app parent span at run-time
- agenta.tracing.update_baggage(
- {"config": agenta.config.all(), "environment": "bash"}
- )
+ ag.tracing.update_baggage({"config": ag.config.all(), "environment": "bash"})

  loop = asyncio.get_event_loop()
+
  result = loop.run_until_complete(
  self.execute_function(
  func,
  **{"params": args_func_params, "config_params": args_config_params},
  )
  )
- print(
- f"\n========== Result ==========\n\nMessage: {result.message}\nCost: {result.cost}\nToken Usage: {result.usage}"
- )
+
+ print(f"\n========== Result ==========\n")
+
+ print("-> data")
+ print(json.dumps(result.data, indent=2))
+ print("-> trace")
+ print(json.dumps(result.trace, indent=2))

  def override_schema(
- self, openapi_schema: dict, func_name: str, endpoint: str, params: dict
+ self, openapi_schema: dict, func: str, endpoint: str, params: dict
  ):
  """
  Overrides the default openai schema generated by fastapi with additional information about:
@@ -452,7 +542,7 @@ class entrypoint(BaseDecorator):

  Args:
  openapi_schema (dict): The openapi schema generated by fastapi
- func_name (str): The name of the function to override
+ func (str): The name of the function to override
  endpoint (str): The name of the endpoint to override
  params (dict(param_name, param_val)): The dictionary of the parameters for the function
  """
@@ -484,8 +574,29 @@ class entrypoint(BaseDecorator):
  # value = {'temperature': { "type": "number", "title": "Temperature", "x-parameter": "float" }}
  return value

+ def get_type_from_param(param_val):
+ param_type = "string"
+ annotation = param_val.annotation
+
+ if annotation == int:
+ param_type = "integer"
+ elif annotation == float:
+ param_type = "number"
+ elif annotation == dict:
+ param_type = "object"
+ elif annotation == bool:
+ param_type = "boolean"
+ elif annotation == list:
+ param_type = "list"
+ elif annotation == str:
+ param_type = "string"
+ else:
+ print("ERROR, unhandled annotation:", annotation)
+
+ return param_type
+
  schema_to_override = openapi_schema["components"]["schemas"][
- f"Body_{func_name}_{endpoint}_post"
+ f"Body_{func}_{endpoint}_post"
  ]["properties"]
  for param_name, param_val in params.items():
  if isinstance(param_val, GroupedMultipleChoiceParam):
@@ -501,7 +612,7 @@ class entrypoint(BaseDecorator):
  subschema["choices"] = param_val.choices # type: ignore
  subschema["default"] = param_val.default # type: ignore

- if isinstance(param_val, MultipleChoiceParam):
+ elif isinstance(param_val, MultipleChoiceParam):
  subschema = find_in_schema(
  param_val.__schema_type_properties__(),
  schema_to_override,
@@ -520,7 +631,7 @@ class entrypoint(BaseDecorator):
  default if default in param_choices else choices[0]
  )

- if isinstance(param_val, FloatParam):
+ elif isinstance(param_val, FloatParam):
  subschema = find_in_schema(
  param_val.__schema_type_properties__(),
  schema_to_override,
@@ -531,7 +642,7 @@ class entrypoint(BaseDecorator):
  subschema["maximum"] = param_val.maxval # type: ignore
  subschema["default"] = param_val

- if isinstance(param_val, IntParam):
+ elif isinstance(param_val, IntParam):
  subschema = find_in_schema(
  param_val.__schema_type_properties__(),
  schema_to_override,
@@ -542,7 +653,7 @@ class entrypoint(BaseDecorator):
  subschema["maximum"] = param_val.maxval # type: ignore
  subschema["default"] = param_val

- if (
+ elif (
  isinstance(param_val, inspect.Parameter)
  and param_val.annotation is DictInput
  ):
@@ -554,7 +665,7 @@ class entrypoint(BaseDecorator):
  )
  subschema["default"] = param_val.default["default_keys"]

- if isinstance(param_val, TextParam):
+ elif isinstance(param_val, TextParam):
  subschema = find_in_schema(
  param_val.__schema_type_properties__(),
  schema_to_override,
@@ -563,7 +674,7 @@ class entrypoint(BaseDecorator):
  )
  subschema["default"] = param_val

- if (
+ elif (
  isinstance(param_val, inspect.Parameter)
  and param_val.annotation is MessagesInput
  ):
@@ -575,7 +686,7 @@ class entrypoint(BaseDecorator):
  )
  subschema["default"] = param_val.default

- if (
+ elif (
  isinstance(param_val, inspect.Parameter)
  and param_val.annotation is FileInputURL
  ):
@@ -587,7 +698,7 @@ class entrypoint(BaseDecorator):
  )
  subschema["default"] = "https://example.com"

- if isinstance(param_val, BinaryParam):
+ elif isinstance(param_val, BinaryParam):
  subschema = find_in_schema(
  param_val.__schema_type_properties__(),
  schema_to_override,
@@ -595,3 +706,11 @@ class entrypoint(BaseDecorator):
  "bool",
  )
  subschema["default"] = param_val.default # type: ignore
+ else:
+ subschema = {
+ "title": str(param_name).capitalize(),
+ "type": get_type_from_param(param_val),
+ }
+ if param_val.default != inspect._empty:
+ subschema["default"] = param_val.default # type: ignore
+ schema_to_override[param_name] = subschema
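Taken together, the llm_entrypoint changes mean one app can now expose several workflow stages as separate endpoints. A hedged usage sketch based on the decorator docstring above; the function bodies, parameter names, and config values are invented, and it assumes the usual `ag.init()` / `ag.config.default()` setup used in the agenta templates:

```python
import agenta as ag

ag.init()  # assumes app credentials/config come from the environment, as in the templates
ag.config.default(temperature=ag.FloatParam(0.7))


@ag.entrypoint  # registers /generate (legacy), /playground/run and /run
async def generate(question: str) -> str:
    return f"answer to: {question}"


@ag.route("/summarize")  # registers /playground/run/summarize and /run/summarize
async def summarize(text: str) -> dict:
    return {"message": text[:80]}
```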
agenta/sdk/decorators/tracing.py CHANGED
@@ -8,7 +8,6 @@ from typing import Any, Callable, Optional
  import agenta as ag
  from agenta.sdk.decorators.base import BaseDecorator
  from agenta.sdk.tracing.logger import llm_logger as logging
- from agenta.sdk.tracing.tracing_context import tracing_context, TracingContext
  from agenta.sdk.utils.debug import debug, DEBUG, SHIFT


@@ -38,149 +37,55 @@ class instrument(BaseDecorator):
  """

  def __init__(
- self, config: Optional[dict] = None, spankind: str = "workflow"
+ self,
+ config: Optional[dict] = None,
+ spankind: str = "workflow",
  ) -> None:
  self.config = config
  self.spankind = spankind
- self.tracing = ag.tracing

  def __call__(self, func: Callable[..., Any]):
  is_coroutine_function = inspect.iscoroutinefunction(func)

- @debug()
- @wraps(func)
- async def async_wrapper(*args, **kwargs):
- result = None
+ def get_inputs(*args, **kwargs):
  func_args = inspect.getfullargspec(func).args
  input_dict = {name: value for name, value in zip(func_args, args)}
  input_dict.update(kwargs)

- async def wrapped_func(*args, **kwargs):
- # logging.debug(" ".join([">..", str(tracing_context.get())]))
-
- token = None
- if tracing_context.get() is None:
- token = tracing_context.set(TracingContext())
-
- # logging.debug(" ".join([">>.", str(tracing_context.get())]))
+ return input_dict

- self.tracing.start_span(
+ @wraps(func)
+ async def async_wrapper(*args, **kwargs):
+ async def wrapped_func(*args, **kwargs):
+ with ag.tracing.Context(
  name=func.__name__,
- input=input_dict,
+ input=get_inputs(*args, **kwargs),
  spankind=self.spankind,
  config=self.config,
- )
-
- try:
+ ):
  result = await func(*args, **kwargs)

- self.tracing.set_status(status="OK")
- self.tracing.end_span(
- outputs=(
- {"message": result}
- if not isinstance(result, dict)
- else result
- )
- )
-
- # logging.debug(" ".join(["<<.", str(tracing_context.get())]))
-
- if token is not None:
- tracing_context.reset(token)
-
- # logging.debug(" ".join(["<..", str(tracing_context.get())]))
+ ag.tracing.store_outputs(result)

  return result

- except Exception as e:
- result = {
- "message": str(e),
- "stacktrace": traceback.format_exc(),
- }
-
- self.tracing.set_attributes(
- {"traceback_exception": traceback.format_exc()}
- )
- self.tracing.set_status(status="ERROR")
- self.tracing.end_span(outputs=result)
-
- # logging.debug(" ".join(["<<.", str(tracing_context.get())]))
-
- if token is not None:
- tracing_context.reset(token)
-
- # logging.debug(" ".join(["<..", str(tracing_context.get())]))
-
- raise e
-
  return await wrapped_func(*args, **kwargs)

  @wraps(func)
  def sync_wrapper(*args, **kwargs):
- result = None
- func_args = inspect.getfullargspec(func).args
- input_dict = {name: value for name, value in zip(func_args, args)}
- input_dict.update(kwargs)
-
  def wrapped_func(*args, **kwargs):
- # logging.debug(" ".join([">..", str(tracing_context.get())]))
-
- token = None
- if tracing_context.get() is None:
- token = tracing_context.set(TracingContext())
-
- # logging.debug(" ".join([">>.", str(tracing_context.get())]))
-
- span = self.tracing.start_span(
+ with ag.tracing.Context(
  name=func.__name__,
- input=input_dict,
+ input=get_inputs(*args, **kwargs),
  spankind=self.spankind,
  config=self.config,
- )
-
- try:
+ ):
  result = func(*args, **kwargs)

- self.tracing.set_status(status="OK")
- self.tracing.end_span(
- outputs=(
- {"message": result}
- if not isinstance(result, dict)
- else result
- )
- )
-
- # logging.debug(" ".join(["<<.", str(tracing_context.get())]))
-
- if token is not None:
- tracing_context.reset(token)
-
- # logging.debug(" ".join(["<..", str(tracing_context.get())]))
+ ag.tracing.store_outputs(result)

  return result

- except Exception as e:
- result = {
- "message": str(e),
- "stacktrace": traceback.format_exc(),
- }
-
- self.tracing.set_attributes(
- {"traceback_exception": traceback.format_exc()}
- )
-
- self.tracing.set_status(status="ERROR")
- self.tracing.end_span(outputs=result)
-
- # logging.debug(" ".join(["<<.", str(tracing_context.get())]))
-
- if token is not None:
- tracing_context.reset(token)
-
- # logging.debug(" ".join(["<..", str(tracing_context.get())]))
-
- raise e
-
  return wrapped_func(*args, **kwargs)

  return async_wrapper if is_coroutine_function else sync_wrapper
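The slimmed-down @instrument decorator delegates span lifecycle and error handling to `ag.tracing.Context`, and span data is attached through `store_outputs` / `store_internals`. A hedged sketch of how an instrumented stage might record intermediate values; the retrieval logic is invented, and it assumes `ag.init()` has been called so `ag.tracing` is configured:

```python
import agenta as ag


@ag.instrument(spankind="retriever")
async def retrieve(query: str) -> dict:
    candidates = [f"doc-{i}" for i in range(3)]  # placeholder retrieval logic

    # Attached to the active span's new `internals` field (see llm_tracing.py below).
    ag.tracing.store_internals({"candidates": candidates})

    return {"message": candidates[0]}
```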
agenta/sdk/tracing/llm_tracing.py CHANGED
@@ -1,11 +1,16 @@
  import os
+ import copy
+ import json
  from uuid import uuid4

+ import traceback
  from threading import Lock
  from datetime import datetime, timezone
  from typing import Optional, Dict, Any, List

- from agenta.sdk.tracing.tracing_context import tracing_context
+ from contextlib import contextmanager
+
+ from agenta.sdk.tracing.tracing_context import tracing_context, TracingContext
  from agenta.sdk.tracing.logger import llm_logger as logging
  from agenta.sdk.tracing.tasks_manager import TaskQueue
  from agenta.client.backend.client import AsyncAgentaApi
@@ -91,6 +96,46 @@ class Tracing(metaclass=SingletonMeta):
  base_url=self.host, api_key=self.api_key, timeout=120 # type: ignore
  ).observability

+ ### --- Context Manager --- ###
+
+ @contextmanager
+ def Context(self, **kwargs):
+ # This will evolve as we work towards OTel compliance
+
+ token = None
+
+ try:
+ if tracing_context.get() is None:
+ token = tracing_context.set(TracingContext())
+
+ self.open_span(**kwargs)
+
+ yield
+
+ self.set_status(status="OK")
+
+ except Exception as e:
+ logging.error(e)
+
+ result = {
+ "message": str(e),
+ "stacktrace": traceback.format_exc(),
+ }
+
+ self.set_status(status="ERROR")
+ self.set_attributes({"traceback_exception": traceback.format_exc()})
+ self.store_outputs(result)
+
+ raise
+
+ finally:
+ self.close_span()
+
+ if token is not None:
+ self.flush_spans()
+
+ tracing_context.reset(token)
+
  ### --- API --- ###

  @debug()
@@ -177,11 +222,7 @@ class Tracing(metaclass=SingletonMeta):
  if not self.api_key:
  logging.error("No API key")
  else:
- self._process_closed_spans()
-
- self._clear_closed_spans()
- self._clear_tracked_spans()
- self._clear_active_span()
+ self._process_spans()

  self._clear_trace_tags()

@@ -213,6 +254,7 @@ class Tracing(metaclass=SingletonMeta):
  attributes={},
  status=SpanStatusCode.UNSET.value,
  start_time=datetime.now(timezone.utc),
+ internals=None,
  outputs=None,
  tags=None,
  user=None,
@@ -224,12 +266,11 @@ class Tracing(metaclass=SingletonMeta):
  )

  if tracing.trace_id is None:
- self.start_trace(span, config)
+ self.open_trace(span, config)
  else:
  span.parent_span_id = tracing.active_span.id # type: ignore

- tracing.tracked_spans[span.id] = span
- tracing.active_span = span
+ tracing.push(span)
  ### --- TO BE CLEANED --- <<<

  logging.info(f"Opened span {span_id} {spankind.upper()}")
@@ -282,7 +323,7 @@ class Tracing(metaclass=SingletonMeta):
  tracing.active_span.status = status

  @debug()
- def close_span(self, outputs: Dict[str, Any]) -> None:
+ def close_span(self) -> None:
  """
  Ends the active span, if it is a parent span, ends the trace too.

@@ -313,24 +354,19 @@ class Tracing(metaclass=SingletonMeta):
  ### --- TO BE CLEANED --- >>>
  tracing.active_span.end_time = datetime.now(timezone.utc)

- tracing.active_span.outputs = [outputs.get("message", "")]
-
- if tracing.active_span.spankind.upper() in [
- "LLM",
- "RETRIEVER",
- ]: # TODO: Remove this whole part. Setting the cost should be done through set_span_attribute
- self._update_span_cost(tracing.active_span, outputs.get("cost", None))
- self._update_span_tokens(tracing.active_span, outputs.get("usage", None))
-
- tracing.closed_spans.append(tracing.active_span)
+ # TODO: Remove this whole part. Setting the cost should be done through set_span_attribute
+ if isinstance(tracing.active_span.outputs, dict):
+ self._update_span_cost(
+ tracing.active_span, tracing.active_span.outputs.get("cost", None)
+ )
+ self._update_span_tokens(
+ tracing.active_span, tracing.active_span.outputs.get("usage", None)
+ )

  active_span_parent_id = tracing.active_span.parent_span_id

- if active_span_parent_id is None:
- self.end_trace(parent_span=tracing.active_span)
-
- else:
- parent_span = tracing.tracked_spans[active_span_parent_id]
+ if active_span_parent_id is not None:
+ parent_span = tracing.spans[active_span_parent_id]
  self._update_span_cost(parent_span, tracing.active_span.cost)
  self._update_span_tokens(parent_span, tracing.active_span.tokens)
  tracing.active_span = parent_span
@@ -338,6 +374,95 @@ class Tracing(metaclass=SingletonMeta):

  logging.info(f"Closed span {span_id} {spankind}")

+ @debug()
+ def store_internals(self, internals: Dict[str, Any] = {}) -> None:
+ """
+ Set internals for the active span.
+
+ Args:
+ internals (Dict[str, Any], optional): A dictionary of local variables to set. Defaults to {}.
+ """
+
+ tracing = tracing_context.get()
+
+ if tracing.active_span is None:
+ logging.error(f"Cannot set internals ({set(internals)}), no active span")
+ return
+
+ logging.info(
+ f"Setting span {tracing.active_span.id} {tracing.active_span.spankind.upper()} internals={internals}"
+ )
+
+ if tracing.active_span.internals is None:
+ tracing.active_span.internals = dict()
+
+ for key, value in internals.items():
+ tracing.active_span.internals[key] = value # type: ignore
+
+ @debug()
+ def store_outputs(self, outputs: Dict[str, Any] = {}) -> None:
+ """
+ Set outputs for the active span.
+
+ Args:
+ outputs (Dict[str, Any], optional): A dictionary of output variables to set. Defaults to {}.
+ """
+
+ tracing = tracing_context.get()
+
+ if tracing.active_span is None:
+ logging.error(f"Cannot set outputs ({set(outputs)}), no active span")
+ return
+
+ logging.info(
+ f"Setting span {tracing.active_span.id} {tracing.active_span.spankind.upper()} outputs={outputs}"
+ )
+
+ tracing.active_span.outputs = outputs
+
+ def dump_trace(self):
+ """
+ Collects and organizes tracing information into a dictionary.
+ This function retrieves the current tracing context and extracts relevant data such as `trace_id`, `cost`, `tokens`, and `latency` for the whole trace.
+ It also dumps detailed span information using the `dump_spans` method and includes it in the trace dictionary.
+ If an error occurs during the process, it logs the error message and stack trace.
+
+ Returns:
+ dict: A dictionary containing the trace information.
+ """
+ try:
+ trace = dict()
+
+ tracing = tracing_context.get()
+
+ trace["trace_id"] = tracing.trace_id
+
+ for span in tracing.spans.values():
+ if span.parent_span_id is None:
+ trace["cost"] = span.cost
+ trace["usage"] = (
+ None if span.tokens is None else json.loads(span.tokens.json())
+ )
+ trace["latency"] = (span.end_time - span.start_time).total_seconds()
+
+ spans = (
+ []
+ if len(tracing.spans) == 0
+ else [json.loads(span.json()) for span in tracing.spans.values()]
+ )
+
+ if spans is not None:
+ trace["spans"] = spans
+
+ except Exception as e:
+ logging.error(e)
+ logging.error(traceback.format_exc())
+
+ return trace
+
+ def flush_spans(self) -> None:
+ self.close_trace()
+
  ### --- Legacy API --- ###

  def start_trace(
@@ -348,7 +473,7 @@ class Tracing(metaclass=SingletonMeta):
  ) -> None: # Legacy
  self.open_trace(span, config, **kwargs)

- def end_trace(self, parent_span: CreateSpan) -> None: # Legacy
+ def end_trace(self, _: CreateSpan) -> None: # Legacy
  self.close_trace()

  def start_span(
@@ -371,7 +496,8 @@ class Tracing(metaclass=SingletonMeta):
  self.set_attributes(attributes)

  def end_span(self, outputs: Dict[str, Any]) -> None: # Legacy
- self.close_span(outputs)
+ self.store_outputs(outputs)
+ self.close_span()

  ### --- Helper Functions --- ###

@@ -400,46 +526,24 @@ class Tracing(metaclass=SingletonMeta):
  # return uuid4().hex[:16]
  return str(ObjectId())

- def _process_closed_spans(self) -> None:
+ def _process_spans(self) -> None:
  tracing = tracing_context.get()

- logging.info(f"Sending spans {tracing.trace_id} #={len(tracing.closed_spans)} ")
+ spans = list(tracing.spans.values())

- # async def mock_create_traces(trace, spans):
- # print("trace-id", trace)
- # print("spans", spans)
+ logging.info(f"Sending trace {tracing.trace_id} spans={len(spans)} ")

  self.tasks_manager.add_task(
  tracing.trace_id,
- "trace",
+ "send-trace",
  # mock_create_traces(
  self.client.create_traces(
- trace=tracing.trace_id,
- spans=tracing.closed_spans, # type: ignore
+ trace=tracing.trace_id, spans=spans # type: ignore
  ),
  self.client,
  )

- logging.info(f"Sent spans {tracing.trace_id} #={len(tracing.closed_spans)}")
-
- def _clear_closed_spans(self) -> None:
- tracing = tracing_context.get()
-
- tracing.closed_spans.clear()
-
- def _clear_tracked_spans(self) -> None:
- tracing = tracing_context.get()
-
- tracing.tracked_spans.clear()
-
- def _clear_active_span(self) -> None:
- tracing = tracing_context.get()
-
- span_id = tracing.active_span.id
-
- tracing.active_span = None
-
- logging.debug(f"Cleared active span {span_id}")
+ logging.info(f"Sent trace {tracing.trace_id}")

  def _update_span_cost(self, span: CreateSpan, cost: Optional[float]) -> None:
  if span is not None and cost is not None and isinstance(cost, float):
@@ -455,6 +559,16 @@ class Tracing(metaclass=SingletonMeta):
  if span.tokens is None:
  span.tokens = LlmTokens(**tokens)
  else:
- span.tokens.prompt_tokens += tokens["prompt_tokens"]
- span.tokens.completion_tokens += tokens["completion_tokens"]
- span.tokens.total_tokens += tokens["total_tokens"]
+ span.tokens.prompt_tokens += (
+ tokens["prompt_tokens"]
+ if tokens["prompt_tokens"] is not None
+ else 0
+ )
+ span.tokens.completion_tokens += (
+ tokens["completion_tokens"]
+ if tokens["completion_tokens"] is not None
+ else 0
+ )
+ span.tokens.total_tokens += (
+ tokens["total_tokens"] if tokens["total_tokens"] is not None else 0
+ )
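For reference, `dump_trace` above assembles roughly the following structure, which is what `execute_function` attaches to the response as `trace`. The field names follow the code; the values are placeholders:

```python
# Placeholder values; keys follow dump_trace in llm_tracing.py.
trace = {
    "trace_id": "65f0c2...",  # tracing.trace_id
    "cost": 0.00042,          # cost of the root span (parent_span_id is None)
    "usage": {
        "prompt_tokens": 12,
        "completion_tokens": 3,
        "total_tokens": 15,
    },
    "latency": 1.234,         # root span end_time - start_time, in seconds
    "spans": [
        # one JSON-serialized CreateSpan per span in the trace
    ],
}
```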
agenta/sdk/tracing/tasks_manager.py CHANGED
@@ -105,8 +105,9 @@ class TaskQueue(object):
  future = self._thread_pool.submit(asyncio.run, task.run())
  future.result()
  except Exception as exc:
- self._logger.error(f"Error running task: {str(exc)}")
- self._logger.error(f"Recording {task.coroutine_type} status to ERROR.")
+ self._logger.error(
+ f"Task '{task.coroutine_type}' failed with error: {str(exc)}"
+ )
  break
  finally:
  self.tasks.task_done()
agenta/sdk/tracing/tracing_context.py CHANGED
@@ -1,6 +1,6 @@
- import contextvars
+ from contextvars import ContextVar

- from typing import Optional, Dict, Any, List
+ from typing import Optional, Dict, List

  from agenta.client.backend.types.create_span import CreateSpan

@@ -15,14 +15,17 @@ class TracingContext:

  ### --- SPANS --- ###
  self.active_span: Optional[CreateSpan] = None
- self.tracked_spans: Dict[str, CreateSpan] = {}
- self.closed_spans: List[CreateSpan] = []
+ self.spans: Dict[str, CreateSpan] = {}

  def __repr__(self) -> str:
- return f"TracingContext(trace_id=[{self.trace_id}], active_span=[{self.active_span.id if self.active_span else None}{' ' + self.active_span.spankind if self.active_span else ''}])"
+ return f"TracingContext(trace='{self.trace_id}', spans={[f'{span.id} {span.spankind}' for span in self.spans.values()]})"

  def __str__(self) -> str:
  return self.__repr__()

+ def push(self, span) -> None:
+ self.active_span = span
+ self.spans[span.id] = span

- tracing_context = contextvars.ContextVar(CURRENT_TRACING_CONTEXT_KEY, default=None)
+
+ tracing_context = ContextVar(CURRENT_TRACING_CONTEXT_KEY, default=None)
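A minimal sketch of the pattern this file relies on, in plain Python with no agenta imports: a ContextVar holds one tracing context per logical execution, and the set/reset token restores whatever was there before.

```python
from contextvars import ContextVar

# Comparable to tracing_context above; the stored object stands in for TracingContext().
ctx: ContextVar = ContextVar("tracing_context", default=None)

token = ctx.set(object())
try:
    ...  # open spans, push() them, store outputs
finally:
    ctx.reset(token)  # restore the previous context
```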
agenta/sdk/types.py CHANGED
@@ -1,5 +1,5 @@
  import json
- from typing import Dict, List, Optional
+ from typing import Dict, List, Optional, Any

  from pydantic import ConfigDict, BaseModel, HttpUrl

@@ -16,11 +16,10 @@ class LLMTokenUsage(BaseModel):
  total_tokens: int


- class FuncResponse(BaseModel):
- message: str
- usage: Optional[LLMTokenUsage]
- cost: Optional[float]
- latency: float
+ class BaseResponse(BaseModel):
+ version: Optional[str] = "2.0"
+ data: Optional[Dict[str, Any]]
+ trace: Optional[Dict[str, Any]]


  class DictInput(dict):
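With FuncResponse gone, every generated endpoint returns the BaseResponse envelope, and the metrics that used to sit at the top level now live inside `trace`. A hedged client-side migration sketch; the `resp` dict is an invented example of a /run response body:

```python
# Invented example body; field names follow BaseResponse and dump_trace above.
resp = {
    "version": "2.0",
    "data": {"message": "the LLM answer"},
    "trace": {"cost": 0.00042, "usage": {"total_tokens": 15}, "latency": 1.234},
}

# 0.19.x (FuncResponse):        0.20.x (BaseResponse):
# message = resp["message"]
message = resp["data"]["message"]
# latency = resp["latency"]
latency = resp["trace"]["latency"]
```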
agenta-0.19.8a0.dist-info/METADATA → agenta-0.20.0a0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: agenta
- Version: 0.19.8a0
+ Version: 0.20.0a0
  Summary: The SDK for agenta is an open-source LLMOps platform.
  Home-page: https://agenta.ai
  Keywords: LLMOps,LLM,evaluation,prompt engineering
agenta-0.19.8a0.dist-info/RECORD → agenta-0.20.0a0.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- agenta/__init__.py,sha256=6ZfEnXNHoJVXO9HT_U-WuitFvE6Oq_UTrjAZGIRzaDY,887
+ agenta/__init__.py,sha256=nL_Fogmx_64xS--WjhE4gE_0h5LIJDbDGKpjZoNJ9oI,894
  agenta/cli/evaluation_commands.py,sha256=fs6492tprPId9p8eGO02Xy-NCBm2RZNJLZWcUxugwd8,474
  agenta/cli/helper.py,sha256=vRxHyeNaltzNIGrfU2vO0H28_rXDzx9QqIZ_S-W6zL4,6212
  agenta/cli/main.py,sha256=Wz0ODhoeKK3Qg_CFUhu6D909szk05tc8ZVBB6H1-w7k,9763
@@ -54,7 +54,7 @@ agenta/client/backend/types/base_output.py,sha256=ynXhDBQKrkR6Lnkx-yv6Q8xW4wXmzX
  agenta/client/backend/types/body_import_testset.py,sha256=7dVF3mv3VO0Co8F0qxLAgu4jabqDPjebK4mYvcd_TuA,1061
  agenta/client/backend/types/config_db.py,sha256=P0cSYvVOn0ZxpYMIdvhWpQVjRuBS5APe6qlc69AXaF4,1028
  agenta/client/backend/types/create_app_output.py,sha256=pgnTnfZx35Q-8wZ1yTZBQ0ydYacGzFC9kyLug_UvymM,986
- agenta/client/backend/types/create_span.py,sha256=Ldb2zclVtVyBRKxM2Ap3YjE-FP3jbaOya96_ZsYw1cg,1794
+ agenta/client/backend/types/create_span.py,sha256=CfTLCHRnDyicJS3-cioDM5ZWZQq8SEi6z3BEDfnQ9Hs,1913
  agenta/client/backend/types/create_trace_response.py,sha256=FO-Ii9JEn2AQ1nmZYmjnKRbACsNxRvY_-xn7Ys7Yo8A,1012
  agenta/client/backend/types/docker_env_vars.py,sha256=altCvA1k-zdAkKNYLwaCnmV48HZg9cwe2cHu_BGImac,986
  agenta/client/backend/types/environment_output.py,sha256=dl0GKodeqB7kWK5mH6Y4iBppkpwRzSTmtkXH1II4L6w,1257
@@ -85,7 +85,7 @@ agenta/client/backend/types/image.py,sha256=p7Vmp7HlMV3YyXe8SFdXYJjCbPNIypW6NfVG
  agenta/client/backend/types/invite_request.py,sha256=1nJTUHspzw2WYpUSd4UUtRnjDHM-dqDBvYewgw-hCQE,993
  agenta/client/backend/types/list_api_keys_response.py,sha256=ZNh7jKwHEMKNp8OV5WJ5XxtKn39DwqK1f8vlFKl54x4,1097
  agenta/client/backend/types/llm_run_rate_limit.py,sha256=mfT4lTczPxrJvd8ZCOAjPvw58QoM151p_uZT0PWNOJ4,1045
- agenta/client/backend/types/llm_tokens.py,sha256=J236Fgmz5TeFO0MQA1ZA1QozvR6d3kt8aEUyWkq3jLI,1070
+ agenta/client/backend/types/llm_tokens.py,sha256=nDaiJRZs1jvsXVDDoc-CXGNLUP8hRhioY37P1CGEav0,1082
  agenta/client/backend/types/new_human_evaluation.py,sha256=lIgMjVccSp22RRfMGGLH4-yKjMtJeQvjhlwX9EtAxmY,1150
  agenta/client/backend/types/new_testset.py,sha256=9NOC1-f_UZASy4ptzidLNcRU6Odq609ayvSQxEva-40,1009
  agenta/client/backend/types/organization.py,sha256=vJf6Gbz8WCnqabPQmt_t_gfrWPpuvTXgTxKCJKJsrmc,1218
@@ -126,22 +126,22 @@ agenta/docker/docker-assets/entrypoint.sh,sha256=29XK8VQjQsx4hN2j-4JDy-6kQb5y4LC
  agenta/docker/docker-assets/lambda_function.py,sha256=h4UZSSfqwpfsCgERv6frqwm_4JrYu9rLz3I-LxCfeEg,83
  agenta/docker/docker-assets/main.py,sha256=7MI-21n81U7N7A0GxebNi0cmGWtJKcR2sPB6FcH2QfA,251
  agenta/docker/docker_utils.py,sha256=5uHMCzXkCvIsDdEiwbnnn97KkzsFbBvyMwogCsv_Z5U,3509
- agenta/sdk/__init__.py,sha256=cF0de6DiH-NZWEm0XvPN8_TeC1whPBnDf1WYYE1qK2g,762
+ agenta/sdk/__init__.py,sha256=ewYNjm6AHlqkIrPfX2D_pXZMwShOdhEUcWXb7xGA2bk,769
  agenta/sdk/agenta_init.py,sha256=8MfDuypxohd0qRTdtGjX7L17KW-1UGmzNVdiqF15_ak,9790
  agenta/sdk/client.py,sha256=trKyBOYFZRk0v5Eptxvh87yPf50Y9CqY6Qgv4Fy-VH4,2142
  agenta/sdk/context.py,sha256=q-PxL05-I84puunUAs9LGsffEXcYhDxhQxjuOz2vK90,901
  agenta/sdk/decorators/base.py,sha256=9aNdX5h8a2mFweuhdO-BQPwXGKY9ONPIdLRhSGAGMfY,217
- agenta/sdk/decorators/llm_entrypoint.py,sha256=umcniOOQfKVdPgHVb_jRhoGKei14Yc3PIZmEC8CU2Wg,22996
- agenta/sdk/decorators/tracing.py,sha256=c9LwQJkhJcyO7Uq-sNpDSwfwOUTAmlqNuJjz4bSx-k0,6172
+ agenta/sdk/decorators/llm_entrypoint.py,sha256=cyXUHJ-cldbXmBOo3CF5XTSz_Pr2vAA5Inv5LHdigyE,26910
+ agenta/sdk/decorators/tracing.py,sha256=BzXa_2b2VvbO5ZNTh126OkL3LFq8dMK-QFug8BHUBKA,2687
  agenta/sdk/router.py,sha256=0sbajvn5C7t18anH6yNo7-oYxldHnYfwcbmQnIXBePw,269
  agenta/sdk/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agenta/sdk/tracing/callbacks.py,sha256=0rqkW-PGZSaNSMp_t4bGI7R9HQRgTyIy0gxmpGVJWpE,6915
  agenta/sdk/tracing/context_manager.py,sha256=HskDaiORoOhjeN375gm05wYnieQzh5UnoIsnSAHkAyc,252
- agenta/sdk/tracing/llm_tracing.py,sha256=OL9OZ9sKv58hAsSxf33E-VYFx2ZeJpr9jqe3Hvz1CX8,13706
+ agenta/sdk/tracing/llm_tracing.py,sha256=VZskBPxa4S-0810KhsTnSy7XdrG3ovcpsKjfxQEGUx8,17167
  agenta/sdk/tracing/logger.py,sha256=GfH7V-jBHcn7h5dbdrnkDMe_ml3wkXFBeoQiqR4KVRc,474
- agenta/sdk/tracing/tasks_manager.py,sha256=ROrWIaqS2J2HHiJtRWiHKlLY8CCsqToP5VeXu7mamck,3748
- agenta/sdk/tracing/tracing_context.py,sha256=EOi1zfqpb2cBjhBtHIphUkVHi4jiWes-RRNdbgk1kMc,906
- agenta/sdk/types.py,sha256=KMnQUOdjaHSWctDLIiMHnk0o3c-C47Vm4Mn2kIZ88YI,5740
+ agenta/sdk/tracing/tasks_manager.py,sha256=FBSFOWIKBycyA4ShB2ZVMzrzYQ8pWGWWBClFX8nlZFA,3726
+ agenta/sdk/tracing/tracing_context.py,sha256=dTbsBMbIAmSOaWIxSfhKHOB1JjrAZ0eSHAlfzZs08Z4,891
+ agenta/sdk/types.py,sha256=1rVy8ob-rTOrIFcSSseXrt0JQtqqhlJfVgVxCB2ErCk,5754
  agenta/sdk/utils/debug.py,sha256=QyuPsSoN0425UD13x_msPxSF_VT6YwHiQunZUibI-jg,2149
  agenta/sdk/utils/globals.py,sha256=JmhJcCOSbwvjQ6GDyUc2_SYR27DZk7YcrRH80ktHHOM,435
  agenta/sdk/utils/helper/openai_cost.py,sha256=1VkgvucDnNZm1pTfcVLz9icWunntp1d7zwMmnviy3Uw,5877
@@ -161,7 +161,7 @@ agenta/templates/simple_prompt/app.py,sha256=kODgF6lhzsaJPdgL5b21bUki6jkvqjWZzWR
  agenta/templates/simple_prompt/env.example,sha256=g9AE5bYcGPpxawXMJ96gh8oenEPCHTabsiOnfQo3c5k,70
  agenta/templates/simple_prompt/requirements.txt,sha256=ywRglRy7pPkw8bljmMEJJ4aOOQKrt9FGKULZ-DGkoBU,23
  agenta/templates/simple_prompt/template.toml,sha256=DQBtRrF4GU8LBEXOZ-GGuINXMQDKGTEG5y37tnvIUIE,60
- agenta-0.19.8a0.dist-info/METADATA,sha256=SJjoIarkgSM03m_L3eZ0_xLTZwYNRHr6JbO0bj4r4WA,26462
- agenta-0.19.8a0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- agenta-0.19.8a0.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
- agenta-0.19.8a0.dist-info/RECORD,,
+ agenta-0.20.0a0.dist-info/METADATA,sha256=xMd6YzEzMtRW5i6uO62QRRePYN0r91gLpIgaF8QrzIE,26462
+ agenta-0.20.0a0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ agenta-0.20.0a0.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
+ agenta-0.20.0a0.dist-info/RECORD,,