agenta 0.19.9__py3-none-any.whl → 0.20.0a1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of agenta might be problematic.
agenta/__init__.py CHANGED
@@ -17,7 +17,7 @@ from .sdk.types import (
 from .sdk.tracing.logger import llm_logger as logging
 from .sdk.tracing.llm_tracing import Tracing
 from .sdk.decorators.tracing import instrument
-from .sdk.decorators.llm_entrypoint import entrypoint, app
+from .sdk.decorators.llm_entrypoint import entrypoint, app, route
 from .sdk.agenta_init import Config, AgentaSingleton, init
 from .sdk.utils.helper.openai_cost import calculate_token_usage
 from .sdk.client import Agenta
@@ -20,7 +20,10 @@ class CreateSpan(pydantic.BaseModel):
     variant_id: typing.Optional[str]
     variant_name: typing.Optional[str]
     inputs: typing.Optional[typing.Dict[str, typing.Any]]
-    outputs: typing.Optional[typing.List[str]]
+    internals: typing.Optional[typing.Dict[str, typing.Any]]
+    outputs: typing.Optional[
+        typing.Union[typing.Dict[str, typing.Any], typing.List[str]]
+    ]
     config: typing.Optional[typing.Dict[str, typing.Any]]
     environment: typing.Optional[str]
     tags: typing.Optional[typing.List[str]]
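
For context, this is what the widened `outputs` field accepts after the change. The snippet below is a minimal standalone sketch (fields trimmed to the ones visible in this hunk, with explicit `None` defaults added for self-containment), not the generated client model itself:

```python
import typing

import pydantic


class CreateSpan(pydantic.BaseModel):
    # Trimmed to the fields shown in the hunk above.
    variant_id: typing.Optional[str] = None
    inputs: typing.Optional[typing.Dict[str, typing.Any]] = None
    internals: typing.Optional[typing.Dict[str, typing.Any]] = None
    outputs: typing.Optional[
        typing.Union[typing.Dict[str, typing.Any], typing.List[str]]
    ] = None


# 0.19.9 only validated a list of strings; 0.20.0a1 also accepts a dict,
# plus the new `internals` field for intermediate values.
CreateSpan(outputs=["first chunk", "second chunk"])
CreateSpan(outputs={"message": "hello"}, internals={"retrieved": ["doc-1"]})
```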
@@ -12,9 +12,9 @@ except ImportError:
 
 
 class LlmTokens(pydantic.BaseModel):
-    prompt_tokens: typing.Optional[int]
-    completion_tokens: typing.Optional[int]
-    total_tokens: typing.Optional[int]
+    prompt_tokens: typing.Optional[int] = 0
+    completion_tokens: typing.Optional[int] = 0
+    total_tokens: typing.Optional[int] = 0
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {
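
The effect of the new field defaults, shown on a standalone copy of the model (under pydantic v1 semantics these `Optional` fields previously defaulted to `None`; they now default to `0`):

```python
import typing

import pydantic


class LlmTokens(pydantic.BaseModel):
    prompt_tokens: typing.Optional[int] = 0
    completion_tokens: typing.Optional[int] = 0
    total_tokens: typing.Optional[int] = 0


# A bare construction now yields zeroed counters instead of Nones, so
# downstream arithmetic on token counts no longer needs None checks.
tokens = LlmTokens()
print(tokens.prompt_tokens + tokens.completion_tokens)  # 0
```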
agenta/sdk/__init__.py CHANGED
@@ -16,7 +16,7 @@ from .types import (
 
 from .tracing.llm_tracing import Tracing
 from .decorators.tracing import instrument
-from .decorators.llm_entrypoint import entrypoint, app
+from .decorators.llm_entrypoint import entrypoint, app, route
 from .agenta_init import Config, AgentaSingleton, init
 from .utils.helper.openai_cost import calculate_token_usage
 
@@ -3,6 +3,7 @@
 import os
 import sys
 import time
+import json
 import inspect
 import argparse
 import asyncio
@@ -15,11 +16,11 @@ from typing import Any, Callable, Dict, Optional, Tuple, List
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi import Body, FastAPI, UploadFile, HTTPException
 
-import agenta
+import agenta as ag
 from agenta.sdk.context import save_context
 from agenta.sdk.router import router as router
 from agenta.sdk.tracing.logger import llm_logger as logging
-from agenta.sdk.tracing.llm_tracing import Tracing
+from agenta.sdk.tracing.tracing_context import tracing_context, TracingContext
 from agenta.sdk.decorators.base import BaseDecorator
 from agenta.sdk.types import (
     Context,
@@ -32,10 +33,12 @@ from agenta.sdk.types import (
     TextParam,
     MessagesInput,
     FileInputURL,
-    FuncResponse,
+    BaseResponse,
     BinaryParam,
 )
 
+from pydantic import BaseModel, HttpUrl
+
 app = FastAPI()
 
 origins = [
@@ -52,16 +55,56 @@ app.add_middleware(
 
 app.include_router(router, prefix="")
 
-
 from agenta.sdk.utils.debug import debug, DEBUG, SHIFT
 
 
 logging.setLevel("DEBUG")
 
 
+class PathValidator(BaseModel):
+    url: HttpUrl
+
+
+class route(BaseDecorator):
+    # This decorator is used to expose specific stages of a workflow (embedding, retrieval, summarization, etc.)
+    # as independent endpoints. It is designed for backward compatibility with existing code that uses
+    # the @entrypoint decorator, which has certain limitations. By using @route(), we can create new
+    # routes without altering the main workflow entrypoint. This helps in modularizing the services
+    # and provides flexibility in how we expose different functionalities as APIs.
+    def __init__(self, path):
+        path = "/" + path.strip("/").strip()
+        path = "" if path == "/" else path
+
+        PathValidator(url=f"http://example.com{path}")
+
+        self.route_path = path
+
+    def __call__(self, f):
+        self.e = entrypoint(f, route_path=self.route_path)
+
+        return f
+
+
 class entrypoint(BaseDecorator):
-    """Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing.
+    """
+    Decorator class to wrap a function for HTTP POST, terminal exposure and enable tracing.
+
+    This decorator generates the following endpoints:
+
+    Playground Endpoints
+    - /generate with @entrypoint, @route("/"), @route(path="")  # LEGACY
+    - /playground/run with @entrypoint, @route("/"), @route(path="")
+    - /playground/run/{route} with @route({route}), @route(path={route})
+
+    Deployed Endpoints:
+    - /generate_deployed with @entrypoint, @route("/"), @route(path="")  # LEGACY
+    - /run with @entrypoint, @route("/"), @route(path="")
+    - /run/{route} with @route({route}), @route(path={route})
 
+    The rationale is:
+    - There may be multiple endpoints, based on the different routes.
+    - It's better to make it explicit that an endpoint is for the playground.
+    - Prefixing the routes with /run is more futureproof in case we add more endpoints.
 
     Example:
     ```python
@@ -73,31 +116,64 @@ class entrypoint(BaseDecorator):
     ```
     """
 
-    def __init__(self, func: Callable[..., Any]):
-        endpoint_name = "generate"
+    routes = list()
+
+    def __init__(self, func: Callable[..., Any], route_path=""):
+        DEFAULT_PATH = "generate"
+        PLAYGROUND_PATH = "/playground"
+        RUN_PATH = "/run"
+
         func_signature = inspect.signature(func)
-        config_params = agenta.config.all()
+        config_params = ag.config.all()
         ingestible_files = self.extract_ingestible_files(func_signature)
 
+        ### --- Playground --- #
         @debug()
         @functools.wraps(func)
         async def wrapper(*args, **kwargs) -> Any:
             func_params, api_config_params = self.split_kwargs(kwargs, config_params)
             self.ingest_files(func_params, ingestible_files)
-            agenta.config.set(**api_config_params)
+            ag.config.set(**api_config_params)
 
             # Set the configuration and environment of the LLM app parent span at run-time
-            agenta.tracing.update_baggage(
+            ag.tracing.update_baggage(
                 {"config": config_params, "environment": "playground"}
             )
 
-            # Exceptions are all handled inside self.execute_function()
-            llm_result = await self.execute_function(
+            entrypoint_result = await self.execute_function(
                 func, *args, params=func_params, config_params=config_params
            )
 
-            return llm_result
+            return entrypoint_result
+
+        self.update_function_signature(
+            wrapper, func_signature, config_params, ingestible_files
+        )
+
+        #
+        if route_path == "":
+            route = f"/{DEFAULT_PATH}"
+            app.post(route, response_model=BaseResponse)(wrapper)
+            entrypoint.routes.append(
+                {
+                    "func": func.__name__,
+                    "endpoint": DEFAULT_PATH,
+                    "params": {**config_params, **func_signature.parameters},
+                }
+            )
+
+        route = f"{PLAYGROUND_PATH}{RUN_PATH}{route_path}"
+        app.post(route, response_model=BaseResponse)(wrapper)
+        entrypoint.routes.append(
+            {
+                "func": func.__name__,
+                "endpoint": route[1:].replace("/", "_"),
+                "params": {**config_params, **func_signature.parameters},
+            }
+        )
+        ### ---------------------------- #
 
+        ### --- Deployed / Published --- #
         @debug()
         @functools.wraps(func)
         async def wrapper_deployed(*args, **kwargs) -> Any:
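
To make the new routing surface concrete, here is a hedged usage sketch based on the `route` decorator and the `entrypoint` docstring above. The app setup (`ag.init`, `ag.config.default`) and the function bodies are illustrative placeholders, not code from this diff:

```python
import agenta as ag

ag.init()
ag.config.default(temperature=ag.FloatParam(0.2))


@ag.entrypoint
async def generate(prompt: str) -> str:
    # Registered (per the docstring above) at:
    #   POST /generate          -- legacy playground endpoint
    #   POST /playground/run    -- new playground endpoint
    #   POST /run               -- deployed endpoint
    return f"echo: {prompt}"


@ag.route("/summarize")
async def summarize(text: str) -> str:
    # Registered at POST /playground/run/summarize and POST /run/summarize;
    # the path is sanity-checked by PathValidator via a throwaway HttpUrl.
    return text[:100]
```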
@@ -106,44 +182,51 @@ class entrypoint(BaseDecorator):
             }
 
             if "environment" in kwargs and kwargs["environment"] is not None:
-                agenta.config.pull(environment_name=kwargs["environment"])
+                ag.config.pull(environment_name=kwargs["environment"])
             elif "config" in kwargs and kwargs["config"] is not None:
-                agenta.config.pull(config_name=kwargs["config"])
+                ag.config.pull(config_name=kwargs["config"])
             else:
-                agenta.config.pull(config_name="default")
+                ag.config.pull(config_name="default")
 
             # Set the configuration and environment of the LLM app parent span at run-time
-            agenta.tracing.update_baggage(
+            ag.tracing.update_baggage(
                 {"config": config_params, "environment": kwargs["environment"]}
             )
 
-            llm_result = await self.execute_function(
+            entrypoint_result = await self.execute_function(
                 func, *args, params=func_params, config_params=config_params
             )
 
-            return llm_result
-
-        self.update_function_signature(
-            wrapper, func_signature, config_params, ingestible_files
-        )
-        route = f"/{endpoint_name}"
-        app.post(route, response_model=FuncResponse)(wrapper)
+            return entrypoint_result
 
         self.update_deployed_function_signature(
             wrapper_deployed,
             func_signature,
             ingestible_files,
         )
-        route_deployed = f"/{endpoint_name}_deployed"
-        app.post(route_deployed, response_model=FuncResponse)(wrapper_deployed)
-        self.override_schema(
-            openapi_schema=app.openapi(),
-            func_name=func.__name__,
-            endpoint=endpoint_name,
-            params={**config_params, **func_signature.parameters},
-        )
 
-        if self.is_main_script(func):
+        if route_path == "/":
+            route_deployed = f"/{DEFAULT_PATH}_deployed"
+            app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed)
+
+        route_deployed = f"{RUN_PATH}{route_path}"
+        app.post(route_deployed, response_model=BaseResponse)(wrapper_deployed)
+        ### ---------------------------- #
+
+        ### --- Update OpenAPI --- #
+        app.openapi_schema = None  # Forces FastAPI to re-generate the schema
+        openapi_schema = app.openapi()
+
+        for route in entrypoint.routes:
+            self.override_schema(
+                openapi_schema=openapi_schema,
+                func=route["func"],
+                endpoint=route["endpoint"],
+                params=route["params"],
+            )
+        ### ---------------------- #
+
+        if self.is_main_script(func) and route_path == "":
             self.handle_terminal_run(
                 func,
                 func_signature.parameters,  # type: ignore
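
One detail worth noting in this hunk: FastAPI caches the schema in `app.openapi_schema` on the first `openapi()` call, so routes registered by later decorator invocations would be missing without the reset. A small plain-FastAPI sketch of that caching behaviour (the route names here are illustrative):

```python
from fastapi import FastAPI

app = FastAPI()


@app.post("/run")
def run() -> dict:
    return {}


first = app.openapi()  # builds and caches the schema


@app.post("/run/summarize")  # registered after the schema was cached
def run_summarize() -> dict:
    return {}


assert "/run/summarize" not in first["paths"]  # stale cached schema

app.openapi_schema = None  # what the decorator does before re-reading
second = app.openapi()  # re-generated with every registered route

assert "/run/summarize" in second["paths"]
```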
@@ -198,51 +281,68 @@ class entrypoint(BaseDecorator):
         For synchronous functions, it calls them directly, while for asynchronous functions,
         it awaits their execution.
         """
+            WAIT_FOR_SPANS = True
+            TIMEOUT = 10
+            TIMESTEP = 0.01
+            NOFSTEPS = TIMEOUT / TIMESTEP
+
+            data = None
+            trace = None
+
+            token = None
+            if tracing_context.get() is None:
+                token = tracing_context.set(TracingContext())
+
             is_coroutine_function = inspect.iscoroutinefunction(func)
-            start_time = time.perf_counter()
 
             if is_coroutine_function:
                 result = await func(*args, **func_params["params"])
             else:
                 result = func(*args, **func_params["params"])
 
-            end_time = time.perf_counter()
-            latency = end_time - start_time
+            if token is not None:
+                if WAIT_FOR_SPANS:
+                    remaining_steps = NOFSTEPS
+
+                    while not ag.tracing.is_trace_ready() and remaining_steps > 0:
+                        await asyncio.sleep(0.01)
+                        remaining_steps -= 1
+
+                    trace = ag.tracing.dump_trace()
+                    ag.tracing.flush_spans()
+                tracing_context.reset(token)
 
             if isinstance(result, Context):
                 save_context(result)
+
             if isinstance(result, Dict):
-                return FuncResponse(**result, latency=round(latency, 4))
-            if isinstance(result, str):
-                return FuncResponse(
-                    message=result, usage=None, cost=None, latency=round(latency, 4)
-                )
-            if isinstance(result, int) or isinstance(result, float):
-                return FuncResponse(
-                    message=str(result),
-                    usage=None,
-                    cost=None,
-                    latency=round(latency, 4),
-                )
-            if result is None:
-                return FuncResponse(
-                    message="Function executed successfully, but did return None. \n Are you sure you did not forget to return a value?",
-                    usage=None,
-                    cost=None,
-                    latency=round(latency, 4),
+                data = result
+            elif isinstance(result, str):
+                data = {"message": result}
+            elif isinstance(result, int) or isinstance(result, float):
+                data = {"message": str(result)}
+
+            if data is None:
+                warning = (
+                    "Function executed successfully, but did return None. \n Are you sure you did not forget to return a value?",
                 )
+
+                data = {"message": warning}
+
+            return BaseResponse(data=data, trace=trace)
+
         except Exception as e:
             self.handle_exception(e)
-            return FuncResponse(message="Unexpected error occurred when calling the @entrypoint decorated function", latency=0)  # type: ignore
 
     def handle_exception(self, e: Exception):
-        """Handle exceptions."""
+        status_code = e.status_code if hasattr(e, "status_code") else 500
+        message = str(e)
+        stacktrace = traceback.format_exception(e, value=e, tb=e.__traceback__)  # type: ignore
+        detail = {"message": message, "stacktrace": stacktrace}
 
-        status_code: int = e.status_code if hasattr(e, "status_code") else 500
-        traceback_str = traceback.format_exception(e, value=e, tb=e.__traceback__)  # type: ignore
         raise HTTPException(
             status_code=status_code,
-            detail={"error": str(e), "traceback": "".join(traceback_str)},
+            detail=detail,
         )
 
     def update_wrapper_signature(
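
The visible API change in this hunk is the response model: `FuncResponse` carried `message`/`cost`/`usage`/`latency` at the top level, while `BaseResponse` nests the function output under `data` and attaches the dumped trace. A sketch of the new payload shape; the field types here are inferred from `BaseResponse(data=data, trace=trace)` above, not copied from `agenta.sdk.types`:

```python
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel


class BaseResponse(BaseModel):
    # Inferred shape: `data` holds the function result (a dict, or a
    # {"message": ...} wrapper for scalars), `trace` the dumped spans.
    data: Optional[Union[str, Dict[str, Any]]] = None
    trace: Optional[Dict[str, Any]] = None


# What execute_function would return for a string-returning workflow:
resp = BaseResponse(data={"message": "hello"}, trace={"spans": []})
print(resp)
```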
@@ -418,26 +518,29 @@ class entrypoint(BaseDecorator):
                     file_path=args_func_params[name],
                 )
 
-        agenta.config.set(**args_config_params)
+        ag.config.set(**args_config_params)
 
         # Set the configuration and environment of the LLM app parent span at run-time
-        agenta.tracing.update_baggage(
-            {"config": agenta.config.all(), "environment": "bash"}
-        )
+        ag.tracing.update_baggage({"config": ag.config.all(), "environment": "bash"})
 
         loop = asyncio.get_event_loop()
+
         result = loop.run_until_complete(
             self.execute_function(
                 func,
                 **{"params": args_func_params, "config_params": args_config_params},
             )
         )
-        print(
-            f"\n========== Result ==========\n\nMessage: {result.message}\nCost: {result.cost}\nToken Usage: {result.usage}"
-        )
+
+        print(f"\n========== Result ==========\n")
+
+        print("-> data")
+        print(json.dumps(result.data, indent=2))
+        print("-> trace")
+        print(json.dumps(result.trace, indent=2))
 
     def override_schema(
-        self, openapi_schema: dict, func_name: str, endpoint: str, params: dict
+        self, openapi_schema: dict, func: str, endpoint: str, params: dict
     ):
         """
         Overrides the default openai schema generated by fastapi with additional information about:
@@ -452,7 +555,7 @@ class entrypoint(BaseDecorator):
 
         Args:
             openapi_schema (dict): The openapi schema generated by fastapi
-            func_name (str): The name of the function to override
+            func (str): The name of the function to override
             endpoint (str): The name of the endpoint to override
             params (dict(param_name, param_val)): The dictionary of the parameters for the function
         """
@@ -484,8 +587,29 @@ class entrypoint(BaseDecorator):
            # value = {'temperature': { "type": "number", "title": "Temperature", "x-parameter": "float" }}
            return value
 
+        def get_type_from_param(param_val):
+            param_type = "string"
+            annotation = param_val.annotation
+
+            if annotation == int:
+                param_type = "integer"
+            elif annotation == float:
+                param_type = "number"
+            elif annotation == dict:
+                param_type = "object"
+            elif annotation == bool:
+                param_type = "boolean"
+            elif annotation == list:
+                param_type = "list"
+            elif annotation == str:
+                param_type = "string"
+            else:
+                print("ERROR, unhandled annotation:", annotation)
+
+            return param_type
+
         schema_to_override = openapi_schema["components"]["schemas"][
-            f"Body_{func_name}_{endpoint}_post"
+            f"Body_{func}_{endpoint}_post"
         ]["properties"]
         for param_name, param_val in params.items():
             if isinstance(param_val, GroupedMultipleChoiceParam):
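
The new fallback helper maps plain Python annotations to (mostly) OpenAPI type names; note that `list` maps to `"list"` rather than the OpenAPI-standard `"array"`, and anything unrecognized falls back to `"string"` after printing an error. A standalone equivalent for experimentation (the dict-based rewrite is illustrative, not the code above):

```python
import inspect


def get_type_from_param(param_val: inspect.Parameter) -> str:
    # Same mapping as the helper above, condensed into a dict lookup;
    # unrecognized annotations fall back to "string".
    mapping = {int: "integer", float: "number", dict: "object",
               bool: "boolean", list: "list", str: "string"}
    return mapping.get(param_val.annotation, "string")


def example(count: int, ratio: float, active: bool, name: str):
    ...


for name, param in inspect.signature(example).parameters.items():
    print(name, "->", get_type_from_param(param))
# count -> integer, ratio -> number, active -> boolean, name -> string
```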
@@ -501,7 +625,7 @@ class entrypoint(BaseDecorator):
                 subschema["choices"] = param_val.choices  # type: ignore
                 subschema["default"] = param_val.default  # type: ignore
 
-            if isinstance(param_val, MultipleChoiceParam):
+            elif isinstance(param_val, MultipleChoiceParam):
                 subschema = find_in_schema(
                     param_val.__schema_type_properties__(),
                     schema_to_override,
520
644
  default if default in param_choices else choices[0]
521
645
  )
522
646
 
523
- if isinstance(param_val, FloatParam):
647
+ elif isinstance(param_val, FloatParam):
524
648
  subschema = find_in_schema(
525
649
  param_val.__schema_type_properties__(),
526
650
  schema_to_override,
@@ -531,7 +655,7 @@ class entrypoint(BaseDecorator):
531
655
  subschema["maximum"] = param_val.maxval # type: ignore
532
656
  subschema["default"] = param_val
533
657
 
534
- if isinstance(param_val, IntParam):
658
+ elif isinstance(param_val, IntParam):
535
659
  subschema = find_in_schema(
536
660
  param_val.__schema_type_properties__(),
537
661
  schema_to_override,
@@ -542,7 +666,7 @@ class entrypoint(BaseDecorator):
542
666
  subschema["maximum"] = param_val.maxval # type: ignore
543
667
  subschema["default"] = param_val
544
668
 
545
- if (
669
+ elif (
546
670
  isinstance(param_val, inspect.Parameter)
547
671
  and param_val.annotation is DictInput
548
672
  ):
@@ -554,7 +678,7 @@ class entrypoint(BaseDecorator):
554
678
  )
555
679
  subschema["default"] = param_val.default["default_keys"]
556
680
 
557
- if isinstance(param_val, TextParam):
681
+ elif isinstance(param_val, TextParam):
558
682
  subschema = find_in_schema(
559
683
  param_val.__schema_type_properties__(),
560
684
  schema_to_override,
@@ -563,7 +687,7 @@ class entrypoint(BaseDecorator):
563
687
  )
564
688
  subschema["default"] = param_val
565
689
 
566
- if (
690
+ elif (
567
691
  isinstance(param_val, inspect.Parameter)
568
692
  and param_val.annotation is MessagesInput
569
693
  ):
@@ -575,7 +699,7 @@ class entrypoint(BaseDecorator):
575
699
  )
576
700
  subschema["default"] = param_val.default
577
701
 
578
- if (
702
+ elif (
579
703
  isinstance(param_val, inspect.Parameter)
580
704
  and param_val.annotation is FileInputURL
581
705
  ):
@@ -587,7 +711,7 @@ class entrypoint(BaseDecorator):
587
711
  )
588
712
  subschema["default"] = "https://example.com"
589
713
 
590
- if isinstance(param_val, BinaryParam):
714
+ elif isinstance(param_val, BinaryParam):
591
715
  subschema = find_in_schema(
592
716
  param_val.__schema_type_properties__(),
593
717
  schema_to_override,
@@ -595,3 +719,11 @@ class entrypoint(BaseDecorator):
                     "bool",
                 )
                 subschema["default"] = param_val.default  # type: ignore
+            else:
+                subschema = {
+                    "title": str(param_name).capitalize(),
+                    "type": get_type_from_param(param_val),
+                }
+                if param_val.default != inspect._empty:
+                    subschema["default"] = param_val.default  # type: ignore
+                schema_to_override[param_name] = subschema
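
So for a plain function parameter that none of the typed-parameter branches claim, say `count: int = 3`, the new `else` branch would write a property roughly like this into the body schema (illustrative values):

```python
# Illustrative result of the fallback branch for a `count: int = 3` parameter:
subschema = {
    "title": "Count",   # str(param_name).capitalize()
    "type": "integer",  # get_type_from_param(param_val)
    "default": 3,       # only set when the parameter has a default
}
print(subschema)
```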