agenta 0.32.0__py3-none-any.whl → 0.32.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of agenta has been flagged as potentially problematic; consult the registry's advisory page for details.

agenta/sdk/types.py CHANGED
@@ -6,6 +6,9 @@ from pydantic import ConfigDict, BaseModel, HttpUrl
6
6
 
7
7
  from agenta.client.backend.types.agenta_node_dto import AgentaNodeDto
8
8
  from agenta.client.backend.types.agenta_nodes_response import AgentaNodesResponse
9
+ from typing import Annotated, List, Union, Optional, Dict, Literal, Any
10
+ from pydantic import BaseModel, Field, model_validator
11
+ from agenta.sdk.assets import supported_llm_models
9
12
 
10
13
 
11
14
  @dataclass
@@ -13,10 +16,17 @@ class MultipleChoice:
13
16
  choices: Union[List[str], Dict[str, List[str]]]
14
17
 
15
18
 
16
- class InFile:
17
- def __init__(self, file_name: str, file_path: str):
18
- self.file_name = file_name
19
- self.file_path = file_path
19
def MCField(  # pylint: disable=invalid-name
    default: str,
    choices: Union[List[str], Dict[str, List[str]]],
    description: str = "ID of the model to use",
) -> Any:
    """Build a pydantic ``Field`` carrying multiple-choice metadata.

    Args:
        default: Default value for the field.
        choices: Either a flat list of options, or a dict mapping group
            names to option lists (rendered as a grouped choice).
        description: Human-readable field description. Defaults to the
            historical hard-coded text for backward compatibility.

    Returns:
        The ``FieldInfo`` produced by ``pydantic.Field`` (annotated ``Any``
        because ``Field`` is a function, not a type).
    """
    field = Field(default=default, description=description)
    # "x-parameter" tells the Agenta playground which widget to render.
    if isinstance(choices, dict):
        field.json_schema_extra = {"choices": choices, "x-parameter": "grouped_choice"}
    elif isinstance(choices, list):
        field.json_schema_extra = {"choices": choices, "x-parameter": "choice"}

    return field
20
30
 
21
31
 
22
32
  class LLMTokenUsage(BaseModel):
@@ -28,7 +38,11 @@ class LLMTokenUsage(BaseModel):
28
38
class BaseResponse(BaseModel):
    """Standard response envelope returned by SDK routes."""

    # Envelope schema version.
    version: Optional[str] = "3.0"
    # Primary payload: plain string or structured dict.
    data: Optional[Union[str, Dict[str, Any]]] = None
    # Hint for how `data` should be interpreted by consumers.
    content_type: Optional[str] = "string"
    # Optional tracing tree attached to the response.
    tree: Optional[AgentaNodesResponse] = None
    tree_id: Optional[str] = None

    # NOTE(review): `exclude_none` is not a documented pydantic ConfigDict
    # key (it is a serialization-call argument) — confirm it has any effect.
    model_config = ConfigDict(use_enum_values=True, exclude_none=True)
32
46
 
33
47
 
34
48
  class DictInput(dict):
@@ -248,3 +262,319 @@ class Prompt(BaseModel):
248
262
  top_p: float
249
263
  frequency_penalty: float
250
264
  presence_penalty: float
265
+
266
+
267
+ # -----------------------------------------------------
268
+ # New Prompt model
269
+ # -----------------------------------------------------
270
+
271
+
272
class ToolCall(BaseModel):
    """A single tool invocation requested by the model."""

    id: str
    # Only "function" tools are supported.
    type: Literal["function"] = "function"
    # Function payload — presumably {"name": ..., "arguments": ...} per the
    # OpenAI tool-call shape; TODO confirm against callers.
    function: Dict[str, str]
276
+
277
+
278
class Message(BaseModel):
    """A single chat message in OpenAI chat-completion style."""

    role: Literal["system", "user", "assistant", "tool", "function"]
    # Text content; None allowed (e.g. a message carrying only tool_calls).
    content: Optional[str] = None
    # Optional author name.
    name: Optional[str] = None
    # Tool invocations attached to an assistant message.
    tool_calls: Optional[List[ToolCall]] = None
    # For tool responses: id of the tool call being answered.
    tool_call_id: Optional[str] = None
284
+
285
+
286
class ResponseFormatText(BaseModel):
    """Response format requesting plain-text model output."""

    type: Literal["text"]
    """The type of response format being defined: `text`"""
289
+
290
+
291
class ResponseFormatJSONObject(BaseModel):
    """Response format requesting a free-form JSON object."""

    type: Literal["json_object"]
    """The type of response format being defined: `json_object`"""
294
+
295
+
296
class JSONSchema(BaseModel):
    """Named JSON Schema used with the `json_schema` response format."""

    name: str
    """The name of the response format."""
    description: Optional[str] = None
    """A description of what the response format is for."""
    # Aliased so the wire format uses "schema" while avoiding a clash with
    # BaseModel's own `schema` attribute.
    schema_: Optional[Dict[str, object]] = Field(alias="schema", default=None)
    """The schema for the response format, described as a JSON Schema object."""
    strict: Optional[bool] = None
    """Whether to enable strict schema adherence."""

    # populate_by_name lets callers pass either `schema` or `schema_`;
    # json_schema_extra forces both keys into the emitted schema's `required`.
    model_config = {
        "populate_by_name": True,
        "json_schema_extra": {"required": ["name", "schema"]},
    }
310
+
311
+
312
class ResponseFormatJSONSchema(BaseModel):
    """Structured-output response format constrained by a JSON Schema."""

    type: Literal["json_schema"]
    """The type of response format being defined: `json_schema`"""
    json_schema: JSONSchema
316
+
317
+
318
# One of the three OpenAI-style response formats. Union member order is
# preserved deliberately — it can influence pydantic validation.
ResponseFormat = Union[
    ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
]
321
+
322
+
323
class ModelConfig(BaseModel):
    """Configuration for model parameters"""

    # Multiple-choice field: the playground renders `supported_llm_models`
    # as grouped choices (see MCField).
    model: str = MCField(
        default="gpt-3.5-turbo",
        choices=supported_llm_models,
    )

    temperature: Optional[float] = Field(
        default=None,
        ge=0.0,
        le=2.0,
        description="What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic",
    )
    # NOTE(review): le=4000 caps max_tokens below what many current models
    # allow — confirm the bound is intentional.
    max_tokens: Optional[int] = Field(
        default=None,
        ge=0,
        le=4000,
        description="The maximum number of tokens that can be generated in the chat completion",
    )
    top_p: Optional[float] = Field(
        default=None,
        ge=0.0,
        le=1.0,
        description="An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass",
    )
    frequency_penalty: Optional[float] = Field(
        default=None,
        ge=-2.0,
        le=2.0,
        description="Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far",
    )
    presence_penalty: Optional[float] = Field(
        default=None,
        ge=-2.0,
        le=2.0,
        description="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far",
    )
    response_format: Optional[ResponseFormat] = Field(
        default=None,
        description="An object specifying the format that the model must output",
    )
    stream: Optional[bool] = Field(
        default=None, description="If set, partial message deltas will be sent"
    )
    tools: Optional[List[Dict]] = Field(
        default=None,
        description="A list of tools the model may call. Currently, only functions are supported as a tool",
    )
    tool_choice: Optional[Union[Literal["none", "auto"], Dict]] = Field(
        default=None, description="Controls which (if any) tool is called by the model"
    )
375
+
376
+
377
class PromptTemplateError(Exception):
    """Root of the PromptTemplate exception hierarchy.

    Catch this to handle any template-related failure in one place.
    """
381
+
382
+
383
class InputValidationError(PromptTemplateError):
    """Raised when the provided inputs do not satisfy ``input_keys``."""

    def __init__(
        self, message: str, missing: Optional[set] = None, extra: Optional[set] = None
    ):
        # Retain the offending key sets so callers can inspect them
        # programmatically instead of parsing the message.
        self.extra = extra
        self.missing = missing
        super().__init__(message)
392
+
393
+
394
class TemplateFormatError(PromptTemplateError):
    """Raised when substituting variables into a template fails."""

    def __init__(self, message: str, original_error: Optional[Exception] = None):
        # Keep the underlying exception (if any) for debugging.
        self.original_error = original_error
        super().__init__(message)
400
+
401
+
402
class PromptTemplate(BaseModel):
    """A template for generating prompts with formatting capabilities"""

    # Default: empty system + user message pair. NOTE(review): a mutable
    # default in `Field(default=...)` — presumably pydantic copies defaults
    # per instance; confirm no cross-instance sharing.
    messages: List[Message] = Field(
        default=[Message(role="system", content=""), Message(role="user", content="")]
    )
    # Convenience inputs: folded into `messages` by init_messages below.
    system_prompt: Optional[str] = None
    user_prompt: Optional[str] = None
    template_format: Literal["fstring", "jinja2", "curly"] = Field(
        default="curly",
        description="Format type for template variables: fstring {var}, jinja2 {{ var }}, or curly {{var}}",
    )
    input_keys: Optional[List[str]] = Field(
        default=None,
        description="Optional list of input keys for validation. If not provided, any inputs will be accepted",
    )
    llm_config: ModelConfig = Field(
        default_factory=ModelConfig,
        description="Configuration for the model parameters",
    )

    # Marks the emitted JSON schema so Agenta tooling can recognize this
    # model as a prompt configuration.
    model_config = {
        "json_schema_extra": {
            "x-parameters": {
                "prompt": "true",
            }
        }
    }

    @model_validator(mode="before")
    def init_messages(cls, values):
        # Build `messages` from system_prompt/user_prompt only when the
        # caller did not pass messages explicitly.
        if "messages" not in values:
            messages = []
            if "system_prompt" in values and values["system_prompt"]:
                messages.append(Message(role="system", content=values["system_prompt"]))
            if "user_prompt" in values and values["user_prompt"]:
                messages.append(Message(role="user", content=values["user_prompt"]))
            if messages:
                values["messages"] = messages
        return values

    def _format_with_template(self, content: str, kwargs: Dict[str, Any]) -> str:
        """Internal method to format content based on template_format"""
        try:
            if self.template_format == "fstring":
                # str.format raises KeyError for missing variables; mapped
                # to TemplateFormatError by the outer except below.
                return content.format(**kwargs)
            elif self.template_format == "jinja2":
                # Imported lazily so jinja2 is only required when used.
                from jinja2 import Template, TemplateError

                try:
                    return Template(content).render(**kwargs)
                except TemplateError as e:
                    raise TemplateFormatError(
                        f"Jinja2 template error in content: '{content}'. Error: {str(e)}",
                        original_error=e,
                    )
            elif self.template_format == "curly":
                import re

                # NOTE(review): `key` and `str(value)` reach re.sub
                # unescaped — regex metacharacters in a key, or backslashes
                # in a value, will misbehave (re.escape / a lambda
                # replacement would be safer).
                result = content
                for key, value in kwargs.items():
                    result = re.sub(r"\{\{" + key + r"\}\}", str(value), result)
                # Any surviving {{...}} means a variable was not supplied.
                if re.search(r"\{\{.*?\}\}", result):
                    unreplaced = re.findall(r"\{\{(.*?)\}\}", result)
                    raise TemplateFormatError(
                        f"Unreplaced variables in curly template: {unreplaced}"
                    )
                return result
            else:
                raise TemplateFormatError(
                    f"Unknown template format: {self.template_format}"
                )
        except KeyError as e:
            # fstring path: missing variable surfaces as KeyError.
            key = str(e).strip("'")
            raise TemplateFormatError(
                f"Missing required variable '{key}' in template: '{content}'"
            )
        except Exception as e:
            # Catch-all: also re-wraps TemplateFormatError raised above.
            # NOTE(review): raised without `from e` — original traceback
            # context is only preserved via `original_error`.
            raise TemplateFormatError(
                f"Error formatting template '{content}': {str(e)}", original_error=e
            )

    def format(self, **kwargs) -> "PromptTemplate":
        """
        Format the template with provided inputs.
        Only validates against input_keys if they are specified.

        Raises:
            InputValidationError: If input validation fails
            TemplateFormatError: If template formatting fails
        """
        # Validate inputs if input_keys is set
        if self.input_keys is not None:
            missing = set(self.input_keys) - set(kwargs.keys())
            extra = set(kwargs.keys()) - set(self.input_keys)

            error_parts = []
            if missing:
                error_parts.append(
                    f"Missing required inputs: {', '.join(sorted(missing))}"
                )
            if extra:
                error_parts.append(f"Unexpected inputs: {', '.join(sorted(extra))}")

            if error_parts:
                raise InputValidationError(
                    " | ".join(error_parts),
                    missing=missing if missing else None,
                    extra=extra if extra else None,
                )

        new_messages = []
        for i, msg in enumerate(self.messages):
            # Only messages with non-empty content are formatted; others
            # pass through with content=None.
            if msg.content:
                try:
                    new_content = self._format_with_template(msg.content, kwargs)
                except TemplateFormatError as e:
                    # Re-wrap with the message index/role for context.
                    raise TemplateFormatError(
                        f"Error in message {i} ({msg.role}): {str(e)}",
                        original_error=e.original_error,
                    )
            else:
                new_content = None

            new_messages.append(
                Message(
                    role=msg.role,
                    content=new_content,
                    name=msg.name,
                    tool_calls=msg.tool_calls,
                    tool_call_id=msg.tool_call_id,
                )
            )

        # NOTE(review): system_prompt/user_prompt are intentionally (?) not
        # carried over — the returned copy holds only formatted messages.
        return PromptTemplate(
            messages=new_messages,
            template_format=self.template_format,
            llm_config=self.llm_config,
            input_keys=self.input_keys,
        )

    def to_openai_kwargs(self) -> dict:
        """Convert the prompt template to kwargs compatible with litellm/openai"""
        # NOTE(review): `.dict()` is the deprecated pydantic v1 API;
        # `.model_dump()` is the v2 equivalent.
        kwargs = {
            "model": self.llm_config.model,
            "messages": [msg.dict(exclude_none=True) for msg in self.messages],
        }

        # Add optional parameters only if they are set
        if self.llm_config.temperature is not None:
            kwargs["temperature"] = self.llm_config.temperature

        if self.llm_config.top_p is not None:
            kwargs["top_p"] = self.llm_config.top_p

        if self.llm_config.stream is not None:
            kwargs["stream"] = self.llm_config.stream

        if self.llm_config.max_tokens is not None:
            kwargs["max_tokens"] = self.llm_config.max_tokens

        if self.llm_config.frequency_penalty is not None:
            kwargs["frequency_penalty"] = self.llm_config.frequency_penalty

        if self.llm_config.presence_penalty is not None:
            kwargs["presence_penalty"] = self.llm_config.presence_penalty

        if self.llm_config.response_format:
            kwargs["response_format"] = self.llm_config.response_format.dict(
                by_alias=True
            )

        if self.llm_config.tools:
            kwargs["tools"] = self.llm_config.tools
            # Only set tool_choice if tools are present
            if self.llm_config.tool_choice is not None:
                kwargs["tool_choice"] = self.llm_config.tool_choice

        return kwargs
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: agenta
3
- Version: 0.32.0
3
+ Version: 0.32.0a2
4
4
  Summary: The SDK for agenta is an open-source LLMOps platform.
5
5
  Keywords: LLMOps,LLM,evaluation,prompt engineering
6
6
  Author: Mahmoud Mabrouk
@@ -1,4 +1,4 @@
1
- agenta/__init__.py,sha256=BytAaijm7h51NG-H7RmllolVQ3QzLCM9WTlqMdQgYow,2150
1
+ agenta/__init__.py,sha256=A2slx8eFxACWFIcRhm-4WZS1ineUf9cMDgxdd3QhhhQ,2216
2
2
  agenta/cli/evaluation_commands.py,sha256=fs6492tprPId9p8eGO02Xy-NCBm2RZNJLZWcUxugwd8,474
3
3
  agenta/cli/helper.py,sha256=P97HbNb_qzOyl5CM_MjAqWEBCdgebU6M81G_4UCmF1A,6288
4
4
  agenta/cli/main.py,sha256=WJSp-kJ6j0bea64l5QJlnOPpLwTgNcN7Am4X2YZBP1A,7939
@@ -16,7 +16,7 @@ agenta/client/backend/apps/__init__.py,sha256=9mUnTDeA1TxYvkj1l01A1prqsJV0ERRY2t
16
16
  agenta/client/backend/apps/client.py,sha256=6ZGBcR37ILwS2VNp8BZR4Tz09W2MlaSChPQ7it0tWoE,54946
17
17
  agenta/client/backend/bases/__init__.py,sha256=9mUnTDeA1TxYvkj1l01A1prqsJV0ERRY2tzkY1fA4MQ,64
18
18
  agenta/client/backend/bases/client.py,sha256=s8EAwrdxNkOjhDSP2-HIbqp51vNlvhXxS4nGb8YoWPk,6068
19
- agenta/client/backend/client.py,sha256=jWUJXLKjpi-0J6MPwrLjiXx2gdnRps2JnZ7-m23i3uo,105043
19
+ agenta/client/backend/client.py,sha256=psZK0A7dZJLyQfXXSDEjxPeLXn3svvTGmx04Px_23sE,105267
20
20
  agenta/client/backend/configs/__init__.py,sha256=9mUnTDeA1TxYvkj1l01A1prqsJV0ERRY2tzkY1fA4MQ,64
21
21
  agenta/client/backend/configs/client.py,sha256=_nuh5K5D5SuCOeckXcGv6P4ZtVdTGWQ7JhcoIclmWfY,19267
22
22
  agenta/client/backend/containers/__init__.py,sha256=Haw2PwiPhNvM26PLQN57jY0bN-QqPoDG4VA-P_uGL3A,153
@@ -28,7 +28,7 @@ agenta/client/backend/core/api_error.py,sha256=TtMtCdxXjd7Tasc9c8ooFg124nPrb2MXG
28
28
  agenta/client/backend/core/client_wrapper.py,sha256=esOteB9jDmUGJkv6EotT54nPNU42TR_kjF3pPjU6lqY,1817
29
29
  agenta/client/backend/core/datetime_utils.py,sha256=BHjt_H3WVslcuPsr6qjJoVif_SsdLvFN0c43ABE5UiQ,1069
30
30
  agenta/client/backend/core/file.py,sha256=mUvNH6Wc-k2mSlXCJ9VoYzvgi7NcCkhTzlW2VXxSucg,2710
31
- agenta/client/backend/core/http_client.py,sha256=Wz6swfW0qdrpVNGEmf8HGdWdDTRq-0bAjsdV3xvbFxE,21004
31
+ agenta/client/backend/core/http_client.py,sha256=e7jy8gDqwvdWiNGoJdY-jX73cOYFiF_p_5at82gAC6Y,21006
32
32
  agenta/client/backend/core/jsonable_encoder.py,sha256=SHXw4G4n-f0IPgNkxj_-Fip3kN8NUAI-YrKxdZw8kl0,3662
33
33
  agenta/client/backend/core/pydantic_utilities.py,sha256=BZTSULs3wlfRQwTtsEyKlyY6SkHCtf3WxHqfhH9YSG4,12325
34
34
  agenta/client/backend/core/query_encoder.py,sha256=8qYl5VPl1jU4cDF0X7oSU_DXjlVWY5ayigFBpNTMGOA,2150
@@ -190,42 +190,47 @@ agenta/docker/docker-assets/entrypoint.sh,sha256=29XK8VQjQsx4hN2j-4JDy-6kQb5y4LC
190
190
  agenta/docker/docker-assets/lambda_function.py,sha256=h4UZSSfqwpfsCgERv6frqwm_4JrYu9rLz3I-LxCfeEg,83
191
191
  agenta/docker/docker-assets/main.py,sha256=7MI-21n81U7N7A0GxebNi0cmGWtJKcR2sPB6FcH2QfA,251
192
192
  agenta/docker/docker_utils.py,sha256=kO1q2_IR0fEAo4M-2Pt_v-zC7GxxnkLogjKFhU869Ps,3555
193
- agenta/sdk/__init__.py,sha256=ux4l87ko0uGxBv0NqSd237s6icFZ7iAYXkj6JqIY0e0,1993
193
+ agenta/sdk/__init__.py,sha256=Kx86cuRBKOvl8VecWtDV75elDLz5teQ5qkh_CQIPz6g,2022
194
194
  agenta/sdk/agenta_init.py,sha256=OZf-pjFyr71hfgcZC7S323t9PJLpNb-qYXOvVRmRlpA,7961
195
195
  agenta/sdk/assets.py,sha256=co49Y8T8WWINk2p7ddQPzB39AuwtVfrsyE_AnR1n5TQ,3062
196
196
  agenta/sdk/client.py,sha256=trKyBOYFZRk0v5Eptxvh87yPf50Y9CqY6Qgv4Fy-VH4,2142
197
197
  agenta/sdk/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
198
198
  agenta/sdk/context/exporting.py,sha256=16X8fgMhl58gehSlqANX97FiKxx4TkGiG4d2B0-7ZX0,516
199
- agenta/sdk/context/routing.py,sha256=6QclrCDXR8Kn7uQf7Q-IzQ6lRAyXrPwFDL5RJxaOWpU,567
199
+ agenta/sdk/context/routing.py,sha256=FEsjw8EttI1SMyUo96ptcUsvHJnhoKwdr1szlkxxJNU,598
200
200
  agenta/sdk/context/tracing.py,sha256=zp7T_wLVkR-V1c0k7UAN69rwH9VV7MhoZD_IdNu-_RE,649
201
201
  agenta/sdk/decorators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
202
- agenta/sdk/decorators/routing.py,sha256=g2F1BCnEufEXW_QRlua9oLY6x4O7_fwmUDJrqKgRTts,33104
203
- agenta/sdk/decorators/tracing.py,sha256=n5tsbZ2z1G7aIPQEJjvsuVGxbNSstdf4fXUx9bOASEY,8705
202
+ agenta/sdk/decorators/routing.py,sha256=6auINyIdIS7mO7KTDDlaWDjl9iZJ4S2vobPdQVf4nvw,20874
203
+ agenta/sdk/decorators/tracing.py,sha256=Wf7KWR1NknXlQEdB2sjMFrcSI-tSSvO9XubN-Ro5tkU,9216
204
204
  agenta/sdk/litellm/__init__.py,sha256=Bpz1gfHQc0MN1yolWcjifLWznv6GjHggvRGQSpxpihM,37
205
- agenta/sdk/litellm/litellm.py,sha256=Ke0It-jA1z0KQ2770gIlWIEgramZGmt1k0GjmpEnFV4,8793
205
+ agenta/sdk/litellm/litellm.py,sha256=Xx_YJu05BYkmIe6uObjS6DwzjgwfNytGWf807Zh0vcU,10153
206
+ agenta/sdk/litellm/mockllm.py,sha256=8V6dqdv8eA4P-VoXIwHNYlIjHG189P14POSfSfluVw0,678
207
+ agenta/sdk/litellm/mocks/__init__.py,sha256=c-afSm0YkiHyyaLegvtFs6WuWZTl2Q7Fq_iUoxqBbQc,616
206
208
  agenta/sdk/managers/__init__.py,sha256=SN-LRwG0pRRDV3u2Q4JiiSTigN3-mYpzGNM35RzT4mc,238
207
209
  agenta/sdk/managers/config.py,sha256=8-TJn56SssNjfxCY7NhDwqL4in5gtPeMrsvyEf-W_u4,7421
208
210
  agenta/sdk/managers/deployment.py,sha256=SEokjZeh6n7HRKZ92Y0WncdG49hIFx-Z3B3HAl2kmUg,1174
209
211
  agenta/sdk/managers/secrets.py,sha256=HAXswBOTbF_LydoNGnQgHr-KVqdFUOvLujUg6S_5taM,878
210
212
  agenta/sdk/managers/shared.py,sha256=e53jckQq5PIMpjdxADOonUj7o8aGfzmSvdeH5f43rGs,21497
211
213
  agenta/sdk/managers/variant.py,sha256=A5ga3mq3b0weUTXa9HO72MGaspthGcu1uK9K5OnP738,4172
214
+ agenta/sdk/managers/vault.py,sha256=054ce9X_xKa2M4NtQWz-GugO6q_pYVWCP3IxbAJJcRw,337
212
215
  agenta/sdk/middleware/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
213
- agenta/sdk/middleware/auth.py,sha256=SoVnzdm5ygP03bQgXDd86aTvIdeTfr4uIoeJsPxN7No,5491
216
+ agenta/sdk/middleware/auth.py,sha256=2jV32_JdjaOZFI9VGy3OGVg6tOvgEN64RUpM71AqVZ0,5572
214
217
  agenta/sdk/middleware/cache.py,sha256=-_e3_6f4cPfpuUpGRK_ZQvUx_54ez68db4SnBwfuESk,1078
215
- agenta/sdk/middleware/config.py,sha256=tOMEPwS4PHaxOtH4OII0ymqx4OlP0gvMSrMepoHbtxk,7229
218
+ agenta/sdk/middleware/config.py,sha256=_oBj9XNzxiMyoLo1DLh7Tck5s05H6_udaNCGYe5orZA,7577
216
219
  agenta/sdk/middleware/cors.py,sha256=q3r7lGkrIdMcT_vuhsburMcjG7pyl7w0ycxrIrGJ2e8,921
220
+ agenta/sdk/middleware/inline.py,sha256=1bNBEoen9NRF2O3ZAP7z-izaEFUI1hBT22xLPIvof3A,937
221
+ agenta/sdk/middleware/mock.py,sha256=G028gYDMzaHtjULl2yU-zqXrEueMNC9JYmnIqRmjtYM,794
217
222
  agenta/sdk/middleware/otel.py,sha256=HB-JYAHejZHHlPuHtp12jIpJ6E7k4CdnMdOhyhO77tU,1030
218
- agenta/sdk/middleware/vault.py,sha256=H2pquBwRLC8KwAQBPAm7bKWlc407I-vIu-Ox4ncFY1M,5114
223
+ agenta/sdk/middleware/vault.py,sha256=Hyz9Zuq9fm_lvm-z5Ai8-PZectXMuIyaytAlpVOpqDU,4312
219
224
  agenta/sdk/router.py,sha256=mOguvtOwl2wmyAgOuWTsf98pQwpNiUILKIo67W_hR3A,119
220
225
  agenta/sdk/tracing/__init__.py,sha256=rQNe5-zT5Kt7_CDhq-lnUIi1EYTBVzVf_MbfcIxVD98,41
221
226
  agenta/sdk/tracing/attributes.py,sha256=zh8JQZSeYCLBeIRSopKJx6QQ-WEgw08Cr64DS_WOcT8,3833
222
227
  agenta/sdk/tracing/conventions.py,sha256=JBtznBXZ3aRkGKkLl7cPwdMNh3w1G-H2Ta2YrAxbr38,950
223
- agenta/sdk/tracing/exporters.py,sha256=5njI08ezWp2mUXL5mPZFU8qwM9E3RYyQlGvY5iHvEKo,2688
224
- agenta/sdk/tracing/inline.py,sha256=GI2jo4R9hrKGpiUnQ6nfWNR5DR_kxl2PdEnW23Jc7UU,31354
228
+ agenta/sdk/tracing/exporters.py,sha256=PuSbamuLa4e9eGBM6tp7Smh5R41Jz07VBoBtMnkbtz8,2687
229
+ agenta/sdk/tracing/inline.py,sha256=BQecJkZDlR85aodfvye-LuDhouee3RjLuUl7GMpfsSw,31282
225
230
  agenta/sdk/tracing/processors.py,sha256=d7MvJ_DRAP0RAlp8V8XrrY-m8pJ03nLxKoq15RnjedA,3334
226
231
  agenta/sdk/tracing/spans.py,sha256=nqUOjjirBxB8Eacv8Qj4Ra_6rknGi3lbJdNyKmk5ODQ,3707
227
232
  agenta/sdk/tracing/tracing.py,sha256=iZCIggUkR5f2zVcq0o7T8-to7pcFO349gUEBYXnBMzg,6888
228
- agenta/sdk/types.py,sha256=_lGsGSEaZJrUT4cVcT3zSpgEqex2jFaPtfpFeUEetbc,7247
233
+ agenta/sdk/types.py,sha256=HvFok4lEFxWow6WT71OrJ23WhTsj_FqqMyzlKByAqYk,19120
229
234
  agenta/sdk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
230
235
  agenta/sdk/utils/constants.py,sha256=zW3R4rjXOo2L5lz6q84l_zYuOM9u4mpPRHw_B1Dr_hI,67
231
236
  agenta/sdk/utils/costs.py,sha256=i8C7ud__pThLS55XkN4YW8czXtGeXr2mx7jjcOFeiXg,5955
@@ -251,7 +256,7 @@ agenta/templates/simple_prompt/app.py,sha256=kODgF6lhzsaJPdgL5b21bUki6jkvqjWZzWR
251
256
  agenta/templates/simple_prompt/env.example,sha256=g9AE5bYcGPpxawXMJ96gh8oenEPCHTabsiOnfQo3c5k,70
252
257
  agenta/templates/simple_prompt/requirements.txt,sha256=ywRglRy7pPkw8bljmMEJJ4aOOQKrt9FGKULZ-DGkoBU,23
253
258
  agenta/templates/simple_prompt/template.toml,sha256=DQBtRrF4GU8LBEXOZ-GGuINXMQDKGTEG5y37tnvIUIE,60
254
- agenta-0.32.0.dist-info/METADATA,sha256=4iT189ml4TdEIGZIDJQlVhxZluAI2GZDSjAX8HYvGaQ,29623
255
- agenta-0.32.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
256
- agenta-0.32.0.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
257
- agenta-0.32.0.dist-info/RECORD,,
259
+ agenta-0.32.0a2.dist-info/METADATA,sha256=iDQKOgsr6EsCMTJDm9G6PxNZJK11iqyKVY_aXiF5jD4,29625
260
+ agenta-0.32.0a2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
261
+ agenta-0.32.0a2.dist-info/entry_points.txt,sha256=PDiu8_8AsL7ibU9v4iNoOKR1S7F2rdxjlEprjM9QOgo,46
262
+ agenta-0.32.0a2.dist-info/RECORD,,