atendentepro 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. atendentepro/README.md +890 -0
  2. atendentepro/__init__.py +215 -0
  3. atendentepro/agents/__init__.py +45 -0
  4. atendentepro/agents/answer.py +62 -0
  5. atendentepro/agents/confirmation.py +69 -0
  6. atendentepro/agents/flow.py +64 -0
  7. atendentepro/agents/interview.py +68 -0
  8. atendentepro/agents/knowledge.py +296 -0
  9. atendentepro/agents/onboarding.py +65 -0
  10. atendentepro/agents/triage.py +57 -0
  11. atendentepro/agents/usage.py +56 -0
  12. atendentepro/config/__init__.py +19 -0
  13. atendentepro/config/settings.py +134 -0
  14. atendentepro/guardrails/__init__.py +21 -0
  15. atendentepro/guardrails/manager.py +419 -0
  16. atendentepro/license.py +502 -0
  17. atendentepro/models/__init__.py +21 -0
  18. atendentepro/models/context.py +21 -0
  19. atendentepro/models/outputs.py +118 -0
  20. atendentepro/network.py +325 -0
  21. atendentepro/prompts/__init__.py +35 -0
  22. atendentepro/prompts/answer.py +114 -0
  23. atendentepro/prompts/confirmation.py +124 -0
  24. atendentepro/prompts/flow.py +112 -0
  25. atendentepro/prompts/interview.py +123 -0
  26. atendentepro/prompts/knowledge.py +135 -0
  27. atendentepro/prompts/onboarding.py +146 -0
  28. atendentepro/prompts/triage.py +42 -0
  29. atendentepro/templates/__init__.py +51 -0
  30. atendentepro/templates/manager.py +530 -0
  31. atendentepro/utils/__init__.py +19 -0
  32. atendentepro/utils/openai_client.py +154 -0
  33. atendentepro/utils/tracing.py +71 -0
  34. atendentepro-0.3.0.dist-info/METADATA +306 -0
  35. atendentepro-0.3.0.dist-info/RECORD +39 -0
  36. atendentepro-0.3.0.dist-info/WHEEL +5 -0
  37. atendentepro-0.3.0.dist-info/entry_points.txt +2 -0
  38. atendentepro-0.3.0.dist-info/licenses/LICENSE +25 -0
  39. atendentepro-0.3.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,296 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Knowledge Agent for AtendentePro."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import logging
7
+ import pickle
8
+ from pathlib import Path
9
+ from typing import Any, Callable, List, Optional, TYPE_CHECKING
10
+
11
+ from pydantic import BaseModel, Field
12
+
13
+ from agents import Agent, function_tool
14
+
15
+ from atendentepro.config import RECOMMENDED_PROMPT_PREFIX, get_config
16
+ from atendentepro.models import ContextNote, KnowledgeToolResult
17
+ from atendentepro.prompts import get_knowledge_prompt
18
+
19
+ if TYPE_CHECKING:
20
+ from atendentepro.guardrails import GuardrailCallable
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ # Type alias for the Knowledge Agent
26
+ KnowledgeAgent = Agent[ContextNote]
27
+
28
+
29
+ # Global embedding path (can be configured per client)
30
+ _EMBEDDINGS_PATH: Optional[Path] = None
31
+
32
+
33
def set_embeddings_path(path: Path) -> None:
    """Set the module-global path to the pickled embeddings file.

    Args:
        path: Location of the embeddings pickle consumed by ``load_embeddings``.
    """
    global _EMBEDDINGS_PATH
    _EMBEDDINGS_PATH = path
37
+
38
+
39
def get_embeddings_path() -> Optional[Path]:
    """Return the currently configured embeddings path, or ``None`` if unset."""
    return _EMBEDDINGS_PATH
42
+
43
+
44
def load_embeddings() -> List[dict]:
    """Load the pickled chunk-embedding records from the configured path.

    Returns:
        The list of embedding records, or an empty list when no path is
        configured, the file is missing, or the payload cannot be loaded
        or is not a list.
    """
    embeddings_path = _EMBEDDINGS_PATH
    if not embeddings_path or not embeddings_path.exists():
        logger.warning("No embeddings path configured or file doesn't exist")
        return []

    try:
        # SECURITY: pickle.load can execute arbitrary code on malicious input.
        # Only point _EMBEDDINGS_PATH at files produced by trusted tooling.
        with open(embeddings_path, "rb") as file:
            loaded_data = pickle.load(file)
    except Exception as exc:
        logger.error("Failed to load embeddings from %s: %s", embeddings_path, exc)
        return []

    # Guard against a corrupted/unexpected payload: callers iterate a list,
    # and the signature promises List[dict].
    if not isinstance(loaded_data, list):
        logger.error(
            "Unexpected embeddings payload type %s in %s; expected a list",
            type(loaded_data).__name__,
            embeddings_path,
        )
        return []

    logger.info("Embeddings loaded successfully from %s", embeddings_path)
    return loaded_data
59
+
60
+
61
async def _find_relevant_chunks(query: str, top_k: int = 3) -> List[dict]:
    """Return the ``top_k`` stored chunks most similar to ``query``.

    Embeds the query via the OpenAI embeddings API, scores it against every
    stored chunk embedding by cosine similarity, and returns the best matches
    enriched with a ``similarity`` key. Returns an empty list on any failure.

    Args:
        query: Natural-language query to match against stored chunks.
        top_k: Maximum number of chunks to return.
    """
    try:
        import numpy as np

        from atendentepro.utils import get_async_client

        client = get_async_client()
        response = await client.embeddings.create(
            model="text-embedding-3-large", input=query
        )
        query_embedding = response.data[0].embedding

        chunk_embeddings = load_embeddings()
        if not chunk_embeddings:
            logger.error("No embeddings loaded")
            return []

        # Hoist the loop-invariant query vector and its norm out of the loop
        # (the original recomputed the query reshape on every iteration and
        # paid one sklearn call per chunk).
        query_vec = np.asarray(query_embedding, dtype=float)
        query_norm = np.linalg.norm(query_vec)

        similarities: List[tuple] = []
        for chunk_data in chunk_embeddings:
            chunk_embedding = chunk_data.get("embedding")
            if not chunk_embedding:
                continue

            chunk_vec = np.asarray(chunk_embedding, dtype=float)
            denom = query_norm * np.linalg.norm(chunk_vec)
            # Zero-norm vectors have no direction; score them 0.0 (matches
            # sklearn's cosine_similarity handling of zero vectors).
            similarity = float(query_vec @ chunk_vec / denom) if denom else 0.0
            similarities.append((similarity, chunk_data))

        # Highest similarity first.
        similarities.sort(key=lambda item: item[0], reverse=True)

        return [
            {**chunk_data, "similarity": score}
            for score, chunk_data in similarities[:top_k]
        ]

    except Exception as exc:
        logger.error("Error finding relevant chunks: %s", exc)
        return []
102
+
103
+
104
@function_tool
async def go_to_rag(question: str) -> KnowledgeToolResult:
    """
    Answer the user's question using retrieval-augmented generation (RAG).

    Retrieves the top matching document chunks, assembles them into a context
    string, and asks the configured model to synthesize an answer grounded in
    that context. If synthesis fails, a fallback answer is returned together
    with the retrieved context so a caller can still respond.

    Args:
        question: The question to answer using RAG.

    Returns:
        KnowledgeToolResult with answer, context, sources, and confidence.
    """
    # NOTE: get_config is imported at module level; the previous local
    # re-import of it here was redundant and has been removed.
    from atendentepro.utils import get_async_client

    logger.info("Processing question: %s", question)

    relevant_chunks = await _find_relevant_chunks(question, top_k=3)

    if not relevant_chunks:
        return KnowledgeToolResult(
            answer="Não consegui encontrar informações relevantes nos documentos para responder sua pergunta.",
            context="",
            sources=[],
            confidence=0.0,
        )

    context_sections: List[str] = []
    sources: List[str] = []
    seen_sources: set = set()
    similarities: List[float] = []

    for chunk in relevant_chunks:
        chunk_info = chunk.get("chunk", {}) or {}
        source = chunk_info.get("source", "Desconhecido")
        content = chunk_info.get("content", "")
        similarity = float(chunk.get("similarity", 0.0))

        # Keep sources unique while preserving first-seen order.
        if source not in seen_sources:
            sources.append(source)
            seen_sources.add(source)

        similarities.append(similarity)
        context_sections.append(f"Documento: {source}\nConteúdo: {content}")

    context = "\n\n".join(context_sections)
    logger.info("Context: %s", context)

    # Confidence is the mean similarity across chunks, clamping negative
    # scores to zero so they cannot drag the average below 0.
    confidence = (
        sum(max(score, 0.0) for score in similarities) / len(similarities) if similarities else 0.0
    )

    # Fallback answer used when synthesis fails or produces no text.
    answer = (
        "Encontrei trechos relevantes, mas não consegui sintetizar uma resposta a partir deles. "
        "Use o contexto abaixo para responder manualmente."
    )

    try:
        config = get_config()
        client = get_async_client()
        completion = await client.responses.create(
            model=config.default_model,
            input=[
                {
                    "role": "system",
                    "content": (
                        "Você é um especialista no domínio deste produto. Use apenas o contexto fornecido para "
                        "responder de forma objetiva. Se não houver informação suficiente, informe isso."
                    ),
                },
                {
                    "role": "user",
                    "content": (
                        f"Pergunta: {question}\n\n"
                        f"Contexto:\n{context}\n\n"
                        "Responda em português, destacando os passos principais e cite os documentos utilizados."
                    ),
                },
            ],
        )

        # Prefer the convenience accessor; fall back to walking the raw
        # output items for SDK versions that lack `output_text`.
        if hasattr(completion, "output_text"):
            answer_candidate = completion.output_text.strip()
            if answer_candidate:
                answer = answer_candidate
        else:
            parts: List[str] = []
            for output in getattr(completion, "output", []):
                for content in getattr(output, "content", []):
                    text_part = getattr(content, "text", None)
                    if text_part:
                        parts.append(text_part)
            if parts:
                answer = "\n".join(parts).strip()
    except Exception as exc:
        # Best effort: on failure we still return context/sources with the
        # fallback answer assigned above.
        logger.error("Failed to synthesize answer: %s", exc)

    return KnowledgeToolResult(
        answer=answer,
        context=context,
        sources=sources,
        confidence=confidence,
    )
206
+
207
+
208
def create_knowledge_agent(
    knowledge_about: str = "",
    knowledge_template: str = "",
    knowledge_format: str = "",
    embeddings_path: Optional[Path] = None,
    data_sources_description: str = "",
    include_rag_tool: bool = True,
    handoffs: Optional[List] = None,
    tools: Optional[List] = None,
    guardrails: Optional[List["GuardrailCallable"]] = None,
    name: str = "Knowledge Agent",
    custom_instructions: Optional[str] = None,
) -> KnowledgeAgent:
    """Build and return a configured Knowledge Agent.

    The knowledge agent handles document research (RAG) and structured data
    queries. It may use document-based RAG with embeddings, structured data
    sources (CSV, databases, APIs) via custom tools, or both at once.

    Args:
        knowledge_about: Description of available knowledge documents.
        knowledge_template: Document metadata template.
        knowledge_format: Response format template.
        embeddings_path: Path to the embeddings file for RAG.
        data_sources_description: Description of available structured data sources.
        include_rag_tool: Whether to expose the ``go_to_rag`` tool (effective
            only when ``embeddings_path`` is also provided).
        handoffs: Agents this agent may hand a conversation off to.
        tools: Additional tools for structured data queries.
        guardrails: Input guardrails applied to this agent.
        name: Agent name.
        custom_instructions: Custom instructions overriding the default prompt.

    Returns:
        Configured Knowledge Agent instance.
    """
    # Register the embeddings file so the RAG tool can locate it.
    if embeddings_path:
        set_embeddings_path(embeddings_path)

    # Fold structured-data sources into the "about" section when present.
    about_text = knowledge_about
    if data_sources_description:
        about_text = f"{knowledge_about}\n\nFontes de dados estruturados disponíveis:\n{data_sources_description}"

    if custom_instructions:
        prompt_body = custom_instructions
    else:
        prompt_body = get_knowledge_prompt(
            knowledge_about=about_text,
            knowledge_template=knowledge_template,
            knowledge_format=knowledge_format,
        )
    instructions = f"{RECOMMENDED_PROMPT_PREFIX} {prompt_body}"

    # RAG tool first (when enabled), then any structured-data tools.
    toolset: List = []
    if include_rag_tool and embeddings_path:
        toolset.append(go_to_rag)
    if tools:
        toolset.extend(tools)

    # Advertise the agent's capabilities in its handoff description.
    skills: List[str] = []
    if embeddings_path:
        skills.append("pesquisar documentos")
    if tools:
        skills.append("consultar dados estruturados")

    description = (
        f"Agente voltado a {' e '.join(skills) if skills else 'recuperar informações'} "
        "quando é preciso descobrir ou contextualizar algo novo."
    )

    return Agent[ContextNote](
        name=name,
        handoff_description=description,
        instructions=instructions,
        tools=toolset,
        handoffs=handoffs or [],
        input_guardrails=guardrails or [],
    )
296
+
@@ -0,0 +1,65 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Onboarding Agent for AtendentePro."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from typing import List, Optional, TYPE_CHECKING
7
+
8
+ from agents import Agent
9
+
10
+ from atendentepro.config import RECOMMENDED_PROMPT_PREFIX
11
+ from atendentepro.models import ContextNote
12
+ from atendentepro.prompts import get_onboarding_prompt
13
+ from atendentepro.prompts.onboarding import OnboardingField
14
+
15
+ if TYPE_CHECKING:
16
+ from atendentepro.guardrails import GuardrailCallable
17
+
18
+
19
+ # Type alias for the Onboarding Agent
20
+ OnboardingAgent = Agent[ContextNote]
21
+
22
+
23
def create_onboarding_agent(
    required_fields: Optional[List[OnboardingField]] = None,
    handoffs: Optional[List] = None,
    tools: Optional[List] = None,
    guardrails: Optional[List["GuardrailCallable"]] = None,
    name: str = "Onboarding Agent",
    custom_instructions: Optional[str] = None,
) -> OnboardingAgent:
    """Build and return a configured Onboarding Agent.

    The onboarding agent welcomes new users and walks them through the
    registration process.

    Args:
        required_fields: Required fields to collect during onboarding.
        handoffs: Agents this agent may hand a conversation off to.
        tools: Tools available to the agent (e.g. ``find_user_on_csv``).
        guardrails: Input guardrails applied to this agent.
        name: Agent name.
        custom_instructions: Custom instructions overriding the default prompt.

    Returns:
        Configured Onboarding Agent instance.
    """
    # Custom instructions win; otherwise render the standard onboarding prompt.
    prompt_body = (
        custom_instructions
        if custom_instructions
        else get_onboarding_prompt(required_fields=required_fields)
    )
    instructions = f"{RECOMMENDED_PROMPT_PREFIX} {prompt_body}"

    return Agent[ContextNote](
        name=name,
        handoff_description=(
            "Agente de onboarding responsável por acolher usuários não encontrados no cadastro "
            "e orientar o registro inicial."
        ),
        instructions=instructions,
        handoffs=handoffs or [],
        tools=tools or [],
        input_guardrails=guardrails or [],
    )
65
+
@@ -0,0 +1,57 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Triage Agent for AtendentePro."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from typing import List, Optional, TYPE_CHECKING
7
+
8
+ from agents import Agent
9
+
10
+ from atendentepro.config import RECOMMENDED_PROMPT_PREFIX
11
+ from atendentepro.models import ContextNote
12
+ from atendentepro.prompts import get_triage_prompt
13
+
14
+ if TYPE_CHECKING:
15
+ from atendentepro.guardrails import GuardrailCallable
16
+
17
+
18
+ # Type alias for the Triage Agent
19
+ TriageAgent = Agent[ContextNote]
20
+
21
+
22
def create_triage_agent(
    keywords_text: str = "",
    handoffs: Optional[List] = None,
    guardrails: Optional[List["GuardrailCallable"]] = None,
    name: str = "Triage Agent",
    custom_instructions: Optional[str] = None,
) -> TriageAgent:
    """Build and return a configured Triage Agent.

    The triage agent understands what a user needs and routes the
    conversation to the appropriate specialized agent.

    Args:
        keywords_text: Formatted keywords used for agent routing.
        handoffs: Agents this agent may hand a conversation off to.
        guardrails: Input guardrails applied to this agent.
        name: Agent name.
        custom_instructions: Custom instructions overriding the default prompt.

    Returns:
        Configured Triage Agent instance.
    """
    # Custom instructions win; otherwise render the standard triage prompt.
    prompt_body = (
        custom_instructions if custom_instructions else get_triage_prompt(keywords_text)
    )

    return Agent[ContextNote](
        name=name,
        handoff_description="A triage agent that can delegate a customer's request to the appropriate agent.",
        instructions=f"{RECOMMENDED_PROMPT_PREFIX} {prompt_body}",
        handoffs=handoffs or [],
        input_guardrails=guardrails or [],
    )
57
+
@@ -0,0 +1,56 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Usage Agent for AtendentePro."""
3
+
4
+ from __future__ import annotations
5
+
6
+ from typing import List, Optional, TYPE_CHECKING
7
+
8
+ from agents import Agent
9
+
10
+ from atendentepro.config import RECOMMENDED_PROMPT_PREFIX
11
+ from atendentepro.models import ContextNote
12
+
13
+ if TYPE_CHECKING:
14
+ from atendentepro.guardrails import GuardrailCallable
15
+
16
+
17
+ # Type alias for the Usage Agent
18
+ UsageAgent = Agent[ContextNote]
19
+
20
+
21
+ DEFAULT_USAGE_INSTRUCTIONS = """
22
+ You are a helpful usage agent. You will answer questions about the usage of the system.
23
+ Respond in natural language and never mention internal agents, transfers, or reasoning steps.
24
+ """
25
+
26
+
27
def create_usage_agent(
    handoffs: Optional[List] = None,
    guardrails: Optional[List["GuardrailCallable"]] = None,
    name: str = "Usage Agent",
    custom_instructions: Optional[str] = None,
) -> UsageAgent:
    """
    Create a Usage Agent instance.

    The usage agent answers questions about system usage.

    Args:
        handoffs: List of agents to hand off to.
        guardrails: List of input guardrails.
        name: Agent name.
        custom_instructions: Optional custom instructions to override default.

    Returns:
        Configured Usage Agent instance.
    """
    # Consistency fix: every other agent factory in this package prefixes its
    # instructions with RECOMMENDED_PROMPT_PREFIX, and this module imports it
    # (previously unused). Apply the same convention here.
    instructions = f"{RECOMMENDED_PROMPT_PREFIX} {custom_instructions or DEFAULT_USAGE_INSTRUCTIONS}"

    return Agent[ContextNote](
        name=name,
        handoff_description="A usage agent that can answer questions about the usage of the system.",
        instructions=instructions,
        handoffs=handoffs or [],
        input_guardrails=guardrails or [],
    )
56
+
@@ -0,0 +1,19 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Configuration module for AtendentePro library."""
3
+
4
+ from .settings import (
5
+ AtendentProConfig,
6
+ get_config,
7
+ configure,
8
+ RECOMMENDED_PROMPT_PREFIX,
9
+ DEFAULT_MODEL,
10
+ )
11
+
12
+ __all__ = [
13
+ "AtendentProConfig",
14
+ "get_config",
15
+ "configure",
16
+ "RECOMMENDED_PROMPT_PREFIX",
17
+ "DEFAULT_MODEL",
18
+ ]
19
+
@@ -0,0 +1,134 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ AtendentePro Configuration Settings.
4
+
5
+ This module provides centralized configuration management for the AtendentePro library.
6
+ It supports both OpenAI and Azure OpenAI providers.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import os
12
+ from dataclasses import dataclass, field
13
+ from functools import lru_cache
14
+ from pathlib import Path
15
+ from typing import Literal, Optional
16
+
17
+ from dotenv import load_dotenv
18
+
19
+ load_dotenv()
20
+
21
+
22
@dataclass
class AtendentProConfig:
    """Main configuration class for AtendentePro."""

    # Provider configuration: which backend serves model requests.
    provider: Literal["openai", "azure"] = "openai"

    # OpenAI settings
    openai_api_key: Optional[str] = None

    # Azure OpenAI settings
    azure_api_key: Optional[str] = None
    azure_api_endpoint: Optional[str] = None
    azure_api_version: Optional[str] = None
    azure_deployment_name: Optional[str] = None

    # Model settings
    default_model: str = "gpt-4.1"

    # OCR settings (optional)
    ocr_enabled: bool = True
    azure_ai_vision_endpoint: Optional[str] = None
    azure_ai_vision_key: Optional[str] = None

    # Tracing settings
    application_insights_connection_string: Optional[str] = None

    # Templates settings
    templates_root: Optional[Path] = None
    default_client: str = "standard"

    # Context output directory
    context_output_dir: str = "context"

    @classmethod
    def from_env(cls) -> "AtendentProConfig":
        """Build a configuration from environment variables.

        Provider selection: an explicit ``OPENAI_PROVIDER`` wins; otherwise
        Azure is chosen only when both an Azure key and endpoint are present.
        """
        env = os.getenv

        azure_key = env("AZURE_API_KEY")
        azure_endpoint = env("AZURE_API_ENDPOINT") or env("AZURE_OPENAI_ENDPOINT")
        explicit = (env("OPENAI_PROVIDER") or "").strip().lower()

        if explicit:
            provider: Literal["openai", "azure"] = (
                "azure" if explicit == "azure" else "openai"
            )
        elif azure_key and azure_endpoint:
            provider = "azure"
        else:
            provider = "openai"

        return cls(
            provider=provider,
            openai_api_key=env("OPENAI_API_KEY"),
            azure_api_key=azure_key,
            azure_api_endpoint=azure_endpoint,
            azure_api_version=env("AZURE_API_VERSION") or env("AZURE_OPENAI_API_VERSION"),
            azure_deployment_name=env("AZURE_DEPLOYMENT_NAME"),
            default_model=env("DEFAULT_MODEL", "gpt-4.1"),
            ocr_enabled=env("OCR_ENABLED", "true").lower() == "true",
            azure_ai_vision_endpoint=env("AZURE_AI_VISION_ENDPOINT"),
            azure_ai_vision_key=env("AZURE_AI_VISION_KEY"),
            application_insights_connection_string=env("APPLICATION_INSIGHTS_CONNECTION_STRING"),
            context_output_dir=env("CONTEXT_OUTPUT_DIR", "context"),
        )
83
+
84
+
85
+ # Global configuration instance
86
+ _config: Optional[AtendentProConfig] = None
87
+
88
+
89
def get_config() -> AtendentProConfig:
    """Get the current configuration, initializing from environment if needed.

    The first call builds the module-level singleton via
    ``AtendentProConfig.from_env``; subsequent calls return the cached instance.
    """
    global _config
    if _config is None:
        _config = AtendentProConfig.from_env()
    return _config
95
+
96
+
97
def configure(config: Optional[AtendentProConfig] = None, **kwargs) -> AtendentProConfig:
    """
    Configure the AtendentePro library.

    Args:
        config: Optional pre-built configuration object installed as-is.
        **kwargs: Individual settings to override on the active configuration.

    Returns:
        The active configuration object
    """
    global _config

    if config is not None:
        _config = config
        return _config

    if kwargs:
        active = get_config()
        for attr, value in kwargs.items():
            # Unknown keys are ignored; only existing attributes are updated.
            if hasattr(active, attr):
                setattr(active, attr, value)

    return get_config()
119
+
120
+
121
+ # Default model
122
+ DEFAULT_MODEL = "gpt-4.1"
123
+
124
+ # Recommended prompt prefix for all agents
125
+ RECOMMENDED_PROMPT_PREFIX = """
126
+ [CONTEXT SYSTEM]
127
+ - Você faz parte de um sistema multiagente chamado Agents SDK, criado para facilitar a coordenação e execução de agentes.
128
+ - O Agents SDK utiliza duas principais abstrações: **Agentes** e **Handoffs** (transferências).
129
+ - Um agente abrange instruções e ferramentas e pode transferir uma conversa para outro agente quando apropriado.
130
+ - Transferências entre agentes são realizadas chamando uma função de transferência, geralmente nomeada como `transfer_to_<nome_do_agente>`.
131
+ - As transferências entre agentes ocorrem de forma transparente em segundo plano; não mencione nem chame atenção para essas transferências na sua conversa com o usuário.
132
+ - Produza respostas naturais, evitando termos como "transferindo para...", "análise concluída", "aqui está a situação" ou qualquer indicação de lógica interna.
133
+ """
134
+
@@ -0,0 +1,21 @@
1
+ # -*- coding: utf-8 -*-
2
+ """Guardrails module for AtendentePro library."""
3
+
4
+ from .manager import (
5
+ GuardrailManager,
6
+ get_guardrails_for_agent,
7
+ get_out_of_scope_message,
8
+ load_guardrail_config,
9
+ set_guardrails_client,
10
+ clear_guardrail_cache,
11
+ )
12
+
13
+ __all__ = [
14
+ "GuardrailManager",
15
+ "get_guardrails_for_agent",
16
+ "get_out_of_scope_message",
17
+ "load_guardrail_config",
18
+ "set_guardrails_client",
19
+ "clear_guardrail_cache",
20
+ ]
21
+