pbesa 4.0.15.tar.gz → 4.0.17.tar.gz
This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registries.
- {pbesa-4.0.15 → pbesa-4.0.17}/PKG-INFO +1 -1
- pbesa-4.0.17/pbesa/celulas/celula_expertos.py +57 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/cognitive.py +29 -16
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/util.py +2 -1
- pbesa-4.0.17/pbesa/models.py +333 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/collaborative_team.py +6 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/PKG-INFO +1 -1
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/SOURCES.txt +1 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/setup.py +1 -1
- pbesa-4.0.15/pbesa/models.py +0 -349
- {pbesa-4.0.15 → pbesa-4.0.17}/.gitignore +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/LICENSE +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/LICENSE.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/MANIFEST +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/README.md +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/db.sqlite3 +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__pycache__/pbesa.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__pycache__/settings.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__pycache__/urls.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/__pycache__/wsgi.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/asgi.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/pbesa.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/settings.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/urls.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/helloworld/wsgi.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/manage.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/admin.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/apps.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/models.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/urls.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/__pycache__/views.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/admin.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/apps.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/__pycache__/translatecontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/__pycache__/translatedelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/__pycache__/translateresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/translatecontroller.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/translatedelegate.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/controller/translateresponse.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/worker/__pycache__/translatetask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/worker/__pycache__/workeragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/worker/translatetask.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/mas/worker/workeragent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/migrations/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/migrations/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/models.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/tests.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/urls.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/django/helloworld/translate/views.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/countercontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/counterdelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/counterresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/translatecontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/translatedelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/__pycache__/translateresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/countercontroller.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/counterdelegate.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/controller/counterresponse.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/__pycache__/counteragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/__pycache__/countertask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/__pycache__/translatetask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/__pycache__/workeragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/counteragent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/mas/worker/countertask.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/remote_a.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/remote_b.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/examples/remote/remote_c.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_casos.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_consultas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_datos_identificables.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_generar_documento.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_preguntas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/celula_saludos.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/celulas/web.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/adapter.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/agent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/io/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/io/system_file.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/io/tcp_server.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/__pycache__/__init__.cpython-37.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/__pycache__/__init__.cpython-38.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/__pycache__/__init__.cpython-39.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/res/conf.json +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/world.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/mas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/adm_listener.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/adm_listener_handler.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/exceptions.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/remote_adm.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/remote/remote_adm_handler.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/delegator.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/delegator_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/dialog.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/dispatcher_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/prompts.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/selected_dispatcher_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/templates.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/worker.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/dependency_links.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/requires.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/top_level.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.17}/setup.cfg +0 -0
pbesa-4.0.17/pbesa/celulas/celula_expertos.py (new file)

```diff
@@ -0,0 +1,57 @@
+import logging
+
+PROMPT = """
+Eres un clasificador de textos que determina a qué entidad o dependencia gubernamental colombiana corresponde un texto dado, basándose en la descripción de sus funciones. Tu tarea es analizar el siguiente texto y determinar la clase más apropiada.
+
+Responde únicamente con el nombre completo de la clase o con "NO_CORRESPONDE" si el texto no se ajusta a ninguna de las clases descritas. No agregues explicaciones ni comentarios adicionales.
+
+%s
+
+Opciones de respuesta posibles:
+
+DIMAR - Dirección General Marítima
+DNDA - Dirección Nacional de Derecho de Autor
+ICA - Instituto Colombiano Agropecuario
+SFC - Superintendencia Financiera de Colombia
+SIC - Superintendencia de Industria y Comercio - Competencia Desleal
+SIC - Superintendencia de Industria y Comercio - Propiedad Industrial
+SIC - Superintendencia de Industria y Comercio - Protección al Consumidor
+Supersalud - Superintendencia Nacional de Salud - Cobertura Servicios
+Supersalud - Superintendencia Nacional de Salud - Conflictos Devoluciones
+Supersalud - Superintendencia Nacional de Salud - Conflicto Entidades
+Supersalud - Superintendencia Nacional de Salud - Libre Elección
+Supersalud - Superintendencia Nacional de Salud - Multiafiliación
+Supersalud - Superintendencia Nacional de Salud - Reconocimiento Económico
+Supersociedades - Superintendencia de Sociedades - Liquidación Insolvencia
+Supersociedades - Superintendencia de Sociedades - Reorganización Insolvencia
+Supersociedades - Superintendencia de Sociedades - Validación Judicial
+Supersociedades - Superintendencia de Sociedades - Mercantiles
+NO_CORRESPONDE
+"""
+
+# Efectua la inferencia del modelo.
+def derive(service, text, conocimiento, max_tkns=4096) -> any:
+    try:
+        tmp_work_memory = []
+        prompt = PROMPT % conocimiento
+        tmp_work_memory.append({"role": "system", "content": prompt})
+        user_prompt = """
+        Texto: "%s"
+
+        Clasificación:
+        """ % text
+        tmp_work_memory.append({"role": "system", "content": prompt})
+        tmp_work_memory.append({"role": "user", "content": user_prompt})
+        res = service.generate(tmp_work_memory, max_tkns)
+        logging.info("\n")
+        logging.info(f"[Celula][Expertos][Text]: {text}")
+        logging.info(f"[Celula][Expertos][Respuesta]: {res}")
+        logging.info("\n")
+        if not res or res == "":
+            res = text
+            logging.warning(f"[Celula][Expertos]: No obtener una respuesta.")
+        return res
+    except Exception as e:
+        logging.error(f"[Celula][Expertos]: Error al procesar: {text}")
+        logging.error(e)
+        return None
```
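A minimal usage sketch of the new cell (not part of the package): `FakeService` is a hypothetical stand-in for the Azure-backed `AIService` that `define_service_provider` normally supplies, and the `conocimiento` string is the knowledge block substituted into the `%s` placeholder of `PROMPT`.

```python
from pbesa.celulas import celula_expertos

class FakeService:
    """Hypothetical stub; a real deployment registers an AIService
    (e.g. AzureOpenAIInference) via define_service_provider."""
    def generate(self, work_memory, max_tokens=4096):
        # A real service would send the accumulated messages to the model.
        return "SIC - Superintendencia de Industria y Comercio - Protección al Consumidor"

conocimiento = "Descripción de las funciones de cada entidad..."  # placeholder
texto = "Compré un electrodoméstico defectuoso y la tienda no responde."

# derive returns the class name, or falls back to the input text when the
# service returns an empty response (see the warning branch above).
print(celula_expertos.derive(FakeService(), texto, conocimiento))
```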
{pbesa-4.0.15 → pbesa-4.0.17}/pbesa/cognitive.py

```diff
@@ -26,7 +26,7 @@ from pbesa.social.dialog import (
     DialogState, imprimir_grafo, recorrer_interacciones, extraer_diccionario_nodos,
     ActionNode, DeclarativeNode, GotoNode)
 from .celulas import (celula_casos, celula_consultas, celula_saludos, celula_datos_identificables,
-                      celula_generar_documento)
+                      celula_generar_documento, celula_expertos)
 from pbesa.social.prompts import ANALIZER_PROMPT, CLASSIFICATION_PROMPT, DERIVE_PROMPT, RECOVERY_PROMPT, ADAPT_PROMPT, SINTETIZER_PROMPT
 
 # --------------------------------------------------------
@@ -109,7 +109,7 @@ class AgentMetadata():
 # Define common functions
 # --------------------------------------------------------
 
-def define_service_provider(provider, ai_service=None) -> None:
+def define_service_provider(provider, ai_service=None, substitudes = False) -> None:
     # Define provider
     service = None
     service_provider = ServiceProvider()
@@ -126,7 +126,12 @@ def define_service_provider(provider, ai_service=None) -> None:
         service = AzureInference()
         service_provider.register("AZURE_INFERENCE", service)
     elif "AZURE_OPEN_AI_INFERENCE" in provider:
-
+        substitude_1 = None
+        substitude_2 = None
+        if substitudes:
+            substitude_1 = AzureInference()
+            substitude_2 = AzureInference()
+        service = AzureOpenAIInference(substitude_1, substitude_2)
         service_provider.register("AZURE_OPEN_AI_INFERENCE", service)
     return service_provider, service
 
```
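A sketch of how the new `substitudes` flag might be used. The configuration keys mirror those read in the new `pbesa/models.py` below; the endpoint, key, and deployment values are placeholders, not real ones:

```python
from pbesa.cognitive import define_service_provider

config = {
    "API_VERSION": "2024-02-01",                                   # placeholder
    "AZURE_OPEN_AI_INFERENCE_SDK_ENDPOINT": "https://example.openai.azure.com",
    "AZURE_GENERAL_INFERENCE_SDK_ENDPOINT": "https://example.services.ai.azure.com",
    "AZURE_INFERENCE_SDK_KEY": "<key>",
    "MAX_TOKENS": 2000,
    "DEPLOYMENT_NAME": "gpt-4o",                                   # placeholder
    "SUBSTITUDE_1_DEPLOYMENT_NAME": "Llama-3.3-70B-Instruct",
    "SUBSTITUDE_2_DEPLOYMENT_NAME": "<second-fallback-deployment>",
}

# With substitudes=True, two AzureInference fallbacks are wired into
# AzureOpenAIInference; with the default False the behavior is unchanged.
service_provider, service = define_service_provider(
    "AZURE_OPEN_AI_INFERENCE", substitudes=True)
service.setup(config)
```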
pbesa/cognitive.py (continued)

```diff
@@ -268,8 +273,8 @@ class AugmentedGeneration(ABC):
         """
         return self.model
 
-    def load_model(self, provider, config, ai_service=None) -> None:
-        self.__service_provider, service = define_service_provider(provider, ai_service)
+    def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
+        self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
         service.setup(config)
         self.__ai_service = service
 
@@ -286,12 +291,15 @@ class AugmentedGeneration(ABC):
         # Set up model
         self.setup_world()
 
-    def command_derive(self, command, query) -> str | None:
+    def command_derive(self, command, query, max_tkns=2000) -> str | None:
         try:
             if command == "DATA_TYPE":
                 return celula_datos_identificables.derive(self.__ai_service, query)
             elif command == "GENERATE_DOCUMENT":
                 return celula_generar_documento.derive(self.__ai_service, query["template"], query["text"])
+            elif command == "EXPERTS":
+                retrieval = self.retrieval(query)
+                return celula_expertos.derive(self.__ai_service, query, retrieval, max_tkns)
             return None
         except Exception as e:
             traceback.print_exc()
```
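The `EXPERTS` branch can be read in isolation as below; `StubService` and the lambda standing in for `self.retrieval` are hypothetical, but the control flow mirrors the lines added to `command_derive`:

```python
from pbesa.celulas import celula_expertos

class StubService:
    """Hypothetical stand-in for the configured AIService."""
    def generate(self, work_memory, max_tokens=2000):
        return "Supersalud - Superintendencia Nacional de Salud - Cobertura Servicios"

def experts_branch(ai_service, retrieval_fn, query, max_tkns=2000):
    # Mirrors the new elif command == "EXPERTS" branch: fetch supporting
    # knowledge first, then let the expert cell classify the query.
    retrieval = retrieval_fn(query)
    return celula_expertos.derive(ai_service, query, retrieval, max_tkns)

print(experts_branch(StubService(),
                     lambda q: "Funciones de cada superintendencia...",
                     "Mi EPS no autoriza el procedimiento ordenado."))
```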
pbesa/cognitive.py (continued)

```diff
@@ -650,8 +658,8 @@ class Dialog(ABC):
         # Set dialog state
         self.__dfa['start'] = iniciadores
 
-    def load_model(self, provider, config, ai_service=None) -> None:
-        self.__service_provider, service = define_service_provider(provider, ai_service)
+    def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
+        self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
         service.setup(config)
         self.__ai_service = service
 
@@ -781,7 +789,10 @@ class Dialog(ABC):
 
     def recovery(self, session_id, query):
         try:
-
+            logging.info("\n\n\n")
+            logging.info(f"------------RECOVERY---------------")
+            logging.info("\n\n\n")
+            self.__work_memory = self.__system_work_memory.copy()
             prompt = RECOVERY_PROMPT % query
             temp_work_memory = [{"role": "system", "content": prompt}]
             res = self.__ai_service.generate(temp_work_memory, max_tokens=500)
@@ -795,7 +806,9 @@ class Dialog(ABC):
             self.notify(session_id, "STOP")
             return "Web", DialogState.START, self.RECOVERY_MSG, "Web"
         except Exception as e:
-
+            logging.error(f"Error en la recuperación: {e}")
+            msg = "Lo lamento, no puedo responder en este momento."
+            return "Web", DialogState.START, msg, "Web"
 
     def stage_one_classification(self, session_id, messages, attemps, query):
         """ Stage one classification """
@@ -880,7 +893,7 @@ class Dialog(ABC):
                     dicriminador = "consulta"
                 else:
                     dicriminador = "caso"
-                self.notify(session_id, f"
+                self.notify(session_id, f"primera fase se identifica: {dicriminador}.")
                 res = query
             else:
                 logging.info("[Stage-1]: Respuesta con ambiguedad")
@@ -1272,10 +1285,10 @@ class Dialog(ABC):
             res = self.__ai_service.generate(self.__work_memory, max_tokens=1000)
             res = self.get_text(res)
             logging.info(f"[Inferencia]:[Thought]:[DEEP]: {res}")
-            self.__work_memory.append({"role": "assistant", "content": res})
             # Check if res is empty
-            if not res or res == "":
+            if not res or res == "" or res == "ERROR":
                 return self.recovery(session['session_id'], query)
+            self.__work_memory.append({"role": "assistant", "content": res})
             new_dialog_state = node.performative
             if not node.is_terminal:
                 # Verifica recursion
```
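The `@@ -1272` hunk above is a guard-before-commit pattern: the assistant turn is appended to working memory only after it passes validation, so an empty or `"ERROR"` generation triggers recovery instead of polluting the dialog context. A minimal sketch of the same idea, with `recover` standing in for `Dialog.recovery`:

```python
def accept_response(work_memory, res, recover):
    # Reject empty or sentinel "ERROR" generations before remembering them.
    if not res or res == "ERROR":
        return recover()
    work_memory.append({"role": "assistant", "content": res})
    return res

memory = [{"role": "system", "content": "..."}]
print(accept_response(memory, "ERROR", lambda: "recovered"))  # -> recovered
print(memory)  # unchanged: the failed turn was never appended
```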
pbesa/cognitive.py (continued)

```diff
@@ -1371,8 +1384,8 @@ class SpecialDispatch():
         # Reference of ADM
         self.adm = Adm()
 
-    def load_model(self, provider, config, ai_service=None) -> None:
-        self.__service_provider, service = define_service_provider(provider, ai_service)
+    def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
+        self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
         service.setup(config)
         self.__ai_service = service
         # Setup options dictionary
@@ -1386,7 +1399,7 @@ class SpecialDispatch():
         role = agent.get_role()
         self.__options_dict[agent_id] = role.description
         # Log
-        logging.info(f"Agentes disponibles: {self.__options_dict.keys()}")
+        #logging.info(f"Agentes disponibles: {self.__options_dict.keys()}")
 
     def get_text(self, mensaje) -> str:
         if mensaje:
```
{pbesa-4.0.15 → pbesa-4.0.17}/pbesa/kernel/util.py

```diff
@@ -73,7 +73,8 @@ class APIClient:
                 url=f"{self.base_url}/{endpoint}",
                 headers=self.headers,
                 json=payload,
-                timeout=self.timeout
+                timeout=self.timeout,
+                verify=False
             )
             response.raise_for_status()
             print(f"POST request to {endpoint} succeeded: {response.status_code}")
```
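Assuming `APIClient.post` wraps `requests.post` (consistent with the `json=` and `raise_for_status()` usage above), `verify=False` disables TLS certificate verification for every request the client makes. When the motivation is an internal certificate authority rather than skipping verification, `verify` also accepts a CA-bundle path; a hypothetical sketch:

```python
import requests

# verify=False skips certificate checks entirely (urllib3 will warn);
# pointing verify at an internal CA bundle keeps TLS validation instead.
response = requests.post(
    "https://internal-host/api/endpoint",        # placeholder URL
    json={"ping": "pong"},
    timeout=10,
    verify="/etc/ssl/certs/internal-ca.pem",     # hypothetical CA bundle path
)
response.raise_for_status()
```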
pbesa-4.0.17/pbesa/models.py (new file; replaces the 349-line pbesa-4.0.15/pbesa/models.py, which was removed)

```diff
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+"""
+----------------------------------------------------------
+-------------------------- PBESA -------------------------
+----------------------------------------------------------
+
+@autor AKEN
+@version 4.0.0
+@date 08/08/24
+"""
+
+# --------------------------------------------------------
+# Define resources
+# --------------------------------------------------------
+
+import json
+import logging
+import traceback
+from openai import AzureOpenAI
+from abc import ABC, abstractmethod
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.inference import ChatCompletionsClient
+from azure.core.credentials import AzureKeyCredential
+from azure.core.exceptions import HttpResponseError
+import time
+from openai import AzureOpenAI, RateLimitError, APIStatusError
+
+
+# --------------------------------------------------------
+# Define classes
+# --------------------------------------------------------
+
+class AIService(ABC):
+    """Abstract class for AI services."""
+
+    def __init__(self) -> None:
+        self.model:any = None
+        self.model_conf:dict = None
+        self.substitude_1_model:any = None
+        self.substitude_2_model:any = None
+
+    @abstractmethod
+    def setup(self, config: dict) -> None:
+        """Method to setup the AI service."""
+        raise NotImplementedError("Method 'setup' must be implemented.")
+
+    @abstractmethod
+    def generate(self, work_memory, max_tokens=4096, temperature=0, top_p=0.9) -> str:
+        """Method to generate a response based on user input."""
+        raise NotImplementedError("Method 'setup' must be implemented.")
+
+class GPTService(AIService):
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def setup(self, config: dict, work_memory) -> None:
+        try:
+            self.model:any = config['model']
+            self.model_conf:dict = config
+            self.__work_memory:list = work_memory
+        except Exception as e:
+            raise Exception("Could not setup GPTService: check the configuration.")
+
+    def generate(self) -> str:
+        """ Generate method
+        :return: str
+        """
+        try:
+            # Genera texto con OpenAI.
+            self.model.api_key = self.model_conf['API_KEY']
+            engine = self.model_conf['OPENAI_ENGINE']
+            response = self.model.ChatCompletion.create(
+                model = engine,
+                messages = self.__work_memory
+            )
+            # Verifica si se obtuvo respuesta.
+            if response['choices'][0]['finish_reason'] == 'completed' or response['choices'][0]['finish_reason'] == 'stop':
+                res = response['choices'][0]['message']['content']
+                try:
+                    if not res or res == 'null' or res == 'N/A' or 'N/A' in res:
+                        #self.log.warning("OpenAI response not completed", extra={'log_data': {'gpt_response': response}})
+                        logging.info("OpenAI response not completed")
+                        return None
+                    #self.log.info("OpenAI response completed", extra={'log_data': {'gpt_response': response}})
+                    logging.info("OpenAI response completed")
+                    self.__work_memory.append({"role": "assistant", "content": res})
+                    return res
+                except:
+                    #self.log.warning("OpenAI response not completed", extra={'log_data': {'gpt_response': response}})
+                    logging.info("OpenAI response not completed")
+                    return None
+            else:
+                #self.log.warning("OpenAI response not completed", extra={'log_data': {'gpt_response': response}})
+                logging.info("OpenAI response not completed")
+                return None
+        except Exception as e:
+            trace_err = traceback.format_exc()
+            err = str(e) + " - " + trace_err
+            logging.info(err)
+            return None
+
+class AIFoundry(AIService):
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def setup(self, config: dict, work_memory) -> None:
+        self.model_conf:dict = config
+        self.__work_memory:list = work_memory
+        project = AIProjectClient.from_connection_string(
+            conn_str=config['PROJECT_CONNECTION_STRING'], credential=DefaultAzureCredential()
+        )
+        self.model:any = project.inference.get_chat_completions_client()
+
+    def generate(self) -> str:
+        response = self.model.complete(
+            model=self.model_conf['AIFOUNDRY_MODEL'],
+            messages=self.__work_memory,
+        )
+        return response.choices[0].message.content
+
+class AzureInference(AIService):
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.max_tokens = 2000
+        self.deployment = "Llama-3.3-70B-Instruct"
+
+    def setup(self, config: dict, substitude=None) -> None:
+        self.model_conf:dict = config
+        self.model:any = ChatCompletionsClient(
+            endpoint=config['AZURE_GENERAL_INFERENCE_SDK_ENDPOINT'],
+            credential=AzureKeyCredential(config['AZURE_INFERENCE_SDK_KEY'])
+        )
+        self.max_tokens = self.model_conf['MAX_TOKENS']
+        if substitude:
+            self.deployment = substitude
+        else:
+            self.deployment = self.model_conf['DEPLOYMENT_NAME']
+
+    def generate(self, work_memory, max_tokens=2000, temperature=0, top_p=0.9) -> str:
+        again = False
+        try:
+            response = self.model.complete(
+                messages= work_memory,
+                model= self.deployment,
+                max_tokens= self.max_tokens
+            )
+            res = response.choices[0].message.content
+            if not res or res == "" or res == "ERROR":
+                again = True
+            else:
+                return res
+        except Exception as e:
+            # Maneja otros errores
+            again = True
+            trace_err = traceback.format_exc()
+            err = str(e) + " - " + trace_err
+            logging.info(f"Error en la respuesta de Azure: {err}")
+        if again:
+            if self.model_conf['SUBSTITUDE_DEPLOYMENT_NAME'] == "Llama-3.3-70B-Instruct":
+                logging.info("\n\n\n")
+                logging.info("----------------------------------------")
+                logging.info("Sustitudo atiendendo Llama-3.3-70B-Instruct")
+                try:
+                    logging.info("\n\n\n.............................................")
+                    logging.info("\n%s", json.dumps(work_memory, indent=4))
+                    logging.info("........................................\n\n\n")
+                    response = self.model.complete(
+                        messages= work_memory,
+                        model =self.model_conf['SUBSTITUDE_DEPLOYMENT_NAME'],
+                        max_tokens=self.model_conf['MAX_TOKENS']
+                    )
+                    logging.info("----------------------------------------")
+                    logging.info("\n\n\n")
+                    return response.choices[0].message.content
+                except Exception as e2:
+                    trace_err2 = traceback.format_exc()
+                    err2 = str(e2) + " - " + trace_err2
+                    logging.info(f"Error en la respuesta de Azure: {err2}")
+                    logging.info("----------------------------------------")
+                    logging.info("\n\n\n")
+                    raise e2
+        logging.error("\n\n\n****************************************")
+        logging.error("No se pudo generar una respuesta válida.")
+        return ""
+
+class AzureOpenAIInference(AIService):
+
+    def __init__(self, substitude_1_model = None, substitude_2_model = None) -> None:
+        super().__init__()
+        self.substitude_1_model = substitude_1_model
+        self.substitude_2_model = substitude_2_model
+
+    def setup(self, config: dict) -> None:
+        self.model_conf:dict = config
+        self.model:any = AzureOpenAI(
+            api_version=config['API_VERSION'],
+            azure_endpoint=config['AZURE_OPEN_AI_INFERENCE_SDK_ENDPOINT'],
+            api_key=config['AZURE_INFERENCE_SDK_KEY'],
+            max_retries=0
+        )
+        self.wait_time = 0
+        self.total_tokens = 0
+        self.exception_time = None
+        self.main_model_enable = True
+        self.current_time = time.time()
+        if self.substitude_1_model is not None:
+            self.substitude_1_model.setup(config, config['SUBSTITUDE_1_DEPLOYMENT_NAME'])
+        if self.substitude_2_model is not None:
+            self.substitude_2_model.setup(config, config['SUBSTITUDE_2_DEPLOYMENT_NAME'])
+
+    def wait_strategy_for_rate_limit(self, exception):
+        self.wait_time = 0
+        self.main_model_enable = False
+        self.exception_time = time.time()
+        print(exception)
+        if isinstance(exception, (RateLimitError, APIStatusError)):
+            if hasattr(exception, 'response') and exception.response:
+                headers = exception.response.headers
+                retry_after_seconds = headers.get("Retry-After")
+                if retry_after_seconds:
+                    try:
+                        wait_time = int(retry_after_seconds)
+                        logging.info(f"Rate limit: Respetando header Retry-After: esperando {wait_time} segundos.")
+                        self.wait_time = wait_time
+                    except ValueError:
+                        logging.info(f"Rate limit: Retry-After header no es un entero ({retry_after_seconds}). Usando backoff exponencial.")
+                else: # No hay Retry-After, usar backoff exponencial
+                    logging.info("Rate limit: No se encontró header Retry-After. Usando backoff exponencial.")
+            else: # No hay objeto response, usar backoff exponencial
+                logging.info("Rate limit: No se encontró objeto response en la excepción. Usando backoff exponencial.")
+        if self.wait_time == 0:
+            logging.warning("Rate limit: No se especificó Retry-After. Usando backoff exponencial.")
+            # Si no se especifica Retry-After, usar backoff exponencial
+            self.wait_time = 2 ** (self.wait_time // 60)
+
+    def generate(self, work_memory, max_tokens=4096, temperature=0, top_p=0.9) -> str:
+        again = False
+        try:
+            if self.main_model_enable:
+                response = self.model.chat.completions.create(
+                    messages=work_memory,
+                    max_tokens=max_tokens,
+                    temperature=temperature,
+                    top_p=top_p,
+                    model=self.model_conf['DEPLOYMENT_NAME'],
+                )
+                if hasattr(response, 'usage') and response.usage is not None:
+                    logging.info("\n--- Uso de Tokens ---")
+                    logging.info(f"Tokens enviados (prompt): {response.usage.prompt_tokens}")
+                    logging.info(f"Tokens recibidos (completion): {response.usage.completion_tokens}")
+                    logging.info(f"Tokens totales: {response.usage.total_tokens}")
+                    self.total_tokens += response.usage.total_tokens
+                    current_t = time.time()
+                    elapsed_time = current_t - self.current_time
+                    logging.info(f"Tiempo transcurrido para la generación: {elapsed_time:.2f} segundos")
+                    # En minutos
+                    elapsed_time_minutes = elapsed_time / 60
+                    logging.info(f"Tiempo transcurrido para la generación: {elapsed_time_minutes:.2f} minutos")
+                    logging.info(f"Total de tokens generados hasta ahora: {self.total_tokens}")
+                    # Reinicia el contador si ha pasado más de 1 minuto
+                    if elapsed_time >= 60:
+                        logging.info("Reiniciando el contador de tiempo y tokens...")
+                        self.total_tokens = 0
+                        self.current_time = time.time()
+                        self.exception_time = None
+                    else:
+                        if self.total_tokens >= 40000:
+                            logging.info("Total de tokens alcanzado (40,000). Activando el modo de excepcion.")
+                            self.main_model_enable = False
+                            self.wait_time = 60 - elapsed_time + 5
+                            self.exception_time = time.time()
+                            logging.info(f"Esperando {self.wait_time} segundos antes de reintentar.")
+                    logging.info("---------------------\n")
+                else:
+                    logging.info("\n--- Uso de Tokens no disponible en la respuesta ---")
+                res = response.choices[0].message.content
+                if not res or res == "" or res == "ERROR":
+                    again = True
+                else:
+                    return response.choices[0].message.content
+        except Exception as e:
+            self.wait_strategy_for_rate_limit(e)
+        #----------------------------------
+        # Exception mode
+        #----------------------------------
+        if not self.main_model_enable or again:
+            again = False
+            # Si ha pasado más de 1 minuto desde la última excepción, reinicia el modelo principal
+            current_t = time.time()
+            elapsed_time = current_t - self.exception_time
+            logging.info(f"Esperando {self.wait_time} segundos antes de reintentar. Transcurridos: {elapsed_time:.2f} segundos")
+            if elapsed_time >= self.wait_time:
+                logging.info("Reiniciando el modelo principal después de 1 minuto.")
+                self.main_model_enable = True
+                self.current_time = time.time()
+                self.total_tokens = 0
+            # Si el modelo principal está deshabilitado, intenta con los modelos de sustitución
+            try:
+                logging.warning("Modelo principal en espera. Intentando con el modelo de sustitución-1...")
+                if self.substitude_1_model is None:
+                    raise ValueError("No se ha configurado un modelo de sustitución-1.")
+                return self.substitude_1_model.generate(work_memory)
+            except Exception as e:
+                try:
+                    logging.warning("Modelo principal en espera. Intentando con el modelo de sustitución-2...")
+                    if self.substitude_2_model is None:
+                        raise ValueError("No se ha configurado un modelo de sustitución-2.")
+                    return self.substitude_2_model.generate(work_memory)
+                except Exception as e2:
+                    trace_err = traceback.format_exc()
+                    err = str(e2) + " - " + trace_err
+                    logging.fatal(f"Error en la respuesta de Azure: {err}")
+                    raise e2
+
+class ServiceProvider:
+    _services = {}
+
+    @classmethod
+    def register(cls, name: str, service):
+        """Register a service with a unique name."""
+        cls._services[name] = service
+
+    @classmethod
+    def get(cls, name: str):
+        """Retrieve a registered service."""
+        service = cls._services.get(name)
+        if not service:
+            raise ValueError(f"Service '{name}' not found!")
+        return service
```
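`ServiceProvider` is a class-level registry, so a service registered once is visible to any consumer that asks for it by name. A small self-contained sketch (the `EchoService` is hypothetical):

```python
from pbesa.models import ServiceProvider

class EchoService:
    """Hypothetical AIService-like object used only for illustration."""
    def generate(self, work_memory, max_tokens=4096, temperature=0, top_p=0.9):
        return work_memory[-1]["content"]

# register/get are classmethods backed by a shared class-level dict.
ServiceProvider.register("ECHO", EchoService())
svc = ServiceProvider.get("ECHO")
print(svc.generate([{"role": "user", "content": "hola"}]))  # -> hola
# ServiceProvider.get("MISSING") raises ValueError("Service 'MISSING' not found!")
```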
{pbesa-4.0.15 → pbesa-4.0.17}/pbesa/social/collaborative_team.py

```diff
@@ -139,6 +139,12 @@ class DelegateAction(Action):
         """
         pass
 
+    def send_response(self, response:any) -> None:
+        """ Send response
+        @param response: Response
+        """
+        self.agent.get_gateway().put(response)
+
 # --------------------------------------------------------
 # Define ResponseAction component
 # --------------------------------------------------------
```
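A sketch of how a concrete delegate might use the new helper; the subclass, its method name, and its body are hypothetical, but `send_response` itself is the method added above, which pushes the result onto the agent's gateway queue:

```python
from pbesa.social.collaborative_team import DelegateAction

class SummaryDelegate(DelegateAction):
    """Hypothetical delegate; the overridden method name is assumed here,
    not taken from the diff."""

    def delegate(self, data) -> None:
        result = f"procesado: {data}"
        # New in 4.0.17: return the result through the agent's gateway
        # instead of calling self.agent.get_gateway().put(...) directly.
        self.send_response(result)
```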
{pbesa-4.0.15 → pbesa-4.0.17}/pbesa.egg-info/SOURCES.txt

```diff
@@ -74,6 +74,7 @@ pbesa/celulas/__init__.py
 pbesa/celulas/celula_casos.py
 pbesa/celulas/celula_consultas.py
 pbesa/celulas/celula_datos_identificables.py
+pbesa/celulas/celula_expertos.py
 pbesa/celulas/celula_generar_documento.py
 pbesa/celulas/celula_preguntas.py
 pbesa/celulas/celula_saludos.py
```