pbesa 4.0.15__tar.gz → 4.0.16__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pbesa-4.0.15 → pbesa-4.0.16}/PKG-INFO +1 -1
- pbesa-4.0.16/pbesa/celulas/celula_expertos.py +57 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/cognitive.py +20 -12
- pbesa-4.0.16/pbesa/models.py +320 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/collaborative_team.py +6 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa.egg-info/PKG-INFO +1 -1
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa.egg-info/SOURCES.txt +1 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/setup.py +1 -1
- pbesa-4.0.15/pbesa/models.py +0 -349
- {pbesa-4.0.15 → pbesa-4.0.16}/.gitignore +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/LICENSE +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/LICENSE.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/MANIFEST +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/README.md +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/db.sqlite3 +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__pycache__/pbesa.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__pycache__/settings.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__pycache__/urls.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/__pycache__/wsgi.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/asgi.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/pbesa.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/settings.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/urls.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/helloworld/wsgi.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/manage.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/admin.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/apps.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/models.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/urls.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/__pycache__/views.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/admin.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/apps.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/__pycache__/translatecontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/__pycache__/translatedelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/__pycache__/translateresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/translatecontroller.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/translatedelegate.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/controller/translateresponse.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/worker/__pycache__/translatetask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/worker/__pycache__/workeragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/worker/translatetask.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/mas/worker/workeragent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/migrations/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/migrations/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/models.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/tests.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/urls.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/django/helloworld/translate/views.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/countercontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/counterdelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/counterresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/translatecontroller.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/translatedelegate.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/__pycache__/translateresponse.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/countercontroller.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/counterdelegate.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/controller/counterresponse.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/__pycache__/counteragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/__pycache__/countertask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/__pycache__/translatetask.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/__pycache__/workeragent.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/counteragent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/mas/worker/countertask.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/remote_a.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/remote_b.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/examples/remote/remote_c.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_casos.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_consultas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_datos_identificables.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_generar_documento.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_preguntas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/celula_saludos.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/celulas/web.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/adapter.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/agent.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/io/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/io/system_file.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/io/tcp_server.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/__pycache__/__init__.cpython-36.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/__pycache__/__init__.cpython-37.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/__pycache__/__init__.cpython-38.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/__pycache__/__init__.cpython-39.pyc +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/res/conf.json +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/util.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/kernel/world.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/mas.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/adm_listener.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/adm_listener_handler.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/exceptions.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/remote_adm.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/remote/remote_adm_handler.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/__init__.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/delegator.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/delegator_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/dialog.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/dispatcher_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/prompts.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/selected_dispatcher_team.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/templates.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa/social/worker.py +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa.egg-info/dependency_links.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa.egg-info/requires.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/pbesa.egg-info/top_level.txt +0 -0
- {pbesa-4.0.15 → pbesa-4.0.16}/setup.cfg +0 -0
import logging

# System prompt: classifies a text into the Colombian government entity
# whose functions it matches. The single %s slot receives the expert
# knowledge (entity descriptions) injected at derivation time.
PROMPT = """
Eres un clasificador de textos que determina a qué entidad o dependencia gubernamental colombiana corresponde un texto dado, basándose en la descripción de sus funciones. Tu tarea es analizar el siguiente texto y determinar la clase más apropiada.

Responde únicamente con el nombre completo de la clase o con "NO_CORRESPONDE" si el texto no se ajusta a ninguna de las clases descritas. No agregues explicaciones ni comentarios adicionales.

%s

Opciones de respuesta posibles:

DIMAR - Dirección General Marítima
DNDA - Dirección Nacional de Derecho de Autor
ICA - Instituto Colombiano Agropecuario
SFC - Superintendencia Financiera de Colombia
SIC - Superintendencia de Industria y Comercio - Competencia Desleal
SIC - Superintendencia de Industria y Comercio - Propiedad Industrial
SIC - Superintendencia de Industria y Comercio - Protección al Consumidor
Supersalud - Superintendencia Nacional de Salud - Cobertura Servicios
Supersalud - Superintendencia Nacional de Salud - Conflictos Devoluciones
Supersalud - Superintendencia Nacional de Salud - Conflicto Entidades
Supersalud - Superintendencia Nacional de Salud - Libre Elección
Supersalud - Superintendencia Nacional de Salud - Multiafiliación
Supersalud - Superintendencia Nacional de Salud - Reconocimiento Económico
Supersociedades - Superintendencia de Sociedades - Liquidación Insolvencia
Supersociedades - Superintendencia de Sociedades - Reorganización Insolvencia
Supersociedades - Superintendencia de Sociedades - Validación Judicial
Supersociedades - Superintendencia de Sociedades - Mercantiles
NO_CORRESPONDE
"""


# Performs the model inference.
def derive(service, text, conocimiento, max_tkns=4096) -> any:
    """Classify *text* against the expert knowledge *conocimiento*.

    :param service: AI service exposing ``generate(messages, max_tokens)``.
    :param text: user text to classify.
    :param conocimiento: entity/function descriptions interpolated into PROMPT.
    :param max_tkns: token budget forwarded to the service.
    :return: the model's answer; the original *text* when the model answers
        empty; or ``None`` on error.
    """
    try:
        tmp_work_memory = []
        prompt = PROMPT % conocimiento
        # Exactly one system message followed by the user query.
        # (Bug fix: the system prompt was previously appended twice,
        # duplicating the system message and wasting tokens.)
        tmp_work_memory.append({"role": "system", "content": prompt})
        user_prompt = """
Texto: "%s"

Clasificación:
""" % text
        tmp_work_memory.append({"role": "user", "content": user_prompt})
        res = service.generate(tmp_work_memory, max_tkns)
        logging.info("\n")
        logging.info(f"[Celula][Expertos][Text]: {text}")
        logging.info(f"[Celula][Expertos][Respuesta]: {res}")
        logging.info("\n")
        if not res or res == "":
            # Fall back to echoing the input when the model returned nothing.
            res = text
            logging.warning(f"[Celula][Expertos]: No obtener una respuesta.")
        return res
    except Exception as e:
        logging.error(f"[Celula][Expertos]: Error al procesar: {text}")
        logging.error(e)
        return None
@@ -26,7 +26,7 @@ from pbesa.social.dialog import (
|
|
26
26
|
DialogState, imprimir_grafo, recorrer_interacciones, extraer_diccionario_nodos,
|
27
27
|
ActionNode, DeclarativeNode, GotoNode)
|
28
28
|
from .celulas import (celula_casos, celula_consultas, celula_saludos, celula_datos_identificables,
|
29
|
-
celula_generar_documento)
|
29
|
+
celula_generar_documento, celula_expertos)
|
30
30
|
from pbesa.social.prompts import ANALIZER_PROMPT, CLASSIFICATION_PROMPT, DERIVE_PROMPT, RECOVERY_PROMPT, ADAPT_PROMPT, SINTETIZER_PROMPT
|
31
31
|
|
32
32
|
# --------------------------------------------------------
|
@@ -109,7 +109,7 @@ class AgentMetadata():
|
|
109
109
|
# Define common functions
|
110
110
|
# --------------------------------------------------------
|
111
111
|
|
112
|
-
def define_service_provider(provider, ai_service=None) -> None:
|
112
|
+
def define_service_provider(provider, ai_service=None, substitudes = False) -> None:
|
113
113
|
# Define provider
|
114
114
|
service = None
|
115
115
|
service_provider = ServiceProvider()
|
@@ -126,7 +126,12 @@ def define_service_provider(provider, ai_service=None) -> None:
|
|
126
126
|
service = AzureInference()
|
127
127
|
service_provider.register("AZURE_INFERENCE", service)
|
128
128
|
elif "AZURE_OPEN_AI_INFERENCE" in provider:
|
129
|
-
|
129
|
+
substitude_1 = None
|
130
|
+
substitude_2 = None
|
131
|
+
if substitudes:
|
132
|
+
substitude_1 = AzureInference()
|
133
|
+
substitude_2 = AzureInference()
|
134
|
+
service = AzureOpenAIInference(substitude_1, substitude_2)
|
130
135
|
service_provider.register("AZURE_OPEN_AI_INFERENCE", service)
|
131
136
|
return service_provider, service
|
132
137
|
|
@@ -268,8 +273,8 @@ class AugmentedGeneration(ABC):
|
|
268
273
|
"""
|
269
274
|
return self.model
|
270
275
|
|
271
|
-
def load_model(self, provider, config, ai_service=None) -> None:
|
272
|
-
self.__service_provider, service = define_service_provider(provider, ai_service)
|
276
|
+
def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
|
277
|
+
self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
|
273
278
|
service.setup(config)
|
274
279
|
self.__ai_service = service
|
275
280
|
|
@@ -286,12 +291,15 @@ class AugmentedGeneration(ABC):
|
|
286
291
|
# Set up model
|
287
292
|
self.setup_world()
|
288
293
|
|
289
|
-
def command_derive(self, command, query) -> str | None:
|
294
|
+
def command_derive(self, command, query, max_tkns=2000) -> str | None:
|
290
295
|
try:
|
291
296
|
if command == "DATA_TYPE":
|
292
297
|
return celula_datos_identificables.derive(self.__ai_service, query)
|
293
298
|
elif command == "GENERATE_DOCUMENT":
|
294
299
|
return celula_generar_documento.derive(self.__ai_service, query["template"], query["text"])
|
300
|
+
elif command == "EXPERTS":
|
301
|
+
retrieval = self.retrieval(query)
|
302
|
+
return celula_expertos.derive(self.__ai_service, query, retrieval, max_tkns)
|
295
303
|
return None
|
296
304
|
except Exception as e:
|
297
305
|
traceback.print_exc()
|
@@ -650,8 +658,8 @@ class Dialog(ABC):
|
|
650
658
|
# Set dialog state
|
651
659
|
self.__dfa['start'] = iniciadores
|
652
660
|
|
653
|
-
def load_model(self, provider, config, ai_service=None) -> None:
|
654
|
-
self.__service_provider, service = define_service_provider(provider, ai_service)
|
661
|
+
def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
|
662
|
+
self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
|
655
663
|
service.setup(config)
|
656
664
|
self.__ai_service = service
|
657
665
|
|
@@ -880,7 +888,7 @@ class Dialog(ABC):
|
|
880
888
|
dicriminador = "consulta"
|
881
889
|
else:
|
882
890
|
dicriminador = "caso"
|
883
|
-
self.notify(session_id, f"
|
891
|
+
self.notify(session_id, f"primera fase se identifica: {dicriminador}.")
|
884
892
|
res = query
|
885
893
|
else:
|
886
894
|
logging.info("[Stage-1]: Respuesta con ambiguedad")
|
@@ -1371,8 +1379,8 @@ class SpecialDispatch():
|
|
1371
1379
|
# Reference of ADM
|
1372
1380
|
self.adm = Adm()
|
1373
1381
|
|
1374
|
-
def load_model(self, provider, config, ai_service=None) -> None:
|
1375
|
-
self.__service_provider, service = define_service_provider(provider, ai_service)
|
1382
|
+
def load_model(self, provider, config, ai_service=None, substitudes = False) -> None:
|
1383
|
+
self.__service_provider, service = define_service_provider(provider, ai_service, substitudes)
|
1376
1384
|
service.setup(config)
|
1377
1385
|
self.__ai_service = service
|
1378
1386
|
# Setup options dictionary
|
@@ -1386,7 +1394,7 @@ class SpecialDispatch():
|
|
1386
1394
|
role = agent.get_role()
|
1387
1395
|
self.__options_dict[agent_id] = role.description
|
1388
1396
|
# Log
|
1389
|
-
logging.info(f"Agentes disponibles: {self.__options_dict.keys()}")
|
1397
|
+
#logging.info(f"Agentes disponibles: {self.__options_dict.keys()}")
|
1390
1398
|
|
1391
1399
|
def get_text(self, mensaje) -> str:
|
1392
1400
|
if mensaje:
|
@@ -0,0 +1,320 @@
|
|
1
|
+
# -*- coding: utf-8 -*-
|
2
|
+
"""
|
3
|
+
----------------------------------------------------------
|
4
|
+
-------------------------- PBESA -------------------------
|
5
|
+
----------------------------------------------------------
|
6
|
+
|
7
|
+
@autor AKEN
|
8
|
+
@version 4.0.0
|
9
|
+
@date 08/08/24
|
10
|
+
"""
|
11
|
+
|
12
|
+
# --------------------------------------------------------
|
13
|
+
# Define resources
|
14
|
+
# --------------------------------------------------------
|
15
|
+
|
16
|
+
import json
|
17
|
+
import logging
|
18
|
+
import traceback
|
19
|
+
from openai import AzureOpenAI
|
20
|
+
from abc import ABC, abstractmethod
|
21
|
+
from azure.ai.projects import AIProjectClient
|
22
|
+
from azure.identity import DefaultAzureCredential
|
23
|
+
from azure.ai.inference import ChatCompletionsClient
|
24
|
+
from azure.core.credentials import AzureKeyCredential
|
25
|
+
from azure.core.exceptions import HttpResponseError
|
26
|
+
import time
|
27
|
+
from openai import AzureOpenAI, RateLimitError, APIStatusError
|
28
|
+
|
29
|
+
|
30
|
+
# --------------------------------------------------------
|
31
|
+
# Define classes
|
32
|
+
# --------------------------------------------------------
|
33
|
+
|
34
|
+
class AIService(ABC):
    """Abstract base class for AI completion services.

    Concrete subclasses bind a provider client in :meth:`setup` and
    produce text in :meth:`generate`. Instances also carry two optional
    substitute models used as fallbacks by some implementations.
    """

    def __init__(self) -> None:
        # Provider client/handle, bound by setup().
        self.model:any = None
        # Provider configuration dict, bound by setup().
        self.model_conf:dict = None
        # Optional fallback services (used e.g. on rate limits).
        self.substitude_1_model:any = None
        self.substitude_2_model:any = None

    @abstractmethod
    def setup(self, config: dict) -> None:
        """Method to setup the AI service."""
        raise NotImplementedError("Method 'setup' must be implemented.")

    @abstractmethod
    def generate(self, work_memory, max_tokens=4096, temperature=0, top_p=0.9) -> str:
        """Method to generate a response based on user input."""
        # Bug fix: the message previously named 'setup' instead of 'generate'.
        raise NotImplementedError("Method 'generate' must be implemented.")
class GPTService(AIService):
    """OpenAI-backed service using the legacy ``ChatCompletion`` API."""

    def __init__(self) -> None:
        super().__init__()

    def setup(self, config: dict, work_memory) -> None:
        """Bind the OpenAI module, its configuration and the shared work memory.

        :param config: must contain 'model', 'API_KEY' and 'OPENAI_ENGINE'.
        :param work_memory: mutable list of chat messages shared with the caller.
        :raises Exception: when a required configuration key is missing.
        """
        try:
            self.model:any = config['model']
            self.model_conf:dict = config
            self.__work_memory:list = work_memory
        except Exception as e:
            # Chain the original error so the root cause is not lost.
            raise Exception("Could not setup GPTService: check the configuration.") from e

    def generate(self) -> str:
        """Generate a completion from the accumulated work memory.

        :return: the assistant's text (also appended to the work memory),
            or None when the response is missing, empty or not completed.
        """
        try:
            # Generate text with OpenAI (legacy ChatCompletion endpoint).
            self.model.api_key = self.model_conf['API_KEY']
            engine = self.model_conf['OPENAI_ENGINE']
            response = self.model.ChatCompletion.create(
                model = engine,
                messages = self.__work_memory
            )
            # Check whether the model produced a complete answer.
            if response['choices'][0]['finish_reason'] in ('completed', 'stop'):
                res = response['choices'][0]['message']['content']
                try:
                    # 'N/A' in res also covers the exact res == 'N/A' case.
                    if not res or res == 'null' or 'N/A' in res:
                        logging.info("OpenAI response not completed")
                        return None
                    logging.info("OpenAI response completed")
                    self.__work_memory.append({"role": "assistant", "content": res})
                    return res
                except Exception:
                    # Bug fix: narrowed from a bare 'except:' so SystemExit and
                    # KeyboardInterrupt are no longer swallowed.
                    logging.info("OpenAI response not completed")
                    return None
            else:
                logging.info("OpenAI response not completed")
                return None
        except Exception as e:
            trace_err = traceback.format_exc()
            err = str(e) + " - " + trace_err
            logging.info(err)
            return None
class AIFoundry(AIService):
    """Azure AI Foundry chat-completions backed service."""

    def __init__(self) -> None:
        super().__init__()

    def setup(self, config: dict, work_memory) -> None:
        """Connect to the AI Foundry project and keep the shared work memory.

        :param config: must provide 'PROJECT_CONNECTION_STRING' and
            'AIFOUNDRY_MODEL'.
        :param work_memory: mutable list of chat messages shared with the caller.
        """
        self.model_conf:dict = config
        self.__work_memory:list = work_memory
        connection_string = config['PROJECT_CONNECTION_STRING']
        foundry_project = AIProjectClient.from_connection_string(
            conn_str=connection_string, credential=DefaultAzureCredential()
        )
        self.model:any = foundry_project.inference.get_chat_completions_client()

    def generate(self) -> str:
        """Run one chat completion over the stored work memory and
        return the first choice's text."""
        completion = self.model.complete(
            model=self.model_conf['AIFOUNDRY_MODEL'],
            messages=self.__work_memory,
        )
        first_choice = completion.choices[0]
        return first_choice.message.content
class AzureInference(AIService):
    """Azure AI Inference SDK backed service with a single configured
    substitute deployment used as a fallback on errors."""

    def __init__(self) -> None:
        super().__init__()
        # Defaults, overwritten by setup() from configuration.
        self.max_tokens = 2000
        self.deployment = "Llama-3.3-70B-Instruct"

    def setup(self, config: dict, substitude=None) -> None:
        """Create the ChatCompletionsClient and pick the deployment.

        :param config: must provide 'AZURE_GENERAL_INFERENCE_SDK_ENDPOINT',
            'AZURE_INFERENCE_SDK_KEY', 'MAX_TOKENS' and 'DEPLOYMENT_NAME'
            (plus 'SUBSTITUDE_DEPLOYMENT_NAME', read later by generate()).
        :param substitude: optional deployment name that overrides
            config['DEPLOYMENT_NAME'] — used when this instance acts as a
            fallback model for another service.
        """
        self.model_conf:dict = config
        self.model:any = ChatCompletionsClient(
            endpoint=config['AZURE_GENERAL_INFERENCE_SDK_ENDPOINT'],
            credential=AzureKeyCredential(config['AZURE_INFERENCE_SDK_KEY'])
        )
        self.max_tokens = self.model_conf['MAX_TOKENS']
        if substitude:
            self.deployment = substitude
        else:
            self.deployment = self.model_conf['DEPLOYMENT_NAME']

    def generate(self, work_memory, max_tokens=2000, temperature=0, top_p=0.9) -> str:
        """Run one completion; on failure, retry once with the substitute
        deployment when it is the expected Llama model.

        NOTE(review): the max_tokens/temperature/top_p parameters are accepted
        but not forwarded — the call uses self.max_tokens only; confirm
        whether that is intentional.

        :param work_memory: list of chat messages to complete.
        :return: the first choice's text, or implicitly None when the call
            fails and SUBSTITUDE_DEPLOYMENT_NAME is not "Llama-3.3-70B-Instruct"
            (the error is logged but swallowed in that branch).
        :raises Exception: the substitute's error when the fallback also fails.
        """
        try:
            response = self.model.complete(
                messages= work_memory,
                model= self.deployment,
                max_tokens= self.max_tokens
            )
            return response.choices[0].message.content
        except Exception as e:
            # Handle errors: log the full traceback, then decide on fallback.
            trace_err = traceback.format_exc()
            err = str(e) + " - " + trace_err
            logging.info(f"Error en la respuesta de Azure: {err}")
            # Fallback only when the configured substitute is the Llama model.
            if self.model_conf['SUBSTITUDE_DEPLOYMENT_NAME'] == "Llama-3.3-70B-Instruct":
                logging.info("\n\n\n")
                logging.info("----------------------------------------")
                logging.info("Entra a jugar el sustituto")
                try:
                    # Dump the work memory being retried for diagnosis.
                    logging.info("\n\n\n.............................................")
                    logging.info("\n%s", json.dumps(work_memory, indent=4))
                    logging.info("........................................\n\n\n")

                    # Retry the same messages on the substitute deployment.
                    response = self.model.complete(
                        messages= work_memory,
                        model =self.model_conf['SUBSTITUDE_DEPLOYMENT_NAME'],
                        max_tokens=self.model_conf['MAX_TOKENS']
                    )
                    logging.info("----------------------------------------")
                    logging.info("\n\n\n")

                    return response.choices[0].message.content
                except Exception as e2:
                    # Substitute also failed: log and propagate its error.
                    trace_err2 = traceback.format_exc()
                    err2 = str(e2) + " - " + trace_err2
                    logging.info(f"Error en la respuesta de Azure: {err2}")
                    logging.info("----------------------------------------")
                    logging.info("\n\n\n")

                    raise e2
class AzureOpenAIInference(AIService):
    """Azure OpenAI backed service with client-side rate limiting.

    Tracks token usage per rolling minute; when the budget is exhausted or
    the API raises, the main model is temporarily disabled and up to two
    substitute models are tried instead.
    """

    def __init__(self, substitude_1_model = None, substitude_2_model = None) -> None:
        # Substitutes are optional AIService instances (e.g. AzureInference)
        # used while the main model is in its cool-down window.
        super().__init__()
        self.substitude_1_model = substitude_1_model
        self.substitude_2_model = substitude_2_model

    def setup(self, config: dict) -> None:
        """Create the AzureOpenAI client and reset the rate-limit state.

        :param config: must provide 'API_VERSION',
            'AZURE_OPEN_AI_INFERENCE_SDK_ENDPOINT', 'AZURE_INFERENCE_SDK_KEY'
            and 'DEPLOYMENT_NAME'; plus 'SUBSTITUDE_1_DEPLOYMENT_NAME' /
            'SUBSTITUDE_2_DEPLOYMENT_NAME' when substitutes are configured.
        """
        self.model_conf:dict = config
        # max_retries=0: retrying is handled by this class, not the SDK.
        self.model:any = AzureOpenAI(
            api_version=config['API_VERSION'],
            azure_endpoint=config['AZURE_OPEN_AI_INFERENCE_SDK_ENDPOINT'],
            api_key=config['AZURE_INFERENCE_SDK_KEY'],
            max_retries=0
        )
        # Rate-limit bookkeeping: seconds to back off, tokens used in the
        # current window, time of last failure, and window start time.
        self.wait_time = 0
        self.total_tokens = 0
        self.exception_time = None
        self.main_model_enable = True
        self.current_time = time.time()
        if self.substitude_1_model is not None:
            self.substitude_1_model.setup(config, config['SUBSTITUDE_1_DEPLOYMENT_NAME'])
        if self.substitude_2_model is not None:
            self.substitude_2_model.setup(config, config['SUBSTITUDE_2_DEPLOYMENT_NAME'])

    def wait_strategy_for_rate_limit(self, exception):
        """Disable the main model and compute the back-off from *exception*.

        Honors the Retry-After header of RateLimitError / APIStatusError
        responses when present; otherwise falls back to an exponential
        formula.
        """
        self.wait_time = 0
        self.main_model_enable = False
        self.exception_time = time.time()
        print(exception)
        if isinstance(exception, (RateLimitError, APIStatusError)):
            if hasattr(exception, 'response') and exception.response:
                headers = exception.response.headers
                retry_after_seconds = headers.get("Retry-After")
                if retry_after_seconds:
                    try:
                        wait_time = int(retry_after_seconds)
                        logging.info(f"Rate limit: Respetando header Retry-After: esperando {wait_time} segundos.")
                        self.wait_time = wait_time
                    except ValueError:
                        logging.info(f"Rate limit: Retry-After header no es un entero ({retry_after_seconds}). Usando backoff exponencial.")
                else: # No Retry-After header: use exponential backoff
                    logging.info("Rate limit: No se encontró header Retry-After. Usando backoff exponencial.")
            else: # No response object: use exponential backoff
                logging.info("Rate limit: No se encontró objeto response en la excepción. Usando backoff exponencial.")
        if self.wait_time == 0:
            logging.warning("Rate limit: No se especificó Retry-After. Usando backoff exponencial.")
            # If no Retry-After was given, use exponential backoff.
            # NOTE(review): wait_time is 0 here, so 2 ** (0 // 60) always
            # yields 1 second — confirm the intended backoff growth.
            self.wait_time = 2 ** (self.wait_time // 60)

    def generate(self, work_memory, max_tokens=4096, temperature=0, top_p=0.9) -> str:
        """Run one chat completion, enforcing the client-side token budget.

        :param work_memory: list of chat messages to complete.
        :return: the main model's text; a substitute's text while the main
            model is disabled; or implicitly None when the main model is
            enabled but its call failed and no exception-mode run happens.
        :raises Exception: substitute-2's error when both fallbacks fail.
        """
        try:
            if self.main_model_enable:
                response = self.model.chat.completions.create(
                    messages=work_memory,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    model=self.model_conf['DEPLOYMENT_NAME'],
                )
                if hasattr(response, 'usage') and response.usage is not None:
                    logging.info("\n--- Uso de Tokens ---")
                    logging.info(f"Tokens enviados (prompt): {response.usage.prompt_tokens}")
                    logging.info(f"Tokens recibidos (completion): {response.usage.completion_tokens}")
                    logging.info(f"Tokens totales: {response.usage.total_tokens}")
                    # Accumulate tokens used in the current one-minute window.
                    self.total_tokens += response.usage.total_tokens
                    current_t = time.time()
                    elapsed_time = current_t - self.current_time
                    logging.info(f"Tiempo transcurrido para la generación: {elapsed_time:.2f} segundos")
                    # In minutes
                    elapsed_time_minutes = elapsed_time / 60
                    logging.info(f"Tiempo transcurrido para la generación: {elapsed_time_minutes:.2f} minutos")
                    logging.info(f"Total de tokens generados hasta ahora: {self.total_tokens}")
                    # Reset the counters once the one-minute window elapses.
                    if elapsed_time >= 60:
                        logging.info("Reiniciando el contador de tiempo y tokens...")
                        self.total_tokens = 0
                        self.current_time = time.time()
                        self.exception_time = None
                    else:
                        # Budget of 40,000 tokens/minute exceeded: disable the
                        # main model until the window (plus 5s slack) passes.
                        if self.total_tokens >= 40000:
                            logging.info("Total de tokens alcanzado (40,000). Activando el modo de excepcion.")
                            self.main_model_enable = False
                            self.wait_time = 60 - elapsed_time + 5
                            self.exception_time = time.time()
                            logging.info(f"Esperando {self.wait_time} segundos antes de reintentar.")
                    logging.info("---------------------\n")
                else:
                    logging.info("\n--- Uso de Tokens no disponible en la respuesta ---")
                return response.choices[0].message.content
        except Exception as e:
            # Any failure switches to exception mode and sets the back-off.
            self.wait_strategy_for_rate_limit(e)
        #----------------------------------
        # Exception mode
        #----------------------------------
        if not self.main_model_enable:
            # Re-enable the main model once the back-off window has passed
            # since the last failure (takes effect on the NEXT call; this
            # call still goes to a substitute below).
            current_t = time.time()
            elapsed_time = current_t - self.exception_time
            logging.info(f"Esperando {self.wait_time} segundos antes de reintentar. Transcurridos: {elapsed_time:.2f} segundos")
            if elapsed_time >= self.wait_time:
                logging.info("Reiniciando el modelo principal después de 1 minuto.")
                self.main_model_enable = True
                self.current_time = time.time()
                self.total_tokens = 0
            # Main model is on cool-down: try the substitute models in order.
            try:
                logging.warning("Modelo principal en espera. Intentando con el modelo de sustitución-1...")
                if self.substitude_1_model is None:
                    raise ValueError("No se ha configurado un modelo de sustitución-1.")
                return self.substitude_1_model.generate(work_memory)
            except Exception as e:
                try:
                    logging.warning("Modelo principal en espera. Intentando con el modelo de sustitución-2...")
                    if self.substitude_2_model is None:
                        raise ValueError("No se ha configurado un modelo de sustitución-2.")
                    return self.substitude_2_model.generate(work_memory)
                except Exception as e2:
                    # Both substitutes failed: log fatally and propagate.
                    trace_err = traceback.format_exc()
                    err = str(e2) + " - " + trace_err
                    logging.fatal(f"Error en la respuesta de Azure: {err}")
                    raise e2
class ServiceProvider:
    """Class-level registry mapping names to service instances.

    The registry dict is shared by all instances and accessed through
    classmethods, so the class itself acts as a simple service locator.
    """

    _services = {}

    @classmethod
    def register(cls, name: str, service):
        """Store *service* in the shared registry under *name*."""
        cls._services[name] = service

    @classmethod
    def get(cls, name: str):
        """Look up a previously registered service by *name*.

        Raises ValueError when the name is unknown (or the stored entry
        is falsy, matching the registry's truthiness check).
        """
        found = cls._services.get(name)
        if found:
            return found
        raise ValueError(f"Service '{name}' not found!")
@@ -139,6 +139,12 @@ class DelegateAction(Action):
|
|
139
139
|
"""
|
140
140
|
pass
|
141
141
|
|
142
|
+
def send_response(self, response:any) -> None:
|
143
|
+
""" Send response
|
144
|
+
@param response: Response
|
145
|
+
"""
|
146
|
+
self.agent.get_gateway().put(response)
|
147
|
+
|
142
148
|
# --------------------------------------------------------
|
143
149
|
# Define ResponseAction component
|
144
150
|
# --------------------------------------------------------
|
@@ -74,6 +74,7 @@ pbesa/celulas/__init__.py
|
|
74
74
|
pbesa/celulas/celula_casos.py
|
75
75
|
pbesa/celulas/celula_consultas.py
|
76
76
|
pbesa/celulas/celula_datos_identificables.py
|
77
|
+
pbesa/celulas/celula_expertos.py
|
77
78
|
pbesa/celulas/celula_generar_documento.py
|
78
79
|
pbesa/celulas/celula_preguntas.py
|
79
80
|
pbesa/celulas/celula_saludos.py
|