pyxecm 3.0.1__py3-none-any.whl → 3.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyxecm/avts.py +4 -4
- pyxecm/coreshare.py +14 -15
- pyxecm/helper/data.py +2 -1
- pyxecm/helper/web.py +11 -11
- pyxecm/helper/xml.py +41 -10
- pyxecm/otac.py +1 -1
- pyxecm/otawp.py +19 -19
- pyxecm/otca.py +870 -67
- pyxecm/otcs.py +1567 -280
- pyxecm/otds.py +332 -153
- pyxecm/otkd.py +4 -4
- pyxecm/otmm.py +1 -1
- pyxecm/otpd.py +246 -30
- {pyxecm-3.0.1.dist-info → pyxecm-3.1.0.dist-info}/METADATA +2 -1
- pyxecm-3.1.0.dist-info/RECORD +82 -0
- pyxecm_api/app.py +45 -35
- pyxecm_api/auth/functions.py +2 -2
- pyxecm_api/auth/router.py +2 -3
- pyxecm_api/common/functions.py +164 -12
- pyxecm_api/settings.py +0 -8
- pyxecm_api/terminal/router.py +1 -1
- pyxecm_api/v1_csai/router.py +33 -18
- pyxecm_customizer/browser_automation.py +98 -48
- pyxecm_customizer/customizer.py +43 -25
- pyxecm_customizer/guidewire.py +422 -8
- pyxecm_customizer/k8s.py +23 -27
- pyxecm_customizer/knowledge_graph.py +501 -20
- pyxecm_customizer/m365.py +45 -44
- pyxecm_customizer/payload.py +1684 -1159
- pyxecm_customizer/payload_list.py +3 -0
- pyxecm_customizer/salesforce.py +122 -79
- pyxecm_customizer/servicenow.py +27 -7
- pyxecm_customizer/settings.py +3 -1
- pyxecm_customizer/successfactors.py +2 -2
- pyxecm_customizer/translate.py +1 -1
- pyxecm-3.0.1.dist-info/RECORD +0 -96
- pyxecm_api/agents/__init__.py +0 -7
- pyxecm_api/agents/app.py +0 -13
- pyxecm_api/agents/functions.py +0 -119
- pyxecm_api/agents/models.py +0 -10
- pyxecm_api/agents/otcm_knowledgegraph/__init__.py +0 -1
- pyxecm_api/agents/otcm_knowledgegraph/functions.py +0 -85
- pyxecm_api/agents/otcm_knowledgegraph/models.py +0 -61
- pyxecm_api/agents/otcm_knowledgegraph/router.py +0 -74
- pyxecm_api/agents/otcm_user_agent/__init__.py +0 -1
- pyxecm_api/agents/otcm_user_agent/models.py +0 -20
- pyxecm_api/agents/otcm_user_agent/router.py +0 -65
- pyxecm_api/agents/otcm_workspace_agent/__init__.py +0 -1
- pyxecm_api/agents/otcm_workspace_agent/models.py +0 -40
- pyxecm_api/agents/otcm_workspace_agent/router.py +0 -200
- {pyxecm-3.0.1.dist-info → pyxecm-3.1.0.dist-info}/WHEEL +0 -0
- {pyxecm-3.0.1.dist-info → pyxecm-3.1.0.dist-info}/entry_points.txt +0 -0
pyxecm/otca.py
CHANGED
@@ -17,6 +17,7 @@ import logging
 import platform
 import sys
 import time
+import urllib.parse
 from importlib.metadata import version
 
 import requests
@@ -39,10 +40,19 @@ USER_AGENT = (
 
 REQUEST_HEADERS = {"User-Agent": USER_AGENT, "accept": "application/json", "Content-Type": "application/json"}
 
-REQUEST_TIMEOUT = 60
-REQUEST_RETRY_DELAY = 20
+REQUEST_TIMEOUT = 60.0
+REQUEST_RETRY_DELAY = 20.0
 REQUEST_MAX_RETRIES = 2
 
+DEFAULT_LLM_ATTRIBUTES = {
+    "temperature": 0.2,
+    "maxTokens": 8000,
+    "maxRetries": 2,
+    "topK": 40,
+    "topP": 0.8,
+    "cache": False,
+}
+
 default_logger = logging.getLogger(MODULE_NAME)
 
 try:
@@ -57,7 +67,10 @@ except ModuleNotFoundError:
 
 
 class OTCA:
-    """Interact with Content Aviator REST API."""
+    """Interact with Content Aviator / Aviator Studio REST API."""
+
+    AGENT = "ai"  # name of the agent role (used in messages)
+    USER = "user"  # name of the user role (used in messages)
 
     logger: logging.Logger = default_logger
 
@@ -66,6 +79,7 @@ class OTCA:
     _embed_token: str | None = None
     _chat_token: str | None = None
     _chat_token_hashed: str | None = None
+    _studio_token: str | None = None
     _node_dictionary: dict = {}
 
     def __init__(
@@ -97,14 +111,14 @@ class OTCA:
                 The Core Share Client ID.
             client_secret (str):
                 The Core Share client secret.
-            content_system (dict | None):
+            content_system (dict | None, optional):
                 The Content System configuration for the services which control the authentication.
-            otcs_object (OTCS):
+            otcs_object (OTCS | None, optional):
                 The OTCS object..
-            synonyms (list):
+            synonyms (list | None, optional):
                 List of synonyms that are used to generate a better response to the user.
-            inline_citation (bool):
-                Enable/Disable citations in the answers.
+            inline_citation (bool, optional):
+                Enable/Disable citations in the answers. Default is True.
             logger (logging.Logger, optional):
                 The logging object to use for all log messages. Defaults to default_logger.
 
@@ -117,19 +131,41 @@ class OTCA:
 
         otca_config = {}
 
+        otca_config["studioUrl"] = studio_url.rstrip("/")
+
+        # Health and Readiness endpoints:
+        otca_config["livenessUrl"] = otca_config["studioUrl"] + "/liveness"
+        otca_config["readinessUrl"] = otca_config["studioUrl"] + "/readiness"
+
+        # Chat endpoints:
         otca_config["chatUrl"] = chat_url + "/v1/chat"
-        otca_config["
-        otca_config["embedUrl"] = embed_url + "/v1/embeddings"
-        otca_config["studioGraphsUrl"] = studio_url + "/studio/v1/graphs"
-        otca_config["studioAgentsUrl"] = studio_url + "/studio/v1/agents"
-        otca_config["studioToolsUrl"] = studio_url + "/studio/v1/tools"
-        otca_config["studioRulesUrl"] = studio_url + "/studio/v1/rules"
-        otca_config["studioModelsUrl"] = studio_url + "/studio/v1/api/models"
+        otca_config["directChatUrl"] = chat_url + "/v1/direct-chat"
 
-
+        # RAG endpoints:
+        otca_config["semanticSearchUrl"] = studio_url.rstrip("/") + "/api/v1/semantic_search"
+        otca_config["contextUrl"] = studio_url.rstrip("/") + "/v1/context"
+        otca_config["embedUrl"] = embed_url + "/v1/embeddings"
+        otca_config["directEmbedUrl"] = embed_url + "/v1/direct-embed"
+
+        # Aviator Studio endpoints:
+        otca_config["studioAgentsUrl"] = otca_config["studioUrl"] + "/studio/v1/agents"
+        otca_config["studioToolsUrl"] = otca_config["studioUrl"] + "/studio/v1/tools"
+        otca_config["studioGraphsUrl"] = otca_config["studioUrl"] + "/studio/v1/graphs"
+        otca_config["studioRulesUrl"] = otca_config["studioUrl"] + "/studio/v1/rules"
+        otca_config["studioPromptsUrl"] = otca_config["studioUrl"] + "/studio/v1/prompts"
+        otca_config["studioLLModelsUrl"] = otca_config["studioUrl"] + "/studio/v1/llmmodels"
+        otca_config["studioImportUrl"] = otca_config["studioUrl"] + "/studio/v1/import"
+        otca_config["studioExportUrl"] = otca_config["studioUrl"] + "/studio/v1/export"
+
+        # Studio 'low-level' APIs:
+        otca_config["studioModelsUrl"] = otca_config["studioUrl"] + "/studio/v1/api/models"
+        otca_config["studioTenantsUrl"] = otca_config["studioModelsUrl"] + "/tenants"
+        otca_config["scratchPadUrl"] = otca_config["studioUrl"] + "/v1/scratchpad"
+
+        otca_config["contentSystem"] = content_system if content_system else {"chat": "xecm", "embed": "xecm"}
         otca_config["clientId"] = client_id
         otca_config["clientSecret"] = client_secret
-        otca_config["otdsUrl"] = otds_url
+        otca_config["otdsUrl"] = otds_url.rstrip("/")
 
         otca_config["synonyms"] = synonyms if synonyms else []
         otca_config["inlineCitation"] = inline_citation
@@ -222,7 +258,7 @@ class OTCA:
             request_header["Content-Type"] = content_type
 
         # Configure default Content System
-        content_system = self.config()["
+        content_system = self.config()["contentSystem"].get(service_type, "none")
 
         if content_system == "none":
             return request_header
@@ -233,13 +269,19 @@ class OTCA:
 
             if content_system == "xecm":
                 request_header["Authorization"] = "Bearer {}".format(self._chat_token_hashed)
-
+            if content_system == "otcm":
+                request_header["Authorization"] = "Bearer {}".format(self._chat_token)
+            elif content_system == "xecm-direct" | content_system == "otcm-direct":
                 request_header["otcsticket"] = self._chat_token
 
         elif service_type == "embed":
            if self._embed_token is None:
                self.authenticate_embed()
            request_header["Authorization"] = "Bearer {}".format(self._embed_token)
+        elif service_type == "studio":
+            if self._studio_token is None:
+                self.authenticate_studio()
+            request_header["Authorization"] = "Bearer {}".format(self._studio_token)
 
         return request_header
 
@@ -253,7 +295,7 @@ class OTCA:
         data: dict | list | None = None,
         json_data: dict | None = None,
         files: dict | None = None,
-        timeout:
+        timeout: float | None = REQUEST_TIMEOUT,
         show_error: bool = True,
         failure_message: str = "",
         success_message: str = "",
@@ -278,7 +320,7 @@ class OTCA:
                 Dictionary of {"name": file-tuple} for multipart encoding upload.
                 The file-tuple can be a 2-tuple ("filename", fileobj) or a 3-tuple
                 ("filename", fileobj, "content_type").
-            timeout (
+            timeout (float | None, optional):
                 Timeout for the request in seconds. Defaults to REQUEST_TIMEOUT.
             show_error (bool, optional):
                 Whether or not an error should be logged in case of a failed REST call.
@@ -367,7 +409,7 @@ class OTCA:
 
                 return None
             except requests.exceptions.Timeout:
-                if retries <= max_retries:
+                if retries <= max_retries or max_retries < 0:
                     self.logger.warning(
                         "Request timed out. Retrying in %s seconds...",
                         str(REQUEST_RETRY_DELAY),
@@ -386,16 +428,19 @@ class OTCA:
                 else:
                     return None
             except requests.exceptions.ConnectionError:
-                if retries <= max_retries:
+                if retries <= max_retries or max_retries < 0:
                     self.logger.warning(
-                        "Connection error
-
+                        "Connection error (%s)! Retrying in %d seconds... %d/%d",
+                        url,
+                        REQUEST_RETRY_DELAY,
+                        retries,
+                        max_retries,
                     )
                     retries += 1
                     time.sleep(REQUEST_RETRY_DELAY)  # Add a delay before retrying
                 else:
                     self.logger.error(
-                        "%s; connection error
+                        "%s; connection error!",
                         failure_message,
                     )
                     if retry_forever:
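The changed retry condition now treats a negative `max_retries` as "retry indefinitely". A minimal standalone sketch of that loop shape (the `fetch` callable is illustrative, not part of pyxecm):

```python
import time

REQUEST_RETRY_DELAY = 20.0


def fetch_with_retries(fetch, max_retries: int = -1):
    """Retry `fetch` on connection errors; a negative max_retries retries forever."""
    retries = 0
    while True:
        try:
            return fetch()
        except ConnectionError:
            if retries <= max_retries or max_retries < 0:
                retries += 1
                time.sleep(REQUEST_RETRY_DELAY)  # back off before the next attempt
            else:
                return None
```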
@@ -461,6 +506,43 @@ class OTCA:
 
     # end method definition
 
+    def exist_result_item(
+        self,
+        response: dict,
+        key: str,
+        value: str,
+    ) -> bool:
+        """Check existence of key / value pair in the response properties of an Aviator Studio call.
+
+        There are two types of Aviator Studio responses. The /studio/v1/api seems to deliver
+        plain lists while the /studio/v1 [non-api] seems to be be a dictionary with an embedded
+        "results" list. This method handles both cases.
+
+        Args:
+            response (dict):
+                REST response from an Aviator Studio REST call.
+            key (str):
+                The property name (key).
+            value (str):
+                The value to find in the item with the matching key.
+
+        Returns:
+            bool:
+                True if the value was found, False otherwise.
+
+        """
+
+        if not response:
+            return False
+
+        # The lower level model REST APIs return directly a list.
+        # We want to handle both cases:
+        results = response if isinstance(response, list) else response.get("results", [])
+
+        return any(key in result and result[key] == value for result in results)
+
+    # end method definition
+
     def authenticate_chat(self) -> str:
         """Authenticate for Chat service at Content Aviator / CSAI.
 
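To illustrate the two response shapes the new `exist_result_item()` normalizes, here is the same lookup logic as a standalone sketch with made-up payloads:

```python
def exist_result_item(response, key: str, value: str) -> bool:
    # Same logic as the method above, shown standalone:
    if not response:
        return False
    # /studio/v1/api endpoints return a plain list; /studio/v1 endpoints wrap it in "results":
    results = response if isinstance(response, list) else response.get("results", [])
    return any(key in result and result[key] == value for result in results)


assert exist_result_item([{"name": "qwen3:8b"}], "name", "qwen3:8b")               # plain list
assert exist_result_item({"results": [{"name": "qwen3:8b"}]}, "name", "qwen3:8b")  # embedded "results"
assert not exist_result_item({}, "name", "qwen3:8b")                               # empty response
```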
@@ -535,8 +617,38 @@ class OTCA:
 
     # end method definition
 
-    def
-
+    def authenticate_studio(self) -> str | None:
+        """Authenticate at Aviator Studio.
+
+        Returns:
+            str | None:
+                Authentication token or None if the authentication fails.
+
+        """
+
+        url = self.config()["otdsUrl"] + "/otdsws/oauth2/token"
+
+        data = {
+            "grant_type": "client_credentials",
+            "client_id": self.config()["clientId"],
+            "client_secret": self.config()["clientSecret"],
+        }
+
+        result = self.do_request(url=url, method="Post", data=data)
+
+        if result:
+            self._studio_token = result["access_token"]
+            return self._studio_token
+        else:
+            self.logger.error(
+                "Authentication failed with client ID -> '%s' against -> %s", self.config()["clientId"], url
+            )
+            return None
+
+    # end method definition
+
+    def chat(self, context: str | None, messages: list, where: list | None = None, service_type: str = "chat") -> dict:
+        """Process a chat interaction with Content Aviator.
 
         Chat requests are meant to be called as end-users. This should involve
         passing the end-user's access token via the Authorization HTTP header.
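For reference, `authenticate_studio()` wraps a standard OAuth2 client-credentials exchange against OTDS. A minimal sketch of the same call with plain `requests` (the URL and credentials are placeholders, not values from the package):

```python
import requests

OTDS_URL = "https://otds.example.com"  # placeholder

response = requests.post(
    url=OTDS_URL + "/otdsws/oauth2/token",
    data={  # form-encoded, matching the data= parameter used by do_request() above
        "grant_type": "client_credentials",
        "client_id": "my-client-id",          # placeholder
        "client_secret": "my-client-secret",  # placeholder
    },
    timeout=60.0,
)
response.raise_for_status()
studio_token = response.json()["access_token"]  # later sent as "Authorization: Bearer <token>"
```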
@@ -548,13 +660,17 @@ class OTCA:
                 (empty initially, returned by previous responses from POST /v1/chat).
             messages (list):
                 List of messages from conversation history.
+                TODO: document the message format. Especially which values the auther key can have.
             where (list):
                 Metadata name/value pairs for the query.
                 Could be used to specify workspaces, documents, or other criteria in the future.
                 Values need to match those passed as metadata to the embeddings API.
+            service_type (str, optional):
+                Determines if Aviator Studio, OTCM Chat or Embedding API is used for the Authentication header.
 
         Returns:
-            dict:
+            dict:
+                Conversation status
 
         Example:
             {
@@ -634,16 +750,18 @@ class OTCA:
         """
 
         request_url = self.config()["chatUrl"]
-        request_header = self.request_header()
+        request_header = self.request_header(service_type=service_type)
 
         chat_data = {
             "context": context,
             "messages": messages,
-            "where": where,
             # "synonyms": self.config()["synonyms"],
             # "inlineCitation": self.config()["inlineCitation"],
         }
 
+        if where:
+            chat_data["where"] = where
+
         return self.do_request(
             url=request_url,
             method="POST",
|
|
|
655
773
|
|
|
656
774
|
# end method definition
|
|
657
775
|
|
|
658
|
-
def
|
|
776
|
+
def context(
|
|
659
777
|
self, query: str, document_ids: list, workspace_ids: list, threshold: float = 0.5, num_results: int = 10
|
|
660
778
|
) -> dict:
|
|
661
|
-
"""
|
|
779
|
+
"""Get semantic context for a given query string.
|
|
662
780
|
|
|
663
781
|
Search requests are meant to be called as end-users. This should involve
|
|
664
782
|
passing the end-user's access token via the Authorization HTTP header.
|
|
@@ -707,12 +825,12 @@ class OTCA:
         """
 
         # Validations:
-        if not workspace_ids and not document_ids:
-
-
+        # if not workspace_ids and not document_ids:
+        #     self.logger.error("Either workspace ID(s) or document ID(s) need to be provided!")
+        #     return None
 
-        request_url = self.config()["
-        request_header = self.request_header()
+        request_url = self.config()["contextUrl"]
+        request_header = self.request_header(service_type="studio")
 
         search_data = {
             "query": query,
|
|
|
721
839
|
"metadata": [],
|
|
722
840
|
}
|
|
723
841
|
|
|
724
|
-
for document_id in document_ids:
|
|
842
|
+
for document_id in document_ids or []:
|
|
725
843
|
search_data["metadata"].append({"documentID": str(document_id)})
|
|
726
|
-
for workspace_id in workspace_ids:
|
|
844
|
+
for workspace_id in workspace_ids or []:
|
|
727
845
|
search_data["metadata"].append({"workspaceID": str(workspace_id)})
|
|
728
846
|
|
|
729
847
|
return self.do_request(
|
|
730
848
|
url=request_url,
|
|
731
849
|
method="POST",
|
|
732
850
|
headers=request_header,
|
|
733
|
-
|
|
851
|
+
json_data=search_data,
|
|
734
852
|
timeout=None,
|
|
735
|
-
failure_message="Failed to to do a semantic search with query -> '{}'".format(query),
|
|
853
|
+
failure_message="Failed to to do a semantic search with query -> '{}' !".format(query),
|
|
736
854
|
)
|
|
737
855
|
|
|
738
856
|
# end method definition
|
|
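A usage sketch for the renamed `context()` method (the IDs are placeholders). Because the validation above is now commented out and the loops iterate over `document_ids or []`, either ID list may be empty:

```python
# Assumes an authenticated OTCA instance `otca`:
hits = otca.context(
    query="fridge temperature recommendations",
    document_ids=[123456],  # each ID becomes {"documentID": "123456"} metadata
    workspace_ids=[],       # may now be empty
    threshold=0.5,
    num_results=10,
)
```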
@@ -744,7 +862,7 @@ class OTCA:
         document_id: int | None = None,
         workspace_id: int | None = None,
         additional_metadata: dict | None = None,
-    ) -> dict:
+    ) -> dict | None:
         """Embed a given content.
 
         Requests are meant to be called as a service user. This would involve passing a service user's access token
@@ -753,17 +871,17 @@ class OTCA:
         Args:
             content (str | None):
                 Content to be embedded. This is a document chunk. Can be empty for "delete" operations.
-            operation (str):
+            operation (str, optional):
                 This can be either "add", "update" or "delete".
-            document_id (int):
+            document_id (int | None, optional):
                 The ID of the document the content originates from. This becmes metadata in the vector store.
-            workspace_id (int):
+            workspace_id (int | None, optional):
                 The ID of the workspace the content originates from. This becomes metadata in the vector store.
-            additional_metadata (dict | None):
+            additional_metadata (dict | None, optional):
                 Dictionary with additional metadata.
 
         Returns:
-            dict:
+            dict | None:
                 REST API response or None in case of an error.
 
         """
@@ -804,11 +922,61 @@ class OTCA:
 
     # end method definition
 
+    def direct_embed(
+        self,
+        content: list[str] | None = None,
+        options: dict | None = None,
+    ) -> dict | None:
+        """Direct embed a given a list of strings. This is an Aviator Studio endpoint.
+
+        Args:
+            content (list[str] | None):
+                Content to be embedded. This is a list of strings.
+            options (dict | None):
+                Optional parameters. Supported parameters (keys):
+                * embeddingType (str) - e.g. "openai"
+                * model (str) - e.g. "text-embedding-ada-002"
+                * baseUrl (str) - e.g. "https://api.openai.com/v1"
+
+        Returns:
+            dict | None:
+                REST API response or None in case of an error.
+
+        Example:
+            {
+                'vectors': [
+                    [-0.04728065803647041, -0.006598987616598606, ...],
+                    [...]
+                ]
+            }
+
+        """
+
+        request_url = self.config()["directEmbedUrl"]
+        request_header = self.request_header(service_type="studio")
+
+        embed_data = {
+            "content": content,
+        }
+        if options:
+            embed_data["options"] = options
+
+        return self.do_request(
+            url=request_url,
+            method="POST",
+            headers=request_header,
+            json_data=embed_data,
+            timeout=None,
+            failure_message="Failed to embed content",
+        )
+
+    # end method definition
+
 
     def get_graphs(self) -> list | None:
         """Get all graphs.
 
         Returns:
-            list:
+            list | None:
                 A list of all graphs.
 
         Example:
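A usage sketch for `direct_embed()`; the option values below are the examples from the docstring, not required settings:

```python
# Assumes an authenticated OTCA instance `otca`:
result = otca.direct_embed(
    content=["first chunk of text", "second chunk of text"],
    options={
        "embeddingType": "openai",
        "model": "text-embedding-ada-002",
        "baseUrl": "https://api.openai.com/v1",
    },
)
# Expected shape per the docstring: {"vectors": [[...], [...]]} - one vector per input string.
```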
@@ -984,6 +1152,10 @@ class OTCA:
     def get_graph_nodes_iterator(self, graph_id: str) -> iter:
         """Get an iterator object that can be used to traverse graph nodes.
 
+        Args:
+            graph_id (str):
+                The ID of the Graph to retrieve the nodes for.
+
         Returns:
             iter:
                 A generator yielding one node per iteration.
@@ -1001,12 +1173,14 @@ class OTCA:
 
     # end method definition
 
-    def get_graph_nodes_by_name(self, name: str) -> list | None:
+    def get_graph_nodes_by_name(self, name: str, retry_forever: bool = False) -> list | None:
         """Get all nodes of a graph by name.
 
         Args:
             name (str):
                 The Name of the Graph to retrieve the nodes for.
+            retry_forever (bool, optional):
+                Whether to wait forever without timeout. Defaults to False.
 
         Returns:
             list | None:
@@ -1035,6 +1209,7 @@ class OTCA:
             timeout=None,
             show_error=True,
             failure_message="Failed get list of graphs!",
+            retry_forever=retry_forever,
         )
 
         if response is None:
@@ -1095,6 +1270,10 @@ class OTCA:
     def get_graph_edges_iterator(self, graph_id: str) -> iter:
         """Get an iterator object that can be used to traverse graph edges.
 
+        Args:
+            graph_id (str):
+                The ID of the Graph to retrieve the nodes for.
+
         Returns:
             iter:
                 A generator yielding one edge per iteration.
@@ -1120,7 +1299,8 @@ class OTCA:
             The ID of the graph.
 
         Returns:
-            str:
+            str:
+                Filename of the generated html file
 
         """
 
@@ -1192,6 +1372,561 @@ class OTCA:
 
     # end method definition
 
+    def import_configuration(self) -> bool:
+        """Import Aviator Studio default configuration.
+
+        Returns:
+            bool:
+                True = success, False = error.
+
+        """
+
+        request_url = self.config()["studioImportUrl"]
+        request_header = self.request_header(service_type="studio")
+
+        response = self.do_request(
+            url=request_url,
+            method="POST",
+            headers=request_header,
+            timeout=None,
+            show_error=True,
+            parse_request_response=False,
+            failure_message="Failed to load default Aviator Studio configuration!",
+        )
+
+        if not response or response.text != "Accepted":
+            self.logger.error("Failed to import Aviator Studio configuration!")
+            return False
+
+        self.logger.info("Successfully imported Aviator Studio configuration.")
+
+        return True
+
+    # end method definition
+
+    def export_configuration(self, show_ids: bool = False) -> dict | None:
+        """Export the current Aviator Studio configuration.
+
+        Args:
+            show_ids(bool, optional):
+                Determines if the ids of the database records will included in the export.
+
+        Returns:
+            dict | None:
+                List of tenants or None in case the request failed.
+
+        Example:
+            {
+                'default': {
+                    'id': '8302ca78-a6e1-416d-a93c-39aab189d943',
+                    'graphs': {
+                        'supervisor': {
+                            'id': 'abc7436a-33bf-4775-81f6-916961dbb9a0',
+                            'nodes': {...},
+                            'edges': [...]
+                        },
+                        'breakdown': {
+                            'id': 'ea748d81-554f-4638-9789-fd905c8e680f',
+                            'nodes': {...},
+                            'edges': [...]
+                        },
+                        'root': {
+                            'id': 'faf54d3f-b6d7-4954-b222-12f99fd9eb51',
+                            'nodes': {...},
+                            'edges': [...]
+                        },
+                        'answer': {
+                            'id': 'eb563724-4fae-4c82-b24b-955ba57f827c',
+                            'nodes': {...},
+                            'edges': [...]
+                        },
+                        'directChat': {
+                            'id': '702176fa-1701-43d4-84eb-d7628f1f29f7',
+                            'nodes': {...},
+                            'edges': [...]
+                        }
+                    },
+                    'prompts': {
+                        'cat_prompt': {
+                            'id': '3c96c5e3-dfa2-4aa8-9ce3-2080e0726241',
+                            'type': 'system',
+                            'template': 'Your name is Cat Aviator and you are an AI Assitant that answers questions and always ends answers with jokes about cats.',
+                            'description': 'This is a Cat prompt',
+                            'attributes': {},
+                            'overrides': [...]
+                        },
+                        'breakdown_system': {
+                            'id': 'db797917-4657-48a8-bcf3-fb4a3cd9a0d3',
+                            'type': 'system',
+                            'template': "Given a user message, break it down into separate messages. Guidelines: ..."
+                        },
+                        'chart_prompt': {
+                            'id': 'fa9ff09f-6294-4265-8971-75324024b9b5',
+                            'type': 'system',
+                            'template': 'You are Aviator, an expert in producing data visualizations using Vega-Lite. Your primary task is ...',
+                        },
+                        'agent_route_branch_query': {
+                            'id': '3a117045-191d-4603-84e7-4ee6b0ba7bb1',
+                            'type': 'message',
+                            'template': 'Given the conversation above, pick the right agent to perform the task. Select one of: {options}'
+                        },
+                        'general_system': {
+                            'id': '8f499e25-d07a-4fc0-bb9c-b5392825f7c8',
+                            'type': 'system',
+                            'template': "Your name is Aviator and you are a friendly chatbot assisting users with their queries ...',
+                        },
+                        'breakdown_message': {
+                            'id': 'c2498919-9cba-44f4-aecc-add09a6e94ad',
+                            'type': 'message',
+                            'template': 'Remember, only respond with a JSON object. E.g. {{"input": ["message1", "message2"]}}'
+                        },
+                        'summarize': {
+                            'id': '4fe7d77d-a28d-489f-83c8-fa514745b8d0',
+                            'type': 'message',
+                            'template': 'The CONTEXT contains text of tool calls, arguments and their responses in the format...',
+                        },
+                        'email_system': {
+                            'id': '0e8e8eaf-dcce-4b35-b0ae-898bd1ba662a',
+                            'type': 'system',
+                            'template': 'Your name is Aviator and you are a friendly chatbot assisting customers ...',
+                        },
+                        'llm_compiler_system': {
+                            'id': 'd0ed1d43-b212-4025-bfff-021d43970b93',
+                            'type': 'system',
+                            'template': 'Given a user query, create a plan to solve it ...',
+                            'attributes': {...}
+                        },
+                        'compare_documents_message': {
+                            'id': 'ccc6b435-f24b-4396-a196-6cd771f486c5',
+                            'type': 'message',
+                            'template': 'You are tasked with a comparative analysis of the documents...',
+                        },
+                        'agent_route_branch_system': {
+                            'id': '55a573dc-9e83-4901-88b9-f81d18c35ffb',
+                            'type': 'system',
+                            'template': 'Your job is to decide which agent to run based on the information provided to you. ...',
+                        },
+                        'check_answer_prompt': {},
+                        'validator_branch_system': {},
+                        'search_query_system': {},
+                        'search_query_message': {},
+                        'general_message': {},
+                        'cite_references': {},
+                        ...
+                    'classes': {...},
+                    'rules': {...},
+                    'llmModels': {
+                        'qwen3:8b': {
+                            'id': 'abbbddf4-2850-4fbb-9b49-b7354b348785',
+                            'family': 'qwen3',
+                            'version': 'qwen3:8b',
+                            'attributes': {
+                                'topK': 40,
+                                'topP': 0.8,
+                                'cache': False,
+                                'baseUrl': 'http://localhost:11434',
+                                'maxTokens': 8000,
+                                'maxRetries': 2,
+                                'temperature': 0.2,
+                                'llmIntegration': 'ollama'
+                            }
+                        }
+                    }
+                },
+                ...
+            }
+
+        """
+
+        query = {}
+        if show_ids:
+            query["showIds"] = "true" if show_ids else "false"
+
+        if query:
+            encoded_query = urllib.parse.urlencode(query=query, doseq=True)
+            request_url = self.config()["studioExportUrl"] + "?{}".format(encoded_query)
+        else:
+            request_url = self.config()["studioExportUrl"]
+
+        request_header = self.request_header(service_type="studio")
+
+        response = self.do_request(
+            url=request_url,
+            method="GET",
+            headers=request_header,
+            timeout=None,
+            show_error=True,
+            parse_request_response=True,
+            failure_message="Failed to export Aviator Studio configuration!",
+        )
+
+        return response
+
+    # end method definition
+
+    def get_scratchpad(self, chat_id: str) -> dict | None:
+        """Get the current scratchpad content.
+
+        Args:
+            chat_id (str):
+                The chat ID.
+
+        Returns:
+            dict | None:
+                Scratchpad content or None in case of an error.
+
+        Example:
+            {
+                'id': 'default',
+                'content': 'This is some scratchpad content.'
+            }
+
+        """
+
+        request_url = self.config()["scratchPadUrl"] + "/" + str(chat_id)
+        request_header = self.request_header(service_type="studio")
+
+        response = self.do_request(
+            url=request_url,
+            method="GET",
+            headers=request_header,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to get scratchpad content!",
+        )
+
+        return response
+
+    # end method definition
+
+    def get_tenants(self) -> list | None:
+        """Get list of Aviator Studio tenants.
+
+        Returns:
+            dict | None:
+                List of tenants or None in case the request failed.
+
+        Example:
+            [
+                {
+                    'id': 'edfb5af5-eb82-4867-bbea-fb7e3cba74f5',
+                    'externalId': 'default',
+                    'createdAt': '2025-08-29T22:59:26.579Z',
+                    'updatedAt': '2025-08-29T22:59:26.579Z'
+                }
+            ]
+
+        """
+
+        request_url = self.config()["studioTenantsUrl"]
+        request_header = self.request_header(service_type="studio")
+
+        response = self.do_request(
+            url=request_url,
+            method="GET",
+            headers=request_header,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to get list of tenants!",
+        )
+
+        if response is None:
+            return None
+
+        return response.get("results", [])
+
+    # end method definition
+
+    def get_llms(self, attributes: str | None = None) -> dict | None:
+        """Get a list of configured LLMs in Aviator Studio.
+
+        Args:
+            attributes (str | None, optional):
+                A comma-separated list of attribute fields (in a string).
+                The default is None. In this case all fields are returned.
+                Example: "name,id,tenantId,family,version,attributes"
+
+        Returns:
+            dict | None:
+                List of tenants or None in case the request failed.
+
+        Example:
+            {
+                'results': [
+                    {
+                        'id': 'abbbddf4-2850-4fbb-9b49-b7354b348785',
+                        'tenantId': '8302ca78-a6e1-416d-a93c-39aab189d943',
+                        'family': 'qwen3',
+                        'version': 'qwen3:8b',
+                        'name': 'qwen3:8b',
+                        'attributes': {
+                            'topK': 40,
+                            'topP': 0.8,
+                            'cache': False,
+                            'baseUrl': 'http://localhost:11434',
+                            'maxTokens': 8000,
+                            'maxRetries': 2,
+                            'temperature': 0.2,
+                            'llmIntegration': 'ollama'
+                        },
+                        'createdAt': '2025-08-30T15:30:03.727Z',
+                        'updatedAt': '2025-08-30T15:30:03.727Z',
+                        'status': 0
+                    },
+                    ...
+                ],
+                _links': {
+                    'self': {'href': '/'}
+                }
+            }
+
+        """
+
+        query = {}
+        if attributes:
+            query["attributes"] = attributes
+
+        if query:
+            encoded_query = urllib.parse.urlencode(query=query, doseq=True)
+            request_url = self.config()["studioLLModelsUrl"] + "?{}".format(encoded_query)
+        else:
+            request_url = self.config()["studioLLModelsUrl"]
+
+        request_header = self.request_header(service_type="studio")
+
+        response = self.do_request(
+            url=request_url,
+            method="GET",
+            headers=request_header,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to get configured LLMs!",
+        )
+
+        return response
+
+    # end method definition
+
+    def add_llm(
+        self,
+        name: str,
+        family: str,
+        version: str,
+        tenant_id: str,
+        status: int = 0,
+        attributes: dict | None = None,
+        llm_integration: str = "",
+        base_url: str = "",
+    ) -> dict | None:
+        """Add an LLM to Aviator Studio.
+
+        Args:
+            name (str):
+                The name of the model, e.g. ""gemini-2.5-flash-001".
+            family (str):
+                The model family name, e.g. "gemini".
+            version (str):
+                The model version (normally the same as name)
+            tenant_id (str):
+                The tenant ID. Should be retrieved with get_tenants() before.
+            status (int, optional):
+                0 = enabled
+                1 = disabled
+                2 = deleted
+            attributes (dict | None, optional):
+                The LLM attributes.
+                * temperature (float)
+                * maxTokens (int)
+                * maxRetries (int)
+                * topK (int)
+                * topP (float)
+                * cache (bool)
+                * llmIntegration (str)
+            llm_integration (str, optional):
+                Name of the LLM integration
+                * "vertex" (for Google)
+                * "ollama" (for Ollama hosted models)
+                * "localai" (for other locally running models)
+                * "bedrock" (AWS)
+                * "azure" (Microsoft)
+            base_url (str, optional):
+                Not required for Gemini. Should be "http://localhost:11434" for Ollama running locally.
+
+        Returns:
+            dict | None:
+                List of tenants or None in case the request failed.
+
+        Example:
+            {
+                'id': 'abbbddf4-2850-4fbb-9b49-b7354b348785',
+                'name': 'qwen3:8b',
+                'family': 'qwen3',
+                'version': 'qwen3:8b',
+                'tenantId': '8302ca78-a6e1-416d-a93c-39aab189d943',
+                'status': 0,
+                'attributes': {
+                    'topK': 40,
+                    'topP': 0.8,
+                    'cache': False,
+                    'baseUrl': 'http://localhost:11434',
+                    'maxTokens': 8000,
+                    'maxRetries': 2,
+                    'temperature': 0.2,
+                    'llmIntegration': 'ollama'
+                },
+                'updatedAt': '2025-08-30T15:30:03.727Z',
+                'createdAt': '2025-08-30T15:30:03.727Z'
+            }
+
+        """
+
+        if attributes is None:
+            attributes = DEFAULT_LLM_ATTRIBUTES
+
+        if llm_integration:
+            attributes["llmIntegration"] = llm_integration
+        if base_url:
+            attributes["baseUrl"] = base_url
+
+        request_url = self.config()["studioLLModelsUrl"]
+        request_header = self.request_header(service_type="studio")
+        request_data = {
+            "name": name,
+            "family": family,
+            "version": version,
+            "tenantId": tenant_id,
+            "status": status,
+            "attributes": attributes,
+        }
+
+        response = self.do_request(
+            url=request_url,
+            method="POST",
+            headers=request_header,
+            json_data=request_data,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to add LLM -> '{}' to tenant ID -> '{}'!".format(name, tenant_id),
+        )
+
+        return response
+
+    # end method definition
+
+    def add_prompt(
+        self,
+        name: str,
+        template: str,
+        description: str,
+        llm_model: str,
+        attributes: dict | None = None,
+    ) -> dict | None:
+        """Add a prompt for a specific LLM.
+
+        Args:
+            name (str):
+                A given name fpor the prompt.
+            template (str):
+                The actual prompt string.
+            description (str):
+                An arbitrary desciption of the prompt.
+            llm_model (str):
+                The name of the LLM that has been registered by calling add_llm().
+            attributes (dict | None, optional):
+                * "type": the type of the prompt, e.g. "system"
+
+        Returns:
+            dict | None:
+                The data of the created prompt. This includes the prompt ID and the prompt version.
+
+        Example:
+            {
+                'id': '9e491456-3b72-4fec-8e51-3af2b4f036fb',
+                'name': 'cat_prompt',
+                'template': 'Your name is Cat Aviator and you are an AI Assitant that answers questions and always ends answers with jokes about cats.',
+                'description': 'This is a Cat prompt',
+                'attributes': {'type': 'system'},
+                'llmModel': 'qwen3:8b',
+                'version': 1,
+                'promptId': '3c96c5e3-dfa2-4aa8-9ce3-2080e0726241'
+            }
+
+        """
+
+        request_url = self.config()["studioPromptsUrl"]
+        request_header = self.request_header(service_type="studio")
+        request_data = {
+            "name": name,
+            "template": template,
+            "description": description,
+            "llmModel": llm_model,
+            "attributes": attributes,
+        }
+
+        response = self.do_request(
+            url=request_url,
+            method="POST",
+            headers=request_header,
+            json_data=request_data,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to add prompt -> '%s' for LLM -> '{}'!".format(name),
+        )
+
+        return response
+
+    # end method definition
+
+    def direct_chat(
+        self,
+        llm_model: str | None = None,
+        messages: list | None = None,
+    ) -> dict | None:
+        r"""Chat with a LLM directly. This is bypassing the configured LangGraph completely.
+
+        Args:
+            llm_model (str | None, optional):
+                The name of the model to use. If None then the default model is used.
+            messages (list | None, optional):
+                List of messages including conversation history. Each list element is
+                a dictionary with two keys: "author" and "content".
+                Example: [{"author": "user", "content": "What is the recommended fridge temperature?"}]
+
+        Returns:
+            dict | None:
+                The data of the created prompt. This includes the prompt ID and the prompt version.
+
+        Example:
+            {
+                'result': "The recommended temperature for a refrigerator is below 40°F (4°C). The ideal temperature range is between 37°F (3°C) and 40°F (4°C). "
+            }
+
+        """
+
+        request_url = self.config()["directChatUrl"]
+        request_header = self.request_header(service_type="studio")
+        request_data = {
+            "messages": messages,
+        }
+        if llm_model is not None:
+            request_data["llmModelName"] = llm_model
+
+        response = self.do_request(
+            url=request_url,
+            method="POST",
+            headers=request_header,
+            json_data=request_data,
+            timeout=None,
+            show_error=True,
+            failure_message="Failed to chat with LLM -> '{}'!".format(
+                llm_model if llm_model is not None else "<default model>"
+            ),
+        )
+
+        return response
+
+    # end method definition
+
     def get_models(self, model_type: str) -> list | None:
         """Get all model details by type.
 
|
|
|
1236
1971
|
def get_models_iterator(self, model_type: str) -> iter:
|
|
1237
1972
|
"""Get an iterator object that can be used to traverse models.
|
|
1238
1973
|
|
|
1974
|
+
Args:
|
|
1975
|
+
model_type (str):
|
|
1976
|
+
The type of the model. Possible model types:
|
|
1977
|
+
* tenants
|
|
1978
|
+
* graphs
|
|
1979
|
+
* nodes
|
|
1980
|
+
* edges
|
|
1981
|
+
* actions
|
|
1982
|
+
* tools
|
|
1983
|
+
* prompts
|
|
1984
|
+
* rules
|
|
1985
|
+
* klasses
|
|
1986
|
+
|
|
1239
1987
|
Returns:
|
|
1240
1988
|
iter:
|
|
1241
1989
|
A generator yielding one model per iteration.
|
|
@@ -1363,7 +2111,7 @@ class OTCA:
 
         """
 
-        self.logger.
+        self.logger.debug("Updating existing model -> '%s' (%s)", model_type, model_id)
 
         request_header = self.request_header(service_type="studio")
         request_url = self.config()["studioModelsUrl"] + "/" + model_type + "/" + model_id
@@ -1606,24 +2354,29 @@ class OTCA:
         # Validations:
         for key in ["name", "description", "APISchema", "agents"]:
             if key not in request_body:
-                self.logger.error("%s is missing in provided request body for tool registration!", key)
+                self.logger.error("%s is missing in provided request body for AI tool registration!", key)
                 return None
 
         # Check if the tool already exists and need to be updated only:
-        self.logger.debug("Check if tool -> '%s' already
+        self.logger.debug("Check if AI tool -> '%s' is already registered...", request_body["name"])
         model = self.get_model_by_type_and_name(model_type="tools", name=request_body["name"])
         if model:
-            self.logger.info("Updating existing tool -> '%s'...", request_body["name"])
+            self.logger.info("Updating existing AI tool -> '%s'...", request_body["name"])
+
+            request_header = self.request_header(service_type="studio")
+            request_url = self.config()["studioToolsUrl"] + "/" + request_body["name"]
+            response = self.do_request(
+                url=request_url,
+                method="PUT",
+                headers=request_header,
+                json_data=request_body,
+                timeout=None,
+                show_error=True,
+                failure_message="Failed to update AI tool -> '{}'!".format(request_body["name"]),
+            )
 
-            update_body = {
-                "description": request_body["description"],
-                "attributes": {**model.get("attributes", {}), "APISchema": request_body["APISchema"]},
-            }
-            response = self.update_model(model_type="tools", model_id=model["id"], request_body=update_body)
-            if not response:
-                self.logger.error("Failed to update model -> '%s' (%s)", request_body["name"], model["id"])
         else:
-            self.logger.info("Registering
+            self.logger.info("Registering AI tool -> '%s'...", request_body["name"])
             request_header = self.request_header(service_type="studio")
             request_url = self.config()["studioToolsUrl"]
             response = self.do_request(
|
|
|
1633
2386
|
json_data=request_body,
|
|
1634
2387
|
timeout=None,
|
|
1635
2388
|
show_error=True,
|
|
1636
|
-
failure_message="Failed to register tool -> '{}'!".format(request_body["name"]),
|
|
2389
|
+
failure_message="Failed to register AI tool -> '{}'!".format(request_body["name"]),
|
|
1637
2390
|
)
|
|
1638
2391
|
|
|
1639
2392
|
return response
|
|
@@ -1792,7 +2545,7 @@ class OTCA:
 
         Yields:
             Iterator[iter]:
-                One
+                One prompt at a time.
 
         """
 
@@ -1803,7 +2556,7 @@ class OTCA:
     # end method definition
 
     def get_prompt(self, prompt_id: str) -> dict | None:
-        r"""Get a
+        r"""Get a prompt by its ID.
 
         Args:
             prompt_id (str):
@@ -1837,7 +2590,7 @@ class OTCA:
     # end method definition
 
     def get_actions(self) -> list | None:
-
+        """Get all actions.
 
         Returns:
             list:
@@ -2073,3 +2826,53 @@ class OTCA:
         yield from relationships
 
     # end method definition
+
+    def is_ready(self, service: str, wait: bool = False) -> bool | None:
+        """Check if service is ready to be used.
+
+        Args:
+            service (str):
+                The name of the service to check.
+            wait (bool):
+                If True, will wait until the service is ready.
+                Default is False.
+
+        Returns:
+            bool | None:
+                True if ready, False if not, None if unknown service.
+
+        """
+
+        match service.lower():
+            case "studio":
+                request_url = self.config()["studioUrl"]
+
+            case "chat":
+                request_url = self.config()["chatUrl"]
+
+            case _:
+                self.logger.error("Service '%s' is not supported for readiness check!", service)
+                return None
+
+        if wait:
+            self.logger.info("Waiting for Aviator %s to be available at %s ...", service, request_url)
+
+        response = None
+        while not response:
+            response = self.do_request(
+                url=request_url,
+                method="GET",
+                max_retries=-1,
+                timeout=None,
+                show_error=False,
+                failure_message=f"Aviator {service} is not available!",
+                parse_request_response=False,
+            )
+
+            if not wait:
+                break
+
+        # Return True if we got a response, False if not:
+        return response is not None
+
+    # end method definition