yait-aichain 0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,5 @@
1
+ Metadata-Version: 2.1
2
+ Name: yait-aichain
3
+ Version: 0.1
4
+ Summary: A simple Python library designed to wrap and unify AI functions from various popular services
5
+ Author: YAIT
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,9 @@
1
# Packaging script for the yait-aichain distribution (setuptools-based).
from setuptools import setup, find_packages

setup(
    name='yait-aichain',
    version='0.1',
    description='A simple Python library designed to wrap and unify AI functions from various popular services',
    author='YAIT',
    # Auto-discover all packages in the source tree.
    packages=find_packages(),
)
File without changes
@@ -0,0 +1,372 @@
1
+ import urllib3
2
+ import json
3
+ import base64
4
+
5
+ from _constants import (
6
+ DEFAULT_TIMEOUT,
7
+ MAX_RETRY_DELAY,
8
+ DEFAULT_MAX_RETRIES,
9
+ INITIAL_RETRY_DELAY,
10
+ DEFAULT_CHATMODEL_TEMPERATURE,
11
+ DEFAULT_MAX_TOKENS
12
+ )
13
+
14
class BaseClient:
    """Thin wrapper around an ``urllib3.PoolManager`` bound to one base URL.

    All helpers are best-effort: they return ``None`` on any non-200 response
    or transport error instead of raising, so callers must check for ``None``.
    """

    _client: urllib3.PoolManager
    _base_url: str
    _timeout: urllib3.Timeout  # fixed typo: was misspelled ``_tiemout``
    _retries: urllib3.Retry

    def __init__(self, base_url: str, timeout: urllib3.Timeout = DEFAULT_TIMEOUT, retries: urllib3.Retry = DEFAULT_MAX_RETRIES):
        self._base_url = base_url
        self._client = urllib3.PoolManager(timeout=timeout, retries=retries)

    def get(self, path: str, headers: dict = None):
        """GET ``base_url + path``; return the body decoded as UTF-8, or None."""
        response = self._client.request('GET', self._base_url + path, headers=headers)
        if response.status == 200:
            return response.data.decode('utf-8')
        return None

    def download(self, headers: dict = None):
        """GET the bare base URL and return the raw payload.

        Returns ``{"data": bytes, "media_type": str}`` on HTTP 200, else None.
        """
        response = self._client.request('GET', self._base_url, headers=headers)
        if response.status == 200:
            return {
                "data": response.data,
                "media_type": response.headers['Content-Type'],
            }
        return None

    def post(self, path: str, data: dict = None, formdata: dict = None, headers: dict = None):
        """POST either a JSON body (``data``) or multipart fields (``formdata``).

        Exactly one of ``data``/``formdata`` should be provided; with neither
        the call is a no-op returning None.  Returns the raw response bytes on
        HTTP 200; otherwise prints the error payload and returns None.
        """
        try:
            if data:
                response = self._client.request('POST', self._base_url + path, body=json.dumps(data), headers=headers)
            elif formdata:
                response = self._client.request('POST', self._base_url + path, fields=formdata, headers=headers)
            else:
                return None

            if response.status == 200:
                return response.data
            # Non-200: the APIs used here return a JSON error document.
            e = json.loads(response.data.decode('utf-8'))
            print(f"An error occurred: {str(e)}")
            return None
        except Exception as e:
            # Best-effort client: swallow transport/parse errors and signal
            # failure with None rather than raising to callers.
            print(f"An error occurred: {str(e)}")
            return None
62
+
63
class OpenAIClient(BaseClient):
    """Client for the OpenAI REST API: chat completions, images, embeddings."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url='https://api.openai.com',
            timeout=DEFAULT_TIMEOUT,
            retries=DEFAULT_MAX_RETRIES,
        )
        self._api_key = api_key
        self._model = model

    def _auth_headers(self) -> dict:
        # Shared bearer-token + JSON headers for every endpoint.
        return {
            'Authorization': 'Bearer ' + self._api_key,
            'Content-Type': 'application/json',
        }

    def completion(self, messages: list):
        """POST /v1/chat/completions; returns the parsed JSON dict or None."""
        body = {
            "model": self._model,
            "messages": messages,
            "temperature": DEFAULT_CHATMODEL_TEMPERATURE,
        }
        response = self.post('/v1/chat/completions', data=body, headers=self._auth_headers())
        return json.loads(response.decode('utf-8')) if response is not None else None

    def generation(self, messages: object, options: dict = None):
        """POST /v1/images/generations for a single 1024x1024 image.

        ``options['response_format']`` may be "url" (default) or "b64_json".
        Returns ``{"type": "image", "data": bytes}`` for b64_json,
        ``{"type": "url", "data": str}`` for url, or None on failure.
        """
        response_format = options['response_format'] if (options is not None) and ('response_format' in options) else "url"

        body = {
            "prompt": messages[0]['content'],
            "n": 1,
            "model": self._model,
            "size": "1024x1024",
            "response_format": response_format,
        }
        response = self.post('/v1/images/generations', data=body, headers=self._auth_headers())

        if response is None:
            # fixed: the original's failure branch was a bare ``None``
            # expression with no ``return`` (a no-op statement).
            return None
        # Decode consistently for both formats (the b64 branch previously
        # passed raw bytes to json.loads while the url branch decoded first).
        payload = json.loads(response.decode('utf-8'))
        if response_format == "b64_json":
            return {"type": "image", "data": base64.b64decode(payload["data"][0]["b64_json"])}
        return {"type": "url", "data": payload["data"][0]["url"]}

    def embeddings(self, input: str):
        """POST /v1/embeddings; returns the embedding vector (list) or None."""
        response = self.post(
            '/v1/embeddings',
            data={"model": self._model, "input": input},
            headers=self._auth_headers(),
        )
        return json.loads(response.decode('utf-8'))['data'][0]['embedding'] if response is not None else None
135
+
136
class MistralAIClient(BaseClient):
    """Client for the Mistral AI chat-completions endpoint."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url='https://api.mistral.ai',
            timeout=DEFAULT_TIMEOUT,
            retries=DEFAULT_MAX_RETRIES,
        )
        self._api_key = api_key
        self._model = model

    def completion(self, messages: list):
        """POST /v1/chat/completions; returns the parsed JSON dict or None."""
        request_body = {
            "model": self._model,
            "messages": messages,
            "temperature": DEFAULT_CHATMODEL_TEMPERATURE,
        }
        request_headers = {
            'Authorization': 'Bearer ' + self._api_key,
            'Content-Type': 'application/json',
        }

        raw = self.post('/v1/chat/completions', data=request_body, headers=request_headers)
        if raw is None:
            return None
        return json.loads(raw.decode('utf-8'))
166
+
167
class YandexClient(BaseClient):
    """Client for YandexGPT foundation models (llm.api.cloud.yandex.net)."""

    _model: str

    def __init__(self, api_key: str, catalogId: str, model: str):
        # catalogId is the Yandex Cloud folder ID embedded in the model URI.
        super().__init__(
            base_url = 'https://llm.api.cloud.yandex.net',
            timeout = DEFAULT_TIMEOUT,
            retries = DEFAULT_MAX_RETRIES
        )
        self._api_key = api_key
        self._model = model
        self.catalogId = catalogId

    def __prepareBody(self, messages):
        """Convert OpenAI-style messages into the Yandex completion payload.

        NOTE(review): mutates ``messages`` in place — each dict's 'content'
        key is renamed to 'text', so the caller's list is changed too.
        """
        for message in messages:
            message['text'] = message.pop('content')

        return {
            "modelUri": f"gpt://{self.catalogId}/{self._model}/latest",
            "completionOptions": {
                "stream": False,
                "temperature": DEFAULT_CHATMODEL_TEMPERATURE,
                "maxTokens": DEFAULT_MAX_TOKENS
            },
            "messages": messages
        }

    def completion(self, messages: list):
        """POST /foundationModels/v1/completion; returns parsed JSON or None."""

        responce = self.post(
            '/foundationModels/v1/completion',
            data = self.__prepareBody(messages),
            headers = {
                # Yandex uses the 'Api-Key' scheme, not 'Bearer'.
                'Authorization': 'Api-Key ' + self._api_key,
                'Content-Type': 'application/json'
            }
        )

        return json.loads(responce.decode('utf-8')) if responce is not None else None
206
+
207
class StabilityAIClient(BaseClient):
    """Client for Stability AI v2beta image generation (multipart form API)."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url='https://api.stability.ai/v2beta',
            timeout=DEFAULT_TIMEOUT,
            retries=DEFAULT_MAX_RETRIES,
        )
        self._api_key = api_key
        self._model = model

    def generation(self, messages: object, options: dict = None):
        """POST a multipart image-generation request.

        The first message's 'content' is the prompt.  Recognised ``options``
        keys: output_format, seed, style (sent as style_preset), aspect_ratio.
        Returns ``{"type": "image", "data": bytes}`` or None on failure.
        """
        fields = {
            "prompt": messages[0]['content']
        }

        # "core" has a dedicated endpoint; every other model goes through the
        # sd3 endpoint and must name itself in the form fields.
        if self._model == "core":
            url = '/stable-image/generate/core'
        else:
            url = '/stable-image/generate/sd3'
            fields['model'] = self._model

        if options is not None:
            # fixed: the original checked 'output_format' twice.
            if 'output_format' in options: fields['output_format'] = options['output_format']
            if 'seed' in options: fields['seed'] = options['seed']
            if 'style' in options: fields['style_preset'] = options['style']
            if 'aspect_ratio' in options: fields['aspect_ratio'] = options['aspect_ratio']

        response = self.post(
            url,
            formdata=fields,
            headers={
                'Authorization': 'Bearer ' + self._api_key,
                'accept': 'image/*'
            }
        )

        if response is not None:
            # The API returns the image bytes directly (accept: image/*).
            return {"type": "image", "data": response}
        return None
252
+
253
class AnthropicClient(BaseClient):
    """Client for the Anthropic Messages API."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url = 'https://api.anthropic.com',
            timeout = DEFAULT_TIMEOUT,
            retries = DEFAULT_MAX_RETRIES
        )
        self._api_key = api_key
        self._model = model

    def __prepareBody(self, messages):
        """Build the /v1/messages payload.

        Anthropic takes the system prompt as a top-level "system" field
        rather than a message, so the first message with role "system" is
        removed from ``messages`` and promoted.
        NOTE(review): mutates the caller's ``messages`` list in place.
        """

        systemPrompt = None
        for idx, message in enumerate(messages):
            if message['role'] == 'system':
                systemPrompt = message['content']
                del messages[idx]
                break

        body = {
            "model": self._model,
            "messages": messages,
            "max_tokens": DEFAULT_MAX_TOKENS
        }
        if systemPrompt is not None: body["system"] = systemPrompt
        return body

    def completion(self, messages: list):
        """POST /v1/messages; returns the parsed JSON dict or None."""
        responce = self.post(
            '/v1/messages',
            data = self.__prepareBody(messages),
            headers = {
                # Anthropic auth uses the x-api-key header plus a pinned
                # API-version header.
                'x-api-key': self._api_key,
                'Content-Type': 'application/json',
                'anthropic-version': '2023-06-01'
            }
        )

        return json.loads(responce.decode('utf-8')) if responce is not None else None
294
+
295
class GoogleAIClient(BaseClient):
    """Client for the Google Generative Language API (Gemini models)."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url='https://generativelanguage.googleapis.com/v1beta',
            timeout=DEFAULT_TIMEOUT,
            retries=DEFAULT_MAX_RETRIES,
        )
        self._api_key = api_key
        self._model = model

    def __prepareBody(self, messages):
        """Convert OpenAI-style messages into the Gemini "contents" payload.

        The system message (if any) is removed and its text prepended to the
        first user message's parts; "assistant" roles become "model".
        NOTE(review): mutates ``messages`` in place, like the other clients.
        """
        systemPrompt = None
        for idx, message in enumerate(messages):
            if message['role'] == 'system':
                systemPrompt = message['content']
                del messages[idx]
                break

        isSystemPromptApplicable = True
        for message in messages:
            if message['role'] == 'user':
                content = message.pop('content')
                if content is not None and type(content) is list:
                    message['parts'] = content
                else:
                    message['parts'] = [{"text": content}]
                # fixed: only inject the system prompt when one exists, and
                # only into the first user message.  The original emitted
                # [{"text": None}] for later user messages when no system
                # prompt was present, and dropped the prompt for list content.
                if systemPrompt is not None and isSystemPromptApplicable:
                    message['parts'].insert(0, {"text": systemPrompt})
                isSystemPromptApplicable = False

            if message['role'] == 'assistant':
                message['role'] = 'model'
                message['parts'] = [{"text": message.pop('content')}]

        # fixed: the original returned {"contents": message} — only the last
        # loop variable — instead of the whole converted conversation.
        return {
            "contents": messages
        }

    def completion(self, messages: list):
        """POST models/{model}:generateContent; returns parsed JSON or None."""
        responce = self.post(
            # The API key travels as a query parameter, not a header.
            f'/models/{self._model}:generateContent?key={self._api_key}',
            data=self.__prepareBody(messages),
            headers={
                'Content-Type': 'application/json',
            },
        )

        return json.loads(responce.decode('utf-8')) if responce is not None else None
345
+
346
class VoyageAIClient(BaseClient):
    """Client for the Voyage AI embeddings endpoint."""

    _model: str

    def __init__(self, api_key: str, model: str):
        super().__init__(
            base_url='https://api.voyageai.com/v1',
            timeout=DEFAULT_TIMEOUT,
            retries=DEFAULT_MAX_RETRIES,
        )
        self._api_key = api_key
        self._model = model

    def embeddings(self, input: str | list, options: dict = None):
        """POST /embeddings and return the first embedding vector, or None.

        ``options['type']`` (when given) is forwarded as ``input_type``;
        otherwise ``input_type`` is sent as null, exactly as before.
        """
        input_type = None
        if options is not None and 'type' in options:
            input_type = options['type']

        payload = {
            "model": self._model,
            "input": input,
            "input_type": input_type,
        }
        auth_headers = {
            'Authorization': 'Bearer ' + self._api_key,
            'Content-Type': 'application/json',
        }

        raw = self.post('/embeddings', data=payload, headers=auth_headers)
        if raw is None:
            return None
        return json.loads(raw.decode('utf-8'))['data'][0]['embedding']
@@ -0,0 +1,11 @@
1
import urllib3

# HTTP client defaults: 10 s to connect, 10 min (600 s) to read — LLM
# responses can take a long time — with up to 3 retries and 2 redirects.
DEFAULT_TIMEOUT = urllib3.Timeout(connect=10.0, read=600.0)
DEFAULT_MAX_RETRIES = urllib3.Retry(3, redirect=2)
INITIAL_RETRY_DELAY = 1.0  # seconds; NOTE(review): exported but not used in this module
MAX_RETRY_DELAY = 8.0      # seconds; NOTE(review): exported but not used in this module

# Model defaults shared by all chat clients.
DEFAULT_CHATMODEL_TEMPERATURE = 0.75
DEFAULT_MAX_TOKENS = 4096
@@ -0,0 +1,271 @@
1
+ from _client import BaseClient, YandexClient, OpenAIClient, StabilityAIClient, AnthropicClient, GoogleAIClient, MistralAIClient
2
+ from copy import deepcopy
3
+ import base64
4
+ import mimetypes
5
+
6
+ class Role:
7
+ name: str
8
+ description: str
9
+ model: str
10
+ instructions: dict
11
+ options: dict
12
+ api_key: str
13
+ variables: list
14
+
15
+ def __new__(cls, name: str, description: str, modelname: str, instructions:list):
16
+ match modelname:
17
+ case "yandexgpt" | "yandexgpt-lite" | "summarization" as model:
18
+ instance = Role_YandexGPT
19
+ case "gpt-3.5-turbo" | "gpt-4" | "gpt-4-turbo" | "dall-e-3" | "gpt-4o" | "gpt-4o-mini" as model:
20
+ instance = Role_OpenAI
21
+ case "core" | "sd3" | "sd3-turbo":
22
+ instance = Role_StabilityAI
23
+ case "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-3-5-sonnet-20240620" as model:
24
+ instance = Role_AnthropicAI
25
+ case "gemini-pro" | "gemini-pro-vision" as model:
26
+ instance = Role_GoogleAI
27
+ case "mistral-large-latest" | "mistral-medium-latest" | "mistral-small-latest" as model:
28
+ instance = Role_MistralAI
29
+ case _:
30
+ instance = cls
31
+ return super().__new__(instance)
32
+
33
+ def __init__(self, name: str, description: str, modelname: str, instructions:list ) -> None:
34
+ self.model = modelname
35
+ self.instructions = deepcopy(instructions)
36
+ self.name = name
37
+ self.description = description
38
+ self.variables = []
39
+
40
+ for instruction in self.instructions:
41
+ self.variables = self.variables + self.__getVariables(instruction['content'])
42
+
43
+ def __getVariables(self, content:str|list) -> list:
44
+ variables = []
45
+ if isinstance(content, list):
46
+ for item in content:
47
+ if isinstance(item, dict) and 'text' in item:
48
+ for variable in item['text'].split():
49
+ if '{' in variable and '}' in variable:
50
+ variable_name = variable.strip('{},.?!')
51
+ variables.append(variable_name)
52
+ else:
53
+ for variable in content.split():
54
+ if '{' in variable and '}' in variable:
55
+ variable_name = variable.strip('{},.?!')
56
+ variables.append(variable_name)
57
+
58
+ return variables
59
+
60
+ def castInstructions(self, options: dict):
61
+ for instruction in self.instructions:
62
+ if isinstance(instruction["content"], list) and instruction["role"] == "user":
63
+ updated = instruction.pop('content')
64
+ for item in updated:
65
+ for key in item.keys():
66
+ item[key] = self.__setVariables(item[key], options)
67
+ instruction['content'] = updated
68
+ else:
69
+ updated = self.__setVariables(instruction.pop('content'), options)
70
+ instruction['content'] = updated
71
+
72
+ def __setVariables(self, input_string, variables):
73
+ for key in variables:
74
+ input_string = input_string.replace("{" + key + "}", str(variables[key]))
75
+ return input_string
76
+
77
+ def run(self, api_key = None, options = None):
78
+ print("Running Role")
79
+
80
+ class Role_OpenAI(Role):
81
+ def visionInstructions(self, instruction:list):
82
+ for item in instruction:
83
+ if "text" in item: item["type"] = "text"
84
+ if "image" in item:
85
+
86
+ item["type"] = "image_url"
87
+ source = item.pop('image')
88
+
89
+ if 'data' in source and 'media_type' in source:
90
+ item["image_url"] = {"url":f"data:{source['media_type']};base64,{source['data']}"}
91
+ elif 'file' in source:
92
+ try:
93
+ mimetypes.init()
94
+ with open(source['file'], "rb") as image_file:
95
+ image_data = base64.b64encode(image_file.read()).decode('utf-8')
96
+ image_media_type = mimetypes.guess_type(source['file'])[0]
97
+ item["image_url"] = {"url":f"data:{image_media_type};base64,{image_data}"}
98
+
99
+ except Exception as e:
100
+ print("An error occurred:", str(e))
101
+ return None
102
+
103
+ elif 'url' in source:
104
+ item["image_url"] = {"url":source['url']}
105
+ else:
106
+ print(f"An error occurred: Unknown source type for image, file, data or url expected.")
107
+ return None
108
+
109
+ def run(self, api_key: str, options: dict = None):
110
+ #Apply variables to instructions
111
+ if options: self.castInstructions(options)
112
+
113
+ match self.model:
114
+ case "gpt-3.5-turbo"as model:
115
+ openaiChatCompletion = OpenAIClient(api_key = api_key, model = self.model).completion(messages = self.instructions)
116
+ return openaiChatCompletion['choices'][0]['message']['content'] if openaiChatCompletion is not None else None
117
+
118
+ case "gpt-4" | "gpt-4-turbo" | "gpt-4o" | "gpt-4o-mini" as model: #Extend instructions for Vision models
119
+ for instruction in self.instructions:
120
+ if isinstance(instruction["content"], list) and instruction["role"] == "user":
121
+ self.visionInstructions(instruction["content"]);
122
+
123
+ openaiChatCompletion = OpenAIClient(api_key = api_key, model = self.model).completion(messages = self.instructions)
124
+ return openaiChatCompletion['choices'][0]['message']['content'] if openaiChatCompletion is not None else None
125
+
126
+ case "dall-e-3":
127
+ openaiImageGeneration = OpenAIClient(api_key = api_key, model = self.model).generation(messages = self.instructions, options = {"response_format" : "b64_json"})
128
+ return openaiImageGeneration if openaiImageGeneration is not None else None
129
+ case _:
130
+ return None
131
+
132
class Role_MistralAI(Role):
    """Role backed by Mistral chat models (text only, no vision support)."""

    def run(self, api_key: str, options: dict = None):
        """Substitute variables and run a chat completion; returns the reply text or None."""
        # Mistral models are text-only: reject structured (vision) content up front.
        has_vision_content = any(
            isinstance(instruction["content"], list) and instruction["role"] == "user"
            for instruction in self.instructions
        )
        if has_vision_content:
            print("An error occurred: this model doesn't support vision functions")
            return None

        if options:
            self.castInstructions(options)

        result = MistralAIClient(api_key=api_key, model=self.model).completion(messages=self.instructions)
        if result is None:
            return None
        return result['choices'][0]['message']['content']
145
+
146
class Role_AnthropicAI(Role):
    """Role backed by Anthropic Claude models (with vision support)."""

    def visionInstructions(self, instruction:list):
        """Rewrite structured content items into Anthropic's vision format.

        Each item may carry 'text' (tagged type=text) or 'image'.  Image
        sources are accepted as raw data + media type, a local file path
        (base64-inlined), or a URL (downloaded then base64-inlined).
        Mutates ``instruction`` in place; returns None early on error,
        leaving the list partially converted.
        """
        for item in instruction:
            if "text" in item: item["type"] = "text"
            if "image" in item:

                source = item.pop('image')
                item["type"] = "image"

                if 'data' in source and 'media_type' in source:
                    # assumes source['data'] is already base64 text — TODO confirm
                    item["source"] = {
                        "type": "base64",
                        "media_type": source['media_type'],
                        "data": source['data']
                    }
                elif 'file' in source:
                    try:
                        mimetypes.init()
                        with open(source['file'], "rb") as image_file:
                            image_data = base64.b64encode(image_file.read()).decode('utf-8')
                            image_media_type = mimetypes.guess_type(source['file'])[0]

                            item["source"] = {
                                "data":image_data,
                                "type": "base64",
                                "media_type": image_media_type
                            }

                    except Exception as e:
                        print("An error occurred:", str(e))
                        return None

                elif 'url' in source:
                    # Fetch the remote image so it can be inlined as base64.
                    image_client = BaseClient(source['url'])
                    image = image_client.download()

                    if image is not None:
                        item["source"] = {
                            "data": base64.b64encode(image['data']).decode('utf-8'),
                            "media_type": image['media_type'],
                            "type": "base64",
                        }

                else:
                    print(f"An error occurred: Unknown source type for image, file, data or url expected.")
                    return None

    def run(self, api_key: str, options: dict = None):
        """Substitute variables, expand vision content, and run a completion.

        Returns the first content block's text from the reply, or None.
        """
        #Apply variables to instructions
        if options: self.castInstructions(options)

        #Extend instructions for Vision models
        for instruction in self.instructions:
            if isinstance(instruction["content"], list) and instruction["role"] == "user":
                self.visionInstructions(instruction["content"]);

        AnthropicAICompletion = AnthropicClient(api_key = api_key, model = self.model).completion(messages = self.instructions)
        return AnthropicAICompletion['content'][0]['text'] if AnthropicAICompletion is not None else None
204
+
205
class Role_GoogleAI(Role):
    """Role backed by Google Gemini models (with vision support)."""

    def visionInstructions(self, instruction: list):
        """Convert 'image' items into Gemini ``inline_data`` parts, in place.

        Sources may be raw data + media type, a local file path, or a URL
        (downloaded and base64-encoded).  Returns None early on error.
        """
        for part in instruction:
            if "image" not in part:
                continue
            source = part.pop('image')

            if 'data' in source and 'media_type' in source:
                part["inline_data"] = {
                    "mime_type": source['media_type'],
                    "data": source['data'],
                }
            elif 'file' in source:
                try:
                    mimetypes.init()
                    with open(source['file'], "rb") as fh:
                        encoded = base64.b64encode(fh.read()).decode('utf-8')
                        mime = mimetypes.guess_type(source['file'])[0]
                        part["inline_data"] = {
                            "data": encoded,
                            "mime_type": mime,
                        }
                except Exception as e:
                    print("An error occurred:", str(e))
                    return None
            elif 'url' in source:
                fetched = BaseClient(source['url']).download()
                if fetched is not None:
                    part["inline_data"] = {
                        "data": base64.b64encode(fetched['data']).decode('utf-8'),
                        "mime_type": fetched['media_type'],
                    }
            else:
                print(f"An error occurred: Unknown source type for image, file, data or url expected.")
                return None

    def run(self, api_key: str, options: dict = None):
        """Substitute variables, expand vision content, and run a completion."""
        if options:
            self.castInstructions(options)

        # Expand structured user content for vision-capable models.
        for instruction in self.instructions:
            if instruction["role"] == "user" and isinstance(instruction["content"], list):
                self.visionInstructions(instruction["content"])

        reply = GoogleAIClient(api_key=api_key, model=self.model).completion(messages=self.instructions)
        if reply is None:
            return None
        return reply['candidates'][0]['content']['parts'][0]['text']
255
+
256
class Role_YandexGPT(Role):
    """Role backed by YandexGPT models; requires a Yandex Cloud folder ID."""

    def run(self, api_key: str, options: dict):
        """Substitute variables and run a completion via ``YandexClient``.

        ``options`` must contain 'YaFolderID' (the Yandex Cloud folder /
        catalog ID); remaining keys are used for variable substitution.
        Returns the reply text, or None on failure.
        """
        if options is None or 'YaFolderID' not in options:
            # fixed typo in the error message ("ant" -> "any")
            print(f"An error occurred: You must provide a YaFolderID in the options for any Yandex model.")
            return None
        # options is guaranteed truthy here (the guard above returned otherwise).
        self.castInstructions(options)

        yandexChatCompletion = YandexClient(api_key = api_key, catalogId = options['YaFolderID'], model = self.model).completion(messages = self.instructions)
        return yandexChatCompletion['result']['alternatives'][0]['message']['text'] if yandexChatCompletion is not None else None
265
+
266
class Role_StabilityAI(Role):
    """Role backed by Stability AI image-generation models."""

    def run(self, api_key: str, options: dict):
        """Substitute variables and generate an image.

        Generation options recognised by ``StabilityAIClient.generation``
        (output_format, seed, style, aspect_ratio) are now forwarded — the
        original accepted them in ``options`` but never passed them on;
        unrecognised keys are ignored by the client.
        Returns ``{"type": "image", "data": bytes}`` or None.
        """
        if options: self.castInstructions(options)

        generation = StabilityAIClient(api_key = api_key, model = self.model).generation(
            messages = self.instructions,
            options = options,  # fixed: options were previously dropped
        )
        return generation  # already None when the request failed
@@ -0,0 +1,5 @@
1
+ Metadata-Version: 2.1
2
+ Name: yait-aichain
3
+ Version: 0.1
4
+ Summary: A simple Python library designed to wrap and unify AI functions from various popular services
5
+ Author: YAIT
@@ -0,0 +1,9 @@
1
+ setup.py
2
+ yait-aichain/__init__.py
3
+ yait-aichain/_client.py
4
+ yait-aichain/_constants.py
5
+ yait-aichain/_role.py
6
+ yait_aichain.egg-info/PKG-INFO
7
+ yait_aichain.egg-info/SOURCES.txt
8
+ yait_aichain.egg-info/dependency_links.txt
9
+ yait_aichain.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ yait-aichain