cloudpss 4.0.4__py3-none-any.whl → 4.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,34 +0,0 @@
- from cloudpss.asyncio.utils.httpAsyncRequest import graphql_request
- from cloudpss.model.topology import ModelTopology as ModelTopologyBase
-
-
- class ModelTopology(ModelTopologyBase):
-     @staticmethod
-     async def fetch(hash, implementType, config, maximumDepth=None):
-         """
-         Fetch the topology.
-
-         :params: hash
-         :params: implementType implementation type
-         :params: config parameter scheme
-         :params: maximumDepth maximum recursion depth, used to control how diagram-implemented components are expanded in custom projects
-
-         :return: topology instance
-
-         >>> data = ModelTopology.fetch('','emtp',{})
-
-         """
-         args = {} if config is None else config['args']
-         variables = {
-             "a": {
-                 'hash': hash,
-                 'args': args,
-                 'acceptImplementType': implementType,
-                 'maximumDepth': maximumDepth
-             }
-         }
-         data = await graphql_request(ModelTopology.__modelTopologyQuery, variables)
-         if 'errors' in data:
-             raise Exception(data['errors'][0]['message'])
-
-         return ModelTopology(data['data']['modelTopology'])
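For orientation, a minimal sketch of how this removed async helper was typically driven; the model hash, the 'emtp' implement type, and the missing parameter scheme are placeholders, and the import path is assumed since this hunk does not show the deleted file's name:

    import asyncio
    import os

    # Import path assumed for illustration; the hunk above does not name its file.
    from cloudpss.asyncio.model.topology import ModelTopology

    async def main():
        # The SDK reads the endpoint and token from the environment (see the
        # fetch_data helper later in this diff); both values are placeholders.
        os.environ["CLOUDPSS_API_URL"] = "https://cloudpss.net/"
        os.environ["CLOUDPSS_TOKEN"] = "<your token>"

        # Placeholder model hash, 'emtp' implement type, no parameter scheme.
        topology = await ModelTopology.fetch("<model hash>", "emtp", None)
        print(topology)

    asyncio.run(main())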
@@ -1,27 +0,0 @@
- import asyncio
-
-
- class CustomAsyncIterable:
-     def __init__(self, async_func, *args):
-         # __init__ and __aiter__ must be plain methods, not coroutines.
-         self.async_func = async_func
-         self.queue = asyncio.Queue()
-         self.index = 0
-         self.args = args
-
-     def __aiter__(self):
-         return self
-
-     async def __anext__(self):
-         try:
-             # get_nowait() raises QueueEmpty when no result is buffered yet;
-             # a plain queue.get() would block forever on the first call.
-             result = self.queue.get_nowait()
-             if result is None:
-                 raise StopAsyncIteration
-             return result
-         except asyncio.QueueEmpty:
-             tasks = []
-             for arg in self.args:
-                 tasks.append(asyncio.create_task(self.async_func(arg)))
-             await asyncio.gather(*tasks)
-             for task in tasks:
-                 await self.queue.put(await task)
-             await self.queue.put(None)  # sentinel so iteration terminates
-             return await self.__anext__()
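A short sketch of how an iterable like this is consumed with async for; fetch_one and its argument values are made up for illustration, and CustomAsyncIterable is assumed to be importable from the deleted module above:

    import asyncio

    # CustomAsyncIterable is assumed to come from the module shown above.

    async def fetch_one(name):
        # Stand-in for an async SDK call (e.g. fetching one job); purely illustrative.
        await asyncio.sleep(0.1)
        return f"result for {name}"

    async def main():
        items = CustomAsyncIterable(fetch_one, "a", "b", "c")
        async for result in items:
            print(result)

    asyncio.run(main())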
@@ -1,6 +0,0 @@
-
- from .httpAsyncRequest import graphql_request
-
- __all__ = [
-     'graphql_request'
- ]
@@ -1,68 +0,0 @@
- import json
- import os
- from aiohttp import ClientSession, WSMsgType
- from urllib.parse import urljoin
- import logging
- from ...version import __version__
-
-
- def graphql_version_check(uri, response):
-     if uri.startswith("graphql"):
-         if "X-Cloudpss-Version" not in response.headers:
-             raise Exception("The current SDK version (ver 3.X.X) is incompatible with the server version (below 3.0.0). Please change the server address or switch SDK versions.")
-         os.environ["X_CLOUDPSS_VERSION"] = response.headers["X-Cloudpss-Version"]
-         if float(response.headers["X-Cloudpss-Version"]) >= 5:
-             raise Exception(
-                 "The current SDK version (ver "
-                 + __version__
-                 + ") is incompatible with the server version (ver "
-                 + response.headers["X-Cloudpss-Version"]
-                 + ".X.X). Please change the server address or update the SDK (pip: pip install -U cloudpss, conda: conda update cloudpss)."
-             )
-
-
- ### Requests implemented via aiohttp
- async def fetch_data(method: str, uri, data, baseUrl=None, params={}, **kwargs):
-     if baseUrl is None:
-         baseUrl = os.environ.get("CLOUDPSS_API_URL", "https://cloudpss.net/")
-     url = urljoin(baseUrl, uri)
-     token = os.environ.get("CLOUDPSS_TOKEN", None)
-     if token:
-         headers = {
-             "Authorization": "Bearer " + token,
-             "Content-Type": "application/json; charset=utf-8",
-         }
-     else:
-         raise Exception("token undefined")
-     logging.debug("fetch start: %s", uri)
-     async with ClientSession() as session:
-         async with session.request(
-             method, url, data=data, params=params, headers=headers
-         ) as response:
-             if response.status == 200:
-                 data = await response.json()
-                 graphql_version_check(uri, response)
-                 if "errors" in data:
-                     raise Exception(data["errors"])
-                 return data
-             elif 400 <= response.status < 500:
-                 raise Exception(f"Request failed, status code: {response.status}")
-             elif 500 <= response.status < 600:
-                 raise Exception(f"Request failed, status code: {response.status}")
-             else:
-                 return
-
-
- # GraphQL request implementation
- async def graphql_request(query, variables=None):
-     payload = {"query": query, "variables": variables}
-     return await fetch_data("POST", "graphql", data=json.dumps(payload))
-
-
- # websocket
- async def websocket_connect(url, open_func):
-     async with ClientSession() as session:
-         async with session.ws_connect(url) as ws:
-             open_func(ws)
-             async for msg in ws:
-                 yield msg
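A hedged sketch of calling the removed GraphQL helper directly; the import path matches the one used by the first deleted file in this diff, while the query, job id, and token values are placeholders:

    import asyncio
    import os

    from cloudpss.asyncio.utils.httpAsyncRequest import graphql_request

    async def main():
        os.environ["CLOUDPSS_TOKEN"] = "<your token>"             # required, fetch_data raises without it
        os.environ["CLOUDPSS_API_URL"] = "https://cloudpss.net/"  # default when unset

        # Illustrative query reusing fields from the Job query later in this diff.
        query = "query($_a:JobInput!){job(input:$_a){id status}}"
        result = await graphql_request(query, {"_a": {"id": "<job id>"}})
        print(result["data"])

    asyncio.run(main())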
cloudpss/job/__init__.py DELETED
@@ -1,5 +0,0 @@
- from .job import Job
-
- __all__ = [
-     'Job'
- ]
cloudpss/job/job.py DELETED
@@ -1,260 +0,0 @@
- import asyncio
- import random
- import re
- import time
-
- from cloudpss.job.view.view import View
- from .view import getViewClass
-
- from cloudpss.utils.IO import IO
- from .messageStreamReceiver import MessageStreamReceiver
-
- from cloudpss.utils.graphqlUtil import graphql_request
- from .jobMachine import JobMachine
- from .messageStreamSender import MessageStreamSender
- from typing import Any, Callable, Generic, TypeVar
- F = TypeVar('F', bound=Callable[..., Any])
- T = TypeVar('T', bound=Callable[..., View])
- class Job(Generic[T]):
-     """docstring for Job"""
-     __jobQuery = """query($_a:JobInput!){
-         job(input:$_a){
-             id
-             args
-             createTime
-             startTime
-             endTime
-             status
-             context
-             user
-             priority
-             policy {
-                 name
-                 queue
-                 tres {
-                     cpu
-                     ecpu
-                     mem
-                 }
-                 priority
-                 maxDuration
-             }
-             machine {
-                 id
-                 name
-                 tres {
-                     cpu
-                     ecpu
-                     mem
-                 }
-             }
-             input
-             output
-             position
-         }
-     }"""
-
-     __createJobQuery = """mutation($input:CreateJobInput!){job:createJob(input:$input){id input output status position}}"""
-     def __init__(
-         self,
-         id,
-         args,
-         createTime,
-         startTime,
-         endTime,
-         status,
-         context,
-         user,
-         priority,
-         policy,
-         machine,
-         input,
-         output,
-         position,
-     ):
-         super(Job, self).__init__()
-         self.id = id
-         self.args = args
-         self.createTime = createTime
-         self.startTime = startTime
-         self.endTime = endTime
-         self.job_status = status  # this status field would otherwise clash with the status() method below
-         self.context = context
-         self.user = user
-         self.priority = priority
-         self.policy = policy  # type: ignore
-         self.machine = JobMachine(**machine)  # type: ignore
-         self.input = input
-         self.output = output
-         self.position = position
-         self.__receiver = None
-         self.__sender = None
-         self._result = None
-
-     @staticmethod
-     def fetch(id):
-         """
-         Fetch job information.
-         """
-         if id is None:
-             raise Exception("id is None")
-
-         variables = {"_a": {"id": id}}
-
-         r = graphql_request(Job.__jobQuery, variables)
-         if "errors" in r:
-             raise Exception(r["errors"])
-         return Job(**r["data"]["job"])
-
-
-     # @staticmethod
-     # def fetchMany(*args):
-     #     """
-     #     Fetch several jobs at once.
-     #     """
-     #     # jobs = CustomAsyncIterable(Job.fetch,*args)
-     #     # return jobs
-
-
-
-     @staticmethod
-     def __createJobVariables(job, config, revisionHash, rid, policy, **kwargs):
-         # Handle the policy field
-         if policy is None:
-             policy = {}
-             if policy.get("tres", None) is None:
-                 policy["tres"] = {}
-             policy["queue"] = job["args"].get("@queue", 1)
-             policy["priority"] = job["args"].get("@priority", 0)
-             tres = {"cpu": 1, "ecpu": 0, "mem": 0}
-             tresStr = job["args"].get("@tres", "")
-             for t in re.split(r"\s+", tresStr):
-                 if t == "":
-                     continue
-                 k, v = t.split("=")
-                 tres[k] = float(v)  # type: ignore
-             policy["tres"] = tres
-         function = job["rid"].replace("job-definition/cloudpss/", "function/CloudPSS/")
-         implement = kwargs.get("implement", None)
-         debug = job["args"].get("@debug", None)
-         debugargs = {}
-         if debug:
-             t = [i.split('=') for i in re.split(r'\s+', debug) if i.find('=') > 0]
-             for i in t:
-                 debugargs[i[0]] = i[1]
-         variables = {
-             "input": {
-                 "args": {
-                     **job["args"],
-                     "_ModelRevision": revisionHash,
-                     "_ModelArgs": config["args"],
-                     "implement": implement
-                 },
-                 "context": [
-                     function,
-                     rid,
-                     f"model/@sdk/{str(int(time.time() * random.random()))}",
-                 ],
-                 "policy": policy,
-                 "debug": debugargs
-             }
-         }
-         return variables
-
-     @staticmethod
-     def create(revisionHash, job, config, name=None, rid="", policy=None, **kwargs):
-         """
-         Create a job run.
-
-         :params: revisionHash project revision hash
-         :params: job computation scheme used for the simulation; the project's first computation scheme is used when empty
-         :params: config parameter scheme used for the simulation; the project's first parameter scheme is used when empty
-         :params: name job name; defaults to the project's parameter-scheme and computation-scheme names when empty
-         :params: rid project rid, may be empty
-
-         :return: a running job instance
-
-         >>> job = Job.create(revisionHash, job, config)
-         """
-         variables = Job.__createJobVariables(job, config, revisionHash, rid, policy)
-         r = graphql_request(Job.__createJobQuery, variables)
-         if "errors" in r:
-             raise Exception(r["errors"])
-         id = r["data"]["job"]["id"]
-         return Job.fetch(id)
-
-
-
-     @staticmethod
-     def load(file, format="yaml"):
-         return IO.load(file, format)
-
-     @staticmethod
-     def dump(job, file, format="yaml", compress="gzip"):
-         return IO.dump(job, file, format, compress)
-
-
-
-     def read(self, receiver=None, **kwargs):
-         """
-         Read the output of the current run through a receiver.
-         """
-         if receiver is not None:
-             self.__receiver = receiver
-         if self.__receiver is None:
-             self.__receiver = MessageStreamReceiver(self)
-         self.__receiver.connect(**kwargs)
-         return self.__receiver
-
-
-
-     def write(self, sender=None, **kwargs) -> MessageStreamSender:
-         """
-         Send input to the current run through a sender.
-         """
-
-         if sender is not None:
-             self.__sender = sender
-         if self.__sender is None:
-             self.__sender = MessageStreamSender(self)
-         self.__sender.connect_legacy(**kwargs)
-         return self.__sender
-
-     def status(self):
-         """
-         :return: 0: running  1: finished  2: failed
-         """
-         time.sleep(0)
-         if self.__receiver is not None:
-             return self.__receiver.status
-         if self.__receiver is None:
-             self.__connect()
-
-         return 0
-
-     def __connect(self):
-         """
-         Connect the receiver and the sender.
-         """
-         viewType = getViewClass(self.context[0])
-         self._result = self.view(viewType)
-
-     @property
-     def result(self) -> T:
-         """
-         Output view of the current run.
-         """
-         if self._result is None:
-             self.__connect()
-         return self._result
-
-
-     def view(self, viewType: F) -> F:
-         """
-         Build a view of the current run's output.
-         """
-         receiver = self.read()
-         sender = self.write()
-         return viewType(receiver, sender)
-
-
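To show how the deleted job module fit together, a hedged sketch of the synchronous workflow; the revision hash, rid, and scheme dictionaries are placeholders shaped after the fields __createJobVariables reads, not values taken from a real project:

    import time

    from cloudpss.job import Job  # exported by the deleted cloudpss/job/__init__.py above

    revision_hash = "<model revision hash>"                 # placeholder
    job_scheme = {
        "rid": "job-definition/cloudpss/emtp",              # illustrative rid
        "args": {},                                         # computation-scheme arguments
    }
    config_scheme = {"args": {}}                            # parameter-scheme arguments

    job = Job.create(revision_hash, job_scheme, config_scheme)
    print("job id:", job.id)

    # status(): 0 running, 1 finished, 2 failed (per the docstring above).
    while job.status() == 0:
        time.sleep(1)

    view = job.result  # view object chosen from the job's context
    print(type(view))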
@@ -1,11 +0,0 @@
- from cloudpss.job.jobTres import JobTres
-
-
- class JobMachine(object):
-     '''job machine'''
-
-     def __init__(self, id, name, tres, valid=None):
-         self.id = id
-         self.name = name
-         self.tres = JobTres(**tres)  # type: ignore
-         self.valid = valid
cloudpss/job/jobPolicy.py DELETED
@@ -1,127 +0,0 @@
- from cloudpss.job.jobQueue import JobQueue
- from cloudpss.job.jobTres import JobTres
- from cloudpss.utils.graphqlUtil import graphql_request
-
-
- class JobPolicy(object):
-
-     def __init__(self, id, name, users, functions, tres, minPriority,
-                  maxPriority, maxDuration, createTime, updateTime, visibility,
-                  queue):
-         self.id = id
-         self.name = name
-         self.users = users
-         self.functions = functions
-         self.tres = []
-         for tre in tres:
-             self.tres.append(JobTres(**tre))  # type: ignore
-         self.minPriority = minPriority
-         self.maxPriority = maxPriority
-         self.maxDuration = maxDuration
-         self.createTime = createTime
-         self.updateTime = updateTime
-         self.visibility = visibility
-         self.queue = JobQueue(**queue)  # type: ignore
-
-     @staticmethod
-     def fetch(id):
-         query = '''query($input:JobPolicyInput!)
-         {
-             jobPolicy(input:$input)
-             {
-                 id
-                 name
-                 users
-                 functions
-                 tres {
-                     cpu
-                     ecpu
-                     mem
-                 }
-                 minPriority
-                 maxPriority
-                 maxDuration
-                 createTime
-                 updateTime
-                 visibility
-                 queue {
-                     id
-                     name
-                     scheduler
-                     machines {
-                         id
-                         name
-                         tres {
-                             cpu
-                             ecpu
-                             mem
-                         }
-                         valid
-                     }
-                     createTime
-                     updateTime
-                     load
-                 }
-             }
-
-         }'''
-         variables = {'input': {'id': id}}
-         r = graphql_request(query, variables)
-         if 'errors' in r:
-             raise Exception(r['errors'])
-         return JobPolicy(**r['data']['jobPolicy'])
-
-     @staticmethod
-     def fetchMany(input):
-         query = '''query($input:JobPoliciesInput!)
-         {
-             jobPolicies(input:$input)
-             {
-                 items {
-                     id
-                     name
-                     users
-                     functions
-                     tres {
-                         cpu
-                         ecpu
-                         mem
-                     }
-                     minPriority
-                     maxPriority
-                     maxDuration
-                     createTime
-                     updateTime
-                     visibility
-                     queue {
-                         id
-                         name
-                         scheduler
-                         machines {
-                             id
-                             name
-                             tres {
-                                 cpu
-                                 ecpu
-                                 mem
-                             }
-                             valid
-                         }
-                         createTime
-                         updateTime
-                         load
-                     }
-                 }
-                 cursor
-                 count
-                 total
-             }
-         }'''
-         variables = {'input': input}
-         r = graphql_request(query, variables)
-         if 'errors' in r:
-             raise Exception(r['errors'])
-         policies = []
-         for policy in r['data']['jobPolicies']['items']:
-             policies.append(JobPolicy(**policy))
-         return policies
cloudpss/job/jobQueue.py DELETED
@@ -1,14 +0,0 @@
- from cloudpss.job.jobMachine import JobMachine
-
-
- class JobQueue(object):
-
-     def __init__(self, id, name, scheduler, machines, createTime, updateTime,
-                  load):
-         self.id = id
-         self.name = name
-         self.scheduler = scheduler
-         self.machines = [JobMachine(**m) for m in machines]
-         self.createTime = createTime
-         self.updateTime = updateTime
-         self.load = load
@@ -1,39 +0,0 @@
- from deprecated import deprecated
- class JobReceiver(object):
-     messages = []
-     index = 0
-
-     def __init__(self):
-         self.index = 0
-         self.messages = []
-
-     def __len__(self):
-         return len(self.messages)
-
-     def __iter__(self):
-         return self
-
-     def __next__(self):
-         maxLength = len(self.messages)
-         if self.index < maxLength:
-             message = self.messages[self.index]
-             self.index += 1
-             return message
-         raise StopIteration()
-
-     def view(self, ViewType):
-         """
-         Get the view data of the given type.
-
-         :params viewType: view type
-
-         :returns: view data of the corresponding type
-
-         >>> view = receiver.view(EMTView)
-         """
-         return ViewType(self)
-
-     @property
-     @deprecated(version='3.0', reason="This property will be removed in version 5.0")
-     def message(self):
-         return self.messages
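A brief sketch of consuming a receiver of this kind; the messages pushed in are placeholder strings (the real message format is not shown in this diff), and the EMTView name comes from the docstring example above with an assumed import path:

    receiver = JobReceiver()
    # In practice the messages list is filled by a running job's message stream;
    # plain strings are used here only to exercise the iterator.
    receiver.messages.extend(["message 1", "message 2"])

    for message in receiver:
        print(message)

    # Wrapping the receiver in a typed view, as the docstring example suggests:
    # from cloudpss.job.view import EMTView  # import path assumed
    # view = receiver.view(EMTView)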
cloudpss/job/jobTres.py DELETED
@@ -1,6 +0,0 @@
- class JobTres(object):
-     '''tres'''
-     def __init__(self, cpu, mem, ecpu):
-         self.cpu = cpu
-         self.mem = mem
-         self.ecpu = ecpu