reflexive 1.2.8__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
reflexive/__init__.py CHANGED
@@ -1,19 +1,4 @@
- # Make classes available at top level
+ from reflexive.service import (AWS_service)
+ from reflexive.res_analysis import (RES_analyser,RES_visualiser)
 
- from reflexive.cfg import Config
- from reflexive.session import (
-     AWS,
-     S3,
-     Comprehend)
-
- # import reflexive.util
- from reflexive.analyse import Nlp
- from reflexive.visualise import (
-     Display,
-     RES_graph)
-
- from reflexive.res import (
-     Res_analyse,
-     Res_display)
-
- __all__ = ["Config","AWS","S3","Comprehend","Nlp","Display","RES_graph","Res_analyse","Res_display"]
+ __all__ = ["AWS_service","RES_analyser","RES_visualiser"]
@@ -0,0 +1,399 @@
+ from typing import Callable
+ from pandas import (DataFrame,Series)
+ from datetime import datetime
+ from zoneinfo import ZoneInfo
+ from time import sleep
+ from functools import partial
+ import tarfile
+ import json
+ import os
+ import numpy as np
+ from numpy.linalg import norm
+ from itertools import chain
+ from graph_tool.all import (
+     Graph,
+     similarity,
+     adjacency)
+
+ ### PIPELINE FUNCTIONS
+
+ # Clean text using supplied function and calculate text length
+ # Used by RES_analyser.preprocess_text()
+
+ def _clean_text(df:DataFrame,text_cleaner:Callable[[str],str])->DataFrame:
+     return (df
+         .assign(text=lambda d: d.text.apply(text_cleaner))
+         .assign(text_length=lambda d: [len(row.text) for row in d.itertuples()]))
+
+ # Upload text using supplied uploader function
+ # Used by RES_analyser.upload_text_to_s3()
+
+ def _upload_text(df:DataFrame,uploader:Callable,res_analyser)->DataFrame:
+     upload = partial(uploader,aws_service=res_analyser.aws_service,config=res_analyser.config,logger=res_analyser.logger)
+     return df.assign(uploaded=lambda d: [upload(str(row.Index),row.text) for row in d.itertuples()])
+
+ # Initiate comprehend analysis on S3 text documents
+
+ def _analyse_text(analyser:Callable,res_analyser)->dict:
+     analyse = partial(analyser,
+         aws_service=res_analyser.aws_service,
+         config=res_analyser.config,
+         logger = res_analyser.logger)
+     job_status = analyse()
+     return job_status['EntitiesDetectionJobProperties']
+
+ # Add comprehend analysis results to dataframe
+ def _analysis_to_dataframe(df:DataFrame,results:list)->DataFrame:
+     analysis = _extract_analysis(results=results)
+     df['res_results']=Series(analysis)
+     return df
+
+ # Offsets to dataframe
+ def _add_offsets(df:DataFrame,offset_cleaner,orphan_joiner)->DataFrame:
+     return (df
+         .assign(offsets=lambda d: d.res_results.apply(offset_cleaner))
+         .assign(offsets_clean=lambda d: [orphan_joiner(row.text,row.offsets) for row in d.itertuples()]))
+
+ def _offset_cleaner(res_results):
+     offsets = _collect_offsets(res_results)
+     tuples = _offset_tuples(offsets)
+     return _sorted_offsets(tuples)
+
+ def _orphan_joiner(text,offsets):
+     otuples = _orphaned_I(text,offsets)
+     offs = _orphaned_word(text,otuples)
+     return _regroup(offs)
+
+ def _collect_offsets(rrs):
+     new_rrs = {}
+     for rr in rrs:
+         if rr['Score']>0.6:
+             ent_type = rr['Type']
+             if ent_type in ['VR','ER']:
+                 label = "NR"
+             elif ent_type in ['EP','EV']:
+                 label = "EP"
+             elif ent_type in ['CN','AF']:
+                 label = "AF"
+             else:
+                 label = ent_type
+             new_rrs.setdefault(label,[]).append((rr['BeginOffset'],rr['EndOffset']))
+     return new_rrs
+
+
+
+ #####
+
+ def _add_res_sequence(df):
+     temp_df = df.copy()
+     temp_df['res_sequence'] = temp_df.offsets_clean.apply(_get_res_sequence)
+     return temp_df
+
+ def _add_res_interactions(df):
+     temp_df = df.copy()
+     temp_df['res_interactions'] = temp_df.res_sequence.apply(_count_res_interactions)
+     return temp_df
+
+ def _add_res_weights(df):
+     temp_df = df.copy()
+     temp_df['res_weights'] = temp_df.res_interactions.apply(_calc_res_weights)
+     return temp_df
+
+ def _add_semantic_weights(df,ranking_factors={}):
+     temp_df = df.copy()
+     ranks = partial(_calc_semantic_weights,factors=ranking_factors)
+     temp_df['semantic_weights'] = temp_df.res_weights.apply(ranks)
+     return temp_df
+
+ def _add_res_adj_matrix(df):
+     temp_df = df.copy()
+     temp_df['res_adj_matrix'] = temp_df.semantic_weights.apply(_create_adj_matrix)
+     return temp_df
+
+ def _get_res_sequence(offsets_clean):
+     return [label for label in offsets_clean.values()]
+
+
+ def _empty_res_interactions() -> dict[tuple,int]:
+     RE_types = ['RR','NR','AR','AF','EP']
+     RE_interactions:dict[tuple,int] = dict()
+     for t1 in RE_types:
+         for t2 in RE_types:
+             entry = tuple(sorted((t1,t2)))
+             if entry not in RE_interactions.keys():
+                 RE_interactions[entry] = 0
+     return RE_interactions
+
+ def _count_res_interactions(re_sequence:list[str]) -> dict[tuple,int]:
+     re_ints = _empty_res_interactions()
+     limit = len(re_sequence)-1
+     for i,s in enumerate(re_sequence):
+         if i < limit:
+             rei = tuple(sorted((s,re_sequence[i+1])))
+             #print(i,rei)
+             re_ints[rei] += 1
+     return re_ints
+
+ def _calc_res_weights(interactions:dict[tuple,int])->dict[tuple,float]:
+     max_count = max(interactions.values())
+     weights = dict()
+     for edge,count in interactions.items():
+         weights[edge] = round(count/(max_count),2)
+     return weights
+
+
+
+ def _calc_semantic_weights(weights:dict[tuple,float], factors:dict[tuple,float]={})->dict[tuple,float]:
+     if not factors:
+         return weights
+     else:
+         for edge,w in weights.items():
+             weights[edge] = factors[edge] * w
+         return weights
+
+
+ def _create_adj_matrix(weights:dict[tuple,float])->list[list[float]]:
+     re_types = ["RR","NR","AR","AF","EP"]
+     matrix = []
+     for r in re_types:
+         row = []
+         for c in re_types:
+             key = tuple(sorted((r,c)))
+             #print(key)
+             weight = weights.get(key,0)
+             row.append(weight)
+         matrix.append(row)
+     return matrix
+
+ ### GRAPH ANALYSIS
+
+ def _jaccard_similarity(g1:Graph,g2:Graph)->float:
+     return similarity(g1, g2,
+         eweight1=g1.ep['e_weights'], eweight2=g2.ep['e_weights'],
+         #label1=g1.vp['v_labels'], label2=g2.vp['v_labels'],
+         norm=True, p=1.0, distance=False, asymmetric=False)
+
+ def _cosine_similarity(m1,m2)->float:
+     v1 = list(chain.from_iterable(m1))
+     v2 = list(chain.from_iterable(m2))
+     return np.dot(v1,v2)/(norm(v1)*norm(v2))
+
+
+
+ ### PIPELINE SUPPORT FUNCTIONS
+
+ # Clean return characters and strip whitespace
+ # Used by preprocess_text()
+ def _whitespace_cleaner(text:str)->str:
+     return text.strip().replace('\r\n','\n')
+
+ # Upload text to S3
+ def _s3_text_uploader(idx:str,text:str,aws_service,config:dict,logger)->bool:
+     try:
+         response = aws_service.s3_client.put_object(Body=text,
+             Bucket=aws_service.aws_params["s3_bucket_name"],
+ Key=f"{config["s3_source_dir"]}/{idx}.txt")
196
+     except Exception as e:
+         logger.error("There was an error when uploading text to s3 %s",repr(e))
+         return False
+     else:
+         if response['ResponseMetadata']['HTTPStatusCode']==200:
+             logger.debug(f"File {idx} uploaded successfully")
+             return True
+         else:
+             logger.error(f"File {idx} did not upload successfully to S3: {response}")
+             return False
+
+ # Analyse text with comprehend custom entity recognizer
+ def _comprehend_cer_analyser(aws_service,config,logger)->dict:
+     try:
+         response = aws_service.comprehend_client.start_entities_detection_job(
+             InputDataConfig={
+                 'S3Uri': _comprehend_input_uri(aws_service.aws_params["s3_bucket_name"],
+                     config["s3_source_dir"]),
+                 'InputFormat': 'ONE_DOC_PER_FILE'
+             },
+             OutputDataConfig={
+                 'S3Uri': _comprehend_output_uri(aws_service.aws_params["s3_bucket_name"],
+                     config["s3_target_dir"])
+             },
+             DataAccessRoleArn=_comprehend_access_role_arn(aws_service.aws_params["comprehend_service_role_name"],
+                 aws_service.aws_account_number),
+             JobName=f"res_analysis_{_date_string()}",
+             EntityRecognizerArn=_comprehend_cer_arn(aws_service.aws_session.region_name,
+                 aws_service.aws_account_number,
+                 aws_service.aws_params["reflexive_entity_name"],
+                 aws_service.aws_params["reflexive_entity_version"]),
+             LanguageCode='en'
+         )
+     except Exception as e:
+         logger.error("There was an error when analysing text with comprehend %s",repr(e))
+         return {"ERROR":repr(e)}
+     else:
+         return aws_service.comprehend_client.describe_entities_detection_job(JobId=response['JobId'])
+
+ # Monitor a CER Analysis Job
+ def _cer_job_progress(status:dict,aws_service,tz,output)->dict:
+     # Submitted
+     job_name = status['JobName']
+     job_id = status['JobId']
+     submit_time = status['SubmitTime'].astimezone(ZoneInfo(tz))
+     output(f"RES_ANALYSIS JOB {job_name} ({job_id}) submitted at: {submit_time}")
+
+     # In progress
+     while status['JobStatus'] in ["SUBMITTED","IN_PROGRESS"]:
+         time = datetime.now().astimezone(ZoneInfo(tz))
+         job_status = status['JobStatus']
+         output(f"{time} [{job_id}] {job_name} status: {job_status}")
+         sleep(10)
+         properties = aws_service.comprehend_client.describe_entities_detection_job(JobId=job_id)
+         status=properties['EntitiesDetectionJobProperties']
+
+     # Finished (complete or error)
+     job_status = status['JobStatus']
+     end_time = status['EndTime'].astimezone(ZoneInfo(tz))
+     time_taken = end_time - submit_time
+     output_url = status['OutputDataConfig']['S3Uri']
+     output(f"RES_ANALYSIS JOB {job_name} ({job_id}) finished with status: {job_status} at: {end_time}")
+     output(f"Analysis time: {str(time_taken)}")
+     output(f"Results available at: {output_url}")
+     return status
+
+
+ # Download from S3 to local
+ def _download_from_s3(res_analyser,status)->str:
+     local_file_path = f"{res_analyser.config['local_data_dir']}/{status['JobName']}.tar.gz"
+     bucket_name = res_analyser.aws_service.aws_params["s3_bucket_name"]
+     try:
+         output_key = status['OutputDataConfig']['S3Uri'].split(bucket_name)[1]
+         with open(f"{local_file_path}",'wb') as output_data:
+             res_analyser.aws_service.s3_client.download_fileobj(bucket_name,output_key[1:],output_data)
+     except Exception as e:
+         res_analyser.logger.error("An error occurred when downloading results from S3: %s",repr(e))
+         local_file_path = None
+     return local_file_path
+
+ # Extract results from tar.gz file and save as json
+ def _extract_save_results(res_analyser,local_file_path)->list:
+     # extract the tar archive
+     files = list()
+     with tarfile.open(f"{local_file_path}", "r:gz") as tf:
+         for member in tf.getmembers():
+             f = tf.extractfile(member)
+             if f is not None:
+                 content = f.read()
+                 files.append(content)
+     # extract results and save and return
+     raw_results = files[0].decode("utf-8").split('\n')
+     raw_results.pop() # pop last item off as empty entry due to final \n
+     #
+     #json_results = json.dumps(raw_results)
+     #res_analyser.logger.info("raw_results>> ",raw_results)
+     results = [json.loads(result) for result in raw_results]
+     with open(f"{local_file_path[:-7]}.json","w") as fp:
+         json.dump(results,fp)
+     return results
+
+ # Get a dict of (index,entities) from cer analysis results
+ def _extract_analysis(results):
+     file_ents = ((result["File"],result["Entities"]) for result in results)
+     idx_ents = ((int(file.split('_')[-1].split('.')[0]),ents) for file,ents in file_ents)
+     return dict(idx_ents)
+
+
+
+ # Comprehend access role arn
+ def _comprehend_access_role_arn(comprehend_service_role_name,aws_account_number):
+     return f"arn:aws:iam::{aws_account_number}:role/service-role/{comprehend_service_role_name}"
+
+ # Comprehend input url
+ def _comprehend_input_uri(s3_bucket_name,s3_files,prefix=""):
+     return f"s3://{s3_bucket_name}/{s3_files}/{prefix}"
+
+ # Comprehend output url
+ def _comprehend_output_uri(s3_bucket_name,s3_results):
+     return f"s3://{s3_bucket_name}/{s3_results}/"
+
+ # Comprehend entity recognizer arn
+ def _comprehend_cer_arn(region,account_number,cer_name,cer_version):
+     return f"arn:aws:comprehend:{region}:{account_number}:entity-recognizer/{cer_name}/version/{cer_version}"
+
+ ## Offset functions
+
+ def _offset_tuples(offsets):
+     for k,vs in offsets.items():
+         for b,e in vs:
+             yield (b,(e,k))
+
+ def _sorted_offsets(offsets):
+     return sorted(offsets)
+
+ def _orphaned_I(text,offsets):
+     for b,(e,t) in offsets:
+         if 'I' in text[(b-2):(b-1)].strip():
+             #print(text[(b-2):e],t)
+             yield (b-2, (e,t))
+         else:
+             yield (b, (e,t))
+
+ def _orphaned_word(text,offsets):
+     coffs = {}
+     p = (0,(-2,''))
+     for b,(e,t) in offsets:
+         #print(p[1][0])
+         if (p[1][0]+3)>=b:
+             #print("Prev:",p,f"|{df.text[0][p[0]:p[1][0]]}|")
+             #print("<--->",f"|{df.text[0][(p[1][0]+1):(b-1)]}|")
+             #print("This:",b,e,t,f"|{df.text[0][b:e]}|")
+             #print()
+             if len((text[p[0]:p[1][0]]).split(' '))<2:
+                 #print(f"Removing {p[0]},{p[1][0]},{p[1][1]}")
+                 coffs.pop(p[0])
+                 #print(f"Replacing {b},{e},{t} with {p[0]},{e},{t}")
+                 coffs[p[0]] = (e,t)
+                 p=(p[0],(e,t))
+             else:
+                 coffs[b] = (e,t)
+                 p = (b,(e,t))
+         else:
+             coffs[b] = (e,t)
+             p = (b,(e,t))
+     return coffs.items()
+
+ def _regroup(offsets):
+     grouped = (((b,e),k) for (b,(e,k)) in offsets)
+     return dict(grouped)
+
+
+
+
+ ### UTILITY FUNCTIONS
+
+ # Create a reverse date string YYYYmmdd based on current local time
+ def _date_string()->str:
+     return datetime.today().strftime('%Y%m%d')
+
+ # Get the current local working dir
+ def _local_path(dir)->str:
+     return os.getcwd()+dir
+
+ # Check if local directory exists
+ def _dir_exists_local(dir:str)->bool:
+     return os.path.exists(_local_path(dir))
+
+ # Return function to create directory
+ def _create_dir(dir)->str:
+     os.makedirs(_local_path(dir))
+     return _local_path(dir)
+
+ # Create local directory if required
+ def _create_local_dir(dir,logger)->str:
+     if not _dir_exists_local(dir):
+         try:
+             path = _create_dir(dir)
+         except Exception as e:
+             logger.error("There was an error creating the local directory: %s",repr(e))
+             path = None
+         return path
+     else:
+         return _local_path(dir)
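The pipeline above turns a sequence of reflexive-expression labels into pairwise interaction counts, normalised weights, and a 5x5 adjacency matrix. A minimal sketch of that chain, assuming the helper functions above are importable (the new module's path is not named in this hunk) and using an invented label sequence:

# Illustrative only: the label sequence is made up; function names come from the hunk above.
sequence = ["RR", "NR", "RR", "EP", "AF"]          # shape of _get_res_sequence() output
interactions = _count_res_interactions(sequence)    # adjacent-pair counts, e.g. ('NR','RR') -> 2
weights = _calc_res_weights(interactions)           # counts normalised by the maximum count
matrix = _create_adj_matrix(weights)                # 5x5 matrix, rows/cols ordered RR, NR, AR, AF, EP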
@@ -0,0 +1,165 @@
+ from graph_tool.all import (
+     Graph,
+     VertexPropertyMap,
+     EdgePropertyMap,
+     GraphPropertyMap,
+     graph_draw,
+     ungroup_vector_property,
+     group_vector_property
+     )
+ import cairo
+ from itertools import chain
+ from spacy import displacy
+
+ # Text Display functions
+
+ def _create_displacy_ents(name,text,offsets):
+     return {"text": text,
+         "ents": [{"start":s,"end":e,"label":l} for (s,e),l in offsets.items()],
+         "title": name}
+
+ def _render_annotated_text(ents,inline=True):
+     #default to inline
+     page_opt = False
+     jupyter_opt = True
+     if not inline:
+         page_opt = True
+         jupyter_opt = False
+
+     return displacy.render(ents,manual=True,style="ent", options=_get_text_display_options(),page=page_opt,jupyter=jupyter_opt)
+
+ def _get_text_display_options():
+     colours = dict([(prop['lbl'],prop['clr']) for prop in _res_graph_properties().values()])
+     return {"ents": list(colours.keys()), "colors": colours}
+
+ # RES properties for all graphs
+ def _res_graph_properties()->dict:
+     return {0:{ "lbl":"RR",
+                 "pos":(0.2,6.5),
+                 "clr":"#00AEEF"},
+             1:{ "lbl":"NR",
+                 "pos":(5,10),
+                 "clr":"#ED1B23"},
+             2:{ "lbl":"AR",
+                 "pos":(9.8,6.5),
+                 "clr":"#00A64F"},
+             3:{ "lbl":"AF",
+                 "pos":(7.9,1),
+                 "clr":"#EC008C"},
+             4:{ "lbl":"EP",
+                 "pos":(2.1,1),
+                 "clr":"#FFF200"}}
+
+ # Create a graph from an adjacency matrix
+ def _create_graph(matrix,id)->Graph:
+     if matrix:
+         graph = _graph_from_edges(dict(_matrix_to_dict(matrix)))
+     else:
+         graph = _graph_no_edges()
+     prop_list = _res_graph_properties().values()
+     graph.vp["v_positions"] = graph.new_vp("vector<double>",vals=[prop['pos'] for prop in prop_list])
+     graph.vp["v_labels"] = graph.new_vp("string",vals=[prop['lbl'] for prop in prop_list])
+     graph.gp["id"] = graph.new_gp("string",val=id)
+     return graph
+
+ # # Vertex properties common to all graphs
+ # v_lbl = graph.new_vp("string",vals=_get_prop_values('lbl'))
+ # v_pos = graph.new_vp("vector<double>",vals=_get_prop_values('pos'))
+ # # Make propertyMaps internal to the graph
+ # graph.vp["v_colour"] = v_clr
+ # graph.vp["v_position"] = v_pos
+ # graph.vp["v_label"] = v_lbl
+ # graph.ep["e_weights"] = e_weight
+
+ def _graph_from_edges(edges:dict)->Graph:
+     graph = Graph(g=edges.keys(),directed=False)
+     graph.ep["e_weights"] = graph.new_ep("double",vals=edges.values())
+     graph.ep["e_widths"] = graph.new_ep("double",vals=_scale_weights(edges.values()))
+     graph.vp["v_colours"] = _get_vcolours_from_edges(graph)
+     return graph
+
+ def _scale_weights(weights,factor=5):
+     return [round(w*factor,1) for w in weights]
+
+ def _graph_no_edges()->Graph:
+     graph = Graph(g=_empty_edge_dict(),directed=False)
+     graph.ep["e_weights"] = graph.new_ep("double")
+     graph.ep["e_widths"] = graph.new_ep("double")
+     graph.vp["v_colours"] = graph.new_vp("string",val="#cccccc")
+     return graph
+
+ def _get_vcolours_from_edges(graph:Graph)->VertexPropertyMap:
+     prop_list:dict[int,dict] = _res_graph_properties()
+     for i in _isolated_vertices(graph):
+         prop_list[i]['clr']= "#cccccc"
+     return graph.new_vp("string",[prop['clr'] for prop in prop_list.values()])
+
+ def _isolated_vertices(graph):
+     edgelist = chain.from_iterable([sorted((int(e.source()),int(e.target()))) for e in graph.edges()])
+     return set(range(5)) - set([e for e in set(edgelist)])
+
+ #
+ def _matrix_to_dict(matrix):
+     egen = ((((tuple(sorted((r,c))),w)) for c,w in enumerate(row) if w>0) for r,row in enumerate(matrix) if sum(row)>0)
+     return dict(chain.from_iterable(egen))
+     # edges = {}
+     # for r,row in enumerate(matrix):
+     #     # if empty row, add to iso_vertices
+     #     # if sum(row) == 0:
+     #     #     self.iso_vertices.add(r)
+     #     # else:
+     #     if sum(row) > 0: # edge exists
+     #         for c,weight in enumerate(row):
+     #             if weight > 0:
+     #                 edge = tuple(sorted((r,c)))
+     #                 #print("r,c:",edge," - ",weight)
+     #                 edges[edge] = weight
+     # return edges
+
+ #
+ def _empty_edge_dict():
+     empty_edges = {}
+     for idx in range(5): #self.gt_props.keys():
+         empty_edges[idx] = []
+     return empty_edges
+
+ #
+ def _get_prop_values(key):
+     values_list = _res_graph_properties().values()
+     return [p[key] for p in values_list]
+
+ # flip coordinates for graph-tool
+ def _flipY(vpositions):
+     x, y = ungroup_vector_property(vpositions, [0, 1])
+     y.fa *= -1
+     y.fa -= y.fa.min()
+     return group_vector_property([x, y])
+
+ #
+ def _draw_graph(graph:Graph,inline=True):
+
+     positions = _flipY(graph.vp["v_positions"])
+     labels = graph.vp["v_labels"]
+     colors = graph.vp["v_colours"]
+     widths = graph.ep["e_widths"]
+     graph_draw(graph, inline=inline,output_size=(300,300),fit_view=0.7,
+         pos=positions,
+         vertex_text=labels,
+         vertex_font_family="sans serif",
+         vertex_font_size=18,
+         vertex_font_weight=cairo.FONT_WEIGHT_BOLD,
+         vertex_fill_color=colors,
+         vertex_size = 50,
+         vertex_halo=False,
+         vertex_pen_width=1.2,
+         vertex_color="#999999",
+         edge_pen_width=widths)
+
+ # def get_vertex_labels(self):
+ #     return self._get_prop_values('lbl')
+
+ # def get_vertex_colours(self):
+ #     return self._get_prop_values('clr')
+
+ # def get_vertex_positions(self):
+ #     return self._get_prop_values('pos')
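The display helpers wrap spaCy's displacy manual entity renderer with the RES colour map defined above. A minimal sketch, assuming the helpers are importable (the module path is not named in this hunk) and using invented offsets in the {(start, end): label} shape produced by the offset functions in the other new module:

# Illustrative only: text and offsets are made up; labels must match the RES colour map.
text = "I reflected on the results and planned the next steps."
offsets = {(2, 11): "RR", (31, 38): "EP"}
ents = _create_displacy_ents("example", text, offsets)
html = _render_annotated_text(ents, inline=False)   # page=True, jupyter=False returns HTML markup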