tricc_oo-1.0.1-py3-none-any.whl → tricc_oo-1.4.15-py3-none-any.whl

This diff shows the changes between two publicly available versions of the package, as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the registry.
Files changed (66)
  1. tests/build.py +213 -0
  2. tests/test_cql.py +197 -0
  3. tests/to_ocl.py +51 -0
  4. {tricc → tricc_oo}/__init__.py +3 -1
  5. tricc_oo/converters/codesystem_to_ocl.py +169 -0
  6. tricc_oo/converters/cql/cqlLexer.py +822 -0
  7. tricc_oo/converters/cql/cqlListener.py +1632 -0
  8. tricc_oo/converters/cql/cqlParser.py +11204 -0
  9. tricc_oo/converters/cql/cqlVisitor.py +913 -0
  10. tricc_oo/converters/cql_to_operation.py +402 -0
  11. tricc_oo/converters/datadictionnary.py +115 -0
  12. tricc_oo/converters/drawio_type_map.py +222 -0
  13. tricc_oo/converters/tricc_to_xls_form.py +61 -0
  14. tricc_oo/converters/utils.py +65 -0
  15. tricc_oo/converters/xml_to_tricc.py +1003 -0
  16. tricc_oo/models/__init__.py +4 -0
  17. tricc_oo/models/base.py +732 -0
  18. tricc_oo/models/calculate.py +216 -0
  19. tricc_oo/models/ocl.py +281 -0
  20. tricc_oo/models/ordered_set.py +125 -0
  21. tricc_oo/models/tricc.py +418 -0
  22. tricc_oo/parsers/xml.py +138 -0
  23. tricc_oo/serializers/__init__.py +0 -0
  24. tricc_oo/serializers/xls_form.py +745 -0
  25. tricc_oo/strategies/__init__.py +0 -0
  26. tricc_oo/strategies/input/__init__.py +0 -0
  27. tricc_oo/strategies/input/base_input_strategy.py +111 -0
  28. tricc_oo/strategies/input/drawio.py +317 -0
  29. tricc_oo/strategies/output/base_output_strategy.py +148 -0
  30. tricc_oo/strategies/output/spice.py +365 -0
  31. tricc_oo/strategies/output/xls_form.py +697 -0
  32. tricc_oo/strategies/output/xlsform_cdss.py +189 -0
  33. tricc_oo/strategies/output/xlsform_cht.py +200 -0
  34. tricc_oo/strategies/output/xlsform_cht_hf.py +334 -0
  35. tricc_oo/visitors/__init__.py +0 -0
  36. tricc_oo/visitors/tricc.py +2198 -0
  37. tricc_oo/visitors/utils.py +17 -0
  38. tricc_oo/visitors/xform_pd.py +264 -0
  39. tricc_oo-1.4.15.dist-info/METADATA +219 -0
  40. tricc_oo-1.4.15.dist-info/RECORD +46 -0
  41. {tricc_oo-1.0.1.dist-info → tricc_oo-1.4.15.dist-info}/WHEEL +1 -1
  42. tricc_oo-1.4.15.dist-info/top_level.txt +2 -0
  43. tricc/converters/mc_to_tricc.py +0 -542
  44. tricc/converters/tricc_to_xls_form.py +0 -553
  45. tricc/converters/utils.py +0 -44
  46. tricc/converters/xml_to_tricc.py +0 -740
  47. tricc/models/tricc.py +0 -1093
  48. tricc/parsers/xml.py +0 -81
  49. tricc/serializers/xls_form.py +0 -364
  50. tricc/strategies/input/base_input_strategy.py +0 -80
  51. tricc/strategies/input/drawio.py +0 -246
  52. tricc/strategies/input/medalcreator.py +0 -168
  53. tricc/strategies/output/base_output_strategy.py +0 -92
  54. tricc/strategies/output/xls_form.py +0 -194
  55. tricc/strategies/output/xlsform_cdss.py +0 -46
  56. tricc/strategies/output/xlsform_cht.py +0 -106
  57. tricc/visitors/tricc.py +0 -375
  58. tricc_oo-1.0.1.dist-info/LICENSE +0 -78
  59. tricc_oo-1.0.1.dist-info/METADATA +0 -229
  60. tricc_oo-1.0.1.dist-info/RECORD +0 -26
  61. tricc_oo-1.0.1.dist-info/top_level.txt +0 -2
  62. venv/bin/vba_extract.py +0 -78
  63. {tricc → tricc_oo}/converters/__init__.py +0 -0
  64. {tricc → tricc_oo}/models/lang.py +0 -0
  65. {tricc/serializers → tricc_oo/parsers}/__init__.py +0 -0
  66. {tricc → tricc_oo}/serializers/planuml.py +0 -0
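The file list amounts to a rename of the top-level package from `tricc` to `tricc_oo`, plus new converter, model, and strategy modules. For downstream code, the visible effect is the changed import prefix; below is a hypothetical compatibility sketch, assuming only the package prefix moved. The module path `converters/tricc_to_xls_form` appears in both versions per the list above, but symbol-level compatibility between 1.0.1 and 1.4.15 is not verified here.

```python
# Hypothetical shim for code written against the old "tricc" layout.
try:
    from tricc_oo.converters import tricc_to_xls_form  # layout in 1.4.15
except ImportError:
    from tricc.converters import tricc_to_xls_form     # layout in 1.0.1
```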
tricc/strategies/output/xls_form.py
@@ -1,194 +0,0 @@
- '''
- Strategy to build the skyp logic following the XLSForm way
-
- '''
-
- import datetime
- import logging
- import os
-
- import pandas as pd
-
- from tricc.converters.tricc_to_xls_form import (generate_xls_form_calculate,
-                                                 generate_xls_form_condition,
-                                                 generate_xls_form_relevance)
- from tricc.models.tricc import (TriccNodeActivity, check_stashed_loop,
-                                 walktrhough_tricc_node_processed_stached, TriccGroup)
- from tricc.serializers.xls_form import (CHOICE_MAP, SURVEY_MAP, end_group,
-                                         generate_xls_form_export, start_group)
- from tricc.strategies.output.base_output_strategy import BaseOutPutStrategy
-
- logger = logging.getLogger('default')
-
-
- class XLSFormStrategy(BaseOutPutStrategy):
-
-     df_survey = pd.DataFrame(columns=SURVEY_MAP.keys())
-     df_calculate = pd.DataFrame(columns=SURVEY_MAP.keys())
-     df_choice = pd.DataFrame(columns=CHOICE_MAP.keys())
-
-
-     # add save nodes and merge nodes
-
-     def generate_base(self,node, **kwargs):
-         return generate_xls_form_condition(node, **kwargs)
-
-     def generate_relevance(self, node, **kwargs):
-         return generate_xls_form_relevance(node, **kwargs)
-
-     def generate_calculate(self, node, **kwargs):
-         return generate_xls_form_calculate( node, **kwargs)
-
-
-     def __init__(self, output_path):
-         super().__init__( output_path)
-         self.do_clean()
-
-     def do_clean(self, **kwargs):
-         self.calculates= {}
-         self.used_calculates = {}
-
-
-     def get_kwargs(self):
-         return {
-             'df_survey':self.df_survey,
-             'df_choice':self.df_choice,
-             'df_calculate':self.df_calculate
-         }
-
-     def generate_export(self, node, **kwargs):
-         return generate_xls_form_export(node, **kwargs)
-
-     def export(self,start_pages ):
-
-         if start_pages['main'].root.form_id is not None:
-             form_id= str(start_pages['main'].root.form_id )
-         else:
-             logger.error("form id required in the first start node")
-             exit()
-         title = start_pages['main'].root.label
-         file_name = form_id + ".xlsx"
-         # make a 'settings' tab
-         now = datetime.datetime.now()
-         version=now.strftime('%Y%m%d%H%M')
-         indx=[[1]]
-
-         settings={'form_title':title,'form_id':form_id,'version':version,'default_language':'English (en)','style':'pages'}
-         df_settings=pd.DataFrame(settings,index=indx)
-         df_settings.head()
-
-         newpath = os.path.join(self.output_path, file_name)
-         #create a Pandas Excel writer using XlsxWriter as the engine
-         writer = pd.ExcelWriter(newpath, engine='xlsxwriter')
-         self.df_survey.to_excel(writer, sheet_name='survey',index=False)
-         self.df_choice.to_excel(writer, sheet_name='choices',index=False)
-         df_settings.to_excel(writer, sheet_name='settings',index=False)
-
-         #close the Pandas Excel writer and output the Excel file
-         #writer.save()
-
-         # run this on a windows python instance because if not then the generated xlsx file remains open
-         writer.close()
-         #writer.handles = None
-
-     def process_export(self, start_pages, **kwargs):
-         self.activity_export(start_pages['main'], **kwargs)
-
-     def activity_export(self, activity, processed_nodes = [], **kwargs):
-         stashed_nodes = []
-         # The stashed node are all the node that have all their prevnode processed but not from the same group
-         # This logic works only because the prev node are ordered by group/parent ..
-         skip_header = 0
-         groups= {}
-         cur_group = activity
-         groups[activity.id] = 0
-         path_len = 0
-         # keep the vesrions on the group id, max version
-         start_group( cur_group=cur_group, groups=groups, **self.get_kwargs())
-         walktrhough_tricc_node_processed_stached(activity.root, self.generate_export, processed_nodes, stashed_nodes,path_len , cur_group = activity.root.group, **self.get_kwargs() )
-         end_group( cur_group =activity, groups=groups, **self.get_kwargs())
-         # we save the survey data frame
-         df_survey_final = pd.DataFrame(columns=SURVEY_MAP.keys())
-         if len(self.df_survey)>(2+skip_header):
-             df_survey_final = self.df_survey
-         ## MANAGE STASHED NODES
-         prev_stashed_nodes = stashed_nodes.copy()
-         loop_count = 0
-         len_prev_processed_nodes = 0
-         while len(stashed_nodes)>0:
-             self.df_survey = pd.DataFrame(columns=SURVEY_MAP.keys())
-             loop_count = check_stashed_loop(stashed_nodes,prev_stashed_nodes, processed_nodes,len_prev_processed_nodes, loop_count)
-             prev_stashed_nodes = stashed_nodes.copy()
-             len_prev_processed_nodes = len(processed_nodes)
-             if len(stashed_nodes)>0:
-                 s_node = stashed_nodes.pop()
-                 #while len(stashed_nodes)>0 and isinstance(s_node,TriccGroup):
-                 #    s_node = stashed_nodes.pop()
-                 if len(s_node.prev_nodes)>0:
-                     path_len = sorted(s_node.prev_nodes, key=lambda p_node:p_node.path_len, reverse=True )[0].path_len+1
-                 if s_node.group is None:
-                     logger.error("ERROR group is none for node {}".format(s_node.get_name()))
-                 start_group( cur_group =s_node.group, groups=groups, relevance= True, **self.get_kwargs())
-                 # arrange empty group
-                 walktrhough_tricc_node_processed_stached(s_node, self.generate_export, processed_nodes, stashed_nodes, path_len, groups=groups,cur_group = s_node.group, **self.get_kwargs() )
-                 # add end group if new node where added OR if the previous end group was removed
-                 end_group( cur_group =s_node.group, groups=groups, **self.get_kwargs())
-                 # if two line then empty grou
-                 if len(self.df_survey)>(2+skip_header):
-                     if cur_group == s_node.group:
-                         # drop the end group (to merge)
-                         logger.debug("printing same group {}::{}::{}::{}".format(s_node.group.__class__, s_node.group.get_name(),s_node.id, s_node.group.instance))
-                         df_survey_final.drop(index=df_survey_final.index[-1], axis=0, inplace=True)
-                         self.df_survey = self.df_survey[(1+skip_header):]
-                         df_survey_final=pd.concat([df_survey_final, self.df_survey], ignore_index=True)
-
-                     else:
-                         logger.debug("printing group {}::{}::{}::{}".format(s_node.group.__class__, s_node.group.get_name(),s_node.id,s_node.group.instance))
-                         df_survey_final =pd.concat([df_survey_final, self.df_survey], ignore_index=True)
-                         cur_group = s_node.group
-
-
-         # add the calulate
-
-         self.df_calculate=self.df_calculate.dropna(axis=0, subset=['calculation'])
-         df_empty_calc = self.df_calculate[self.df_calculate['calculation'] == '']
-         self.df_calculate=self.df_calculate.drop(df_empty_calc.index)
-         self.df_survey = pd.concat([df_survey_final,self.df_calculate], ignore_index=True)
-         df_duplicate = self.df_calculate[self.df_calculate.duplicated(subset=['calculation'], keep='first')]
-         #self.df_survey=self.df_survey.drop_duplicates(subset=['name'])
-         for index, drop_calc in df_duplicate.iterrows():
-             #remove the duplicate
-             replace_name = False
-             #find the actual calcualte
-             similar_calc = self.df_survey[(drop_calc['calculation'] == self.df_survey['calculation']) & (self.df_survey['type'] == 'calculate')]
-             same_calc = self.df_survey[self.df_survey['name'] == drop_calc['name']]
-             if len(same_calc) > 1:
-                 # check if all calc have the same name
-                 if len(same_calc) == len(similar_calc):
-                     # drop all but one
-                     self.df_survey.drop(same_calc.index[1:])
-                 elif len(same_calc) < len(similar_calc):
-                     self.df_survey.drop(same_calc.index)
-                     replace_name = True
-             elif len(same_calc) == 1:
-                 self.df_survey.drop(similar_calc.index)
-                 replace_name = True
-
-
-             if replace_name:
-                 save_calc = self.df_survey[(drop_calc['calculation'] == self.df_survey['calculation']) & (self.df_survey['type'] == 'calculate') ]
-                 if len(save_calc) >= 1:
-                     save_calc = save_calc.iloc[0]
-                     if save_calc['name']!= drop_calc['name']:
-                         self.df_survey.replace('\$\{'+drop_calc['name']+'\}', '\$\{'+save_calc['name']+'\}', regex=True)
-                 else:
-                     logger.error("duplicate reference not found for calculation: {}".format(drop_calc['calculation']))
-         for index, empty_calc in df_empty_calc.iterrows():
-             self.df_survey.replace('\$\{'+empty_calc['name']+'\}', '1', regex=True)
-
-         #TODO try to reinject calc to reduce complexity
-         for i,c in self.df_calculate[~self.df_calculate['name'].isin(self.df_survey['name'])].iterrows():
-             real_calc = re.find(r'^number\((.+)\)$',c['calculation'])
-             if real_calc is not None and real_calc != '':
-                 self.df_survey[~self.df_survey['name']==c['name']].replace(real_calc, '\$\{'+c['name']+'\}')
-         return processed_nodes
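The removed XLSFormStrategy boils down to walking the activity graph into survey/choices/calculate DataFrames and then writing them out as the three XLSForm sheets. A standalone sketch of just that final output step, with placeholder columns and values (in the real code the column sets come from SURVEY_MAP and CHOICE_MAP):

```python
# Minimal sketch of the workbook layout the removed export() produced:
# three XLSForm sheets (survey, choices, settings) written with pandas + XlsxWriter.
import datetime
import pandas as pd

df_survey = pd.DataFrame(columns=["type", "name", "label"])      # placeholder columns
df_choice = pd.DataFrame(columns=["list_name", "name", "label"])
df_settings = pd.DataFrame([{
    "form_title": "Example form",
    "form_id": "example_form",
    "version": datetime.datetime.now().strftime("%Y%m%d%H%M"),
    "default_language": "English (en)",
    "style": "pages",
}])

with pd.ExcelWriter("example_form.xlsx", engine="xlsxwriter") as writer:
    df_survey.to_excel(writer, sheet_name="survey", index=False)
    df_choice.to_excel(writer, sheet_name="choices", index=False)
    df_settings.to_excel(writer, sheet_name="settings", index=False)
```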
tricc/strategies/output/xlsform_cdss.py
@@ -1,46 +0,0 @@
- from tricc.models.tricc import TriccNodeActivity
- from tricc.serializers.xls_form import (get_diagnostic_add_line,
-                                         get_diagnostic_line,
-                                         get_diagnostic_none_line,
-                                         get_diagnostic_start_group_line,
-                                         get_diagnostic_stop_group_line)
- from tricc.converters.tricc_to_xls_form import get_export_name
- from tricc.strategies.output.xls_form import XLSFormStrategy
-
-
-
- class XLSFormCDSSStrategy(XLSFormStrategy):
-
-
-
-     def process_export(self, start_pages, **kwargs):
-         diags = []
-         self.activity_export(start_pages[self.processes[0]], **kwargs)
-
-         diags += self.export_diag( start_pages[self.processes[0]], **kwargs)
-
-         # add the diag
-         self.df_survey.loc[len(self.df_survey)] = get_diagnostic_start_group_line()
-         # TODO inject flow driven diag list, the folowing fonction will fill the missing ones
-
-         if len(diags)>0:
-             for diag in diags:
-                 self.df_survey.loc[len(self.df_survey)] = get_diagnostic_line(diag)
-             self.df_survey.loc[len(self.df_survey)] = get_diagnostic_none_line(diags)
-             self.df_survey.loc[len(self.df_survey)] = get_diagnostic_add_line(diags, self.df_choice)
-
-         self.df_survey.loc[len(self.df_survey)] = get_diagnostic_stop_group_line()
-         #TODO inject the TT flow
-
-
-
-
-     def export_diag(self, activity, diags = [], **kwargs):
-         for node in activity.nodes.values():
-             if isinstance(node, TriccNodeActivity):
-                 diags = self.export_diag(node, diags, **kwargs)
-             if hasattr(node, 'name') and node.name is not None:
-                 if node.name.startswith('diag') and node.last\
-                     and not any([get_export_name(diag) == get_export_name(node) for diag in diags]):
-                     diags.append(node)
-         return diags
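The removed CDSS strategy appends its diagnostic section row by row with `df.loc[len(df)]`, wrapping the collected `diag*` nodes in a begin/end group. A minimal sketch of that append pattern, with placeholder column names and hypothetical diagnosis names:

```python
import pandas as pd

# Placeholder columns; the real ones come from SURVEY_MAP.
df_survey = pd.DataFrame(columns=["type", "name", "label"])
df_survey.loc[len(df_survey)] = ["begin group", "l_diag", "Diagnostics"]
for diag_name in ["diag_pneumonia", "diag_malaria"]:  # hypothetical node names
    df_survey.loc[len(df_survey)] = ["calculate", diag_name, ""]
df_survey.loc[len(df_survey)] = ["end group", "", ""]
```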
tricc/strategies/output/xlsform_cht.py
@@ -1,106 +0,0 @@
- import datetime
- import logging
- import os
- import shutil
-
- import pandas as pd
-
- from tricc.models.lang import SingletonLangClass
- from tricc.serializers.xls_form import SURVEY_MAP
- from tricc.strategies.output.xlsform_cdss import XLSFormCDSSStrategy
-
- langs = SingletonLangClass()
-
- class XLSFormCHTStrategy(XLSFormCDSSStrategy):
-     def process_export(self, start_pages, **kwargs):
-
-         super().process_export( start_pages, **kwargs)
-         cht_header = pd.DataFrame(columns=SURVEY_MAP.keys())
-
-
-         self.df_survey = pd.concat([self.get_cht_input(),self.df_survey[~self.df_survey['name'].isin(['select_sex','p_age_day','p_age_month','p_age_year','p_name','dob'])],self.get_cht_summary() ], ignore_index=True)
-
-     def get_cht_input(self):
-         df_input = pd.DataFrame(columns=SURVEY_MAP.keys())
-         #[ #type, '',#name ''#label, '',#hint '',#help '',#default '',#'appearance', '',#'constraint', '',#'constraint_message' '',#'relevance' '',#'disabled' '',#'required' '',#'required message' '',#'read only' '',#'expression' '',#'repeat_count' ''#'image' ],
-         df_input.loc[len(df_input)] = [ 'begin group', 'inputs' ,*list(langs.get_trads('Inputs', force_dict = True).values()), *list(langs.get_trads('', force_dict = True).values()), *list(langs.get_trads('', force_dict = True).values()), '', 'field-list', '', *list(langs.get_trads('', force_dict = True).values()), './source = "user"', '','', *list(langs.get_trads('', force_dict = True).values()) ,'', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'hidden', 'source', *list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'hidden', 'source_id',*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'hidden', 'task_id' ,*list(langs.get_trads('Task ID', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '']
-         df_input.loc[len(df_input)] = [ 'begin group ', 'contact' ,*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'db:person', '_id', *list(langs.get_trads('Patient ID', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', 'db-object', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'string', 'patient_id' ,*list(langs.get_trads('Medic ID', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', 'hidden', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'string', 'patient_name',*list(langs.get_trads('Patient Name', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', 'hidden', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '','' ]
-         df_input.loc[len(df_input)] = [ 'date', 'date_of_birth',*list(langs.get_trads('Date of birth', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', 'hidden', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '','' ]
-         df_input.loc[len(df_input)] = [ 'string', 'sex',*list(langs.get_trads('Patient Sex', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', 'hidden', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '','' ]
-         df_input.loc[len(df_input)] = [ 'end group', '' ,*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '']
-         df_input.loc[len(df_input)] = [ 'end group', '' ,*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', '_id' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '../inputs/contact/_id', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'patient_uuid' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '../inputs/contact/patient_id', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'p_name' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '../inputs/contact/patient_name', '', '' ]
-
-         df_input.loc[len(df_input)] = [ 'calculate', 'p_age_day' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', 'int((today()-date(${date_of_birth})))', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'p_age_month' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', 'int(${p_age_day} div 30.4)', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'p_age_year' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', 'int(${p_age_month} div 12)', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'select_sex' ,*list(langs.get_trads('label', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '../inputs/contact/sex', '', '' ]
-         df_input.loc[len(df_input)] = [ 'calculate', 'dob',*list(langs.get_trads('Date of birth', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()),*list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', '', '', *list(langs.get_trads('', force_dict = True).values()), '', 'date(../inputs/contact/date_of_birth)', '','' ]
-
-
-         return df_input
-
-     def get_cht_summary(self):
-
-         df_summary = pd.DataFrame(columns=SURVEY_MAP.keys())
-         #[ #type, '',#name ''#label, '',#hint '',#help '',#default '',#'appearance', '',#'constraint', '',#'constraint_message' '',#'relevance' '',#'disabled' '',#'required' '',#'required message' '',#'read only' '',#'expression' '',#'repeat_count' ''#'image' ],
-         #df_summary.loc[len(df_summary)] = [ 'begin group', 'group_summary' , 'Summary', '', '', '', 'field-list summary', '', '', '', '', '', '', '', '', '', '' ]
-         #df_summary.loc[len(df_summary)] = [ 'note', 'r_patient_info', '**${patient_name}** ID: ${patient_id}', '', '', '', '', '', '', '', '', '', '', '', '', '', '' ]
-         #df_summary.loc[len(df_summary)] = [ 'note', 'r_followup', 'Follow Up <i class=“fa fa-flag”></i>', '', '', '', '', '', '','', '', '', '', '', '', '', '' ]
-         #df_summary.loc[len(df_summary)] = [ 'note', 'r_followup_note' ,'FOLLOWUP instruction', '', '', '', '', '', '', '','', '', '', '', '', '', '' ]
-         #df_summary.loc[len(df_summary)] = [ 'end group', '' ,'', '', '', '', '', '', '', '', '', '', '', '', '','', '' ]
-         return df_summary
-
-     def export(self, start_pages):
-         form_id = None
-         if start_pages[self.processes[0]].root.form_id is not None:
-             form_id= str(start_pages[self.processes[0]].root.form_id )
-         else:
-             logger.error("form id required in the first start node")
-             exit()
-         title = start_pages[self.processes[0]].root.label
-         file_name = form_id + ".xlsx"
-         # make a 'settings' tab
-         now = datetime.datetime.now()
-         version=now.strftime('%Y%m%d%H%M')
-         indx=[[1]]
-         # CHT FORCE file name to be equal to id
-
-         newfilename = form_id + ".xlsx"
-         newpath = os.path.join(self.output_path, newfilename)
-         media_path = os.path.join(self.output_path, form_id + "-media")
-
-         settings={'form_title':title,'form_id':form_id,'version':version,'default_language':'English (en)','style':'pages'}
-         df_settings=pd.DataFrame(settings,index=indx)
-         df_settings.head()
-
-         #create a Pandas Excel writer using XlsxWriter as the engine
-         writer = pd.ExcelWriter(newpath, engine='xlsxwriter')
-         self.df_survey.to_excel(writer, sheet_name='survey',index=False)
-         self.df_choice.to_excel(writer, sheet_name='choices',index=False)
-         df_settings.to_excel(writer, sheet_name='settings',index=False)
-
-         #close the Pandas Excel writer and output the Excel file
-         #writer.save()
-
-         # run this on a windows python instance because if not then the generated xlsx file remains open
-         writer.close()
-         media_path_tmp = os.path.join(self.output_path, 'media-tmp')
-         if (os.path.isdir(media_path_tmp)):
-             if os.path.isdir(media_path): # check if it exists, because if it does, error will be raised
-                 shutil.rmtree(media_path)
-             # (later change to make folder complaint to CHT)
-             os.mkdir(media_path)
-
-             file_names = os.listdir(media_path_tmp)
-             for file_name in file_names:
-                 shutil.move(os.path.join(media_path_tmp, file_name), media_path)
-             shutil.rmtree(media_path_tmp)
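The removed CHT input rows derive the patient's age with three chained calculates: days since `date_of_birth`, months as days div 30.4, then years as months div 12. For reference, a Python rendering of those same expressions (not part of the package; function name and the sample dates below are illustrative):

```python
import datetime

def cht_age(date_of_birth: datetime.date, today: datetime.date) -> tuple[int, int, int]:
    # Mirrors the removed XLSForm calculate expressions:
    p_age_day = (today - date_of_birth).days   # int((today()-date(${date_of_birth})))
    p_age_month = int(p_age_day / 30.4)        # int(${p_age_day} div 30.4)
    p_age_year = int(p_age_month / 12)         # int(${p_age_month} div 12)
    return p_age_day, p_age_month, p_age_year

print(cht_age(datetime.date(2021, 6, 1), datetime.date(2024, 6, 1)))  # (1096, 36, 3)
```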