idmtools-test 0.0.0.dev0__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (141)
  1. idmtools_test/__init__.py +16 -8
  2. idmtools_test/inputs/__init__.py +0 -0
  3. idmtools_test/inputs/assets/collections/1/a.txt +0 -0
  4. idmtools_test/inputs/assets/collections/1/b.txt +0 -0
  5. idmtools_test/inputs/assets/collections/2/c.txt +0 -0
  6. idmtools_test/inputs/assets/collections/d.txt +0 -0
  7. idmtools_test/inputs/builder/sweeps.csv +6 -0
  8. idmtools_test/inputs/builder/sweeps.yaml +8 -0
  9. idmtools_test/inputs/compsplatform/__init__.py +0 -0
  10. idmtools_test/inputs/compsplatform/failing_model.py +5 -0
  11. idmtools_test/inputs/compsplatform/mixed_model.py +10 -0
  12. idmtools_test/inputs/compsplatform/working_model.py +5 -0
  13. idmtools_test/inputs/configuration/idmtools_test.ini +71 -0
  14. idmtools_test/inputs/custom/Eradication.exe +0 -0
  15. idmtools_test/inputs/custom/Local_Migration.bin +0 -0
  16. idmtools_test/inputs/custom/Local_Migration.bin.json +12 -0
  17. idmtools_test/inputs/custom/Regional_Migration.bin +0 -0
  18. idmtools_test/inputs/custom/Regional_Migration.bin.json +12 -0
  19. idmtools_test/inputs/custom/Zambia_30arcsec_air_temperature_daily.bin +0 -0
  20. idmtools_test/inputs/custom/Zambia_30arcsec_air_temperature_daily.bin.json +26 -0
  21. idmtools_test/inputs/custom/Zambia_30arcsec_rainfall_daily.bin +0 -0
  22. idmtools_test/inputs/custom/Zambia_30arcsec_rainfall_daily.bin.json +26 -0
  23. idmtools_test/inputs/custom/Zambia_30arcsec_relative_humidity_daily.bin +0 -0
  24. idmtools_test/inputs/custom/Zambia_30arcsec_relative_humidity_daily.bin.json +26 -0
  25. idmtools_test/inputs/custom/campaign.json +95384 -0
  26. idmtools_test/inputs/custom/config.json +943 -0
  27. idmtools_test/inputs/custom/custom_reports.json +163 -0
  28. idmtools_test/inputs/custom/demo.json +1258 -0
  29. idmtools_test/inputs/custom/emodules_map.json +9 -0
  30. idmtools_test/inputs/custom/reporter_plugins/libReportMalariaFiltered.dll +0 -0
  31. idmtools_test/inputs/custom/reporter_plugins/libSpatialReportMalariaFiltered.dll +0 -0
  32. idmtools_test/inputs/custom/reporter_plugins/libreporteventcounter.dll +0 -0
  33. idmtools_test/inputs/duplicated_model/exe/Eradication +0 -0
  34. idmtools_test/inputs/duplicated_model/f1 +0 -0
  35. idmtools_test/inputs/emod/Eradication.exe +0 -0
  36. idmtools_test/inputs/emod_files/campaign.json +21 -0
  37. idmtools_test/inputs/emod_files/config.json +125 -0
  38. idmtools_test/inputs/emod_files/demographics.json +81 -0
  39. idmtools_test/inputs/fakemodels/AnotherOne +0 -0
  40. idmtools_test/inputs/fakemodels/Eradication +0 -0
  41. idmtools_test/inputs/fakemodels/Eradication-2.11.custom.exe +0 -0
  42. idmtools_test/inputs/fakemodels/Eradication.exe +0 -0
  43. idmtools_test/inputs/files/campaign.json +21 -0
  44. idmtools_test/inputs/files/config.json +119 -0
  45. idmtools_test/inputs/files/demographics.json +82 -0
  46. idmtools_test/inputs/files/hello.txt +1 -0
  47. idmtools_test/inputs/id_files/slurm.example_python_experiment.id +1 -0
  48. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_air_temperature_daily.bin +0 -0
  49. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_air_temperature_daily.bin.json +26 -0
  50. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_demographics.json +559 -0
  51. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_rainfall_daily.bin +0 -0
  52. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_rainfall_daily.bin.json +26 -0
  53. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_relative_humidity_daily.bin +0 -0
  54. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_relative_humidity_daily.bin.json +26 -0
  55. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Eradication +0 -0
  56. idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Eradication.exe +0 -0
  57. idmtools_test/inputs/malaria_brazil_central_west_spatial/campaign.json +4 -0
  58. idmtools_test/inputs/malaria_brazil_central_west_spatial/config.json +667 -0
  59. idmtools_test/inputs/malaria_brazil_central_west_spatial/malaria_brazil_central_west_spatial-ERA5Input_demo.csv +37 -0
  60. idmtools_test/inputs/python/Assets/MyExternalLibrary/__init__.py +0 -0
  61. idmtools_test/inputs/python/Assets/MyExternalLibrary/functions.py +15 -0
  62. idmtools_test/inputs/python/Assets/MyLib/functions.py +2 -0
  63. idmtools_test/inputs/python/Assets/MyLib/temp.py +271 -0
  64. idmtools_test/inputs/python/Assets/__init__.py +0 -0
  65. idmtools_test/inputs/python/__init__.py +0 -0
  66. idmtools_test/inputs/python/folder_dup_file/__init__.py +0 -0
  67. idmtools_test/inputs/python/folder_dup_file/model1.py +19 -0
  68. idmtools_test/inputs/python/hello_world.py +1 -0
  69. idmtools_test/inputs/python/model.py +26 -0
  70. idmtools_test/inputs/python/model1.py +20 -0
  71. idmtools_test/inputs/python/model3.py +21 -0
  72. idmtools_test/inputs/python/newmodel2.py +20 -0
  73. idmtools_test/inputs/python/output_generator/generate.py +39 -0
  74. idmtools_test/inputs/python/realpath_verify.py +6 -0
  75. idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python36/dtk_generic_intrahost.pyd +0 -0
  76. idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python36/dtk_nodedemog.pyd +0 -0
  77. idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python37/dtk_generic_intrahost.pyd +0 -0
  78. idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python37/dtk_nodedemog.pyd +0 -0
  79. idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model.py +252 -0
  80. idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model_slurm.py +242 -0
  81. idmtools_test/inputs/python/ye_seir_model/Assets/config_sim.py +48 -0
  82. idmtools_test/inputs/python/ye_seir_model/Assets/custom_csv_analyzer.py +133 -0
  83. idmtools_test/inputs/python/ye_seir_model/Assets/python.sh +4 -0
  84. idmtools_test/inputs/python/ye_seir_model/Assets/requirements.txt +4 -0
  85. idmtools_test/inputs/python/ye_seir_model/Assets/templates/config.json +68 -0
  86. idmtools_test/inputs/python/ye_seir_model/Assets/templates/demographics_template.json +44 -0
  87. idmtools_test/inputs/python/ye_seir_model/__init__.py +0 -0
  88. idmtools_test/inputs/python_experiments/__init__.py +0 -0
  89. idmtools_test/inputs/python_experiments/model.py +10 -0
  90. idmtools_test/inputs/r/model1.R +1 -0
  91. idmtools_test/inputs/r/ncov_analysis/individual_dynamics_estimates/estimate_incubation_period.R +89 -0
  92. idmtools_test/inputs/regression/107/Assets/__init__.py +0 -0
  93. idmtools_test/inputs/regression/107/Assets/model.py +1 -0
  94. idmtools_test/inputs/regression/107/__init__.py +0 -0
  95. idmtools_test/inputs/regression/125/Assets/__init__.py +0 -0
  96. idmtools_test/inputs/regression/125/Assets/model.py +1 -0
  97. idmtools_test/inputs/regression/125/Assets2/__init__.py +0 -0
  98. idmtools_test/inputs/regression/125/Assets2/dir1/__init__.py +0 -0
  99. idmtools_test/inputs/regression/125/Assets2/dir1/model.py +1 -0
  100. idmtools_test/inputs/regression/125/Assets2/dir2/__init__.py +0 -0
  101. idmtools_test/inputs/regression/125/Assets2/dir2/model.py +1 -0
  102. idmtools_test/inputs/regression/125/__init__.py +0 -0
  103. idmtools_test/inputs/regression/__init__.py +0 -0
  104. idmtools_test/inputs/scheduling/hpc/WorkOrder.json +7 -0
  105. idmtools_test/inputs/scheduling/slurm/WorkOrder.json +11 -0
  106. idmtools_test/inputs/scheduling/slurm/WorkOrder1.json +11 -0
  107. idmtools_test/inputs/scheduling/slurm/WorkOrder2.json +13 -0
  108. idmtools_test/inputs/scheduling/slurm/commandline_model.py +22 -0
  109. idmtools_test/inputs/serialization/Eradication.exe +0 -0
  110. idmtools_test/inputs/serialization/single_node_demographics.json +82 -0
  111. idmtools_test/inputs/singularity/alpine_simple/Singularity.def +28 -0
  112. idmtools_test/inputs/singularity/alpine_simple/run_model.py +41 -0
  113. idmtools_test/inputs/singularity/alpine_template/Singularity.jinja +22 -0
  114. idmtools_test/test_precreate_hooks.py +25 -0
  115. idmtools_test/utils/__init__.py +0 -0
  116. idmtools_test/utils/cli.py +41 -0
  117. idmtools_test/utils/common_experiments.py +79 -0
  118. idmtools_test/utils/comps.py +152 -0
  119. idmtools_test/utils/decorators.py +208 -0
  120. idmtools_test/utils/execute_operations/__init__.py +0 -0
  121. idmtools_test/utils/execute_operations/experiment_operations.py +237 -0
  122. idmtools_test/utils/execute_operations/simulate_operations.py +368 -0
  123. idmtools_test/utils/itest_with_persistence.py +25 -0
  124. idmtools_test/utils/operations/__init__.py +0 -0
  125. idmtools_test/utils/operations/experiment_operations.py +64 -0
  126. idmtools_test/utils/operations/simulation_operations.py +114 -0
  127. idmtools_test/utils/shared_functions.py +25 -0
  128. idmtools_test/utils/test_asset.py +89 -0
  129. idmtools_test/utils/test_asset_collection.py +223 -0
  130. idmtools_test/utils/test_execute_platform.py +137 -0
  131. idmtools_test/utils/test_platform.py +94 -0
  132. idmtools_test/utils/test_task.py +69 -0
  133. idmtools_test/utils/utils.py +146 -0
  134. idmtools_test-0.0.3.dist-info/METADATA +48 -0
  135. idmtools_test-0.0.3.dist-info/RECORD +139 -0
  136. idmtools_test-0.0.3.dist-info/entry_points.txt +9 -0
  137. idmtools_test-0.0.3.dist-info/licenses/LICENSE.TXT +3 -0
  138. idmtools_test-0.0.0.dev0.dist-info/METADATA +0 -41
  139. idmtools_test-0.0.0.dev0.dist-info/RECORD +0 -5
  140. {idmtools_test-0.0.0.dev0.dist-info → idmtools_test-0.0.3.dist-info}/WHEEL +0 -0
  141. {idmtools_test-0.0.0.dev0.dist-info → idmtools_test-0.0.3.dist-info}/top_level.txt +0 -0
idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model.py
@@ -0,0 +1,252 @@
+ import sys
+ import os
+ import random
+ current_directory = os.path.dirname(__file__)
+ LIBRARY_PATH = os.path.join(current_directory, 'site-packages') # Need to site-packages level!!!
+
+ sys.path.insert(0, LIBRARY_PATH)
+ sys.path.append(os.path.join(current_directory))
+ import pandas as pd
+ from pathlib import Path
+ # import utils
+
+ assets_dir = os.path.abspath(os.path.dirname(__file__))
+ #current_directory = os.getcwd()
+ p_version = sys.version_info
+ if p_version.major == 3:
+     if p_version.minor == 7:
+         sys.path.append(str(Path(os.path.join(assets_dir, './MyExternalLibrary/Python37')).resolve().absolute()))
+     elif p_version.minor == 6:
+         sys.path.append(str(Path(os.path.join(assets_dir, './MyExternalLibrary/Python36')).resolve().absolute()))
+ else:
+     print("Sorry, this model only supports Python 3.6, 3.7")
+ import dtk_nodedemog as nd # noqa
+ import dtk_generic_intrahost as gi # noqa
+ from config_sim import configure_simulation # noqa
+
+
+ class Constant():
+     SIMULATION_TIMESTEP = "SIMULATION_TIMESTEP"
+     STATISTICAL_POPULATION = "STATISTICAL_POPULATION"
+     CONTAGION = "CONTAGION"
+     NUM_INFECTED = "NUM_INFECTED"
+     NUM_NEW_INFECTIONS = "NUM_NEW_INFECTIONS"
+     hum_id = "individual_id"
+     is_infected = "is_infected"
+     infections = "infections"
+     infectiousness = "infectiousness"
+     immunity = "immunity"
+
+
+ class Persion():
+     def __init__(self, mcw, age, gender, id):
+         self.mcw = mcw
+         self.age = age
+         self.sex = gender
+         self.id = id
+
+
+ # Write a SEIR class using dtk_nodedemog and dtk_generic_intrahost
+ class SEIR():
+     def __init__(self, config_template="config.json", simulation_duration=10, initial_population=1000,
+                  outbreak_timestep=0, outbreak_demographic_coverage=0.01, outbreak_ignore_immunity=True,
+                  other_config_params: dict = None):
+         """
+         Define a SEIR model with the following parameeters
+         :param config_template: template file for configuration
+         :param simulation_duration: number of time step for one simulation
+         :param initial_population: number of initial population
+         :param outbreak_timestep: the day to start distributing the outbreak
+         :param outbreak_demographic_coverage: the fraction of individuals that will receive the outbreak
+         :param outbreak_ignore_immunity: iIndividuals will be force-infected regardless of actual immunity level when set to true
+         :param other_config_params: other parameter/value pairs in config.json
+         """
+         self.human_pop = {} # dictionary of individual objects at run time
+         self.well_mixed_contagion_pool = []
+         self.statistical_population = []
+         self.num_infected = []
+         self.num_new_infections = []
+         self.individual_df = None
+         self.node_df = None
+         self.timestep = 0
+         self.config_template = config_template
+         self.simulation_duration = simulation_duration
+         self.initial_population = initial_population
+         self.outbreak_timestep = outbreak_timestep
+         self.outbreak_demographic_coverage = outbreak_demographic_coverage
+         self.outbreak_ignore_immunity = outbreak_ignore_immunity
+         self.other_config_params = other_config_params
+
+     def create_person_callback(self, mcw, age, gender):
+         new_id = gi.create((gender, age, mcw))
+         person = Persion(mcw, age, gender, new_id)
+         if new_id in self.human_pop:
+             raise Exception(" individual {0} is already created.".format(new_id))
+         else:
+             self.human_pop[new_id] = person
+
+     def expose_callback(self, action, prob, individual_id):
+         random_draw = random.random()
+         if self.timestep == self.outbreak_timestep:
+             print(f"expose {individual_id} with outbreak.")
+             if random_draw < self.outbreak_demographic_coverage:
+                 if self.outbreak_ignore_immunity:
+                     print(f"Let's infect {individual_id} with outbreak ignore immunity.")
+                     return 1
+                 else:
+                     return self.infect_base_on_immunity(individual_id)
+             else:
+                 print(f"Let's NOT infect {individual_id} with outbreak.")
+                 return 0
+         else:
+             print(f"expose {individual_id} with transmission.")
+             if random_draw < self.well_mixed_contagion_pool[-1]:
+                 return self.infect_base_on_immunity(individual_id)
+             else:
+                 print(f"Let's NOT infect {individual_id} based on random draw.")
+                 return 0
+
+     def deposit_callback(self, contagion, individual):
+         self.well_mixed_contagion_pool[-1] += contagion
+         print(f"Depositing {contagion} contagion creates total of {self.well_mixed_contagion_pool[-1]}.")
+         return
+
+     def infect_base_on_immunity(self, individual_id):
+         random_draw = random.random()
+         if random_draw < gi.get_immunity(individual_id):
+             print(f"Let's infect {individual_id} base on immunity.")
+             return 1
+         else:
+             print(f"Let's NOT infect {individual_id} base on immunity.")
+             return 0
+
+     def run(self):
+         """
+         This is the method to run the SEIR model and generate output files.
+         """
+         print("\tWe cleared out human_pop. Should get populated via populate_from_files and callback...")
+         gi.reset()
+         nd.reset()
+         self.human_pop = {}
+
+         configure_simulation(initial_population=self.initial_population,
+                              nd_template_filename=os.path.join(current_directory, self.config_template),
+                              demo_template_filename=os.path.join(assets_dir, "templates/demographics_template.json"),
+                              other_config_params=self.other_config_params)
+
+         # set callbacks
+         nd.set_callback(self.create_person_callback)
+         nd.populate_from_files()
+         gi.my_set_callback(self.expose_callback)
+         gi.set_deposit_callback(self.deposit_callback)
+
+         data = {Constant.SIMULATION_TIMESTEP: [],
+                 Constant.hum_id: [],
+                 Constant.is_infected: [],
+                 Constant.infectiousness: [],
+                 Constant.immunity: []
+                 }
+         infected = dict()
+         for t in range(self.simulation_duration):
+             self.timestep = t
+             # logging.info("Updating individuals at timestep {0}.".format(t))
+             self.statistical_population.append(len(self.human_pop))
+             self.well_mixed_contagion_pool.append(0) # 100% decay at the end of every time step
+
+             # this is for shedding only
+             print("Updating individuals (shedding) at timestep {0}.".format(t))
+             for hum_id in self.human_pop:
+                 nd.update_node_stats(
+                     (1.0, 0.0, gi.is_possible_mother(hum_id), 0)) # mcw, infectiousness, is_poss_mom, is_infected
+                 gi.update1(hum_id) # this should do shedding
+
+             # Normalize contagion
+             if self.well_mixed_contagion_pool[-1] > 0:
+                 self.well_mixed_contagion_pool[-1] /= len(self.human_pop)
+             print("well_mixed_contagion_pool = {0}.".format(self.well_mixed_contagion_pool[-1]))
+
+             print("Updating individuals (exposing) at timestep {0}.".format(t))
+             self.num_infected.append(0)
+             self.num_new_infections.append(0)
+             for hum_id in list(self.human_pop.keys()): # avoid "RuntimeError: dictionary changed size during iteration"
+                 gi.update2(hum_id) # this should do exposure & vital-dynamics(turn off in this example)
+                 # Collect individual level data for every time step
+                 data[Constant.SIMULATION_TIMESTEP].append(t)
+                 data[Constant.hum_id].append(hum_id)
+                 data[Constant.is_infected].append(gi.is_infected(hum_id))
+                 data[Constant.infectiousness].append(round(gi.get_infectiousness(hum_id), 6))
+                 data[Constant.immunity].append(gi.get_immunity(hum_id))
+                 if gi.is_infected(hum_id):
+                     self.num_infected[-1] += 1
+                     if hum_id not in infected or not infected[hum_id]:
+                         self.num_new_infections[-1] += 1
+                     infected[hum_id] = 1
+                 else:
+                     infected[hum_id] = 0
+
+             print(f"num_infected = {self.num_infected[-1]}.")
+             # End of one timestep
+
+         # save individual level and node level data
+         self.individual_df = pd.DataFrame.from_dict(data)
+         self.individual_df.index.name = "index"
+
+         self.node_df = pd.DataFrame.from_dict({Constant.STATISTICAL_POPULATION: self.statistical_population,
+                                                Constant.CONTAGION: self.well_mixed_contagion_pool,
+                                                Constant.NUM_INFECTED: self.num_infected,
+                                                Constant.NUM_NEW_INFECTIONS: self.num_new_infections})
+         self.node_df.index.name = "TimeStep"
+
+         if self.individual_df.empty or self.node_df.empty:
+             print("BAD: Simulation data is empty.")
+
+         print("writing result:")
+         self.write()
+         print("Simulation exits.")
+         pass
+
+     def write(self, output_path="output", node_filename="node.csv", individual_filename="individual.csv"):
+         output_path = os.path.join(current_directory, output_path)
+         if not os.path.exists(output_path):
+             print(f"making output folder {output_path}")
+             os.makedirs(output_path)
+
+         with open(os.path.join(output_path, individual_filename), 'w') as individual_file:
+             print(f"writing to {os.path.join(output_path, individual_filename)} now")
+             self.individual_df.to_csv(individual_file, line_terminator="\n")
+
+         with open(os.path.join(output_path, node_filename), 'w') as node_file:
+             print(f"writing to {os.path.join(output_path, node_filename)} now")
+             self.node_df.to_csv(node_file, line_terminator="\n")
+
+
+ if __name__ == "__main__":
+     # pathname = os.path.dirname(".")
+     # file = os.path.join(pathname, 'config.json')
+     # if os.path.isfile(file):
+     # config = utils.readJson(file)
+     # print('configfile:', config)
+     # parameters = config["parameters"]
+
+     # execute only if run as a script
+     import argparse
+     parser = argparse.ArgumentParser()
+     parser.add_argument('-c', '--config', default="config.json", help="config file name(default to config.json)")
+     parser.add_argument('-d', '--duration', default=10, help="simulation duration(number of time steps)(default to 10)")
+     parser.add_argument('-p', '--population', default=1000, help="number of initial population(default to 100)")
+     parser.add_argument('-o', '--outbreak', default=0, help="outbreak time step(default to 0)")
+     parser.add_argument('-b', '--outbreak_coverage', default=0.01,
+                         help="demographic coverage for outbreak(default to 0.1)")
+     parser.add_argument('-i', '--outbreak_ignore_immunity',
+                         help="if outbreak ignore the immunity status(default to True)", action='store_true')
+     args = parser.parse_args()
+
+     model = SEIR(config_template=args.config,
+                  simulation_duration=int(args.duration),
+                  initial_population=int(args.population),
+                  outbreak_timestep=int(args.outbreak),
+                  outbreak_demographic_coverage=float(args.outbreak_coverage),
+                  outbreak_ignore_immunity=args.outbreak_ignore_immunity)
+     model.run()
+     # The local platform needs to know the resulting status of a work item. We provide it through a return code
+     sys.exit(0)
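
The argparse block above defines the script's command-line surface (-c/--config, -d/--duration, -p/--population, -o/--outbreak, -b/--outbreak_coverage, -i/--outbreak_ignore_immunity). For orientation, invocations consistent with those flags, assuming the Assets/ layout used by python.sh later in this diff, would look like:

    python3 Assets/SEIR_model.py                                                 # defaults: config.json, 10 time steps, 1000 individuals, outbreak at t=0
    python3 Assets/SEIR_model.py -c config.json -d 30 -p 500 -o 5 -b 0.05 -i     # 30 steps, 500 individuals, outbreak at t=5 covering 5%, ignoring immunity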
idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model_slurm.py
@@ -0,0 +1,242 @@
+ import os
+ import random
+ import sys
+
+ assets_dir = os.path.abspath(os.path.dirname(__file__))
+ current_directory = os.getcwd()
+ p_version = sys.version_info
+ CURRENT_DIRECTORY = os.path.dirname(__file__)
+ LIBRARY_PATH = os.path.join(CURRENT_DIRECTORY, 'site-packages') # Need to site-packages level!!!
+
+ sys.path.insert(0, LIBRARY_PATH) # Very Important!
+ import pandas as pd # noqa
+ import dtk_nodedemog as nd # noqa
+ import dtk_generic_intrahost as gi # noqa
+ from config_sim import configure_simulation # noqa
+
+
+ class Constant():
+     SIMULATION_TIMESTEP = "SIMULATION_TIMESTEP"
+     STATISTICAL_POPULATION = "STATISTICAL_POPULATION"
+     CONTAGION = "CONTAGION"
+     NUM_INFECTED = "NUM_INFECTED"
+     NUM_NEW_INFECTIONS = "NUM_NEW_INFECTIONS"
+     hum_id = "individual_id"
+     is_infected = "is_infected"
+     infections = "infections"
+     infectiousness = "infectiousness"
+     immunity = "immunity"
+
+
+ class Persion():
+     def __init__(self, mcw, age, gender, id):
+         self.mcw = mcw
+         self.age = age
+         self.sex = gender
+         self.id = id
+
+
+ # Write a SEIR class using dtk_nodedemog and dtk_generic_intrahost
+ class SEIR():
+     def __init__(self, config_template="config.json", simulation_duration=10, initial_population=1000,
+                  outbreak_timestep=0, outbreak_demographic_coverage=0.01, outbreak_ignore_immunity=True,
+                  other_config_params: dict = None):
+         """
+         Define a SEIR model with the following parameeters
+         :param config_template: template file for configuration
+         :param simulation_duration: number of time step for one simulation
+         :param initial_population: number of initial population
+         :param outbreak_timestep: the day to start distributing the outbreak
+         :param outbreak_demographic_coverage: the fraction of individuals that will receive the outbreak
+         :param outbreak_ignore_immunity: iIndividuals will be force-infected regardless of actual immunity level when set to true
+         :param other_config_params: other parameter/value pairs in config.json
+         """
+         self.human_pop = {} # dictionary of individual objects at run time
+         self.well_mixed_contagion_pool = []
+         self.statistical_population = []
+         self.num_infected = []
+         self.num_new_infections = []
+         self.individual_df = None
+         self.node_df = None
+         self.timestep = 0
+         self.config_template = config_template
+         self.simulation_duration = simulation_duration
+         self.initial_population = initial_population
+         self.outbreak_timestep = outbreak_timestep
+         self.outbreak_demographic_coverage = outbreak_demographic_coverage
+         self.outbreak_ignore_immunity = outbreak_ignore_immunity
+         self.other_config_params = other_config_params
+
+     def create_person_callback(self, mcw, age, gender):
+         new_id = gi.create((gender, age, mcw))
+         person = Persion(mcw, age, gender, new_id)
+         if new_id in self.human_pop:
+             raise Exception(" individual {0} is already created.".format(new_id))
+         else:
+             self.human_pop[new_id] = person
+
+     def expose_callback(self, action, prob, individual_id):
+         random_draw = random.random()
+         if self.timestep == self.outbreak_timestep:
+             print(f"expose {individual_id} with outbreak.")
+             if random_draw < self.outbreak_demographic_coverage:
+                 if self.outbreak_ignore_immunity:
+                     print(f"Let's infect {individual_id} with outbreak ignore immunity.")
+                     return 1
+                 else:
+                     return self.infect_base_on_immunity(individual_id)
+             else:
+                 print(f"Let's NOT infect {individual_id} with outbreak.")
+                 return 0
+         else:
+             print(f"expose {individual_id} with transmission.")
+             if random_draw < self.well_mixed_contagion_pool[-1]:
+                 return self.infect_base_on_immunity(individual_id)
+             else:
+                 print(f"Let's NOT infect {individual_id} based on random draw.")
+                 return 0
+
+     def deposit_callback(self, contagion, individual):
+         self.well_mixed_contagion_pool[-1] += contagion
+         print(f"Depositing {contagion} contagion creates total of {self.well_mixed_contagion_pool[-1]}.")
+         return
+
+     def infect_base_on_immunity(self, individual_id):
+         random_draw = random.random()
+         if random_draw < gi.get_immunity(individual_id):
+             print(f"Let's infect {individual_id} base on immunity.")
+             return 1
+         else:
+             print(f"Let's NOT infect {individual_id} base on immunity.")
+             return 0
+
+     def run(self):
+         """
+         This is the method to run the SEIR model and generate output files.
+         """
+         print("\tWe cleared out human_pop. Should get populated via populate_from_files and callback...")
+         gi.reset()
+         nd.reset()
+         self.human_pop = {}
+
+         configure_simulation(initial_population=self.initial_population,
+                              nd_template_filename=os.path.join(current_directory, self.config_template),
+                              demo_template_filename=os.path.join(assets_dir, "templates/demographics_template.json"),
+                              other_config_params=self.other_config_params)
+
+         # set callbacks
+         nd.set_callback(self.create_person_callback)
+         nd.populate_from_files()
+         gi.my_set_callback(self.expose_callback)
+         gi.set_deposit_callback(self.deposit_callback)
+
+         data = {Constant.SIMULATION_TIMESTEP: [],
+                 Constant.hum_id: [],
+                 Constant.is_infected: [],
+                 Constant.infectiousness: [],
+                 Constant.immunity: []
+                 }
+         infected = dict()
+         for t in range(self.simulation_duration):
+             self.timestep = t
+             # logging.info("Updating individuals at timestep {0}.".format(t))
+             self.statistical_population.append(len(self.human_pop))
+             self.well_mixed_contagion_pool.append(0) # 100% decay at the end of every time step
+
+             # this is for shedding only
+             print("Updating individuals (shedding) at timestep {0}.".format(t))
+             for hum_id in self.human_pop:
+                 nd.update_node_stats(
+                     (1.0, 0.0, gi.is_possible_mother(hum_id), 0)) # mcw, infectiousness, is_poss_mom, is_infected
+                 gi.update1(hum_id) # this should do shedding
+
+             # Normalize contagion
+             if self.well_mixed_contagion_pool[-1] > 0:
+                 self.well_mixed_contagion_pool[-1] /= len(self.human_pop)
+             print("well_mixed_contagion_pool = {0}.".format(self.well_mixed_contagion_pool[-1]))
+
+             print("Updating individuals (exposing) at timestep {0}.".format(t))
+             self.num_infected.append(0)
+             self.num_new_infections.append(0)
+             for hum_id in list(self.human_pop.keys()): # avoid "RuntimeError: dictionary changed size during iteration"
+                 gi.update2(hum_id) # this should do exposure & vital-dynamics(turn off in this example)
+                 # Collect individual level data for every time step
+                 data[Constant.SIMULATION_TIMESTEP].append(t)
+                 data[Constant.hum_id].append(hum_id)
+                 data[Constant.is_infected].append(gi.is_infected(hum_id))
+                 data[Constant.infectiousness].append(round(gi.get_infectiousness(hum_id), 6))
+                 data[Constant.immunity].append(gi.get_immunity(hum_id))
+                 if gi.is_infected(hum_id):
+                     self.num_infected[-1] += 1
+                     if hum_id not in infected or not infected[hum_id]:
+                         self.num_new_infections[-1] += 1
+                     infected[hum_id] = 1
+                 else:
+                     infected[hum_id] = 0
+
+             print(f"num_infected = {self.num_infected[-1]}.")
+             # End of one timestep
+
+         # save individual level and node level data
+         self.individual_df = pd.DataFrame.from_dict(data)
+         self.individual_df.index.name = "index"
+
+         self.node_df = pd.DataFrame.from_dict({Constant.STATISTICAL_POPULATION: self.statistical_population,
+                                                Constant.CONTAGION: self.well_mixed_contagion_pool,
+                                                Constant.NUM_INFECTED: self.num_infected,
+                                                Constant.NUM_NEW_INFECTIONS: self.num_new_infections})
+         self.node_df.index.name = "TimeStep"
+
+         if self.individual_df.empty or self.node_df.empty:
+             print("BAD: Simulation data is empty.")
+
+         print("writing result:")
+         self.write()
+         print("Simulation exits.")
+         pass
+
+     def write(self, output_path="output", node_filename="node.csv", individual_filename="individual.csv"):
+         output_path = os.path.join(current_directory, output_path)
+         if not os.path.exists(output_path):
+             print(f"making output folder {output_path}")
+             os.makedirs(output_path)
+
+         with open(os.path.join(output_path, individual_filename), 'w') as individual_file:
+             print(f"writing to {os.path.join(output_path, individual_filename)} now")
+             self.individual_df.to_csv(individual_file, line_terminator="\n")
+
+         with open(os.path.join(output_path, node_filename), 'w') as node_file:
+             print(f"writing to {os.path.join(output_path, node_filename)} now")
+             self.node_df.to_csv(node_file, line_terminator="\n")
+
+
+ if __name__ == "__main__":
+     # pathname = os.path.dirname(".")
+     # file = os.path.join(pathname, 'config.json')
+     # if os.path.isfile(file):
+     # config = utils.readJson(file)
+     # print('configfile:', config)
+     # parameters = config["parameters"]
+
+     # execute only if run as a script
+     import argparse
+     parser = argparse.ArgumentParser()
+     parser.add_argument('-c', '--config', default="config.json", help="config file name(default to config.json)")
+     parser.add_argument('-d', '--duration', default=10, help="simulation duration(number of time steps)(default to 10)")
+     parser.add_argument('-p', '--population', default=1000, help="number of initial population(default to 100)")
+     parser.add_argument('-o', '--outbreak', default=0, help="outbreak time step(default to 0)")
+     parser.add_argument('-b', '--outbreak_coverage', default=0.01,
+                         help="demographic coverage for outbreak(default to 0.1)")
+     parser.add_argument('-i', '--outbreak_ignore_immunity',
+                         help="if outbreak ignore the immunity status(default to True)", action='store_true')
+     args = parser.parse_args()
+
+     model = SEIR(config_template=args.config,
+                  simulation_duration=int(args.duration),
+                  initial_population=int(args.population),
+                  outbreak_timestep=int(args.outbreak),
+                  outbreak_demographic_coverage=float(args.outbreak_coverage),
+                  outbreak_ignore_immunity=args.outbreak_ignore_immunity)
+     model.run()
+     # The local platform needs to know the resulting status of a work item. We provide it through a return code
+     sys.exit(0)
idmtools_test/inputs/python/ye_seir_model/Assets/config_sim.py
@@ -0,0 +1,48 @@
+ import json
+
+
+ class DemographicsParameters():
+     Nodes = "Nodes"
+     NodeAttributes = "NodeAttributes"
+     InitialPopulation = "InitialPopulation"
+
+
+ def get_json_template(json_filename="demographics_template.json"):
+     with open(json_filename) as infile:
+         j_file_obj = json.load(infile)
+     return j_file_obj
+
+
+ def set_demographics_file(demographics, demo_filename="demographics.json"):
+     with open(demo_filename, 'w') as outfile:
+         json.dump(demographics, outfile, indent=4, sort_keys=True)
+
+
+ def set_config_file(config, config_filename="nd.json"):
+     with open(config_filename, 'w') as outfile:
+         json.dump(config, outfile, indent=4, sort_keys=True)
+
+
+ def set_gi_file(config, gi_filename="gi.json"):
+     with open(gi_filename, 'w') as outfile:
+         json.dump(config, outfile, indent=4, sort_keys=True)
+
+
+ def configure_simulation(initial_population,
+                          nd_template_filename,
+                          demo_template_filename,
+                          other_config_params: dict = None):
+     print("configure demographics.json.\n")
+     demographics = get_json_template(json_filename=demo_template_filename)
+     demographics[DemographicsParameters.Nodes][0][DemographicsParameters.NodeAttributes][
+         DemographicsParameters.InitialPopulation] = initial_population
+     set_demographics_file(demographics)
+
+     print("configure nd.json and gi.json.\n")
+     config = get_json_template(json_filename=nd_template_filename)
+
+     if other_config_params:
+         for param in other_config_params:
+             config[param] = other_config_params[param]
+     set_config_file(config)
+     set_gi_file(config)
idmtools_test/inputs/python/ye_seir_model/Assets/custom_csv_analyzer.py
@@ -0,0 +1,133 @@
+ # Example of two csv analyzers to concatenate csv results into one csv and plot the data from your experiment
+
+ # First, import some necessary system and idmtools packages.
+ import os
+ from sys import platform
+ import matplotlib
+ import matplotlib.pyplot as plt
+ import pandas as pd
+ import numpy as np
+ from idmtools.entities import IAnalyzer
+ if platform == "linux" or platform == "linux2":
+     print('Linux OS. Using non-interactive Agg backend')
+     matplotlib.use('Agg')
+
+
+ # Create a class for a base CSV analyzer
+ class CSVAnalyzer(IAnalyzer):
+     def __init__(self, filenames, uid=None, working_dir=None, parse=True):
+         super().__init__(uid, working_dir, parse, filenames=filenames)
+         # Raise exception early if files are not csv files
+         if not all(['csv' in os.path.splitext(f)[1].lower() for f in self.filenames]):
+             raise Exception('Please ensure all filenames provided to CSVAnalyzer have a csv extension.')
+
+
+ # Create a class for node level analyzer
+ class NodeCSVAnalyzer(CSVAnalyzer):
+     def __init__(self, filenames):
+         super().__init__(filenames=filenames)
+
+     # Map is called to get for each simulation a data object (all the metadata of the simulations) and simulation object
+     def map(self, data, simulation):
+         # If there are 1 to many csv files, concatenate csv data columns into one dataframe
+         # filter the data by filenames (data could contains data/dataframe from other analyzers within the same AnalyzerManager)
+         concatenated_df = pd.concat([data[filename] for filename in self.filenames],
+                                     axis=0, ignore_index=True, sort=True)
+         return concatenated_df
+
+     # In reduce, we are printing and plotting the simulation and result data filtered in map
+     def reduce(self, all_data):
+         # Let's hope the first simulation is representative
+         first_sim = next(iter(all_data.keys())) # Iterate over the dataframe keys
+         exp_id = str(first_sim.experiment.uid) # Get the exp id from the first sim data
+
+         results = pd.concat(list(all_data.values()), axis=0, # Combine a list of all the sims csv data column values
+                             keys=[str(k.uid) for k in all_data.keys()], # Add a hierarchical index with the keys option
+                             names=['SimId']) # Label the index keys you create with the names option
+
+         results.index = results.index.droplevel(1) # Remove default index
+
+         # Make a directory labeled the exp id to write the csv results to
+         os.makedirs(exp_id, exist_ok=True)
+         # NOTE: If running twice with different filename, the output files will collide
+         results.to_csv(os.path.join(exp_id, self.__class__.__name__ + '.csv'))
+
+         channels = results.drop(['TimeStep'], axis=1).columns.tolist()
+         sims = results.index.unique().to_list()
+         # Create the sub plots
+         ncol = int(len(channels) / 2)
+         nrow = int(np.ceil(float(len(channels)) / ncol))
+         figsize = (max(10, min(10, 8 * ncol)), min(10, 6 * nrow))
+         fig, axs = plt.subplots(figsize=figsize, nrows=nrow, ncols=ncol, sharex=True)
+         flat_axes = [axs] if ncol * nrow == 1 else axs.flat
+
+         # Plot
+         for channel, ax in zip(channels, flat_axes):
+             ax.set_title(channel)
+             ax.set_xlabel("TimeStep")
+             for sim in sims:
+                 ax.plot(results[results.index == sim]['TimeStep'], results[results.index == sim][channel])
+
+         # Create the legend
+         sims_label = [str(sim).split('-')[0] for sim in sims]
+         plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=5,
+                    fontsize='xx-small', labels=sims_label)
+
+         # Save the figure
+         plt.savefig(os.path.join(exp_id, self.__class__.__name__ + '.png'))
+
+
+ # Create a class for individual level analyzer
+ class InfectiousnessCSVAnalyzer(CSVAnalyzer):
+     def __init__(self, filenames, channel="infectiousness", filter_by='is_infected'):
+         super().__init__(filenames=filenames)
+         self.channel = channel
+         self.filter_by = filter_by
+
+     # Map is called to get for each simulation a data object (all the metadata of the simulations) and simulation object
+     def map(self, data, simulation):
+         # Filter by filenames in this analyzer
+         my_data = [data[filename] for filename in self.filenames]
+         # If there are 1 to many csv files, concatenate csv data columns into one dataframe
+         # Collect the infectiousness data when is_infected is True only
+         concatenated_df = pd.concat([channel_data[channel_data[self.filter_by] == 1][self.channel] for
+                                      channel_data in my_data],
+                                     axis=0, ignore_index=True, sort=True)
+         return concatenated_df
+
+     # In reduce, we are printing and plotting the simulation and result data filtered in map
+     def reduce(self, all_data):
+         # Let's hope the first simulation is representative
+         first_sim = next(iter(all_data.keys())) # Iterate over the dataframe keys
+         exp_id = str(first_sim.experiment.uid) # Set the exp id from the first sim data
+
+         keys = []
+         for k in all_data.keys():
+             if 'simulation_name_tag' in k.tags:
+                 k.tags.pop('simulation_name_tag')
+             keys.append(str(k.tags))
+         results = pd.concat(list(all_data.values()), axis=0, # Combine a list of all the sims csv data column values
+                             keys=keys, # Add a hierarchical index with the keys option
+                             names=['SimTags']) # Using Simulation tags as the keys
+         results.index = results.index.droplevel(1) # Remove default index
+
+         # Make a directory labeled the exp id to write the csv results to
+         os.makedirs(exp_id, exist_ok=True)
+         # NOTE: If running twice with different filename, the output files will collide
+         results.to_csv(os.path.join(exp_id, self.__class__.__name__ + '.csv'))
+
+         sims = results.index.unique().to_list()
+         fig, ax = plt.subplots()
+
+         # Plot
+         for sim in sims:
+             # Filter data by sim tags and then generate the kernel density estimation plot
+             results[results.index == sim].plot.kde(bw_method='scott', ax=ax, label=str(sim))
+             # sns.distplot() would generate a prettier kde plot but seaborn is not a required library in idmtools.
+             # import seaborn as sns
+             # sns.distplot(results[results.index==sim], kde=True, hist=False, ax=ax, label=str(sim))
+         plt.legend(loc=0, fontsize='xx-small')
+         ax.set_title(self.channel + "(kde)")
+         ax.set_ylim(bottom=0)
+         # Save the figure
+         plt.savefig(os.path.join(exp_id, self.__class__.__name__ + '.png'))
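
The comments in this module refer to running the analyzers through an AnalyzerManager. As a minimal sketch only, assuming the AnalyzeManager API from idmtools.analysis and a placeholder experiment id (neither is shipped in this package), the wiring could look like:

    # Sketch under assumptions: AnalyzeManager, Platform and ItemType come from the idmtools core packages,
    # and exp_id is a placeholder for an experiment whose simulations wrote output/node.csv and output/individual.csv.
    from idmtools.core import ItemType
    from idmtools.core.platform_factory import Platform
    from idmtools.analysis.analyze_manager import AnalyzeManager
    from custom_csv_analyzer import NodeCSVAnalyzer, InfectiousnessCSVAnalyzer

    platform = Platform('COMPS')  # assumption: any configured idmtools platform block works here
    analyzers = [NodeCSVAnalyzer(filenames=['output/node.csv']),
                 InfectiousnessCSVAnalyzer(filenames=['output/individual.csv'])]
    exp_id = '<experiment-id>'  # placeholder, not a real id
    AnalyzeManager(platform=platform, ids=[(exp_id, ItemType.EXPERIMENT)], analyzers=analyzers).analyze()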
idmtools_test/inputs/python/ye_seir_model/Assets/python.sh
@@ -0,0 +1,4 @@
+ #!/usr/bin/env bash
+ export PYTHONPATH=$PWD/Assets/site-packages
+ echo $PYTHONPATH
+ python3 Assets/SEIR_model.py