metacountregressor 0.1.65__tar.gz → 0.1.67__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (49) hide show
  1. metacountregressor-0.1.65/README.md → metacountregressor-0.1.67/PKG-INFO +14 -0
  2. metacountregressor-0.1.67/README.rst +357 -0
  3. {metacountregressor-0.1.65 → metacountregressor-0.1.67/metacountregressor.egg-info}/PKG-INFO +9 -16
  4. metacountregressor-0.1.67/metacountregressor.egg-info/SOURCES.txt +24 -0
  5. metacountregressor-0.1.67/metacountregressor.egg-info/dependency_links.txt +1 -0
  6. metacountregressor-0.1.67/metacountregressor.egg-info/not-zip-safe +1 -0
  7. metacountregressor-0.1.67/metacountregressor.egg-info/requires.txt +2 -0
  8. metacountregressor-0.1.67/metacountregressor.egg-info/top_level.txt +1 -0
  9. metacountregressor-0.1.67/setup.cfg +7 -0
  10. metacountregressor-0.1.67/setup.py +43 -0
  11. metacountregressor-0.1.67/tests/test.py +0 -0
  12. metacountregressor-0.1.65/metacountregressor/data/1848.csv +0 -1849
  13. metacountregressor-0.1.65/metacountregressor/data/4000.csv +0 -4746
  14. metacountregressor-0.1.65/metacountregressor/data/Copy of 190613_HV Crash Data 2007-2017 Dates.xlsx +0 -0
  15. metacountregressor-0.1.65/metacountregressor/data/Ex-16-3.csv +0 -276
  16. metacountregressor-0.1.65/metacountregressor/data/Ex-16-3variables.csv +0 -276
  17. metacountregressor-0.1.65/metacountregressor/data/Indiana_data.csv +0 -339
  18. metacountregressor-0.1.65/metacountregressor/data/MichiganData.csv +0 -33972
  19. metacountregressor-0.1.65/metacountregressor/data/Stage5A.csv +0 -1849
  20. metacountregressor-0.1.65/metacountregressor/data/Stage5A_1848_All_Initial_Columns.csv +0 -1849
  21. metacountregressor-0.1.65/metacountregressor/data/ThaiAccident.csv +0 -20230
  22. metacountregressor-0.1.65/metacountregressor/data/artificial_1h_mixed_corr_2023_MOOF.csv +0 -1001
  23. metacountregressor-0.1.65/metacountregressor/data/artificial_ZA.csv +0 -20001
  24. metacountregressor-0.1.65/metacountregressor/data/artificial_mixed_corr_2023_MOOF.csv +0 -2001
  25. metacountregressor-0.1.65/metacountregressor/data/artificial_mixed_corr_2023_MOOF_copy.csv +0 -2001
  26. metacountregressor-0.1.65/metacountregressor/data/latex_summary_output.tex +0 -2034
  27. metacountregressor-0.1.65/metacountregressor/data/rqc40516_MotorcycleQUT_engineer_crash.csv +0 -8287
  28. metacountregressor-0.1.65/metacountregressor/data/rural_int.csv +0 -37081
  29. metacountregressor-0.1.65/metacountregressor/data/sum_stats.R +0 -83
  30. metacountregressor-0.1.65/metacountregressor/data/summary_output.txt +0 -302
  31. metacountregressor-0.1.65/metacountregressor/plt_style.txt +0 -52
  32. metacountregressor-0.1.65/metacountregressor/requirements.txt +0 -16
  33. metacountregressor-0.1.65/metacountregressor/requirements_new.txt +0 -145
  34. metacountregressor-0.1.65/metacountregressor/set_data.csv +0 -8440
  35. metacountregressor-0.1.65/pyproject.toml +0 -25
  36. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/LICENSE.txt +0 -0
  37. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/__init__.py +0 -0
  38. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/_device_cust.py +0 -0
  39. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/halton.py +0 -0
  40. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/helperprocess.py +0 -0
  41. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/main.py +0 -0
  42. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/main_old.py +0 -0
  43. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/metaheuristics.py +0 -0
  44. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/pareto_file.py +0 -0
  45. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/pareto_logger__plot.py +0 -0
  46. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/setup.py +0 -0
  47. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/single_objective_finder.py +0 -0
  48. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/solution.py +0 -0
  49. {metacountregressor-0.1.65 → metacountregressor-0.1.67}/metacountregressor/test_generated_paper2.py +0 -0
@@ -1,3 +1,17 @@
1
+ Metadata-Version: 2.1
2
+ Name: metacountregressor
3
+ Version: 0.1.67
4
+ Summary: Extensions for a Python package for estimation of count models.
5
+ Home-page: https://github.com/zahern/CountDataEstimation
6
+ Author: Zeke Ahern
7
+ Author-email: zeke.ahern@hdr.qut.edu.au
8
+ License: QUT
9
+ Requires-Python: >=3.10
10
+ Description-Content-Type: text/markdown
11
+ License-File: LICENSE.txt
12
+ Requires-Dist: numpy>=1.13.1
13
+ Requires-Dist: scipy>=1.0.0
14
+
1
15
  <div style="display: flex; align-items: center;">
2
16
  <img src="https://github.com/zahern/data/raw/main/m.png" alt="My Image" style="width: 200px; margin-right: 20px;">
3
17
  <p><span style="font-size: 60px;"><strong>MetaCountRegressor</strong></span></p>
@@ -0,0 +1,357 @@
1
+ .. container::
2
+
3
+ ::
4
+
5
+ <img src="https://github.com/zahern/data/raw/main/m.png" alt="My Image" style="width: 200px; margin-right: 20px;">
6
+ <p><span style="font-size: 60px;"><strong>MetaCountRegressor</strong></span></p>
7
+
8
+ Quick Setup
9
+ '''''''''''
10
+
11
+ The Below code demonstrates how to set up automatic optimization
12
+ assisted by the harmony search algorithm. References to the Differential
13
+ Evolution and Simulated Annealing have been mentioned (change
14
+ accordingly)
15
+
16
+ Quick install: Requires Python 3.10
17
+ -----------------------------------
18
+
19
+ Install ``metacountregressor`` using pip as follows:
20
+
21
+ \```bash pip install metacountregressor
22
+
23
+ .. code:: ipython3
24
+
25
+ import pandas as pd
26
+ import numpy as np
27
+ from metacountregressor.solution import ObjectiveFunction
28
+ from metacountregressor.metaheuristics import (harmony_search,
29
+ differential_evolution,
30
+ simulated_annealing)
31
+
32
+ Basic setup.
33
+ ^^^^^^^^^^^^
34
+
35
+ The initial setup involves reading in the data and selecting an
36
+ optimization algorithm. As the runtime progresses, new solutions will be
37
+ continually evaluated. Finally, at the end of the runtime, the best
38
+ solution will be identified and printed out. In the case of multiple
39
+ objectives all of the best solutions will be printed out that belong to
40
+ the Pareto frontier.
41
+
42
+ .. code:: ipython3
43
+
44
+ # Read data from CSV file
45
+ df = pd.read_csv(
46
+ "https://raw.githubusercontent.com/zahern/data/main/Ex-16-3.csv")
47
+ X = df
48
+ y = df['FREQ'] # Frequency of crashes
49
+ X['Offset'] = np.log(df['AADT']) # Explicitly define how to offset the data, no offset otherwise
50
+ # Drop Y, selected offset term and ID as there are no panels
51
+ X = df.drop(columns=['FREQ', 'ID', 'AADT'])
52
+
53
+ #some example arguments, these are defaults so the following line is just for clarity. See the later arguments section for details.
54
+ arguments = {'algorithm': 'hs', 'test_percentage': 0.15, 'test_complexity': 6, 'instance_number':1,
55
+ 'val_percentage':0.15, 'obj_1': 'bic', '_obj_2': 'RMSE_TEST', "MAX_TIME": 6}
56
+ # Fit the model with metacountregressor
57
+ obj_fun = ObjectiveFunction(X, y, **arguments)
58
+ #replace with other metaheuristics if desired
59
+ results = harmony_search(obj_fun)
60
+
61
+
62
+
63
+ Arguments to feed into the Objective Function:
64
+ ----------------------------------------------
65
+
66
+ Note: Please Consider the main arguments to change.
67
+
68
+ - ``algorithm``: This parameter has multiple choices for the algorithm,
69
+ such as 'hs', 'sa', and 'de'. Only one choice should be defined as a
70
+ string value.
71
+ - ``test_percentage``: This parameter represents the percentage of data
72
+ used for in-sample prediction of the model. The value 0.15
73
+ corresponds to 15% of the data.
74
+ - ``val_percentage``: This parameter represents the percentage of data
75
+ used to validate the model. The value 0.15 corresponds to 15% of the
76
+ data.
77
+ - ``test_complexity``: This parameter defines the complexity level for
78
+ testing. The value 6 tests all complexities. Alternatively, you can
79
+ provide a list of numbers to consider different complexities. The
80
+ complexities are further explained later in this document.
81
+ - ``instance_number``: This parameter is used to give a name to the
82
+ outputs.
83
+ - ``obj_1``: This parameter has multiple choices for obj_1, such as
84
+ 'bic', 'aic', and 'hqic'. Only one choice should be defined as a
85
+ string value.
86
+ - ``_obj_2``: This parameter has multiple choices for objective 2, such
87
+ as 'RMSE_TEST', 'MSE_TEST', and 'MAE_TEST'.
88
+ - ``_max_time``: This parameter specifies the maximum number of seconds
89
+ for the total estimation before stopping.
90
+ - ``distribution``: This parameter is a list of distributions to
91
+ consider. Please select all of the available options and put them
92
+ into a list of valid options if you want to consider the
93
+ distribution type for use when modelling with random parameters. The
94
+ valid options include: 'Normal', 'LnNormal', 'Triangular', and
95
+ 'Uniform'.
96
+ - ``transformations``: This parameter is a list of transformations to
97
+ consider. Please select all of the available options and put them
98
+ into a list of valid options if you want to consider the
99
+ transformation type. The valid options include 'no', 'square-root',
100
+ 'logarithmic', 'archsinh', and 'as_factor'.
101
+ - ``method_ll``: This is a specification of the type of solvers
102
+ available to solve the lower-level maximum likelihood objective. The
103
+ valid options include optimizer method names such as 'BFGS' and
104
+ 'BFGS_2'.
105
+
106
+ An Example of changing the arguments.
107
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
108
+
109
+ Modify the arguments according to your preferences using the commented
110
+ code as a guide.
111
+
112
+ .. code:: ipython3
113
+
114
+ #Solution Arguments
115
+ arguments = {
116
+ 'algorithm': 'hs', #alternatively input 'de', or 'sa'
117
+ 'is_multi': 1,
118
+ 'test_percentage': 0.2, # used in multi-objective optimisation only. Saves 20% of data for testing.
119
+ 'val_percentage': 0.2, # Saves 20% of data for validation.
120
+ 'test_complexity': 6, # Complexity level for testing (6 tests all) or a list to consider potential differences in complexity
121
+ 'instance_number': 'name', # used for creating a named folder where your models are saved into from the directory
122
+ 'distribution': ['Normal', 'LnNormal', 'Triangular', 'Uniform'],
123
+ 'Model': [0,1], # or equivalently ['POS', 'NB']
124
+ 'transformations': ['no', 'sqrt', 'archsinh'],
125
+ 'method_ll': 'BFGS_2',
126
+ '_max_time': 10
127
+ }
128
+ obj_fun = ObjectiveFunction(X, y, **arguments)
129
+ results = harmony_search(obj_fun)
130
+
131
+ Initial Solution Configurement
132
+ ------------------------------
133
+
134
+ Listed below is an example of how to specify an initial solution within
135
+ the framework. This initial solution will be used to calculate the
136
+ fitness and considered in the objective-based search. However, as the
137
+ search progresses, different hypotheses may be proposed, and alternative
138
+ modeling components may completely replace the initial solution.
139
+
140
+ .. code:: ipython3
141
+
142
+ #Model Decisions, Specify for Intial Optimization
143
+ manual_fit_spec = {
144
+ 'fixed_terms': ['SINGLE', 'LENGTH'],
145
+ 'rdm_terms': ['AADT:normal'],
146
+ 'rdm_cor_terms': ['GRADEBR:uniform', 'CURVES:triangular'],
147
+ 'grouped_terms': [],
148
+ 'hetro_in_means': ['ACCESS:normal', 'MINRAD:normal'],
149
+ 'transformations': ['no', 'no', 'log', 'no', 'no', 'no', 'no'],
150
+ 'dispersion': 1
151
+ }
152
+ #Search Arguments
153
+ arguments = {
154
+ 'algorithm': 'hs',
155
+ 'test_percentage': 0.2,
156
+ 'test_complexity': 6,
157
+ 'instance_number': 'name',
158
+ 'Manual_Fit': manual_fit_spec
159
+ }
160
+ obj_fun = ObjectiveFunction(X, y, **arguments)
161
+
162
+ Similarly, to return the results, feed the objective function into a
163
+ metaheuristic solution algorithm. An example of this is provided below:
164
+
165
+ .. code:: ipython3
166
+
167
+ results = harmony_search(obj_fun)
168
+ print(results)
169
+
170
+ Notes:
171
+ ------
172
+
173
+ Capabilities of the software include:
174
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
175
+
176
+ - Handling of Panel Data
177
+ - Support for Data Transformations
178
+ - Implementation of Models with Correlated and Non-Correlated Random
179
+ Parameters
180
+ - A variety of mixing distributions for parameter estimations,
181
+ including normal, lognormal, truncated normal, Lindley, Gamma,
182
+ triangular, and uniform distributions Capability to handle
183
+ heterogeneity in the means of the random parameters
184
+ - Use of Halton draws for simulated maximum likelihood estimation
185
+ - Support for grouped random parameters with unbalanced groups
186
+ - Post-estimation tools for assessing goodness of fit, making
187
+ predictions, and conducting out-of-sample validation
188
+ - Multiple parameter optimization routines, such as the BFGS method
189
+ - Comprehensive hypothesis testing using single objectives, such as
190
+ in-sample BIC and log-likelihood
191
+ - Extensive hypothesis testing using multiple objectives, such as
192
+ in-sample BIC and out-of-sample MAE (Mean Absolute Error), or
193
+ in-sample AIC and out-of-sample MSPE (mean-square prediction errorr)
194
+ - Features that allow analysts to pre-specify variables, interactions,
195
+ and mixing distributions, among others
196
+ - Meta-heuristic Guided Optimization, including techniques like
197
+ Simulated Annealing, Harmony Search, and Differential Evolution
198
+ - Customization of Hyper-parameters to solve problems tailored to your
199
+ dataset
200
+ - Out-of-the-box optimization capability using default metaheuristics
201
+
202
+ Interpreting the output of the model:
203
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
204
+
205
+ A regression table is produced. The following text elements are
206
+ explained: - Std. Dev.: This column appears for effects that are related
207
+ to random parameters and displays the assumed distributional assumption
208
+ next to it - Chol: This term refers to Cholesky decomposition element,
209
+ to show the correlation between two random parameters. The combination
210
+ of the Cholesky element on itself is equivalent to a normal random
211
+ parameter. - hetro group #: This term represents the heterogeneity group
212
+ number, which refers to all of the contributing factors that share
213
+ heterogeneity in the means to each other under the same numbered value. -
214
+ :math:`\tau`: This column, displays the type of transformation that was
215
+ applied to the specific contributing factor in the data.
216
+
217
+ Arguments:
218
+ ----------
219
+
220
+ In reference to the arguments that can be fed into the solution algorithm, a dictionary system is utilised with relevant names; these include
221
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
222
+
223
+ The following list describes the arguments available in this function.
224
+ By default, all of the capabilities described are enabled unless
225
+ specified otherwise as an argument. For list arguments, include all
226
+ desired elements in the list to ensure the corresponding options are
227
+ considered. Example code will be provided later in this guide.
228
+
229
+ 1. **``complexity_level``**: This argument accepts an integer 1-6 or a
230
+ list of integers between 0 and 5; e.g., a possible
231
+ configuration [0, 2, 3]. Each integer represents a hierarchy level
232
+ for estimable models associated with each explanatory variable. Here
233
+ is a summary of the hierarchy:
234
+
235
+ - 0: Null model
236
+ - 1: Simple fixed effects model
237
+ - 2: Random parameters model
238
+ - 3: Random correlated parameters model
239
+ - 4: Grouped random parameters model
240
+ - 5: Heterogeneity in the means random parameter model
241
+
242
+ **Note:** For the grouped random parameters model, groupings need to
243
+ be defined prior to estimation. This can be achieved by including the
244
+ following key-value pair in the arguments of the
245
+ ``ObjectiveFunction``: ``'group': "Enter Column Grouping in data"``.
246
+ Replace ``"Enter Column Grouping in data"`` with the actual column
247
+ grouping in your dataset.
248
+
249
+ Similarly, for panel data, the panel column needs to be defined using
250
+ the key-value pair:
251
+ ``'panel': "enter column string covering panels"``. Replace
252
+ ``"enter column string covering panels"`` with the appropriate column
253
+ string that represents the panel information in your dataset.
254
+
255
+ 2. **``distributions``**: This argument accepts a list of strings where
256
+ each string corresponds to a distribution. Valid options include:
257
+
258
+ - 'Normal'
259
+ - 'Lindley'
260
+ - 'Uniform'
261
+ - 'LogNormal'
262
+ - 'Triangular'
263
+ - 'Gamma'
264
+ - 'TruncatedNormal'
265
+ - Any of the above, concatenated with ':' (e.g., 'Normal:grouped';
266
+ requires a grouping term defined in the model)
267
+
268
+ 3. **``Model``**: This argument specifies the model form. It can be a
269
+ list of integers representing different models to test:
270
+
271
+ - 0: Poisson
272
+ - 1: Negative-Binomial
273
+ - 2: Generalized-Poisson
274
+
275
+ 4. **``transformations``**: This argument accepts a list of strings
276
+ representing available transformations within the framework. Valid
277
+ options include:
278
+
279
+ - 'no'
280
+ - 'square-root'
281
+ - 'logarithmic'
282
+ - 'archsinh'
283
+ - 'as_factor'
284
+
285
+ 5. **``is_multi``**: This argument accepts an integer indicating whether
286
+ single or multiple objectives are to be tested (0 for single, 1 for
287
+ multiple).
288
+
289
+ 6. **``test_percentage``**: This argument is used for multi-objective
290
+ optimization. Define it as a decimal; for example, 0.2 represents 20%
291
+ of the data for testing.
292
+
293
+ 7. **``val_percentage``**: This argument saves data for validation.
294
+ Define it as a decimal; for example, 0.2 represents 20% of the data
295
+ for validation.
296
+
297
+ 8. **``_max_time``**: This argument is used to add a termination time in
298
+ the algorithm. It takes values as seconds. Note the time is only
299
+ dependent on the time after the initial population of solutions is
300
+ generated.
301
+
302
+ Example
303
+ =======
304
+
305
+ Let's start by fitting very simple models, use those models to help and
306
+ define the objectives, then perform more of an extensive search on the
307
+ variables that are identified more commonly
308
+
309
+ .. code:: ipython3
310
+
311
+ df = pd.read_csv(
312
+ "https://raw.githubusercontent.com/zahern/data/main/Ex-16-3.csv")
313
+ X = df
314
+ y = df['FREQ'] # Frequency of crashes
315
+ X['Offset'] = np.log(df['AADT']) # Explicitly define how to offset the data, no offset otherwise
316
+ # Drop Y, selected offset term and ID as there are no panels
317
+ X = df.drop(columns=['FREQ', 'ID', 'AADT'])
318
+
319
+ arguments = {
320
+ 'algorithm': 'hs', #alternatively input 'de', or 'sa'
321
+ 'is_multi': 1,
322
+ 'test_percentage': 0.2, # used in multi-objective optimisation only. Saves 20% of data for testing.
323
+ 'val_percentage': 0.2, # Saves 20% of data for validation.
324
+ 'test_complexity': 3, # For Very simple Models
325
+ 'obj_1': 'BIC', '_obj_2': 'RMSE_TEST',
326
+ 'instance_number': 'name', # used for creating a named folder where your models are saved into from the directory
327
+ 'distribution': ['Normal'],
328
+ 'Model': [0], # or equivalently ['POS', 'NB']
329
+ 'transformations': ['no', 'sqrt', 'archsinh'],
330
+ '_max_time': 10000
331
+ }
332
+ obj_fun = ObjectiveFunction(X, y, **arguments)
333
+
334
+ results = harmony_search(obj_fun)
335
+ print(results)
336
+
337
+ Contact
338
+ -------
339
+
340
+ If you have any questions, ideas to improve MetaCountRegressor, or want
341
+ to report a bug, just open a new issue in `GitHub
342
+ repository <https://github.com/zahern/CountDataEstimation>`__.
343
+
344
+ Citing MetaCountRegressor
345
+ -------------------------
346
+
347
+ Please cite MetaCountRegressor as follows:
348
+
349
+ Ahern, Z., Corry P., Paz A. (2023). MetaCountRegressor [Computer
350
+ software]. https://pypi.org/project/metacountregressor/
351
+
352
+ Or using BibTex as follows:
353
+
354
+ \```bibtex @misc{Ahern2023, author = {Zeke Ahern and Paul Corry and
355
+ Alexander Paz}, journal = {PyPi}, title = {metacountregressor · PyPI},
356
+ url = {https://pypi.org/project/metacountregressor/0.1.47/}, year =
357
+ {2023}, }
@@ -1,22 +1,16 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: metacountregressor
3
- Version: 0.1.65
4
- Summary: A python package for count regression of rare events assisted by metaheuristics
5
- Author: zahern
3
+ Version: 0.1.67
4
+ Summary: Extensions for a Python package for estimation of count models.
5
+ Home-page: https://github.com/zahern/CountDataEstimation
6
+ Author: Zeke Ahern
6
7
  Author-email: zeke.ahern@hdr.qut.edu.au
7
- Requires-Python: >=3.10,<3.11
8
- Classifier: Programming Language :: Python :: 3
9
- Classifier: Programming Language :: Python :: 3.10
10
- Requires-Dist: latextable (>=1.0.0,<2.0.0)
11
- Requires-Dist: matplotlib (>=3.7.1,<4.0.0)
12
- Requires-Dist: numpy (>=1.24.3,<2.0.0)
13
- Requires-Dist: pandas (>=2.0.2,<3.0.0)
14
- Requires-Dist: psutil (>=5.9.5,<6.0.0)
15
- Requires-Dist: scikit-learn (>=1.2.2,<2.0.0)
16
- Requires-Dist: scipy (>=1.10.1,<2.0.0)
17
- Requires-Dist: statsmodels (>=0.14.0,<0.15.0)
18
- Requires-Dist: tabulate (>=0.9.0,<0.10.0)
8
+ License: QUT
9
+ Requires-Python: >=3.10
19
10
  Description-Content-Type: text/markdown
11
+ License-File: LICENSE.txt
12
+ Requires-Dist: numpy>=1.13.1
13
+ Requires-Dist: scipy>=1.0.0
20
14
 
21
15
  <div style="display: flex; align-items: center;">
22
16
  <img src="https://github.com/zahern/data/raw/main/m.png" alt="My Image" style="width: 200px; margin-right: 20px;">
@@ -271,4 +265,3 @@ Or using BibTex as follows:
271
265
  year = {2023},
272
266
  }
273
267
 
274
-
@@ -0,0 +1,24 @@
1
+ LICENSE.txt
2
+ README.rst
3
+ setup.cfg
4
+ setup.py
5
+ metacountregressor/__init__.py
6
+ metacountregressor/_device_cust.py
7
+ metacountregressor/halton.py
8
+ metacountregressor/helperprocess.py
9
+ metacountregressor/main.py
10
+ metacountregressor/main_old.py
11
+ metacountregressor/metaheuristics.py
12
+ metacountregressor/pareto_file.py
13
+ metacountregressor/pareto_logger__plot.py
14
+ metacountregressor/setup.py
15
+ metacountregressor/single_objective_finder.py
16
+ metacountregressor/solution.py
17
+ metacountregressor/test_generated_paper2.py
18
+ metacountregressor.egg-info/PKG-INFO
19
+ metacountregressor.egg-info/SOURCES.txt
20
+ metacountregressor.egg-info/dependency_links.txt
21
+ metacountregressor.egg-info/not-zip-safe
22
+ metacountregressor.egg-info/requires.txt
23
+ metacountregressor.egg-info/top_level.txt
24
+ tests/test.py
@@ -0,0 +1,2 @@
1
+ numpy>=1.13.1
2
+ scipy>=1.0.0
@@ -0,0 +1 @@
1
+ metacountregressor
@@ -0,0 +1,7 @@
1
+ [metadata]
2
+ description-file = README.md
3
+
4
+ [egg_info]
5
+ tag_build =
6
+ tag_date = 0
7
+
@@ -0,0 +1,43 @@
1
+ import codecs
2
+
3
+ import setuptools
4
+
5
+ # Read the README.md file for the long description
6
+ with open('README.md', 'r', encoding='utf-8') as fh:
7
+ long_description = fh.read()
8
+
9
+ with open('version.txt', 'r') as f:
10
+ current_version = f.read().strip()
11
+
12
+ # Split the current version into its components
13
+ version_parts = current_version.split('.')
14
+ major, minor, patch = map(int, version_parts)
15
+
16
+ # Increment the patch version
17
+ patch += 1
18
+
19
+ # Construct the new version string
20
+ new_version = f"{major}.{minor}.{patch}"
21
+
22
+ # Write the new version number back to the file
23
+ with open('version.txt', 'w') as f:
24
+ f.write(new_version)
25
+
26
+ setuptools.setup(
27
+ name='metacountregressor',
28
+ version=new_version,
29
+ description='Extensions for a Python package for estimation of count models.',
30
+ long_description=long_description,
31
+ long_description_content_type='text/markdown', # Specify the content type as Markdown
32
+ url='https://github.com/zahern/CountDataEstimation',
33
+ author='Zeke Ahern',
34
+ author_email='zeke.ahern@hdr.qut.edu.au',
35
+ license='QUT',
36
+ packages=['metacountregressor'],
37
+ zip_safe=False,
38
+ python_requires='>=3.10',
39
+ install_requires=[
40
+ 'numpy>=1.13.1',
41
+ 'scipy>=1.0.0'
42
+ ]
43
+ )
File without changes