defirl 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
defirl-0.0.1/LICENSE ADDED
@@ -0,0 +1,19 @@
1
+ Copyright (c) 2022 Nicolus Rotich
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ of this software and associated documentation files (the "Software"), to deal
5
+ in the Software without restriction, including without limitation the rights
6
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ copies of the Software, and to permit persons to whom the Software is
8
+ furnished to do so, subject to the following conditions:
9
+
10
+ The above copyright notice and this permission notice shall be included in all
11
+ copies or substantial portions of the Software.
12
+
13
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19
+ SOFTWARE.
@@ -0,0 +1,2 @@
1
+ graft src/defirl/*
2
+ recursive-include src/defirl *.npy
defirl-0.0.1/PKG-INFO ADDED
@@ -0,0 +1,32 @@
1
+ Metadata-Version: 2.1
2
+ Name: defirl
3
+ Version: 0.0.1
4
+ Summary: A reinforcement learning package for predicting buy and sell signals
5
+ Home-page: https://nkrtech.com
6
+ Author: Nicolus Rotich
7
+ Author-email: nicholas.rotich@gmail.com
8
+ License: MIT
9
+ Download-URL: https://github.com/moinonin/defirl/archive/refs/heads/main.zip
10
+ Platform: any
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Requires-Python: >=3.10
13
+ Description-Content-Type: text/markdown
14
+ Provides-Extra: dev
15
+ License-File: LICENSE
16
+
17
+ # NLP project
18
+ This is an attempt to build a reinforcement learning model for generating trading signals. It uses a limited vocabulary such as 'go-long', 'go-short' and 'do-nothing'.
19
+ ## Usage
20
+ Install the project with:
21
+ ```
22
+ pip install defirl
23
+ ```
24
+ Then:
25
+ ```
26
+ from defirl.defi import main as RLmodel
27
+ ```
28
+
29
+ # Warning
30
+ This is not financial advice. The NLP project is still in its preliminary stages. Use it at your own risk.
31
+
32
+
defirl-0.0.1/README.md ADDED
@@ -0,0 +1,14 @@
1
+ # NLP project
2
+ This is an attempt to build a reinforcement learning model for generating trading signals. It uses a limited vocabulary such as 'go-long', 'go-short' and 'do-nothing'.
3
+ ## Usage
4
+ Install the project with:
5
+ ```
6
+ pip install defirl
7
+ ```
8
+ Then:
9
+ ```
10
+ from defirl.defi import main as RLmodel
11
+ ```
12
+
13
+ # Warning
14
+ This is not financial advice. The NLP project is still in its preliminary stages. Use it at your own risk.
@@ -0,0 +1,8 @@
1
+ [build-system]
2
+ requires = [
3
+ "setuptools>=58.1.0",
4
+ "wheel>=0.37.1",
5
+ "scikit-learn>=1.4.0",
6
+ "fire"
7
+ ]
8
+ build-backend = "setuptools.build_meta"
defirl-0.0.1/setup.cfg ADDED
@@ -0,0 +1,40 @@
1
+ [metadata]
2
+ name = defirl
3
+ version = 0.0.1
4
+ author = Nicolus Rotich
5
+ author_email = nicholas.rotich@gmail.com
6
+ description = A reinforcement learning package for predicting buy and sell signals
7
+ long_description = file: README.md
8
+ long_description_content_type = text/markdown
9
+ url = https://github.com/moinonin/defirl/
10
+ license = MIT
11
+ classifiers =
12
+ Programming Language :: Python :: 3
13
+ License :: OSI Approved :: MIT License
14
+ Operating System :: OS Independent
15
+
16
+ [options]
17
+ packages = find:
18
+ package_dir =
19
+ = src
20
+ python_requires = >=3.10
21
+ install_requires =
22
+ pandas>=2.2.2
23
+ scikit-learn>=1.4.0
24
+ numpy>=1.26.4
25
+ fire>=0.4.0
26
+ setuptools>=58.1.0
27
+
28
+ [options.packages.find]
29
+ where = src
30
+
31
+ [options.extras_require]
32
+ dev =
33
+ pytest
34
+ black
35
+ flake8
36
+
37
+ [egg_info]
38
+ tag_build =
39
+ tag_date = 0
40
+
defirl-0.0.1/setup.py ADDED
@@ -0,0 +1,28 @@
1
"""Packaging script for defirl.

Legacy ``setup.py`` kept for tooling that still invokes it directly;
metadata mirrors ``setup.cfg``/``PKG-INFO``.

BUG FIX: the previous ``except ImportError`` fallback imported
``find_packages`` from ``distutils.core``, which has never provided it,
so the fallback itself raised ImportError. setuptools is a declared
build requirement (see pyproject.toml), so it is imported directly.
"""
from setuptools import setup, find_packages

setup(
    name='defirl',
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    platforms='any',
    version='0.0.1',
    description='A reinforcement learning package for predicting buy and sell signals',
    license='MIT',
    author='Nicolus Rotich',
    author_email='nicholas.rotich@gmail.com',
    install_requires=[
        "setuptools>=58.1.0",
        "wheel>=0.37.1",
        "scikit-learn>=1.4.0",
        "fire",
    ],
    url='https://nkrtech.com',
    download_url='https://github.com/moinonin/defirl/archive/refs/heads/main.zip',
    classifiers=[
        'License :: OSI Approved :: MIT License',
    ],
)
File without changes
Binary file
@@ -0,0 +1,119 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ from dataclasses import dataclass
4
+ import pickle
5
+ from pathlib import Path
6
+ import fire
7
+ from scipy.spatial import KDTree
8
+
9
+
10
@dataclass
class RLmodel:
    """Q-table driven trading-signal model.

    Holds one market observation (13 engineered features) and predicts the
    best action ("go_long", "go_short" or "do_nothing") by looking the
    observation up in a pre-trained Q-table. Observations not present in
    the training index fall back to the nearest known state (Euclidean
    distance via a KD-tree).
    """

    opening: float
    high: float
    ema_26: float
    ema_12: float
    low: float
    mean_grad_hist: float
    close: float
    volume: float
    sma_25: float
    long_jcrosk: float
    short_kdj: int
    sma_compare: int
    is_short: int

    # Pre-trained artifacts shipped alongside this module.
    model_file_path: str = f'{Path(__file__).resolve().parent}/q_table.npy'
    state_index_file: str = f'{Path(__file__).resolve().parent}/state_to_index.npy'

    def load_qtable(self):
        """Load and return the trained Q-table (ndarray, one row per state)."""
        with open(self.model_file_path, "rb") as f:
            return np.load(f)

    def load_state_index(self):
        """Load the {state_tuple: row_index} dict stored in the .npy file.

        ``allow_pickle=True`` is required because the file holds a pickled
        dict object, not a plain array.
        """
        with open(self.state_index_file, "rb") as f:
            return np.load(f, allow_pickle=True).item()

    def load_action_mapping(self):
        """Return the fixed action-name -> Q-table column mapping."""
        return {"go_long": 0, "go_short": 1, "do_nothing": 2}

    def prep_state(self):
        """Assemble the 13 features into a (1, 13) float array.

        NaN/Inf values are sanitised to 0.0 so they cannot poison the
        dict lookup or the KD-tree query downstream.
        """
        state = np.array([[self.opening, self.high,
                           self.ema_26, self.ema_12, self.low, self.mean_grad_hist,
                           self.close, self.volume, self.sma_25, self.long_jcrosk,
                           self.short_kdj, self.sma_compare, self.is_short]])

        if not np.all(np.isfinite(state)):
            state = np.nan_to_num(state, nan=0.0, posinf=0.0, neginf=0.0)

        return state

    def _nearest_q_values(self, state, q_table, state_to_index):
        """Return the Q-values of the known state closest to ``state``."""
        state_tuples = list(state_to_index.keys())
        kdtree = KDTree(state_tuples)
        _, nearest = kdtree.query(state.flatten())
        return q_table[state_to_index[state_tuples[nearest]]]

    def predict_action(self):
        """Predict the best action for the current observation.

        Returns:
            dict with keys "raw_state" (the (1, 13) array), "state_tuple"
            (its flattened tuple form), "best_action_index" and "action"
            (one of "go_long"/"go_short"/"do_nothing").
        """
        state = self.prep_state()
        q_table = self.load_qtable()
        state_to_index = self.load_state_index()
        action_mapping = self.load_action_mapping()

        state_tuple = tuple(state.flatten())
        state_index = state_to_index.get(state_tuple, -1)
        if state_index != -1:
            # BUG FIX: the original caught ValueError, printed it, and fell
            # through with q_values unbound -> NameError at argmax. A bad
            # row index actually raises IndexError; recover by falling back
            # to the nearest known state instead of crashing.
            try:
                q_values = q_table[state_index]
            except IndexError:
                q_values = self._nearest_q_values(state, q_table, state_to_index)
        else:
            q_values = self._nearest_q_values(state, q_table, state_to_index)

        best_action_index = np.argmax(q_values)
        action = next(name for name, idx in action_mapping.items()
                      if idx == best_action_index)

        return {
            "raw_state": state,
            "state_tuple": state_tuple,
            "best_action_index": best_action_index,
            "action": action,
        }
92
+
93
def main(opening, high, ema_26, ema_12, low,
         mean_grad_hist, close, volume, sma_25, long_jcrosk,
         short_kdj, sma_compare, is_short
         ) -> 'str | None':
    """CLI entry point: build an RLmodel from 13 features and predict.

    Args mirror RLmodel's fields in the same order.

    Returns:
        The predicted action name ("go_long", "go_short" or "do_nothing"),
        or None on failure. (The original annotated ``-> dict`` but always
        returned the action string extracted from the result dict.)
    """
    try:
        rl_model = RLmodel(
            opening, high, ema_26, ema_12, low, mean_grad_hist,
            close, volume, sma_25, long_jcrosk,
            short_kdj, sma_compare, is_short,
        )
        return rl_model.predict_action().get("action")
    except Exception as e:
        # Best-effort CLI behaviour: report and return None rather than
        # crash. BUG FIX: the original printed only the message text,
        # hiding the exception type; the implicit None return is now
        # explicit.
        print(f"{type(e).__name__}: {e}")
        return None


if __name__ == "__main__":
    fire.Fire(main)
@@ -0,0 +1,32 @@
1
+ Metadata-Version: 2.1
2
+ Name: defirl
3
+ Version: 0.0.1
4
+ Summary: A reinforcement learning package for predicting buy and sell signals
5
+ Home-page: https://nkrtech.com
6
+ Author: Nicolus Rotich
7
+ Author-email: nicholas.rotich@gmail.com
8
+ License: MIT
9
+ Download-URL: https://github.com/moinonin/defirl/archive/refs/heads/main.zip
10
+ Platform: any
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Requires-Python: >=3.10
13
+ Description-Content-Type: text/markdown
14
+ Provides-Extra: dev
15
+ License-File: LICENSE
16
+
17
+ # NLP project
18
+ This is an attempt to build a reinforcement learning model for generating trading signals. It uses a limited vocabulary such as 'go-long', 'go-short' and 'do-nothing'.
19
+ ## Usage
20
+ Install the project with:
21
+ ```
22
+ pip install defirl
23
+ ```
24
+ Then:
25
+ ```
26
+ from defirl.defi import main as RLmodel
27
+ ```
28
+
29
+ # Warning
30
+ This is not financial advice. The NLP project is still in its preliminary stages. Use it at your own risk.
31
+
32
+
@@ -0,0 +1,16 @@
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ pyproject.toml
5
+ setup.cfg
6
+ setup.py
7
+ src/defirl/__init__.py
8
+ src/defirl/q_table.npy
9
+ src/defirl/rl.py
10
+ src/defirl/state_to_index.npy
11
+ src/defirl.egg-info/PKG-INFO
12
+ src/defirl.egg-info/SOURCES.txt
13
+ src/defirl.egg-info/dependency_links.txt
14
+ src/defirl.egg-info/requires.txt
15
+ src/defirl.egg-info/top_level.txt
16
+ src/defirl/__pycache__/rl.cpython-310.pyc
@@ -0,0 +1,9 @@
1
+ setuptools>=58.1.0
2
+ wheel>=0.37.1
3
+ scikit-learn>=1.4.0
4
+ fire
5
+
6
+ [dev]
7
+ pytest
8
+ black
9
+ flake8
@@ -0,0 +1 @@
1
+ defirl