deepboard-0.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. deepboard-0.0.0/MANIFEST.in +1 -0
  2. deepboard-0.0.0/PKG-INFO +162 -0
  3. deepboard-0.0.0/README.md +128 -0
  4. deepboard-0.0.0/deepboard/__init__.py +1 -0
  5. deepboard-0.0.0/deepboard/__version__.py +1 -0
  6. deepboard-0.0.0/deepboard/gui/__init__.py +0 -0
  7. deepboard-0.0.0/deepboard/gui/assets/base.css +168 -0
  8. deepboard-0.0.0/deepboard/gui/assets/base.js +77 -0
  9. deepboard-0.0.0/deepboard/gui/assets/charts.css +188 -0
  10. deepboard-0.0.0/deepboard/gui/assets/compare.css +90 -0
  11. deepboard-0.0.0/deepboard/gui/assets/datagrid.css +117 -0
  12. deepboard-0.0.0/deepboard/gui/assets/fileview.css +13 -0
  13. deepboard-0.0.0/deepboard/gui/assets/right_panel.css +222 -0
  14. deepboard-0.0.0/deepboard/gui/assets/theme.css +76 -0
  15. deepboard-0.0.0/deepboard/gui/components/__init__.py +3 -0
  16. deepboard-0.0.0/deepboard/gui/components/chart_type.py +22 -0
  17. deepboard-0.0.0/deepboard/gui/components/legend.py +34 -0
  18. deepboard-0.0.0/deepboard/gui/components/smoother.py +21 -0
  19. deepboard-0.0.0/deepboard/gui/entry.py +14 -0
  20. deepboard-0.0.0/deepboard/gui/main.py +84 -0
  21. deepboard-0.0.0/deepboard/gui/pages/__init__.py +1 -0
  22. deepboard-0.0.0/deepboard/gui/pages/compare_page/__init__.py +6 -0
  23. deepboard-0.0.0/deepboard/gui/pages/compare_page/compare_page.py +22 -0
  24. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/__init__.py +4 -0
  25. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/card_list.py +19 -0
  26. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/chart.py +54 -0
  27. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/compare_setup.py +29 -0
  28. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/split_card.py +50 -0
  29. deepboard-0.0.0/deepboard/gui/pages/compare_page/components/utils.py +20 -0
  30. deepboard-0.0.0/deepboard/gui/pages/compare_page/routes.py +50 -0
  31. deepboard-0.0.0/deepboard/gui/pages/main_page/__init__.py +4 -0
  32. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/__init__.py +5 -0
  33. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/compare_button.py +21 -0
  34. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/datagrid.py +59 -0
  35. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/handlers.py +54 -0
  36. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/header.py +43 -0
  37. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/routes.py +112 -0
  38. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/row.py +20 -0
  39. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/sortable_column_js.py +45 -0
  40. deepboard-0.0.0/deepboard/gui/pages/main_page/datagrid/utils.py +9 -0
  41. deepboard-0.0.0/deepboard/gui/pages/main_page/handlers.py +14 -0
  42. deepboard-0.0.0/deepboard/gui/pages/main_page/main_page.py +17 -0
  43. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/__init__.py +8 -0
  44. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/config.py +38 -0
  45. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/hparams.py +25 -0
  46. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/run_info.py +88 -0
  47. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/scalars.py +228 -0
  48. deepboard-0.0.0/deepboard/gui/pages/main_page/right_panel/template.py +86 -0
  49. deepboard-0.0.0/deepboard/gui/pages/main_page/routes.py +16 -0
  50. deepboard-0.0.0/deepboard/gui/pages/not_found.py +3 -0
  51. deepboard-0.0.0/deepboard/gui/utils.py +240 -0
  52. deepboard-0.0.0/deepboard/resultTable/__init__.py +2 -0
  53. deepboard-0.0.0/deepboard/resultTable/cursor.py +20 -0
  54. deepboard-0.0.0/deepboard/resultTable/logwritter.py +321 -0
  55. deepboard-0.0.0/deepboard/resultTable/resultTable.py +479 -0
  56. deepboard-0.0.0/deepboard/resultTable/scalar.py +29 -0
  57. deepboard-0.0.0/deepboard/resultTable/table_schema.py +183 -0
  58. deepboard-0.0.0/deepboard/resultTable/utils.py +50 -0
  59. deepboard-0.0.0/deepboard.egg-info/PKG-INFO +162 -0
  60. deepboard-0.0.0/deepboard.egg-info/SOURCES.txt +64 -0
  61. deepboard-0.0.0/deepboard.egg-info/dependency_links.txt +1 -0
  62. deepboard-0.0.0/deepboard.egg-info/entry_points.txt +2 -0
  63. deepboard-0.0.0/deepboard.egg-info/requires.txt +4 -0
  64. deepboard-0.0.0/deepboard.egg-info/top_level.txt +1 -0
  65. deepboard-0.0.0/setup.cfg +4 -0
  66. deepboard-0.0.0/setup.py +53 -0
deepboard-0.0.0/MANIFEST.in
@@ -0,0 +1 @@
recursive-include deepboard *.css *.js *.html
deepboard-0.0.0/PKG-INFO
@@ -0,0 +1,162 @@
Metadata-Version: 2.4
Name: deepboard
Version: 0.0.0
Summary: A tool to log your experiment results and explore them in a GUI
Home-page: https://github.com/anthol42/deepboard
Author: Anthony Lavertu
Author-email: alavertu2@gmail.com
Project-URL: Issues, https://github.com/anthol42/deepboard/issues
Keywords: deepboard,deep,board,pytorch,torch,tensorflow,jax,tensorboard
Classifier: Programming Language :: Python :: 3
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: MacOS
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Science/Research
Classifier: Intended Audience :: Education
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
Requires-Dist: python-fasthtml
Requires-Dist: fh-plotly
Requires-Dist: MarkupSafe
Requires-Dist: pandas
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: project-url
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# Deepboard
This package includes two modules that work together:
`deepboard gui` and `resultTable`. The `resultTable` module
keeps track of all of your experiments and helps you organize
your code to make results reproducible. The `deepboard gui` module
implements a web UI to visualize the training details and training
curves of any run. In addition, it lets you compare training curves
between runs. You can even download the charts that you have generated. :)
## 🔥 Screenshots 🔥
![](assets/main_view.png)
![](assets/compare_view.png)
## 🌟 Project Philosophy
Before diving in, it’s important to understand the philosophy behind this project. In deep learning, it’s easy to get
swept up in the excitement — experimenting with countless configurations in search of the perfect setup. 🔬✨
Eventually, we stumble upon something that works well... only to keep tweaking and lose track of what actually worked
best. This package is built to help you stay focused, organized, and efficient — so you never lose track of that perfect
combination again. 🧠✅

The idea is simple: always make your code reproducible!
Sure, easier said than done... 😅 My recommended approach is to use a multi-level configuration system. Let me explain
how it works! 👇

Before jumping into experiments, we usually know the minimal set of parameters required for a project to run.
For instance, if you're training a Transformer model, you already know you'll need to specify things like the number of
layers, number of attention heads, learning rate, and so on. All these known parameters can (and should) be stored in a
configuration file — I personally prefer using YAML for its readability. 📄 When running the experiment, we simply load
this config file and use it to parameterize each part of the code. Usually, the parameters stored in the config give
us the baseline.

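To make this concrete, here is a small sketch (not deepboard-specific) of what loading such a YAML config could look like; the file name and keys are illustrative, and it assumes PyYAML is installed:
```python
# Hypothetical example: load a YAML baseline config and read the parameters from it.
# configs/baseline.yml might contain, e.g.:
#   model:
#     n_layers: 6
#     n_heads: 8
#   training:
#     learning_rate: 0.0003
import yaml

with open("configs/baseline.yml", "r") as f:
    config = yaml.safe_load(f)

n_layers = config["model"]["n_layers"]
lr = config["training"]["learning_rate"]
print(f"Building a {n_layers}-layer model, lr={lr}")
```
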
Once we’ve established a baseline, it’s natural to want to improve it — whether it's by testing out a new technique from
a paper or an idea that came to us in a dream. 🚀 But here's the challenge: how do we add new functionality to our code
without breaking compatibility with earlier runs? In other words, if we use the same config file and script parameters,
we should still get the exact same results as before. My solution? Add new parameters to functions with sensible
default values — specifically, defaults that reflect the original behavior. You can then include these parameters in
your configuration file and toggle them on or off to test their effect. For example, say you’re building an image
classifier and want to try `MixUp`. Your training function might look like this:
```python
def train_model(..., use_mixup: bool = False):
    ...
```
By setting the default to False, your baseline run remains intact. Only when `use_mixup` is explicitly set to True will
the new logic kick in. This approach ensures clean, reproducible experimentation with minimal disruption. ✅

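In code, the toggle can simply be read from the config with a default that matches the baseline. A hypothetical sketch (the config keys and `train_model` stub are placeholders, not deepboard API):
```python
# An older config that never mentions use_mixup keeps reproducing the baseline,
# because .get() falls back to the original behavior (False).
config = {"training": {"learning_rate": 3e-4}}

def train_model(use_mixup: bool = False):
    print("MixUp enabled" if use_mixup else "Baseline run")

use_mixup = config["training"].get("use_mixup", False)
train_model(use_mixup=use_mixup)  # -> "Baseline run"
```
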
Sometimes, we don’t want to modify the configuration file directly — for example, when we've decided that a particular
config represents a fixed setup for a specific model or training strategy.
In these cases, it's often more convenient to override a few parameters via the command line. 🧪
To do this, I use Python’s built-in argparse module. It adds an extra layer of configuration that’s ideal for quick
experiments — without changing the original YAML file. And just like before, the same principle applies: always use
default values that reproduce the results of previous runs. This ensures your experiments stay flexible and reproducible. 🔁

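A minimal sketch of that extra CLI layer, with hypothetical option names (these flags are not part of deepboard):
```python
# CLI overrides on top of the YAML config. Defaults are chosen so that running
# the script with no flags reproduces previous runs exactly.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=None,
                    help="Override training.learning_rate from the config")
parser.add_argument("--use-mixup", action="store_true",
                    help="Enable MixUp (off by default, matching the baseline)")
args = parser.parse_args()

config = {"training": {"learning_rate": 3e-4}}  # stands in for the loaded YAML config
if args.lr is not None:  # only override when explicitly requested
    config["training"]["learning_rate"] = args.lr
config["training"]["use_mixup"] = args.use_mixup
```
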
This project promotes a simple but powerful principle: make your deep learning experiments reproducible — without
slowing down iteration or innovation. To achieve that, it recommends a multi-level configuration system:
1. YAML Configuration Files – Store all known parameters for a clean, reproducible baseline. 📄
2. Function Defaults – Add new features with default values that preserve past behavior. This ensures that re-running
with the same config and CLI parameters always gives the same result. ✅
3. CLI Overrides – For quick tweaks, use CLI parameters to add new functionality or to override the config's parameters
without editing the base config. Perfect for fast experimentation. 🧪

This layered setup keeps your workflow organized, traceable, and easy to extend, so you can explore new ideas without
losing sight of what actually works. 🔁

If you're feeling a bit overwhelmed or would like a project example, the
[torchbuilder](https://github.com/anthol42/torchbuilder/tree/dev) app can generate various project templates. The
default template implements this philosophy, including the resultTable, making it a great starting point! 🚀

## 🛠️ Installation
```shell
pip install deepboard
```

## 🚀 How to Use
For your project, you will only need the `resultTable` module, as the `deepboard` module is primarily for the UI.

### ResultTable
First, import the `ResultTable` class from `deepboard.resultTable`, then create a new run. You can also create a debug run.
A **debug run** will be logged in the result table like any other run, but all its results will be overwritten by the next
debug run. This keeps the result table clean, containing only the runs you actually intend to keep rather than those
meant solely to verify that the code runs correctly.

Note: **Debug runs always have a runID of -1.** 🔧
```python
from deepboard.resultTable import ResultTable

rtable = ResultTable("results/resultTable.db")
if DEBUG:
    resultSocket = rtable.new_debug_run("Experiment1", "path/to/config", cli=vars(args).copy())
else:
    resultSocket = rtable.new_run("Experiment1", "path/to/config", cli=vars(args).copy())
```

Next, you can specify hyperparameters that will appear in the table:
```python
resultSocket.add_hparams(
    lr=config["training"]["learning_rate"],
    wd=...,
    min_lr=...,
    dropout2d=...,
    dropout=...
)
```

During training, we can log scalars associated with the run:
```python
resultSocket.add_scalar('Train/Accuracy', 0.99, step)
```

Finally, you can log the final evaluation results that will be included in the table:
```python
resultSocket.write_result(accuracy=final_accuracy, crossEntropy=final_loss)
```

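Putting the pieces together, a minimal end-to-end sketch might look like the following. Only the `ResultTable` / `resultSocket` calls shown above are deepboard API; `args`, `config`, `n_epochs`, `train_loader`, `train_step`, and `evaluate` are placeholders for your own training code:
```python
from deepboard.resultTable import ResultTable

rtable = ResultTable("results/resultTable.db")
resultSocket = rtable.new_run("Experiment1", "path/to/config", cli=vars(args).copy())
resultSocket.add_hparams(lr=config["training"]["learning_rate"])

step = 0
for epoch in range(n_epochs):
    for batch in train_loader:
        loss = train_step(batch)                     # your own training step
        resultSocket.add_scalar('Train/Loss', loss, step)
        step += 1

final_accuracy, final_loss = evaluate()              # your own evaluation
resultSocket.write_result(accuracy=final_accuracy, crossEntropy=final_loss)
```
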
Note: If you want to run multiple training repetitions of the same run (to test variance, for example), call the
`resultSocket.new_repetition` method after each repetition.
```python
for rep in range(number_of_repetitions):
    for epoch in range(n_epochs):
        ...  # Train here
    resultSocket.new_repetition()

# Finally, write the final results once:
resultSocket.write_result(accuracy=accuracies.mean(), crossEntropy=losses.mean())
```

### Deepboard UI
To launch the Deepboard web UI, simply run the `deepboard` command in your terminal with the path to your resultTable db:
```shell
deepboard /path/to/resultTable.db
```
deepboard-0.0.0/README.md
@@ -0,0 +1,128 @@
# Deepboard
This package includes two modules that work together:
`deepboard gui` and `resultTable`. The `resultTable` module
keeps track of all of your experiments and helps you organize
your code to make results reproducible. The `deepboard gui` module
implements a web UI to visualize the training details and training
curves of any run. In addition, it lets you compare training curves
between runs. You can even download the charts that you have generated. :)
## 🔥 Screenshots 🔥
![](assets/main_view.png)
![](assets/compare_view.png)
## 🌟 Project Philosophy
Before diving in, it’s important to understand the philosophy behind this project. In deep learning, it’s easy to get
swept up in the excitement — experimenting with countless configurations in search of the perfect setup. 🔬✨
Eventually, we stumble upon something that works well... only to keep tweaking and lose track of what actually worked
best. This package is built to help you stay focused, organized, and efficient — so you never lose track of that perfect
combination again. 🧠✅

The idea is simple: always make your code reproducible!
Sure, easier said than done... 😅 My recommended approach is to use a multi-level configuration system. Let me explain
how it works! 👇

Before jumping into experiments, we usually know the minimal set of parameters required for a project to run.
For instance, if you're training a Transformer model, you already know you'll need to specify things like the number of
layers, number of attention heads, learning rate, and so on. All these known parameters can (and should) be stored in a
configuration file — I personally prefer using YAML for its readability. 📄 When running the experiment, we simply load
this config file and use it to parameterize each part of the code. Usually, the parameters stored in the config give
us the baseline.

Once we’ve established a baseline, it’s natural to want to improve it — whether it's by testing out a new technique from
a paper or an idea that came to us in a dream. 🚀 But here's the challenge: how do we add new functionality to our code
without breaking compatibility with earlier runs? In other words, if we use the same config file and script parameters,
we should still get the exact same results as before. My solution? Add new parameters to functions with sensible
default values — specifically, defaults that reflect the original behavior. You can then include these parameters in
your configuration file and toggle them on or off to test their effect. For example, say you’re building an image
classifier and want to try `MixUp`. Your training function might look like this:
```python
def train_model(..., use_mixup: bool = False):
    ...
```
By setting the default to False, your baseline run remains intact. Only when `use_mixup` is explicitly set to True will
the new logic kick in. This approach ensures clean, reproducible experimentation with minimal disruption. ✅

Sometimes, we don’t want to modify the configuration file directly — for example, when we've decided that a particular
config represents a fixed setup for a specific model or training strategy.
In these cases, it's often more convenient to override a few parameters via the command line. 🧪
To do this, I use Python’s built-in argparse module. It adds an extra layer of configuration that’s ideal for quick
experiments — without changing the original YAML file. And just like before, the same principle applies: always use
default values that reproduce the results of previous runs. This ensures your experiments stay flexible and reproducible. 🔁

This project promotes a simple but powerful principle: make your deep learning experiments reproducible — without
slowing down iteration or innovation. To achieve that, it recommends a multi-level configuration system:
1. YAML Configuration Files – Store all known parameters for a clean, reproducible baseline. 📄
2. Function Defaults – Add new features with default values that preserve past behavior. This ensures that re-running
with the same config and CLI parameters always gives the same result. ✅
3. CLI Overrides – For quick tweaks, use CLI parameters to add new functionality or to override the config's parameters
without editing the base config. Perfect for fast experimentation. 🧪

This layered setup keeps your workflow organized, traceable, and easy to extend, so you can explore new ideas without
losing sight of what actually works. 🔁

If you're feeling a bit overwhelmed or would like a project example, the
[torchbuilder](https://github.com/anthol42/torchbuilder/tree/dev) app can generate various project templates. The
default template implements this philosophy, including the resultTable, making it a great starting point! 🚀

## 🛠️ Installation
```shell
pip install deepboard
```

## 🚀 How to Use
For your project, you will only need the `resultTable` module, as the `deepboard` module is primarily for the UI.

### ResultTable
First, import the `ResultTable` class from `deepboard.resultTable`, then create a new run. You can also create a debug run.
A **debug run** will be logged in the result table like any other run, but all its results will be overwritten by the next
debug run. This keeps the result table clean, containing only the runs you actually intend to keep rather than those
meant solely to verify that the code runs correctly.

Note: **Debug runs always have a runID of -1.** 🔧
```python
from deepboard.resultTable import ResultTable

rtable = ResultTable("results/resultTable.db")
if DEBUG:
    resultSocket = rtable.new_debug_run("Experiment1", "path/to/config", cli=vars(args).copy())
else:
    resultSocket = rtable.new_run("Experiment1", "path/to/config", cli=vars(args).copy())
```

Next, you can specify hyperparameters that will appear in the table:
```python
resultSocket.add_hparams(
    lr=config["training"]["learning_rate"],
    wd=...,
    min_lr=...,
    dropout2d=...,
    dropout=...
)
```

During training, we can log scalars associated with the run:
```python
resultSocket.add_scalar('Train/Accuracy', 0.99, step)
```

Finally, you can log the final evaluation results that will be included in the table:
```python
resultSocket.write_result(accuracy=final_accuracy, crossEntropy=final_loss)
```

Note: If you want to run multiple training repetitions of the same run (to test variance, for example), call the
`resultSocket.new_repetition` method after each repetition.
```python
for rep in range(number_of_repetitions):
    for epoch in range(n_epochs):
        ...  # Train here
    resultSocket.new_repetition()

# Finally, write the final results once:
resultSocket.write_result(accuracy=accuracies.mean(), crossEntropy=losses.mean())
```

### Deepboard UI
To launch the Deepboard web UI, simply run the `deepboard` command in your terminal with the path to your resultTable db:
```shell
deepboard /path/to/resultTable.db
```
deepboard-0.0.0/deepboard/__init__.py
@@ -0,0 +1 @@
from .__version__ import __version__
deepboard-0.0.0/deepboard/__version__.py
@@ -0,0 +1 @@
__version__ = "0.0.0"
deepboard-0.0.0/deepboard/gui/__init__.py
File without changes
deepboard-0.0.0/deepboard/gui/assets/base.css
@@ -0,0 +1,168 @@
@import url('theme.css');
body {
    margin: 0;
    font-family: system-ui, sans-serif;
    background-color: var(--background-color);
    color: var(--text_color);
    overflow-y: hidden;
    overflow-x: hidden;
}

.container {
    display: flex;
}


/* Menu when you right click */
#custom-menu {
    position: absolute;
    background-color: var(--menu-bg);
    border: var(--menu-bg);
    border-radius: 6px;
    padding: 0;
    display: flex;
    flex-direction: column;
    z-index: 1000;
    box-shadow: var(--menu-shadow);
    visibility: hidden;
}

.dropdown-menu {
    background-color: var(--menu-bg);
    color: var(--text_color);
    padding: 0;
    width: 200px;
    font-family: sans-serif;
    margin: 0;
    list-style: none;
}

.menu-item {
    padding: 0.5rem;
    cursor: pointer;
    position: relative;
    white-space: nowrap;
}

.menu-item:hover {
    background-color: var(--menu-bg-hover);
}

.has-submenu-wrapper {
    position: relative;
}
.has-submenu {
    text-decoration: none;
    color: inherit;
    display: block;
}
.submenu {
    display: none;
    position: absolute;
    top: 0;
    left: 100%;
    background-color: var(--menu-bg);
    border-radius: 6px;
    padding: 0;
    list-style: none;
    min-width: 150px;
    z-index: 1001;
    margin-left: 0;
    max-height: 40vh;
    overflow-y: scroll;
}

.has-submenu-wrapper:hover .submenu {
    display: block;
}

.submenu .menu-item:hover {
    background-color: var(--menu-bg-hover);
}


.copy-container {
    position: relative;
    display: inline-flex;
    align-items: center;
    gap: 8px;
    cursor: pointer;
    padding: 0;
}

.copy-icon-container {
    position: relative;
    width: 1em; /* match icon size */
    height: 1em;
    display: inline-block;
}

/* Overlap icons inside wrapper */
.copy-icon {
    position: absolute;
    top: 0;
    left: 0;
    opacity: 0;
    visibility: hidden;
    transition: opacity 0.2s ease;
    pointer-events: none; /* so they don't block clicks */
}

.copy-container:hover .default-icon {
    visibility: visible;
    opacity: 1;
}

.copy-container.copied .default-icon {
    display: none;
}

.copy-container.copied .check-icon {
    visibility: visible;
    opacity: 1;
}

.align-right {
    display: flex;
    justify-content: flex-end;
}


/* Compare Button */
.compare-button-container {
    width: 100%;
    display: flex;
    justify-content: flex-end; /* aligns content to the right horizontally */
}

.compare-button {
    margin-top: 0.5rem;
    display: inline-flex;
    align-items: center;
    justify-content: center;
    padding: 8px 32px;
    font-size: 1.1rem;
    font-weight: 600;
    color: var(--text_color);
    background-color: var(--primary-color); /* Material purple */
    border: none;
    border-radius: 8px;
    box-shadow: 0 2px 6px var(--button-shadow-color);
    cursor: pointer;
}
.compare-button:hover {
    background-color: var(--primary-color-hover); /* darker on hover */
    box-shadow: 0 2px 10px var(--button-shadow-color);
}

.compare-button:active {
    box-shadow: 0 2px 5px var(--button-shadow-color);
}

.center-center {
    width: 100%;
    height: 100vh;
    display: flex;
    justify-content: center; /* Horizontal centering */
    align-items: center; /* Vertical centering */
}
deepboard-0.0.0/deepboard/gui/assets/base.js
@@ -0,0 +1,77 @@
window.addEventListener("contextmenu", function(e) {
    // Get the target element (the element that was clicked)
    let element = e.target;

    e.preventDefault(); // Prevent the default browser context menu
    const ids = [];

    while (element) {
        if (element.id) {
            ids.push(element.id);
        }
        element = element.parentElement;
    }

    // You can pass this information to your HTMX request
    const menu = document.getElementById('custom-menu');
    menu.style.top = `${e.clientY}px`;
    menu.style.left = `${e.clientX}px`;

    // Trigger HTMX request to load the menu content
    // Join ids with ,
    const str_ids = ids.join(",");
    htmx.ajax('GET', `/get-context-menu?elementIds=${str_ids}&top=${e.clientY}&left=${e.clientX}`, {
        target: '#custom-menu',
        swap: 'outerHTML', // Correct usage of swap attribute
        headers: {
            'HX-Swap-OOB': 'true' // Use correct OOB header for out-of-band swaps
        }
    });
});

// Hide the menu when clicking elsewhere
window.addEventListener("click", () => {
    const menu = document.getElementById('custom-menu');
    menu.style.visibility = "hidden";
});


function copyToClipboard(container) {
    const text = container.querySelector('.copy-text').innerText;

    navigator.clipboard.writeText(text).then(() => {
        container.classList.add('copied');
        setTimeout(() => {
            container.classList.remove('copied');
        }, 1200);
    });
}


function shiftClickDataGrid(event){
    const el = event.target.closest('.table-row');
    if (!el) return; // Not one of ours
    if (event.ctrlKey || event.metaKey) {
        const originalUrl = el.getAttribute('hx-get'); // e.g. "/default-endpoint?runID=3"
        const url = new URL(originalUrl, window.location.origin); // create full URL to parse
        const params = url.search;

        // Instead of modifying the attribute, trigger htmx manually with the new URL
        htmx.ajax('GET', `/shift_click_row${params}`, {target: el.getAttribute('hx-target') || el});

        // Prevent the original click handler from firing
        event.preventDefault();
        event.stopPropagation();
    }
}
document.addEventListener('click', shiftClickDataGrid);

// New htmx event: open in a new tab when data-new-tab attribute is present
document.addEventListener('htmx:beforeOnLoad', function (event) {
    const redirectUrl = event.detail.xhr.getResponseHeader('HX-Blank-Redirect');
    if (redirectUrl && event.detail.elt.hasAttribute('data-new-tab')) {
        // Prevent htmx from performing the redirect in the current tab
        console.log("Here")
        window.open(redirectUrl, '_blank');
    }
});