glam4cm 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. glam4cm/__init__.py +9 -0
  2. glam4cm/data_loading/__init__.py +0 -0
  3. glam4cm/data_loading/data.py +631 -0
  4. glam4cm/data_loading/encoding.py +76 -0
  5. glam4cm/data_loading/graph_dataset.py +940 -0
  6. glam4cm/data_loading/metadata.py +84 -0
  7. glam4cm/data_loading/models_dataset.py +361 -0
  8. glam4cm/data_loading/utils.py +20 -0
  9. glam4cm/downstream_tasks/__init__.py +0 -0
  10. glam4cm/downstream_tasks/bert_edge_classification.py +144 -0
  11. glam4cm/downstream_tasks/bert_graph_classification.py +137 -0
  12. glam4cm/downstream_tasks/bert_graph_classification_comp.py +156 -0
  13. glam4cm/downstream_tasks/bert_link_prediction.py +145 -0
  14. glam4cm/downstream_tasks/bert_node_classification.py +164 -0
  15. glam4cm/downstream_tasks/cm_gpt_edge_classification.py +73 -0
  16. glam4cm/downstream_tasks/cm_gpt_node_classification.py +76 -0
  17. glam4cm/downstream_tasks/cm_gpt_pretraining.py +64 -0
  18. glam4cm/downstream_tasks/common_args.py +160 -0
  19. glam4cm/downstream_tasks/create_dataset.py +51 -0
  20. glam4cm/downstream_tasks/gnn_edge_classification.py +106 -0
  21. glam4cm/downstream_tasks/gnn_graph_cls.py +101 -0
  22. glam4cm/downstream_tasks/gnn_link_prediction.py +109 -0
  23. glam4cm/downstream_tasks/gnn_node_classification.py +103 -0
  24. glam4cm/downstream_tasks/tf_idf_text_classification.py +22 -0
  25. glam4cm/downstream_tasks/utils.py +35 -0
  26. glam4cm/downstream_tasks/word2vec_text_classification.py +108 -0
  27. glam4cm/embeddings/__init__.py +0 -0
  28. glam4cm/embeddings/bert.py +72 -0
  29. glam4cm/embeddings/common.py +43 -0
  30. glam4cm/embeddings/fasttext.py +0 -0
  31. glam4cm/embeddings/tfidf.py +25 -0
  32. glam4cm/embeddings/w2v.py +41 -0
  33. glam4cm/encoding/__init__.py +0 -0
  34. glam4cm/encoding/common.py +0 -0
  35. glam4cm/encoding/encoders.py +100 -0
  36. glam4cm/graph2str/__init__.py +0 -0
  37. glam4cm/graph2str/common.py +34 -0
  38. glam4cm/graph2str/constants.py +15 -0
  39. glam4cm/graph2str/ontouml.py +141 -0
  40. glam4cm/graph2str/uml.py +0 -0
  41. glam4cm/lang2graph/__init__.py +0 -0
  42. glam4cm/lang2graph/archimate.py +31 -0
  43. glam4cm/lang2graph/bpmn.py +0 -0
  44. glam4cm/lang2graph/common.py +416 -0
  45. glam4cm/lang2graph/ecore.py +221 -0
  46. glam4cm/lang2graph/ontouml.py +169 -0
  47. glam4cm/lang2graph/utils.py +80 -0
  48. glam4cm/models/cmgpt.py +352 -0
  49. glam4cm/models/gnn_layers.py +273 -0
  50. glam4cm/models/hf.py +10 -0
  51. glam4cm/run.py +99 -0
  52. glam4cm/run_configs.py +126 -0
  53. glam4cm/settings.py +54 -0
  54. glam4cm/tokenization/__init__.py +0 -0
  55. glam4cm/tokenization/special_tokens.py +4 -0
  56. glam4cm/tokenization/utils.py +37 -0
  57. glam4cm/trainers/__init__.py +0 -0
  58. glam4cm/trainers/bert_classifier.py +105 -0
  59. glam4cm/trainers/cm_gpt_trainer.py +153 -0
  60. glam4cm/trainers/gnn_edge_classifier.py +126 -0
  61. glam4cm/trainers/gnn_graph_classifier.py +123 -0
  62. glam4cm/trainers/gnn_link_predictor.py +144 -0
  63. glam4cm/trainers/gnn_node_classifier.py +135 -0
  64. glam4cm/trainers/gnn_trainer.py +129 -0
  65. glam4cm/trainers/metrics.py +55 -0
  66. glam4cm/utils.py +194 -0
  67. glam4cm-0.1.0.dist-info/LICENSE +21 -0
  68. glam4cm-0.1.0.dist-info/METADATA +86 -0
  69. glam4cm-0.1.0.dist-info/RECORD +72 -0
  70. glam4cm-0.1.0.dist-info/WHEEL +5 -0
  71. glam4cm-0.1.0.dist-info/entry_points.txt +2 -0
  72. glam4cm-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,55 @@
1
+ from sklearn.metrics import (
2
+ accuracy_score,
3
+ balanced_accuracy_score,
4
+ f1_score,
5
+ precision_score,
6
+ recall_score
7
+ )
8
+
9
+ import torch
10
+ import numpy as np
11
+
12
+
13
def compute_metrics(p):
    """Compute accuracy and balanced accuracy from a prediction object.

    Args:
        p: Object exposing ``predictions`` (scores/logits of shape
           [num_samples, num_classes]) and ``label_ids`` (shape [num_samples]).

    Returns:
        dict: 'accuracy' and 'balanced_accuracy' scores.
    """
    predicted_classes = np.argmax(p.predictions, axis=1)
    return {
        'accuracy': accuracy_score(p.label_ids, predicted_classes),
        'balanced_accuracy': balanced_accuracy_score(p.label_ids, predicted_classes),
    }
21
+
22
+
23
def compute_classification_metrics(preds, labels):
    """
    Compute F1-score, balanced accuracy, precision, and recall for multi-class classification.

    Args:
        preds (torch.Tensor): Predictions from the model (logits or probabilities). Shape: [num_samples, num_classes]
        labels (torch.Tensor): Ground truth labels. Shape: [num_samples]

    Returns:
        dict: Dictionary containing metrics (F1-score, balanced accuracy, precision, recall).
    """
    # Reduce per-class scores to hard class predictions, on CPU for sklearn.
    y_pred = torch.argmax(preds, dim=1).cpu().numpy()
    y_true = labels.cpu().numpy()

    # zero_division=0 silences warnings for classes absent from y_pred.
    return {
        'f1_macro': f1_score(y_true, y_pred, average='macro'),
        'f1_weighted': f1_score(y_true, y_pred, average='weighted'),
        'balanced_accuracy': balanced_accuracy_score(y_true, y_pred),
        'precision_macro': precision_score(y_true, y_pred, average='macro', zero_division=0),
        'precision_weighted': precision_score(y_true, y_pred, average='weighted', zero_division=0),
        'recall_macro': recall_score(y_true, y_pred, average='macro', zero_division=0),
        'recall_weighted': recall_score(y_true, y_pred, average='weighted', zero_division=0),
    }
glam4cm/utils.py ADDED
@@ -0,0 +1,194 @@
1
+ from argparse import ArgumentParser
2
+ import random
3
+ import numpy as np
4
+ import torch
5
+ import os
6
+ import fnmatch
7
+ import json
8
+ from typing import List
9
+ import xmltodict
10
+ from torch_geometric.data import Data
11
+ import hashlib
12
+ import networkx as nx
13
+ from collections import deque
14
+
15
+
16
+
17
def find_files_with_extension(root_dir, extension):
    """Recursively collect paths of files under root_dir matching *.extension.

    Args:
        root_dir: Directory to search recursively.
        extension: File extension without the leading dot (e.g. 'ecore').

    Returns:
        List[str]: Full paths of every matching file.
    """
    pattern = f'*.{extension}'
    matches: List[str] = []
    for dirpath, _, filenames in os.walk(root_dir):
        matches.extend(
            os.path.join(dirpath, filename)
            for filename in fnmatch.filter(filenames, pattern)
        )
    return matches
26
+
27
+
28
def xml_to_json(xml_string):
    """Convert an XML document string into a pretty-printed JSON string."""
    parsed = xmltodict.parse(xml_string)
    return json.dumps(parsed, indent=4)
32
+
33
+
34
def set_seed(seed):
    """Seed the Python, NumPy, and torch (CPU and CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
39
+
40
+
41
def create_dummy_graph(num_nodes, num_edges):
    """Build a random undirected graph with named, typed nodes and edges.

    Nodes are numbered 1..num_nodes and each gets a random 'type' from a
    fixed palette; num_edges distinct, self-loop-free edges are added.

    Returns:
        nx.Graph: The generated graph.
    """
    node_types = ['nt1', 'nt2', 'nt3', 'nt4', 'nt5', 'nt6', 'nt7']
    edge_types = ['et1', 'et2', 'et3', 'et4']

    graph = nx.Graph()

    # Nodes: 1-based ids with a random type label each.
    for node_id in range(1, num_nodes + 1):
        graph.add_node(node_id, name=f'node_{node_id}', type=random.choice(node_types))

    # Edges: redraw random pairs until enough unique, loop-free edges exist.
    edge_count = 0
    while edge_count < num_edges:
        src = random.randint(1, num_nodes)
        dst = random.randint(1, num_nodes)
        if src == dst or graph.has_edge(src, dst):
            continue
        graph.add_edge(src, dst, name=f'edge_{edge_count + 1}', type=random.choice(edge_types))
        edge_count += 1

    return graph
63
+
64
+
65
def bfs(graph: nx.Graph, start_node, d, exclude_edges: List[str] = None):
    """Breadth-first enumeration of simple paths starting at start_node.

    Collects every simple path (node list, including the trivial
    single-node path) of at most ``d`` edges. Directed pairs
    (current, neighbor) appearing in ``exclude_edges`` are not traversed.
    """
    excluded = [] if exclude_edges is None else exclude_edges

    paths = []
    frontier = deque([(start_node, [start_node])])

    while frontier:
        node, path = frontier.popleft()

        # Paths longer than d edges are dropped and never extended.
        if len(path) - 1 > d:
            continue

        paths.append(path)

        # Extend the path along allowed edges to nodes not yet on it.
        for neighbor in graph.neighbors(node):
            if neighbor not in path and (node, neighbor) not in excluded:
                frontier.append((neighbor, path + [neighbor]))

    return paths
91
+
92
+
93
def remove_subsets(list_of_lists):
    """Keep only lists whose element-sets are not covered by an already-kept list.

    Lists are considered longest-first, so only maximal element-sets
    survive; exact duplicates are also dropped (subset test is non-strict).
    """
    keepers = []
    for candidate in sorted(list_of_lists, key=len, reverse=True):
        candidate_set = set(candidate)
        covered = any(candidate_set <= set(kept) for kept in keepers)
        if not covered:
            keepers.append(candidate)
    return keepers
102
+
103
+
104
def get_size_format(sz):
    """Format a byte count as a human-readable string (e.g. '1.5 MB').

    Args:
        sz (int | float): Size in bytes.

    Returns:
        str: The size scaled to the largest unit it fits under, one decimal.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if sz < 1024.0:
            return "%3.1f %s" % (sz, unit)
        sz /= 1024.0
    # Bug fix: sizes >= 1024 TB used to fall off the loop and return None.
    return "%3.1f %s" % (sz, 'PB')
109
+
110
+
111
def get_file_size(file_path):
    """Return the size of the file at file_path as a human-readable string."""
    return get_size_format(os.path.getsize(file_path))
114
+
115
def get_directory_size(directory):
    """Return the combined size of all files under directory, human-readable."""
    total = sum(
        os.path.getsize(os.path.join(dirpath, filename))
        for dirpath, _, filenames in os.walk(directory)
        for filename in filenames
    )
    return get_size_format(total)
122
+
123
def get_tensor_size(tensor: torch.Tensor):
    """Return the in-memory size of the tensor's data as a human-readable string."""
    num_bytes = tensor.element_size() * tensor.nelement()
    return get_size_format(num_bytes)
125
+
126
def get_size_of_data(data: Data):
    """Approximate the memory footprint of a PyG Data object, human-readable.

    Tensor attributes contribute element_size * nelement bytes; plain ints
    contribute bit_length // 8 bytes; any other attribute type is ignored.
    """
    total = 0
    for _, value in data:
        if isinstance(value, torch.Tensor):
            total += value.element_size() * value.nelement()
        elif isinstance(value, int):
            total += value.bit_length() // 8
    return get_size_format(total)
135
+
136
+
137
+ def md5_hash(input_string):
138
+ md5_hash = hashlib.md5()
139
+ md5_hash.update(input_string.encode('utf-8'))
140
+ return md5_hash.hexdigest()
141
+
142
def randomize_features(dataset: List[Data], num_feats, mode):
    """Replace node or edge features of each graph with random normals, in place.

    Args:
        dataset: Graphs to modify.
        num_feats: Dimensionality of the new random feature vectors.
        mode: 'node' replaces data.x; 'edge' replaces data.edge_attr.

    Raises:
        ValueError: If mode is neither 'node' nor 'edge'.
    """
    for data in dataset:
        num_nodes = data.num_nodes
        # Prefer the full edge index when the graph carries one.
        edge_index = data.overall_edge_index if hasattr(data, 'overall_edge_index') else data.edge_index
        num_edges = edge_index.shape[1]
        if mode == 'node':
            data.x = torch.randn((num_nodes, num_feats))
        elif mode == 'edge':
            data.edge_attr = torch.randn((num_edges, num_feats))
        else:
            raise ValueError("Invalid mode. Choose 'node' or 'edge'.")
152
+
153
+
154
def merge_argument_parsers(p1: ArgumentParser, p2: ArgumentParser):
    """Combine the arguments of two parsers into one new ArgumentParser.

    NOTE: relies on argparse internals (_actions / _add_action) to
    re-register every non-help action from both source parsers.
    """
    merged = ArgumentParser(description="Merged Parser")
    for source in (p1, p2):
        for action in source._actions:
            # Each parser already owns a help action; don't duplicate it.
            if action.dest != "help":
                merged._add_action(action)
    return merged
168
+
169
+
170
def is_meaningful_line(line: str):
    """Return False for blank lines, '#' comments, and docstring-quote openers."""
    stripped = line.strip()
    if not stripped:
        return False
    return not stripped.startswith(("#", '"""', "'''"))
176
+
177
def count_lines_of_code_in_file(file_path):
    """Count the meaningful (non-blank, non-comment) lines in one file."""
    with open(file_path, 'r', encoding='utf-8') as source:
        return sum(1 for line in source if is_meaningful_line(line))
182
+
183
def count_total_lines_of_code(directory):
    """Sum meaningful lines of code across all .py files under directory."""
    total = 0
    for dirpath, _, filenames in os.walk(directory):
        for filename in filenames:
            if filename.endswith('.py'):
                total += count_lines_of_code_in_file(os.path.join(dirpath, filename))
    return total
191
+
192
+
193
def snake_to_title(snake_str: str):
    """Convert 'snake_case_text' to title case: 'Snake Case Text'."""
    spaced = " ".join(snake_str.split("_"))
    return spaced.title()
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Syed Juned Ali
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,86 @@
1
+ Metadata-Version: 2.2
2
+ Name: glam4cm
3
+ Version: 0.1.0
4
+ Summary: Graph Neural Networks and Language Models Trainer (Separate or combined) for conceptual models
5
+ Author-email: Syed Juned Ali <syed.juned.ali@tuwien.ac.at>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Syed Juned Ali
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+
28
+ Project-URL: Homepage, https://github.com/junaidiiith/glam4cm
29
+ Project-URL: Issues, https://github.com/junaidiiith/glam4cm/issues
30
+ Requires-Python: >=3.8
31
+ Description-Content-Type: text/markdown
32
+ License-File: LICENSE
33
+ Requires-Dist: langchain-text-splitters
34
+ Requires-Dist: scikit-learn
35
+ Requires-Dist: scipy
36
+ Requires-Dist: torch
37
+ Requires-Dist: numpy
38
+ Requires-Dist: transformers
39
+ Requires-Dist: sentence-transformers
40
+ Requires-Dist: tqdm
41
+ Requires-Dist: networkx
42
+ Requires-Dist: torch_geometric
43
+ Requires-Dist: pandas
44
+ Requires-Dist: tensorboardX
45
+ Requires-Dist: xmltodict
46
+ Requires-Dist: fasttext
47
+ Provides-Extra: dev
48
+ Requires-Dist: pytest; extra == "dev"
49
+ Requires-Dist: black; extra == "dev"
50
+
51
+ # glam4cm
52
+
53
+ The data in this archive is derived from the user-contributed content on the
54
+ Cooking Stack Exchange website (https://cooking.stackexchange.com/), used under
55
+ CC-BY-SA 3.0 (http://creativecommons.org/licenses/by-sa/3.0/).
56
+
57
+ The original data dump can be downloaded from:
58
+ https://archive.org/download/stackexchange/cooking.stackexchange.com.7z
59
+ and details about the dump obtained from:
60
+ https://archive.org/details/stackexchange
61
+
62
+ We distribute two files, under CC-BY-SA 3.0:
63
+
64
+ - cooking.stackexchange.txt, which contains all question titles and
65
+ their associated tags (one question per line, tags are prefixed by
66
+ the string "__label__") ;
67
+
68
+ - cooking.stackexchange.id, which contains the corresponding row IDs,
69
+ from the original data dump.
70
+
71
+
72
+ Node classification
73
+ Language Model
74
+ Freezing parameters
75
+ Effect of attributes
76
+ Effect of edge information
77
+ Effect of partial node type information
78
+
79
+ GNN
80
+ GCNConv, SAGEConv, GATConv
81
+ Random Node Embeddings
82
+ Bert Node Embeddings
83
+ Finetuned Node Embeddings
84
+ Random Edge Embeddings
85
+ Bert Edge Embeddings
86
+ Finetuned Edge Embeddings
@@ -0,0 +1,72 @@
1
+ glam4cm/__init__.py,sha256=7D0ayfMYZjOum3A9g2zP2FGabJTO4JUPQifMg9ALcWY,178
2
+ glam4cm/run.py,sha256=kP3Aa8qY5wapRSxeS5qmwukPM0iVLIGnjN6CIZ5-ECE,4204
3
+ glam4cm/run_configs.py,sha256=100hGJm7tx2enEhSckNE-nxBdDtm8kZyu498WEnqhAU,10122
4
+ glam4cm/settings.py,sha256=9PG4ZAqxCNg2Rt3JZ3ghl1_mzAw6vIVswC4JGY2Z2YE,1115
5
+ glam4cm/utils.py,sha256=1qUTS2r0QB38y4UdcScAEMYEpyUohDqpgwoHJvCGbZY,5918
6
+ glam4cm/data_loading/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ glam4cm/data_loading/data.py,sha256=rsH5vewA47km3I_pDS-MZoChK5Y7RSD7zQgtP8QhKhY,22088
8
+ glam4cm/data_loading/encoding.py,sha256=3duFeOShibiG9WL2JGLMKTkBS9nvrAQV9dhk0GWbdaE,2396
9
+ glam4cm/data_loading/graph_dataset.py,sha256=dSCCG8QXexpsVfpseLGxMEbs5qcYFYfVCsjnPTsUlYA,35061
10
+ glam4cm/data_loading/metadata.py,sha256=LpWp7JU7NEIUbwHJ_-OIjJN8il3MvDT66gjJN7EQJXY,1799
11
+ glam4cm/data_loading/models_dataset.py,sha256=mDfE15gDcEACD-rDIMM2Eltw5-xLXD55nvQ4Gws-9pM,12477
12
+ glam4cm/data_loading/utils.py,sha256=GM8DRHxjeAXAZWX1ITq1QfWWVY7jwdXGhbPx8L8V_IQ,907
13
+ glam4cm/downstream_tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
+ glam4cm/downstream_tasks/bert_edge_classification.py,sha256=xV0lgUcdyVVe82MMkESH8iYATwcvktK3uH7TNJZOY70,4280
15
+ glam4cm/downstream_tasks/bert_graph_classification.py,sha256=FfWwRQAXKDPkKsMNaJUcvcpO4D4IjuXE-3byjycwLRc,4177
16
+ glam4cm/downstream_tasks/bert_graph_classification_comp.py,sha256=JUyi9zjbyo6we7mFKLg1DVXKW-W95JPwaONcNOjZ-aA,4613
17
+ glam4cm/downstream_tasks/bert_link_prediction.py,sha256=QogzTERQFNjhYJaB1JT2_0817fWyYJUiodvszXFOGPI,3976
18
+ glam4cm/downstream_tasks/bert_node_classification.py,sha256=706fRltkgn7ucu7Xft1HSksGZY5vQuMqmXWthRLE8tc,4754
19
+ glam4cm/downstream_tasks/cm_gpt_edge_classification.py,sha256=9VGjcFlsetndHoYrm03I1TDcw9PJin41oOBTwma6E5Y,2258
20
+ glam4cm/downstream_tasks/cm_gpt_node_classification.py,sha256=JS0ntdro_WsIAxpL_nR71W8_IAG4enLeFH2Ak7kKQkA,2358
21
+ glam4cm/downstream_tasks/cm_gpt_pretraining.py,sha256=qtsrvrE8t7HG9cbyiKI8bKRzzoWn-j-7Iq1UxzIf--Q,1685
22
+ glam4cm/downstream_tasks/common_args.py,sha256=zj-rxPprn_V3R7nxqNFTw-vIiIqD5FNKixNdkhj-Y2s,5667
23
+ glam4cm/downstream_tasks/create_dataset.py,sha256=9ykTvvqX7f6j2zlhkU96fMrDgfLKvHQ5R9mH9pHxZ4c,1640
24
+ glam4cm/downstream_tasks/gnn_edge_classification.py,sha256=hnncLte0fGIWyyxCvWMEPKguVikqseyIGfd-4eQMYCc,3280
25
+ glam4cm/downstream_tasks/gnn_graph_cls.py,sha256=Fv5tyt0sK_FaHDaB_l5D6FBXdl5v5pbL2fuHB9yX7ns,3179
26
+ glam4cm/downstream_tasks/gnn_link_prediction.py,sha256=zbflVSQSck41XAbUkLRDANFvRTrbUE9E3Kl47mATTGY,3196
27
+ glam4cm/downstream_tasks/gnn_node_classification.py,sha256=qsz-ed26GY5MHbscu6UVnRBX-KeQcGmU76sMUfOf2Ps,3191
28
+ glam4cm/downstream_tasks/tf_idf_text_classification.py,sha256=_GUYIw86YM4q417IAazMaxZDOwDI2CrZAbQp7k0MAMg,691
29
+ glam4cm/downstream_tasks/utils.py,sha256=Hk-M_REZwwc4s0S2agj81hIFXOVqwEKBVcSkUM8ZLbw,989
30
+ glam4cm/downstream_tasks/word2vec_text_classification.py,sha256=tQMaSo9PVJlpqEnRBG7OEikUtOHckzA8FilnGk1N0zY,3531
31
+ glam4cm/embeddings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
32
+ glam4cm/embeddings/bert.py,sha256=hKitE9zwuUJdpC_VjyI1Ub3SWzV1TI24MQOkhaFlyAM,2789
33
+ glam4cm/embeddings/common.py,sha256=7NVe8Jlyg3jaBrM3pKxHUh8IBlLaXKl0SLebsHTUMY8,1126
34
+ glam4cm/embeddings/fasttext.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
35
+ glam4cm/embeddings/tfidf.py,sha256=-R10-053tm9mjgXSNgIzRGIDeL0AJKgVx7UtoHoaGgw,737
36
+ glam4cm/embeddings/w2v.py,sha256=acQX9wvtNRF8ghkniR2xikQj_KLBOYRYqQgvg3qD8l4,1384
37
+ glam4cm/encoding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
38
+ glam4cm/encoding/common.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
+ glam4cm/encoding/encoders.py,sha256=-qIMGiyljB8hukyVJf3D89gpkKkQNgHlluSeESmzzog,2735
40
+ glam4cm/graph2str/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
+ glam4cm/graph2str/common.py,sha256=vYNkLUM3rfmj0c-BsYfBQnUJGXyIC7B68HlYZrz8auY,1121
42
+ glam4cm/graph2str/constants.py,sha256=LwWXWTwlS_Q4Blbi9vHgqNh8OjWM5X_z-prEkPtnOJI,239
43
+ glam4cm/graph2str/ontouml.py,sha256=ZGDFTxSMTf2p4q0QbVXmY8jMSzZSpkvREt9VBuN--eg,5499
44
+ glam4cm/graph2str/uml.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
45
+ glam4cm/lang2graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
46
+ glam4cm/lang2graph/archimate.py,sha256=mg63U9OQB8LgDobW4ChRti8ya4UzfBVUbm-d8ljBhMw,839
47
+ glam4cm/lang2graph/bpmn.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
48
+ glam4cm/lang2graph/common.py,sha256=snea2JOIt5xvy1wvn6rSCOMCefNTmjO0Ni3hf4-2d50,13148
49
+ glam4cm/lang2graph/ecore.py,sha256=xzms1Bi3Wj6qsz960w_IK0X2dsNhRDBMPkDsaKEnT1M,8145
50
+ glam4cm/lang2graph/ontouml.py,sha256=DAODrTZnEWTWWR_CB2j_KfRk4G0Sw6lovpAd1jInbRk,7485
51
+ glam4cm/lang2graph/utils.py,sha256=d0b6k4MNwnA9GWewaIwr9uS7YzgRuhSAWaXu-GE-JMg,2089
52
+ glam4cm/models/cmgpt.py,sha256=2vnsYO73XZCKwHm0XTKgK8bQiVssF5tRIFr67E2NrCE,13038
53
+ glam4cm/models/gnn_layers.py,sha256=4hKiJTTartoXjS29hZEQxRWTFZ96FtDijUKT6dwe9lU,8127
54
+ glam4cm/models/hf.py,sha256=BE5cnCdSnpUzq_3ww43AqkZCjG67r1ckPDPdF8yfOoQ,508
55
+ glam4cm/tokenization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
56
+ glam4cm/tokenization/special_tokens.py,sha256=tM2WJDSheURKXm7-5QDczMdHuomPEx6HLTW8BFO0EWs,107
57
+ glam4cm/tokenization/utils.py,sha256=rrM2Owd2IQZAmErOHL5vTVDyVQoPZ-j8ztGt5VXK1fE,1206
58
+ glam4cm/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
59
+ glam4cm/trainers/bert_classifier.py,sha256=4yGMX1bkYsvBaTwyP3CV7U05uIYqQdDeUa84N8_uc3I,3599
60
+ glam4cm/trainers/cm_gpt_trainer.py,sha256=au3hXa9F8uEe-QBA139-Aw7IJ6GQEcjcfNcRgPF2TE8,5547
61
+ glam4cm/trainers/gnn_edge_classifier.py,sha256=81SE7dzKeJG60fuOQMn7A4ydpDCVXwdEp-qgfpE40Ok,4249
62
+ glam4cm/trainers/gnn_graph_classifier.py,sha256=vDfGzaklYrM-pXrj-9QrAJtuZWu3_xcKd0aQlwz7qRs,3873
63
+ glam4cm/trainers/gnn_link_predictor.py,sha256=WMomOdmk7H2aBwDBugPPN5yLcjzgdlade0OKlNvYxEo,5093
64
+ glam4cm/trainers/gnn_node_classifier.py,sha256=I4Rrx7m8oxevtBgelXuJbWUx33TBp1k12Y1to7oq3G8,4508
65
+ glam4cm/trainers/gnn_trainer.py,sha256=r06rR1bsmuiIZhzVcuAx3X_R5u0lZKt1B3shgSdLbxA,3871
66
+ glam4cm/trainers/metrics.py,sha256=LPFyRSAT50bPhQtI8yLK3VHuubxiIw6fTXkLUGTL3Ns,1823
67
+ glam4cm-0.1.0.dist-info/LICENSE,sha256=NzIgfG9Z6TaC4HlY6ownebjdGY0DKUXSgF5sM7bmxZI,1071
68
+ glam4cm-0.1.0.dist-info/METADATA,sha256=OJb9bm54MjDp6aA9gm0C0A7kxz1jndGW1GhAZFJOQeA,3260
69
+ glam4cm-0.1.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
70
+ glam4cm-0.1.0.dist-info/entry_points.txt,sha256=sZ-zOIJOyDP-vpTyVTTbdDyNando8uRVQmaFuAo_nuM,45
71
+ glam4cm-0.1.0.dist-info/top_level.txt,sha256=6V4mFMBo1sE2bowD9n2sxYv_ao8IsS8rR1ArAhfpQ4w,8
72
+ glam4cm-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.8.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ glam4cm = glam4cm.run:main
@@ -0,0 +1 @@
1
+ glam4cm