exonware_xwnode-0.0.1.12-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. exonware/__init__.py +14 -0
  2. exonware/xwnode/__init__.py +127 -0
  3. exonware/xwnode/base.py +676 -0
  4. exonware/xwnode/config.py +178 -0
  5. exonware/xwnode/contracts.py +730 -0
  6. exonware/xwnode/errors.py +503 -0
  7. exonware/xwnode/facade.py +460 -0
  8. exonware/xwnode/strategies/__init__.py +158 -0
  9. exonware/xwnode/strategies/advisor.py +463 -0
  10. exonware/xwnode/strategies/edges/__init__.py +32 -0
  11. exonware/xwnode/strategies/edges/adj_list.py +227 -0
  12. exonware/xwnode/strategies/edges/adj_matrix.py +391 -0
  13. exonware/xwnode/strategies/edges/base.py +169 -0
  14. exonware/xwnode/strategies/flyweight.py +328 -0
  15. exonware/xwnode/strategies/impls/__init__.py +13 -0
  16. exonware/xwnode/strategies/impls/_base_edge.py +403 -0
  17. exonware/xwnode/strategies/impls/_base_node.py +307 -0
  18. exonware/xwnode/strategies/impls/edge_adj_list.py +353 -0
  19. exonware/xwnode/strategies/impls/edge_adj_matrix.py +445 -0
  20. exonware/xwnode/strategies/impls/edge_bidir_wrapper.py +455 -0
  21. exonware/xwnode/strategies/impls/edge_block_adj_matrix.py +539 -0
  22. exonware/xwnode/strategies/impls/edge_coo.py +533 -0
  23. exonware/xwnode/strategies/impls/edge_csc.py +447 -0
  24. exonware/xwnode/strategies/impls/edge_csr.py +492 -0
  25. exonware/xwnode/strategies/impls/edge_dynamic_adj_list.py +503 -0
  26. exonware/xwnode/strategies/impls/edge_flow_network.py +555 -0
  27. exonware/xwnode/strategies/impls/edge_hyperedge_set.py +516 -0
  28. exonware/xwnode/strategies/impls/edge_neural_graph.py +650 -0
  29. exonware/xwnode/strategies/impls/edge_octree.py +574 -0
  30. exonware/xwnode/strategies/impls/edge_property_store.py +655 -0
  31. exonware/xwnode/strategies/impls/edge_quadtree.py +519 -0
  32. exonware/xwnode/strategies/impls/edge_rtree.py +820 -0
  33. exonware/xwnode/strategies/impls/edge_temporal_edgeset.py +558 -0
  34. exonware/xwnode/strategies/impls/edge_tree_graph_basic.py +271 -0
  35. exonware/xwnode/strategies/impls/edge_weighted_graph.py +411 -0
  36. exonware/xwnode/strategies/manager.py +775 -0
  37. exonware/xwnode/strategies/metrics.py +538 -0
  38. exonware/xwnode/strategies/migration.py +432 -0
  39. exonware/xwnode/strategies/nodes/__init__.py +50 -0
  40. exonware/xwnode/strategies/nodes/_base_node.py +307 -0
  41. exonware/xwnode/strategies/nodes/adjacency_list.py +267 -0
  42. exonware/xwnode/strategies/nodes/aho_corasick.py +345 -0
  43. exonware/xwnode/strategies/nodes/array_list.py +209 -0
  44. exonware/xwnode/strategies/nodes/base.py +247 -0
  45. exonware/xwnode/strategies/nodes/deque.py +200 -0
  46. exonware/xwnode/strategies/nodes/hash_map.py +135 -0
  47. exonware/xwnode/strategies/nodes/heap.py +307 -0
  48. exonware/xwnode/strategies/nodes/linked_list.py +232 -0
  49. exonware/xwnode/strategies/nodes/node_aho_corasick.py +520 -0
  50. exonware/xwnode/strategies/nodes/node_array_list.py +175 -0
  51. exonware/xwnode/strategies/nodes/node_avl_tree.py +371 -0
  52. exonware/xwnode/strategies/nodes/node_b_plus_tree.py +542 -0
  53. exonware/xwnode/strategies/nodes/node_bitmap.py +420 -0
  54. exonware/xwnode/strategies/nodes/node_bitset_dynamic.py +513 -0
  55. exonware/xwnode/strategies/nodes/node_bloom_filter.py +347 -0
  56. exonware/xwnode/strategies/nodes/node_btree.py +357 -0
  57. exonware/xwnode/strategies/nodes/node_count_min_sketch.py +470 -0
  58. exonware/xwnode/strategies/nodes/node_cow_tree.py +473 -0
  59. exonware/xwnode/strategies/nodes/node_cuckoo_hash.py +392 -0
  60. exonware/xwnode/strategies/nodes/node_fenwick_tree.py +301 -0
  61. exonware/xwnode/strategies/nodes/node_hash_map.py +269 -0
  62. exonware/xwnode/strategies/nodes/node_heap.py +191 -0
  63. exonware/xwnode/strategies/nodes/node_hyperloglog.py +407 -0
  64. exonware/xwnode/strategies/nodes/node_linked_list.py +409 -0
  65. exonware/xwnode/strategies/nodes/node_lsm_tree.py +400 -0
  66. exonware/xwnode/strategies/nodes/node_ordered_map.py +390 -0
  67. exonware/xwnode/strategies/nodes/node_ordered_map_balanced.py +565 -0
  68. exonware/xwnode/strategies/nodes/node_patricia.py +512 -0
  69. exonware/xwnode/strategies/nodes/node_persistent_tree.py +378 -0
  70. exonware/xwnode/strategies/nodes/node_radix_trie.py +452 -0
  71. exonware/xwnode/strategies/nodes/node_red_black_tree.py +497 -0
  72. exonware/xwnode/strategies/nodes/node_roaring_bitmap.py +570 -0
  73. exonware/xwnode/strategies/nodes/node_segment_tree.py +289 -0
  74. exonware/xwnode/strategies/nodes/node_set_hash.py +354 -0
  75. exonware/xwnode/strategies/nodes/node_set_tree.py +480 -0
  76. exonware/xwnode/strategies/nodes/node_skip_list.py +316 -0
  77. exonware/xwnode/strategies/nodes/node_splay_tree.py +393 -0
  78. exonware/xwnode/strategies/nodes/node_suffix_array.py +487 -0
  79. exonware/xwnode/strategies/nodes/node_treap.py +387 -0
  80. exonware/xwnode/strategies/nodes/node_tree_graph_hybrid.py +1434 -0
  81. exonware/xwnode/strategies/nodes/node_trie.py +252 -0
  82. exonware/xwnode/strategies/nodes/node_union_find.py +187 -0
  83. exonware/xwnode/strategies/nodes/node_xdata_optimized.py +369 -0
  84. exonware/xwnode/strategies/nodes/priority_queue.py +209 -0
  85. exonware/xwnode/strategies/nodes/queue.py +161 -0
  86. exonware/xwnode/strategies/nodes/sparse_matrix.py +206 -0
  87. exonware/xwnode/strategies/nodes/stack.py +152 -0
  88. exonware/xwnode/strategies/nodes/trie.py +274 -0
  89. exonware/xwnode/strategies/nodes/union_find.py +283 -0
  90. exonware/xwnode/strategies/pattern_detector.py +603 -0
  91. exonware/xwnode/strategies/performance_monitor.py +487 -0
  92. exonware/xwnode/strategies/queries/__init__.py +24 -0
  93. exonware/xwnode/strategies/queries/base.py +236 -0
  94. exonware/xwnode/strategies/queries/cql.py +201 -0
  95. exonware/xwnode/strategies/queries/cypher.py +181 -0
  96. exonware/xwnode/strategies/queries/datalog.py +70 -0
  97. exonware/xwnode/strategies/queries/elastic_dsl.py +70 -0
  98. exonware/xwnode/strategies/queries/eql.py +70 -0
  99. exonware/xwnode/strategies/queries/flux.py +70 -0
  100. exonware/xwnode/strategies/queries/gql.py +70 -0
  101. exonware/xwnode/strategies/queries/graphql.py +240 -0
  102. exonware/xwnode/strategies/queries/gremlin.py +181 -0
  103. exonware/xwnode/strategies/queries/hiveql.py +214 -0
  104. exonware/xwnode/strategies/queries/hql.py +70 -0
  105. exonware/xwnode/strategies/queries/jmespath.py +219 -0
  106. exonware/xwnode/strategies/queries/jq.py +66 -0
  107. exonware/xwnode/strategies/queries/json_query.py +66 -0
  108. exonware/xwnode/strategies/queries/jsoniq.py +248 -0
  109. exonware/xwnode/strategies/queries/kql.py +70 -0
  110. exonware/xwnode/strategies/queries/linq.py +238 -0
  111. exonware/xwnode/strategies/queries/logql.py +70 -0
  112. exonware/xwnode/strategies/queries/mql.py +68 -0
  113. exonware/xwnode/strategies/queries/n1ql.py +210 -0
  114. exonware/xwnode/strategies/queries/partiql.py +70 -0
  115. exonware/xwnode/strategies/queries/pig.py +215 -0
  116. exonware/xwnode/strategies/queries/promql.py +70 -0
  117. exonware/xwnode/strategies/queries/sparql.py +220 -0
  118. exonware/xwnode/strategies/queries/sql.py +275 -0
  119. exonware/xwnode/strategies/queries/xml_query.py +66 -0
  120. exonware/xwnode/strategies/queries/xpath.py +223 -0
  121. exonware/xwnode/strategies/queries/xquery.py +258 -0
  122. exonware/xwnode/strategies/queries/xwnode_executor.py +332 -0
  123. exonware/xwnode/strategies/queries/xwquery_strategy.py +424 -0
  124. exonware/xwnode/strategies/registry.py +604 -0
  125. exonware/xwnode/strategies/simple.py +273 -0
  126. exonware/xwnode/strategies/utils.py +532 -0
  127. exonware/xwnode/types.py +912 -0
  128. exonware/xwnode/version.py +78 -0
  129. exonware_xwnode-0.0.1.12.dist-info/METADATA +169 -0
  130. exonware_xwnode-0.0.1.12.dist-info/RECORD +132 -0
  131. exonware_xwnode-0.0.1.12.dist-info/WHEEL +4 -0
  132. exonware_xwnode-0.0.1.12.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,650 @@
+ """
+ Neural Graph Edge Strategy Implementation
+
+ This module implements the NEURAL_GRAPH strategy for neural network
+ computation graphs with automatic differentiation and gradient tracking.
+ """
+
+ from typing import Any, Iterator, Dict, List, Set, Optional, Tuple, Callable, Union
+ from collections import defaultdict, deque
+ import math
+ from enum import Enum
+ from ._base_edge import aEdgeStrategy
+ from ...types import EdgeMode, EdgeTrait
+
+
+ class ActivationFunction(Enum):
+     """Common activation functions."""
+     LINEAR = "linear"
+     SIGMOID = "sigmoid"
+     TANH = "tanh"
+     RELU = "relu"
+     LEAKY_RELU = "leaky_relu"
+     ELU = "elu"
+     SWISH = "swish"
+     GELU = "gelu"
+
+
+ class NeuralEdge:
+     """Represents a connection between neural network nodes."""
+
+     def __init__(self, edge_id: str, source: str, target: str,
+                  weight: float = 1.0, **properties):
+         self.edge_id = edge_id
+         self.source = source
+         self.target = target
+         self.weight = float(weight)
+         self.properties = properties.copy()
+
+         # Gradient tracking for backpropagation
+         self.gradient = 0.0
+         self.accumulated_gradient = 0.0
+         self.gradient_history: List[float] = []
+
+         # Training metadata
+         self.last_forward_value = 0.0
+         self.last_backward_value = 0.0
+         self.update_count = 0
+         self.learning_rate = properties.get('learning_rate', 0.01)
+
+         # Regularization
+         self.l1_lambda = properties.get('l1_lambda', 0.0)
+         self.l2_lambda = properties.get('l2_lambda', 0.0)
+         self.dropout_rate = properties.get('dropout_rate', 0.0)
+         self.is_frozen = properties.get('frozen', False)
+
+     def forward(self, input_value: float) -> float:
+         """Forward pass through edge."""
+         self.last_forward_value = input_value
+         return input_value * self.weight
+
+     def backward(self, gradient: float) -> float:
+         """Backward pass for gradient computation."""
+         self.gradient = gradient * self.last_forward_value
+         self.accumulated_gradient += self.gradient
+         self.last_backward_value = gradient * self.weight
+         return self.last_backward_value
+
+     def update_weight(self, optimizer_func: Optional[Callable] = None) -> None:
+         """Update weight based on accumulated gradients."""
+         if self.is_frozen:
+             return
+
+         if optimizer_func:
+             # Custom optimizer
+             self.weight = optimizer_func(self.weight, self.accumulated_gradient)
+         else:
+             # Simple SGD with regularization
+             regularization = 0.0
+
+             # L1 regularization
+             if self.l1_lambda > 0:
+                 regularization += self.l1_lambda * (1 if self.weight > 0 else -1)
+
+             # L2 regularization
+             if self.l2_lambda > 0:
+                 regularization += self.l2_lambda * self.weight
+
+             # Weight update
+             self.weight -= self.learning_rate * (self.accumulated_gradient + regularization)
+
+         # Store gradient history
+         self.gradient_history.append(self.accumulated_gradient)
+         if len(self.gradient_history) > 1000:  # Limit history size
+             self.gradient_history = self.gradient_history[-1000:]
+
+         # Reset accumulated gradient
+         self.accumulated_gradient = 0.0
+         self.update_count += 1
+
+     def reset_gradients(self) -> None:
+         """Reset all gradient information."""
+         self.gradient = 0.0
+         self.accumulated_gradient = 0.0
+
+     def get_weight_statistics(self) -> Dict[str, float]:
+         """Get statistics about weight updates."""
+         if not self.gradient_history:
+             return {'mean_gradient': 0.0, 'gradient_variance': 0.0}
+
+         mean_grad = sum(self.gradient_history) / len(self.gradient_history)
+         variance = sum((g - mean_grad) ** 2 for g in self.gradient_history) / len(self.gradient_history)
+
+         return {
+             'mean_gradient': mean_grad,
+             'gradient_variance': variance,
+             'gradient_magnitude': abs(mean_grad),
+             'update_count': self.update_count
+         }
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary representation."""
+         return {
+             'id': self.edge_id,
+             'source': self.source,
+             'target': self.target,
+             'weight': self.weight,
+             'gradient': self.gradient,
+             'accumulated_gradient': self.accumulated_gradient,
+             'learning_rate': self.learning_rate,
+             'l1_lambda': self.l1_lambda,
+             'l2_lambda': self.l2_lambda,
+             'dropout_rate': self.dropout_rate,
+             'is_frozen': self.is_frozen,
+             'update_count': self.update_count,
+             'properties': self.properties
+         }
+
+
+ class NeuralNode:
+     """Represents a node in the neural computation graph."""
+
+     def __init__(self, node_id: str, activation: ActivationFunction = ActivationFunction.LINEAR,
+                  bias: float = 0.0, **properties):
+         self.node_id = node_id
+         self.activation = activation
+         self.bias = bias
+         self.properties = properties.copy()
+
+         # Computation state
+         self.value = 0.0
+         self.gradient = 0.0
+         self.pre_activation = 0.0
+
+         # Training metadata
+         self.is_input = properties.get('is_input', False)
+         self.is_output = properties.get('is_output', False)
+         self.is_frozen = properties.get('frozen', False)
+
+         # Batch processing
+         self.batch_values: List[float] = []
+         self.batch_gradients: List[float] = []
+
+     def activate(self, x: float) -> float:
+         """Apply activation function."""
+         if self.activation == ActivationFunction.LINEAR:
+             return x
+         elif self.activation == ActivationFunction.SIGMOID:
+             return 1.0 / (1.0 + math.exp(-x))
+         elif self.activation == ActivationFunction.TANH:
+             return math.tanh(x)
+         elif self.activation == ActivationFunction.RELU:
+             return max(0.0, x)
+         elif self.activation == ActivationFunction.LEAKY_RELU:
+             alpha = self.properties.get('leaky_alpha', 0.01)
+             return x if x > 0 else alpha * x
+         elif self.activation == ActivationFunction.ELU:
+             alpha = self.properties.get('elu_alpha', 1.0)
+             return x if x > 0 else alpha * (math.exp(x) - 1)
+         elif self.activation == ActivationFunction.SWISH:
+             return x * (1.0 / (1.0 + math.exp(-x)))
+         elif self.activation == ActivationFunction.GELU:
+             return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
+         else:
+             return x
+
+     def activate_derivative(self, x: float) -> float:
+         """Compute derivative of activation function."""
+         if self.activation == ActivationFunction.LINEAR:
+             return 1.0
+         elif self.activation == ActivationFunction.SIGMOID:
+             s = self.activate(x)
+             return s * (1.0 - s)
+         elif self.activation == ActivationFunction.TANH:
+             t = math.tanh(x)
+             return 1.0 - t * t
+         elif self.activation == ActivationFunction.RELU:
+             return 1.0 if x > 0 else 0.0
+         elif self.activation == ActivationFunction.LEAKY_RELU:
+             alpha = self.properties.get('leaky_alpha', 0.01)
+             return 1.0 if x > 0 else alpha
+         elif self.activation == ActivationFunction.ELU:
+             alpha = self.properties.get('elu_alpha', 1.0)
+             return 1.0 if x > 0 else alpha * math.exp(x)
+         else:
+             return 1.0
+
+     def forward(self, input_sum: float) -> float:
+         """Forward pass through node."""
+         self.pre_activation = input_sum + self.bias
+         self.value = self.activate(self.pre_activation)
+         return self.value
+
+     def backward(self, gradient: float) -> float:
+         """Backward pass for gradient computation."""
+         activation_grad = self.activate_derivative(self.pre_activation)
+         self.gradient = gradient * activation_grad
+         return self.gradient
+
+
+ class xNeuralGraphStrategy(aEdgeStrategy):
+     """
+     Neural Graph strategy for neural network computation graphs.
+
+     Supports automatic differentiation, gradient-based optimization,
+     and various neural network architectures with backpropagation.
+     """
+
+     def __init__(self, traits: EdgeTrait = EdgeTrait.NONE, **options):
+         """Initialize the Neural Graph strategy."""
+         super().__init__(EdgeMode.NEURAL_GRAPH, traits, **options)
+
+         self.default_learning_rate = options.get('learning_rate', 0.01)
+         self.enable_autodiff = options.get('enable_autodiff', True)
+         self.batch_size = options.get('batch_size', 32)
+         self.enable_regularization = options.get('enable_regularization', True)
+
+         # Core storage
+         self._edges: Dict[str, NeuralEdge] = {}  # edge_id -> NeuralEdge
+         self._nodes: Dict[str, NeuralNode] = {}  # node_id -> NeuralNode
+         self._outgoing: Dict[str, List[str]] = defaultdict(list)  # source -> [edge_ids]
+         self._incoming: Dict[str, List[str]] = defaultdict(list)  # target -> [edge_ids]
+
+         # Network topology
+         self._input_nodes: Set[str] = set()
+         self._output_nodes: Set[str] = set()
+         self._hidden_nodes: Set[str] = set()
+         self._topological_order: List[str] = []
+
+         # Training state
+         self._training_mode = True
+         self._epoch_count = 0
+         self._total_loss = 0.0
+         self._edge_count = 0
+         self._edge_id_counter = 0
+
+     def get_supported_traits(self) -> EdgeTrait:
+         """Get the traits supported by the neural graph strategy."""
+         return (EdgeTrait.DIRECTED | EdgeTrait.WEIGHTED | EdgeTrait.SPARSE)
+
+     def _generate_edge_id(self) -> str:
+         """Generate unique edge ID."""
+         self._edge_id_counter += 1
+         return f"neural_edge_{self._edge_id_counter}"
+
+     def _compute_topological_order(self) -> None:
+         """Compute topological ordering for forward/backward passes."""
+         # Kahn's algorithm
+         in_degree = defaultdict(int)
+         for node_id in self._nodes:
+             in_degree[node_id] = len(self._incoming[node_id])
+
+         queue = deque([node for node, degree in in_degree.items() if degree == 0])
+         order = []
+
+         while queue:
+             current = queue.popleft()
+             order.append(current)
+
+             for edge_id in self._outgoing[current]:
+                 edge = self._edges[edge_id]
+                 target = edge.target
+                 in_degree[target] -= 1
+                 if in_degree[target] == 0:
+                     queue.append(target)
+
+         self._topological_order = order
+
+     # ============================================================================
+     # CORE EDGE OPERATIONS
+     # ============================================================================
+
+     def add_edge(self, source: str, target: str, **properties) -> str:
+         """Add neural connection edge."""
+         weight = properties.pop('weight', 1.0)
+         edge_id = properties.pop('edge_id', self._generate_edge_id())
+
+         if edge_id in self._edges:
+             raise ValueError(f"Edge ID {edge_id} already exists")
+
+         # Create neural edge
+         neural_edge = NeuralEdge(edge_id, source, target, weight, **properties)
+
+         # Store edge
+         self._edges[edge_id] = neural_edge
+         self._outgoing[source].append(edge_id)
+         self._incoming[target].append(edge_id)
+
+         # Ensure nodes exist
+         if source not in self._nodes:
+             self.add_neural_node(source)
+         if target not in self._nodes:
+             self.add_neural_node(target)
+
+         self._edge_count += 1
+         self._compute_topological_order()
+
+         return edge_id
+
+     def remove_edge(self, source: str, target: str, edge_id: Optional[str] = None) -> bool:
+         """Remove neural edge."""
+         if edge_id and edge_id in self._edges:
+             edge = self._edges[edge_id]
+             if edge.source == source and edge.target == target:
+                 # Remove from indices
+                 del self._edges[edge_id]
+                 self._outgoing[source].remove(edge_id)
+                 self._incoming[target].remove(edge_id)
+
+                 self._edge_count -= 1
+                 self._compute_topological_order()
+                 return True
+         else:
+             # Find edge by endpoints
+             for edge_id in self._outgoing.get(source, []):
+                 edge = self._edges[edge_id]
+                 if edge.target == target:
+                     return self.remove_edge(source, target, edge_id)
+
+         return False
+
+     def has_edge(self, source: str, target: str) -> bool:
+         """Check if edge exists."""
+         for edge_id in self._outgoing.get(source, []):
+             edge = self._edges[edge_id]
+             if edge.target == target:
+                 return True
+         return False
+
+     def get_edge_data(self, source: str, target: str) -> Optional[Dict[str, Any]]:
+         """Get edge data."""
+         for edge_id in self._outgoing.get(source, []):
+             edge = self._edges[edge_id]
+             if edge.target == target:
+                 return edge.to_dict()
+         return None
+
+     def neighbors(self, vertex: str, direction: str = 'out') -> Iterator[str]:
+         """Get neighbors of vertex."""
+         if direction in ['out', 'both']:
+             for edge_id in self._outgoing.get(vertex, []):
+                 edge = self._edges[edge_id]
+                 yield edge.target
+
+         if direction in ['in', 'both']:
+             for edge_id in self._incoming.get(vertex, []):
+                 edge = self._edges[edge_id]
+                 yield edge.source
+
+     def degree(self, vertex: str, direction: str = 'out') -> int:
+         """Get degree of vertex."""
+         if direction == 'out':
+             return len(self._outgoing.get(vertex, []))
+         elif direction == 'in':
+             return len(self._incoming.get(vertex, []))
+         else:  # both
+             return len(self._outgoing.get(vertex, [])) + len(self._incoming.get(vertex, []))
+
+     def edges(self, data: bool = False) -> Iterator[tuple]:
+         """Get all edges."""
+         for edge in self._edges.values():
+             if data:
+                 yield (edge.source, edge.target, edge.to_dict())
+             else:
+                 yield (edge.source, edge.target)
+
+     def vertices(self) -> Iterator[str]:
+         """Get all vertices."""
+         return iter(self._nodes.keys())
+
+     def __len__(self) -> int:
+         """Get number of edges."""
+         return self._edge_count
+
+     def vertex_count(self) -> int:
+         """Get number of vertices."""
+         return len(self._nodes)
+
+     def clear(self) -> None:
+         """Clear all data."""
+         self._edges.clear()
+         self._nodes.clear()
+         self._outgoing.clear()
+         self._incoming.clear()
+         self._input_nodes.clear()
+         self._output_nodes.clear()
+         self._hidden_nodes.clear()
+         self._topological_order.clear()
+
+         self._epoch_count = 0
+         self._total_loss = 0.0
+         self._edge_count = 0
+         self._edge_id_counter = 0
+
+     def add_vertex(self, vertex: str) -> None:
+         """Add vertex (neural node)."""
+         if vertex not in self._nodes:
+             self.add_neural_node(vertex)
+
+     def remove_vertex(self, vertex: str) -> bool:
+         """Remove vertex and all its edges."""
+         if vertex not in self._nodes:
+             return False
+
+         # Remove all outgoing edges
+         outgoing_edges = list(self._outgoing.get(vertex, []))
+         for edge_id in outgoing_edges:
+             edge = self._edges[edge_id]
+             self.remove_edge(edge.source, edge.target, edge_id)
+
+         # Remove all incoming edges
+         incoming_edges = list(self._incoming.get(vertex, []))
+         for edge_id in incoming_edges:
+             edge = self._edges[edge_id]
+             self.remove_edge(edge.source, edge.target, edge_id)
+
+         # Remove node
+         del self._nodes[vertex]
+         self._input_nodes.discard(vertex)
+         self._output_nodes.discard(vertex)
+         self._hidden_nodes.discard(vertex)
+
+         self._compute_topological_order()
+         return True
+
+     # ============================================================================
+     # NEURAL NETWORK OPERATIONS
+     # ============================================================================
+
+     def add_neural_node(self, node_id: str, activation: ActivationFunction = ActivationFunction.LINEAR,
+                         bias: float = 0.0, node_type: str = 'hidden', **properties) -> None:
+         """Add neural network node."""
+         node = NeuralNode(node_id, activation, bias, **properties)
+
+         if node_type == 'input':
+             node.is_input = True
+             self._input_nodes.add(node_id)
+         elif node_type == 'output':
+             node.is_output = True
+             self._output_nodes.add(node_id)
+         else:
+             self._hidden_nodes.add(node_id)
+
+         self._nodes[node_id] = node
+         self._compute_topological_order()
+
+     def set_node_value(self, node_id: str, value: float) -> None:
+         """Set node value (for input nodes)."""
+         if node_id in self._nodes:
+             self._nodes[node_id].value = value
+
+     def get_node_value(self, node_id: str) -> float:
+         """Get node value."""
+         return self._nodes.get(node_id, NeuralNode("")).value
+
+     def forward_pass(self, inputs: Dict[str, float]) -> Dict[str, float]:
+         """Perform forward pass through network."""
+         # Set input values
+         for node_id, value in inputs.items():
+             if node_id in self._input_nodes:
+                 self.set_node_value(node_id, value)
+
+         # Process nodes in topological order
+         for node_id in self._topological_order:
+             if node_id not in self._input_nodes:  # Skip input nodes
+                 node = self._nodes[node_id]
+
+                 # Compute weighted sum of inputs
+                 input_sum = 0.0
+                 for edge_id in self._incoming[node_id]:
+                     edge = self._edges[edge_id]
+                     source_value = self._nodes[edge.source].value
+                     input_sum += edge.forward(source_value)
+
+                 # Apply activation function
+                 node.forward(input_sum)
+
+         # Return output values
+         return {node_id: self.get_node_value(node_id) for node_id in self._output_nodes}
+
+     def backward_pass(self, target_outputs: Dict[str, float],
+                       loss_function: str = 'mse') -> float:
+         """Perform backward pass for gradient computation."""
+         # Compute loss and output gradients
+         total_loss = 0.0
+
+         for node_id in self._output_nodes:
+             node = self._nodes[node_id]
+             target = target_outputs.get(node_id, 0.0)
+
+             if loss_function == 'mse':
+                 # Mean squared error
+                 error = node.value - target
+                 total_loss += 0.5 * error * error
+                 node.gradient = error
+             elif loss_function == 'cross_entropy':
+                 # Cross entropy (simplified)
+                 total_loss += -target * math.log(max(1e-15, node.value))
+                 node.gradient = node.value - target
+             else:
+                 # Default to MSE
+                 error = node.value - target
+                 total_loss += 0.5 * error * error
+                 node.gradient = error
+
+         # Backpropagate gradients
+         for node_id in reversed(self._topological_order):
+             if node_id not in self._output_nodes:  # Skip output nodes (already have gradients)
+                 node = self._nodes[node_id]
+                 node.gradient = 0.0
+
+                 # Accumulate gradients from outgoing edges
+                 for edge_id in self._outgoing[node_id]:
+                     edge = self._edges[edge_id]
+                     target_node = self._nodes[edge.target]
+                     node.gradient += edge.backward(target_node.gradient)
+
+                 # Apply activation derivative
+                 if not node.is_input:
+                     node.gradient = node.backward(node.gradient)
+
+         self._total_loss += total_loss
+         return total_loss
+
+     def update_weights(self, optimizer_func: Optional[Callable] = None) -> None:
+         """Update all edge weights based on gradients."""
+         for edge in self._edges.values():
+             edge.update_weight(optimizer_func)
+
+     def train_step(self, inputs: Dict[str, float], targets: Dict[str, float],
+                    loss_function: str = 'mse') -> float:
+         """Perform one training step (forward + backward + update)."""
+         # Forward pass
+         outputs = self.forward_pass(inputs)
+
+         # Backward pass
+         loss = self.backward_pass(targets, loss_function)
+
+         # Update weights
+         if self._training_mode:
+             self.update_weights()
+
+         return loss
+
+     def reset_gradients(self) -> None:
+         """Reset all gradients to zero."""
+         for edge in self._edges.values():
+             edge.reset_gradients()
+         for node in self._nodes.values():
+             node.gradient = 0.0
+
+     def set_training_mode(self, training: bool) -> None:
+         """Set training mode (affects weight updates)."""
+         self._training_mode = training
+
+     def get_network_statistics(self) -> Dict[str, Any]:
+         """Get comprehensive network statistics."""
+         if not self._nodes:
+             return {'nodes': 0, 'edges': 0, 'layers': 0}
+
+         # Compute layer depths
+         layer_depths = {}
+         for node_id in self._topological_order:
+             if node_id in self._input_nodes:
+                 layer_depths[node_id] = 0
+             else:
+                 max_input_depth = max(
+                     (layer_depths.get(self._edges[edge_id].source, 0)
+                      for edge_id in self._incoming[node_id]),
+                     default=-1
+                 )
+                 layer_depths[node_id] = max_input_depth + 1
+
+         max_depth = max(layer_depths.values()) if layer_depths else 0
+
+         # Weight statistics
+         weights = [edge.weight for edge in self._edges.values()]
+         gradients = [edge.accumulated_gradient for edge in self._edges.values()]
+
+         return {
+             'nodes': len(self._nodes),
+             'edges': self._edge_count,
+             'input_nodes': len(self._input_nodes),
+             'hidden_nodes': len(self._hidden_nodes),
+             'output_nodes': len(self._output_nodes),
+             'layers': max_depth + 1,
+             'training_mode': self._training_mode,
+             'epoch_count': self._epoch_count,
+             'total_loss': self._total_loss,
+             'avg_weight': sum(weights) / len(weights) if weights else 0,
+             'weight_variance': sum((w - sum(weights)/len(weights))**2 for w in weights) / len(weights) if len(weights) > 1 else 0,
+             'avg_gradient': sum(abs(g) for g in gradients) / len(gradients) if gradients else 0
+         }
+
+     # ============================================================================
+     # PERFORMANCE CHARACTERISTICS
+     # ============================================================================
+
+     @property
+     def backend_info(self) -> Dict[str, Any]:
+         """Get backend implementation info."""
+         return {
+             'strategy': 'NEURAL_GRAPH',
+             'backend': 'Computation graph with automatic differentiation',
+             'enable_autodiff': self.enable_autodiff,
+             'default_learning_rate': self.default_learning_rate,
+             'batch_size': self.batch_size,
+             'complexity': {
+                 'forward_pass': 'O(V + E)',
+                 'backward_pass': 'O(V + E)',
+                 'weight_update': 'O(E)',
+                 'memory': 'O(V + E)',
+                 'training_step': 'O(V + E)'
+             }
+         }
+
+     @property
+     def metrics(self) -> Dict[str, Any]:
+         """Get performance metrics."""
+         stats = self.get_network_statistics()
+
+         return {
+             'nodes': stats['nodes'],
+             'edges': stats['edges'],
+             'layers': stats['layers'],
+             'parameters': self._edge_count,  # Each edge is a parameter
+             'training_mode': stats['training_mode'],
+             'avg_weight': f"{stats['avg_weight']:.4f}",
+             'avg_gradient': f"{stats['avg_gradient']:.6f}",
+             'memory_usage': f"{self._edge_count * 150 + len(self._nodes) * 100} bytes (estimated)"
+         }
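
Usage sketch (illustrative, not part of the released file): the strategy above can be driven directly with only the methods shown in this diff. Nodes are registered with a role and an activation, edges carry the trainable weights, and train_step chains forward_pass, backward_pass, and an SGD update. The snippet assumes the import path from the file list above and the default constructor arguments shown here; the network shape, initial weights, and iteration count are arbitrary.

# Illustrative sketch; assumes the classes behave as defined in this diff.
from exonware.xwnode.strategies.impls.edge_neural_graph import (
    ActivationFunction,
    xNeuralGraphStrategy,
)

net = xNeuralGraphStrategy()

# Two inputs feeding one sigmoid output; each edge holds a trainable weight.
# Note that NeuralEdge reads 'learning_rate' from its own properties, so it
# is passed per edge here rather than to the strategy constructor.
net.add_neural_node("x1", node_type="input")
net.add_neural_node("x2", node_type="input")
net.add_neural_node("y", activation=ActivationFunction.SIGMOID, node_type="output")
net.add_edge("x1", "y", weight=0.5, learning_rate=0.1)
net.add_edge("x2", "y", weight=-0.5, learning_rate=0.1)

# Each train_step runs one forward pass, backpropagates the loss gradient,
# and applies a plain SGD update (plus L1/L2 terms when configured).
for _ in range(100):
    loss = net.train_step({"x1": 1.0, "x2": 0.0}, {"y": 1.0}, loss_function="mse")

net.set_training_mode(False)  # disable weight updates for inference
print(net.forward_pass({"x1": 1.0, "x2": 0.0}))  # {'y': ...}, approaching 1.0

A custom update rule can be substituted by calling update_weights(optimizer_func) directly: per update_weight above, optimizer_func receives the current weight and its accumulated gradient and returns the new weight.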