pyerualjetwork 4.1.1__py3-none-any.whl → 4.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +1 -1
- pyerualjetwork/activation_functions.py +45 -48
- pyerualjetwork/activation_functions_cuda.py +45 -46
- pyerualjetwork/data_operations.py +2 -2
- pyerualjetwork/data_operations_cuda.py +0 -7
- pyerualjetwork/help.py +4 -4
- pyerualjetwork/metrics_cuda.py +1 -2
- pyerualjetwork/model_operations.py +1 -0
- pyerualjetwork/model_operations_cuda.py +1 -0
- pyerualjetwork/plan.py +3 -11
- pyerualjetwork/plan_cuda.py +4 -17
- pyerualjetwork/visualizations_cuda.py +5 -5
- {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/METADATA +1 -1
- pyerualjetwork-4.1.2.dist-info/RECORD +23 -0
- pyerualjetwork-4.1.1.dist-info/RECORD +0 -23
- {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/top_level.txt +0 -0
    
        pyerualjetwork/__init__.py
    CHANGED
    
@@ -47,7 +47,7 @@ for package_name in package_names:
 
     print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.1"
+__version__ = "4.1.2"
 
 __update__ = "* Note: CUDA modules need cupy. Enter this command in your terminal: 'pip install cupy-cuda12x' or your cuda version.\n* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
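Only the version string changes in this file. After upgrading, the bump is visible at runtime; a quick check, assuming the wheel is installed:

    import pyerualjetwork
    print(pyerualjetwork.__version__)  # expected: "4.1.2"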
        pyerualjetwork/activation_functions.py
    CHANGED

@@ -218,15 +218,12 @@ def scaled_cubic(x, alpha=1.0):
 
 def sine_offset(x, beta=0.0):
     return np.sin(x + beta)
 
-
-
-def safe_aggregate(current_sum, new_value):
+def safe_add(current_sum, new_value):
     try:
         return current_sum + new_value
     except OverflowError:
         return np.array(current_sum) + np.array(new_value)
 
-
 def apply_activation(Input, activation_list):
     """
         Applies a sequence of activation functions to the input.

@@ -244,93 +241,93 @@ def apply_activation(Input, activation_list):
     for i in range(len(activation_list)):
         try:
             if activation_list[i] == 'sigmoid':
-                Input = safe_aggregate(Input, Sigmoid(origin_input))
+                Input = safe_add(Input, Sigmoid(origin_input))
             elif activation_list[i] == 'swish':
-                Input = safe_aggregate(Input, swish(origin_input))
+                Input = safe_add(Input, swish(origin_input))
    (… the same one-line substitution, safe_aggregate → safe_add, is made in every remaining branch: 'mod_circular' (modular_circular_activation), 'tanh_circular' (tanh_circular_activation), 'leaky_relu', 'relu', 'softplus', 'elu', 'gelu', 'selu', 'tanh', 'sinakt', 'p_squared', 'sglu', 'dlrelu', 'exsig', 'sin_plus', 'acos', 'gla', 'srelu', 'qelu', 'isra', 'waveakt', 'arctan', 'bent_identity', 'sech', 'softsign', 'pwl', 'cubic', 'gaussian', 'sine', 'tanh_square', 'mod_sigmoid', 'linear', 'quartic', 'square_quartic', 'cubic_quadratic', 'exp_cubic', 'sine_square', 'logarithmic', 'scaled_cubic', 'sine_offset', 'spiral' (spiral_activation), 'circular' (circular_activation). Each call keeps its arguments, e.g. sglu(origin_input, alpha=1.0), acos(origin_input, alpha=1.0, beta=0.0), gla(origin_input, alpha=1.0, mu=0.0), scaled_cubic(origin_input, 1.0), sine_offset(origin_input, 1.0); the 'linear' branch adds origin_input itself …)
 
 
         except Exception as e:
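The renamed helper is small enough to read in isolation. Below is a hedged, self-contained sketch of how safe_add accumulates activation outputs the way apply_activation does; the inline sigmoid/tanh expressions stand in for the library's Sigmoid and tanh helpers, which are not shown in this diff:

    import numpy as np

    def safe_add(current_sum, new_value):
        # Logic as in 4.1.2: if a plain-Python addition raises OverflowError
        # (e.g. a huge Python int added to a float), retry with NumPy arrays,
        # whose float arithmetic saturates to inf instead of raising.
        try:
            return current_sum + new_value
        except OverflowError:
            return np.array(current_sum) + np.array(new_value)

    x = np.array([0.5, -1.0, 2.0])
    out = x.copy()                                  # 'linear' term: the input itself
    out = safe_add(out, 1.0 / (1.0 + np.exp(-x)))   # sigmoid of the original input
    out = safe_add(out, np.tanh(x))                 # tanh of the original input
    print(out)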
        pyerualjetwork/activation_functions_cuda.py
    CHANGED

@@ -219,13 +219,12 @@ def sine_offset(x, beta=0.0):
 
-def safe_aggregate(current_sum, new_value):
+def safe_add(current_sum, new_value):
     try:
         return current_sum + new_value
     except OverflowError:
         return cp.array(current_sum) + cp.array(new_value)
 
-
 def apply_activation(Input, activation_list):
     """
         Applies a sequence of activation functions to the input.

@@ -243,93 +242,93 @@ def apply_activation(Input, activation_list):
     for i in range(len(activation_list)):
         try:
             if activation_list[i] == 'sigmoid':
-                Input = safe_aggregate(Input, Sigmoid(origin_input))
+                Input = safe_add(Input, Sigmoid(origin_input))
    (… the same safe_aggregate → safe_add rename is applied in every branch, over the identical list of activations as in activation_functions.py above; the only difference is the cp.array coercion in the except path …)
 
 
         except Exception as e:
             warnings.warn(f"Error in activation {activation_list[i]}: {str(e)}", RuntimeWarning)
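Usage is the same for both backends; a hedged sketch against the CUDA variant, assuming cupy is installed and using activation names taken from the branches above (the function's internals beyond this diff are not shown here):

    import cupy as cp
    from pyerualjetwork.activation_functions_cuda import apply_activation

    x = cp.random.rand(4).astype(cp.float32)
    # Each named activation is computed from the original input and summed
    # into the running result via safe_add.
    out = apply_activation(x, ['sigmoid', 'tanh', 'relu'])
    print(out)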
        pyerualjetwork/data_operations.py
    CHANGED

@@ -76,7 +76,7 @@ def decode_one_hot(encoded_data):
     return decoded_labels
 
-def split(X, y, test_size, random_state, dtype=np.float32):
+def split(X, y, test_size, random_state=42, dtype=np.float32):
     """
     Splits the given X (features) and y (labels) data into training and testing subsets.

@@ -84,7 +84,7 @@ def split(X, y, test_size, random_state=42, dtype=np.float32):
         X (numpy.ndarray): Features data.
         y (numpy.ndarray): Labels data.
         test_size (float or int): Proportion or number of samples for the test subset.
-        random_state (int or None): Seed for random state.
+        random_state (int or None): Seed for random state. Default: 42.
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!]
 
     Returns:
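The only change here is that random_state now defaults to 42, so repeated calls without an explicit seed return the same partition. A minimal sketch of seeded splitting under that contract (illustrative only, not the library's implementation):

    import numpy as np

    def split_sketch(X, y, test_size, random_state=42, dtype=np.float32):
        # Same seed => same permutation => reproducible subsets.
        rng = np.random.default_rng(random_state)
        n_test = int(len(X) * test_size) if isinstance(test_size, float) else test_size
        idx = rng.permutation(len(X))
        test_idx, train_idx = idx[:n_test], idx[n_test:]
        return (X[train_idx].astype(dtype), X[test_idx].astype(dtype),
                y[train_idx], y[test_idx])

    X, y = np.arange(20).reshape(10, 2), np.arange(10)
    a = split_sketch(X, y, test_size=0.3)
    b = split_sketch(X, y, test_size=0.3)
    assert all(np.array_equal(p, q) for p, q in zip(a, b))  # deterministic by default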
        pyerualjetwork/data_operations_cuda.py
    CHANGED

@@ -87,15 +87,10 @@ def split(X, y, test_size, random_state=42, dtype=cp.float32, use_cpu=False):
 
     Args:
         X (cupy.ndarray): Features data.
-
         y (cupy.ndarray): Labels data.
-
         test_size (float or int): Proportion or number of samples for the test subset.
-
         random_state (int or None): Seed for random state. Default: 42.
-
         dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
-
         use_cpu (bool): If True, output will be same cpu's split function. Default: False.
     Returns:
         tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.

@@ -255,7 +250,6 @@ def auto_balancer(x_train, y_train, dtype=cp.float32, use_cpu=False):
    dtype (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
    use_cpu (bool): If True, output will be same cpu's auto_balancer function. Default: False.
-
    Returns:
    tuple: A tuple containing balanced input data and labels.
     """

@@ -399,7 +393,6 @@ def synthetic_augmentation(x_train, y_train, dtype=cp.float32, use_cpu=False):
 
     return x_balanced, y_balanced
 
-
 def standard_scaler(x_train=None, x_test=None, scaler_params=None, dtype=cp.float32):
     """
     Standardizes training and test datasets. x_test may be None.
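use_cpu=True is documented to make the GPU split match the CPU one. Generating the permutation with NumPy and indexing the device arrays with it is one way to get that behaviour; a hedged illustration of the idea (not the library's code):

    import numpy as np
    import cupy as cp

    X = cp.random.rand(100, 4).astype(cp.float32)
    y = cp.arange(100)

    rng = np.random.default_rng(42)           # CPU RNG => CPU-identical ordering
    idx = cp.asarray(rng.permutation(100))    # move the index order to the device
    x_test, x_train = X[idx[:20]], X[idx[20:]]
    y_test, y_train = y[idx[:20]], y[idx[20:]]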
    
        pyerualjetwork/help.py
    CHANGED
    
@@ -10,7 +10,7 @@ def activation_potentiation():
 
 def docs_and_examples():
 
-    print('PLAN document: https://github.com/HCB06/…
-    print('PLAN examples: https://github.com/HCB06/…
-    print('PLANEAT examples: https://github.com/HCB06/…
-    print('…
+    print('PLAN document: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_PLAN\n')
+    print('PLAN examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes\n')
+    print('PLANEAT examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan/ExampleCodes/PLANEAT\n')
+    print('Anaplan document and examples: https://github.com/HCB06/Anaplan/tree/main/Welcome_to_Anaplan')
    
        pyerualjetwork/metrics_cuda.py
    CHANGED
    
@@ -1,7 +1,7 @@
 import cupy as cp
-from .data_operations_cuda import decode_one_hot
 
 def metrics(y_ts, test_preds, average='weighted'):
+    from .data_operations import decode_one_hot
     y_test_d = cp.array(decode_one_hot(y_ts))
     y_pred = cp.array(test_preds)
 

@@ -50,7 +50,6 @@ def metrics(y_ts, test_preds, average='weighted'):
     return precision_val.item(), recall_val.item(), f1_val.item()
 
 
-
 def roc_curve(y_true, y_score):
     """
     Compute Receiver Operating Characteristic (ROC) curve.
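Moving the decode_one_hot import from module scope into metrics() defers it until the first call, the standard way to sidestep a circular import between two modules (the switch to the CPU module also works here because cp.array converts the result back to the device). A generic sketch of the pattern, written as two hypothetical modules:

    # a.py (hypothetical)
    import b                    # b is imported while a is still initializing

    SOME_CONSTANT = 41

    def run():
        return b.helper() + 1   # 42

    # b.py (hypothetical)
    def helper():
        # Deferred import: pulling a's name at call time, not at module load,
        # avoids the circular-import failure while a is only partially built.
        from a import SOME_CONSTANT
        return SOME_CONSTANT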
    
        pyerualjetwork/plan.py
    CHANGED
    
@@ -125,15 +125,7 @@ def fit(
 
     elif val and (x_val is not None and y_val is not None):
         x_val = x_val.astype(dtype, copy=False)
-        if len(y_val[0]) < 256:
-            if y_val.dtype != np.uint8:
-                y_val = np.array(y_val, copy=False).astype(np.uint8, copy=False)
-        elif len(y_val[0]) <= 32767:
-            if y_val.dtype != np.uint16:
-                y_val = np.array(y_val, copy=False).astype(np.uint16, copy=False)
-        else:
-            if y_val.dtype != np.uint32:
-                y_val = np.array(y_val, copy=False).astype(np.uint32, copy=False)
+        y_val = y_val.astype(dtype, copy=False)
 
     val_list = [] if val else None
     val_count = val_count or 10

@@ -145,7 +137,7 @@ def fit(
 
     # Training process
     for index, inp in enumerate(x_train):
-        inp = np.array(inp, copy=False).ravel()
+        inp = np.array(inp, copy=False, dtype=dtype).ravel()
         y_decoded = decode_one_hot(y_train)
         # Weight updates
         STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)

@@ -231,7 +223,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
 
     """
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'…
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
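For reference, the branch deleted from fit() picked the narrowest unsigned integer dtype that could hold the one-hot width; 4.1.2 simply casts y_val to the caller's dtype instead. A standalone sketch of the removed selection logic, with the thresholds taken from the diff above:

    import numpy as np

    def smallest_uint_dtype(one_hot_width):
        # Mirrors the thresholds in the branch removed in 4.1.2.
        if one_hot_width < 256:
            return np.uint8
        elif one_hot_width <= 32767:
            return np.uint16
        return np.uint32

    print(smallest_uint_dtype(10))      # <class 'numpy.uint8'>
    print(smallest_uint_dtype(5000))    # <class 'numpy.uint16'>
    print(smallest_uint_dtype(100000))  # <class 'numpy.uint32'>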
    
        pyerualjetwork/plan_cuda.py
    CHANGED
    
@@ -120,22 +120,9 @@ def fit(
     if len(x_train) != len(y_train):
         raise ValueError("x_train and y_train must have the same length.")
 
-    if val and (x_val is None and y_val is None):
+    if val and (x_val is None or y_val is None):
         x_val, y_val = x_train, y_train
 
-    elif val and (x_val is not None and y_val is not None):
-        x_val = cp.array(x_val, copy=False).astype(dtype, copy=False)
-
-        if len(y_val[0]) < 256:
-            if y_val.dtype != cp.uint8:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint8, copy=False)
-        elif len(y_val[0]) <= 32767:
-            if y_val.dtype != cp.uint16:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint16, copy=False)
-        else:
-            if y_val.dtype != cp.uint32:
-                y_val = cp.array(y_val, copy=False).astype(cp.uint32, copy=False)
-
     val_list = [] if val else None
     val_count = val_count or 10
     # Defining weights

@@ -232,7 +219,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
 
     """
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'…
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()

@@ -297,9 +284,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols = …
+        ncols = 100
     else:
-        ncols = …
+        ncols = 140
     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     # Initialize variables
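The condition guarding the validation fallback changed from requiring both x_val and y_val to be missing to requiring either, so passing only one of them no longer slips past the fallback. The truth table, as a quick check (old behaviour reconstructed from the truncated line above):

    for x_val, y_val in [(None, None), (None, 'y'), ('x', None), ('x', 'y')]:
        old = x_val is None and y_val is None   # 4.1.1 behaviour (reconstructed)
        new = x_val is None or y_val is None    # 4.1.2 behaviour
        print(f"x_val={x_val!r:6} y_val={y_val!r:6} old={old!s:5} new={new}")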
        pyerualjetwork/visualizations_cuda.py
    CHANGED

@@ -525,8 +525,8 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):
 
 def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
-    from metrics_cuda import pca
-    from data_operations_cuda import decode_one_hot
+    from .metrics_cuda import pca
+    from .data_operations_cuda import decode_one_hot
 
     if x.shape[1] > 2:

@@ -587,7 +587,7 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False
 
             title_info = f'{j+1}. Neuron'
 
-            art5 = ax1[j].imshow(mat, interpolation='sinc', cmap='viridis')
+            art5 = ax1[j].imshow(mat.get(), interpolation='sinc', cmap='viridis')
 
             ax1[j].set_aspect('equal')
             ax1[j].set_xticks([])

@@ -604,7 +604,7 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False
 
 def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
     """Initializes the visualization setup based on the parameters."""
-    from data_operations import find_closest_factors
+    from .data_operations import find_closest_factors
     visualization_objects = {}
 
     if show_training:

@@ -680,7 +680,7 @@ def display_visualization_for_fit(fig, artist_list, interval):
 
 def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
     """Initialize all visualization components"""
-    from data_operations import find_closest_factors
+    from .data_operations import find_closest_factors
     viz_objects = {}
 
     if show_history:
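Matplotlib works on host (NumPy) memory, so a CuPy array has to be copied off the device first; .get() does exactly that, which is why imshow(mat) became imshow(mat.get()). A minimal reproduction, assuming cupy and matplotlib are installed:

    import cupy as cp
    import matplotlib.pyplot as plt

    mat = cp.random.rand(8, 8)                     # lives in GPU memory
    # plt.imshow(mat) fails: CuPy refuses implicit conversion to NumPy.
    plt.imshow(mat.get(), interpolation='sinc', cmap='viridis')  # host copy works
    plt.show()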
        {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/METADATA
    CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.1
+Version: 4.1.2
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
        pyerualjetwork-4.1.2.dist-info/RECORD
    ADDED

@@ -0,0 +1,23 @@
+pyerualjetwork/__init__.py,sha256=5oH9sQ9xOXUWYxBJt2h4ErHKFw63vREeOnKTdhSQGhk,2542
+pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
+pyerualjetwork/activation_functions_cuda.py,sha256=7U69VfwAIE8STUng2zEwPPQES9NgnkAXsDtVh-EzaZE,11803
+pyerualjetwork/data_operations.py,sha256=2julEScuHsL_ueeJ-JE3hiqw3wibZQW_L2bwwdoXTN0,16552
+pyerualjetwork/data_operations_cuda.py,sha256=uVGcLwhhePkZt2BnO9KrsIMq29CW5L_9ucyxN8Wnevw,18711
+pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
+pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+pyerualjetwork/metrics_cuda.py,sha256=Hz4PCeE5GcVUllZdsgXXdIw-UNqUVpqNxMIlPBNTSKY,5069
+pyerualjetwork/model_operations.py,sha256=eWYiYlXYZzsRgVfF-4CFvjCHaZOGB2378evre8yCzYk,13084
+pyerualjetwork/model_operations_cuda.py,sha256=Hryk2Qi6BwHY9K9G_muDxHW9ILK8dIW6lmwZfioKqYM,13246
+pyerualjetwork/plan.py,sha256=1PDMyBnCsQgyks4esnPobcUNBHbex54JG2oFEV_Q_9g,34336
+pyerualjetwork/plan_cuda.py,sha256=bpI4HVMexL5WiGU30Nj1mzp8f9sOyxuDw7Ka7LqQR7g,33958
+pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
+pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
+pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
+pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
+pyerualjetwork/visualizations_cuda.py,sha256=hH2FMjbsImAxTLIAUS2pfGSufigV-SbgpVMVrj4lYOE,26733
+pyerualjetwork-4.1.2.dist-info/METADATA,sha256=vSlo45lfiRI3HHq3z4aUrX1LZXG83t14nLM-w8jqbSU,6357
+pyerualjetwork-4.1.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.2.dist-info/RECORD,,
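Each RECORD line has the form `path,sha256=<urlsafe-base64 SHA-256 digest with '=' padding stripped>,<size in bytes>`, per the wheel spec (PEP 376/427). A small sketch that recomputes an entry for any file, usable to spot-check the hashes above:

    import base64
    import hashlib
    import pathlib

    def record_entry(path):
        # Recompute a wheel RECORD line: urlsafe-base64 SHA-256 without '='
        # padding, followed by the file size in bytes.
        data = pathlib.Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
        return f"{path},sha256={digest.decode()},{len(data)}"

    print(record_entry('pyerualjetwork/__init__.py'))  # compare against the RECORD line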
        pyerualjetwork-4.1.1.dist-info/RECORD
    REMOVED

@@ -1,23 +0,0 @@
-pyerualjetwork/__init__.py,sha256=-sXxmc1Kgkyw7AHtIrteMR-wA4vafdHZ5Ds7TFRbt3c,2542
-pyerualjetwork/activation_functions.py,sha256=UeuuagJWcSoFfmwikDU7O8ph--oySnWDJNqKbEh4SlE,12043
-pyerualjetwork/activation_functions_cuda.py,sha256=6pdCthpiGmnMwRfuduGsvwN8cNv3UKNUPOjscxyAWqE,12075
-pyerualjetwork/data_operations.py,sha256=rnOYLLK3YnRdWpEsEQABU0RE950lQQI7971eBLBpqOQ,16536
-pyerualjetwork/data_operations_cuda.py,sha256=FBcZloHAyzuJnF2L1yJY1PS-3VJ8zlq5pgfxr8z0oKc,18768
-pyerualjetwork/help.py,sha256=pZs7hIhgFkovGLle97d9Qu9m5zKhMh7-OAIphIoSxBg,830
-pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
-pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
-pyerualjetwork/metrics_cuda.py,sha256=TCwn5Z_4jQjqPCURX_xtcz9cjsYVzlahgKDA-qCgpU4,5072
-pyerualjetwork/model_operations.py,sha256=k_53BJladPm9fBWdlVpS6Uf5IQzpNlJWLH746DXGq_M,13036
-pyerualjetwork/model_operations_cuda.py,sha256=-_klhwLo3z3bLIm5LXjgXGW657R203ty-Po-5wDuJBM,13244
-pyerualjetwork/plan.py,sha256=-3v0PNNlxL1gx8CTMdvD7HgtJSZt8lEPh7hceIILdDk,34743
-pyerualjetwork/plan_cuda.py,sha256=0Q7xF657xGd03W3Kjqev8Ig1IrolI_ZAlOJuuK0NugI,34562
-pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
-pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
-pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
-pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
-pyerualjetwork/visualizations_cuda.py,sha256=dA0u85ZIyKqjtoSJ6p3EbEpJs4V4vS5W5ftR6eif8yg,26713
-pyerualjetwork-4.1.1.dist-info/METADATA,sha256=K05q0B1Ltsa7LpwTN0yJkFA3HBAimFvfX3vzLMRo0rA,6357
-pyerualjetwork-4.1.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-4.1.1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-4.1.1.dist-info/RECORD,,
        {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/WHEEL
    File without changes

        {pyerualjetwork-4.1.1.dist-info → pyerualjetwork-4.1.2.dist-info}/top_level.txt
    File without changes