Post
Topic
Board Development & Technical Discussion
Re: Is there a way to use Python GPU for ECC speed up?
by
Mikorist
on 11/12/2022, 06:15:33 UTC
I have no idea how well secp256k1 (as ice) & fastecdsa are optimized for GPU JIT (maybe not at all), but we can test it this way... Without experimentation there is no progress.

You have to install the CUDA Toolkit and numba for this on Linux...
Code:
conda install numba && conda install cudatoolkit
or
Code:
pip3 install numba numpy fastecdsa
(etc...)

Code:
from numba import jit
import numpy as np
from timeit import default_timer as timer
from fastecdsa import keys, curve
import secp256k1 as ice
# Run on CPU
def cpu(a):
    """Generate one random private key and print its uncompressed WIF/address.

    The array argument ``a`` is accepted only for timing symmetry with
    ``gpu`` — it is never used.
    """
    # Bug fix: fastecdsa's curve.P256 is NIST P-256, not Bitcoin's curve.
    # The ice helpers below assume a secp256k1 private key, so generate
    # the key on the matching curve.
    dec = keys.gen_private_key(curve.secp256k1)
    HEX = "%064x" % dec                                # zero-padded 64-hex-digit key
    wifc = ice.btc_pvk_to_wif(HEX)                     # compressed WIF (unused in output)
    wifu = ice.btc_pvk_to_wif(HEX, False)              # uncompressed WIF
    uaddr = ice.privatekey_to_address(0, False, dec)   # legacy address, uncompressed
    caddr = ice.privatekey_to_address(0, True, dec)    # legacy address, compressed (unused)
    print(wifu, uaddr)
# Run on GPU
# NOTE(review): @jit is Numba's *CPU* JIT — it does not move work to the GPU
# (that would require @cuda.jit with a kernel written in the CUDA subset).
# Bug fix: Numba's default nopython compilation cannot type the fastecdsa/ice
# calls and raises TypingError on recent Numba versions; forceobj=True keeps
# the function callable by falling back to object mode (plain-Python speed),
# which is how this snippet behaved under old Numba's implicit fallback.
@jit(forceobj=True)
def gpu(x):
    """Same key-generation work as cpu(); ``x`` is unused (timing symmetry)."""
    # secp256k1, not curve.P256 — the ice helpers expect a secp256k1 key.
    dec = keys.gen_private_key(curve.secp256k1)
    HEX = "%064x" % dec
    wifc = ice.btc_pvk_to_wif(HEX)
    wifu = ice.btc_pvk_to_wif(HEX, False)
    uaddr = ice.privatekey_to_address(0, False, dec)
    caddr = ice.privatekey_to_address(0, True, dec)
    print(wifu, uaddr)
if __name__ == "__main__":
    # NOTE: cpu()/gpu() ignore their argument, so this 30M-element float64
    # array (~240 MB) only inflates setup time; kept for compatibility.
    n = 30000000
    a = np.ones(n, dtype=np.float64)

    start = timer()
    cpu(a)
    print("without GPU:", timer() - start)

    # Caveat: the first call to a jitted function includes compilation time;
    # a warm-up call before the timer would give a fairer comparison.
    start = timer()
    gpu(a)
    # Bug fix: the original called numba.cuda.profile_stop() here, which
    # raised NameError (only `jit` was imported, never the `numba` module)
    # and had no matching profile_start() anyway — removed.
    print("with GPU:", timer() - start)