Are you sure this will run on the GPU? I quickly tested it, and I see only one CPU thread being used at 100%; there is absolutely no GPU utilization (GPU = 0%).
Sure.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import numpy as np
import numba
from numba import cuda, jit
from timeit import default_timer as timer
# Run on CPU
def cpu(a):
    """Increment every element of *a* in place using a plain Python loop.

    Serves as the interpreted-CPU baseline for the timing comparison.

    Parameters
    ----------
    a : mutable sequence of numbers (e.g. a NumPy array or list)
        Modified in place; each element is increased by 1.
    """
    # Iterate over the actual input length instead of the original
    # hard-coded 10_000_000, so the function works for any array size
    # (behavior is unchanged for the 10M-element array used in __main__).
    for i in range(len(a)):
        a[i] += 1
# NOTE(review): the original code had `numba.jit()` as a bare statement, which
# builds a decorator object and throws it away — `gpu` was never compiled at
# all, so the benchmark timed an ordinary Python function call. Applying the
# decorator with `@` fixes that. Also be aware that `@jit` compiles for the
# *CPU* (which explains the 0% GPU utilization observed); running on the GPU
# would require a CUDA kernel via `@cuda.jit` or
# `@vectorize([...], target='cuda')`.
@jit(nopython=True)
def gpu(x):
    """Return ``x + 1`` (elementwise when *x* is a NumPy array)."""
    return x + 1
if __name__ == "__main__":
    n = 10000000
    a = np.ones(n, dtype=np.float64)

    # Baseline: pure-Python loop over 10M elements.
    start = timer()
    cpu(a)
    print("without GPU:", timer() - start)

    # Warm-up call so the timed call below does not include Numba's one-time
    # JIT compilation cost (the original measurement folded compilation into
    # the "with GPU" number).
    gpu(np.ones(1, dtype=np.float64))

    # NOTE(review): despite the label, this runs compiled code on the CPU —
    # @jit does not target the GPU. The original also called
    # numba.cuda.profile_stop() here without a matching profile_start() and
    # without any CUDA activity; that stray call has been removed.
    start = timer()
    gpu(a)
    print("with GPU:", timer() - start)
There is no way my CPU can perform 10 million operations in 0.08 seconds.
without GPU: 3.9892993430003116 seconds
with GPU: 0.08131437800147978 seconds