GPU
In [1]:
import mxnet as mx
from mxnet import nd
from mxnet.gluon import nn
In [2]:
mx.cpu(), mx.gpu(), mx.gpu(1)  # the CPU context and the first two GPU contexts
Out[2]:
(cpu(0), gpu(0), gpu(1))
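mx.cpu() represents all physical CPUs and host memory, while mx.gpu(i) represents only the i-th GPU and its memory. Constructing a context object does not check that the device exists; the check only happens when memory is allocated on it. A minimal guard, as a sketch (the helper name try_gpu is our own; mx.context.num_gpus() is available in MXNet 1.3 and later):

def try_gpu(i=0):
    # Fall back to the CPU when the requested GPU is not present.
    return mx.gpu(i) if mx.context.num_gpus() > i else mx.cpu()

try_gpu()  # gpu(0) on a GPU machine, cpu(0) otherwise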
In [3]:
x = nd.array([1, 2, 3])  # created on the default context, cpu(0)
x
Out[3]:
[1. 2. 3.]
<NDArray 3 @cpu(0)>
In [4]:
x.context  # the device an NDArray lives on
Out[4]:
cpu(0)
In [5]:
!nvidia-smi  # list the GPUs visible on this machine
In [6]:
a = nd.array([1, 2, 3], ctx=mx.gpu())  # allocate directly on gpu(0)
In [7]:
a
Out[7]:
[1. 2. 3.]
<NDArray 3 @gpu(0)>
In [8]:
b = nd.random.uniform(shape=(2, 3), ctx=mx.gpu(1))  # allocate on the second GPU
b
Out[8]:
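Allocating on mx.gpu(1) assumes the machine has at least two GPUs; on a single-GPU machine this line fails at allocation time. The values of b are drawn uniformly at random, so the output differs from run to run.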
In [9]:
a.context
Out[9]:
gpu(0)
In [10]:
b.context
Out[10]:
gpu(1)
In [11]:
y = x.copyto(mx.gpu())  # copy x from cpu(0) to gpu(0)
y
Out[11]:
[1. 2. 3.]
<NDArray 3 @gpu(0)>
In [12]:
z = x.as_in_context(mx.gpu())  # also copies here, since the contexts differ
z
Out[12]:
[1. 2. 3.]
<NDArray 3 @gpu(0)>
In [13]:
y.as_in_context(mx.gpu()) is y  # same context, so y itself is returned
Out[13]:
True
In [14]:
y.copyto(mx.gpu()) is y  # copyto always allocates a new copy
Out[14]:
False
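In other words, as_in_context returns the source array itself when it already lives on the target context, so no data is copied, while copyto always allocates new memory for the result, even when source and target share a device. Prefer as_in_context when an extra copy is unnecessary.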
In [15]:
z
Out[15]:
[1. 2. 3.]
<NDArray 3 @gpu(0)>
In [16]:
y
Out[16]:
[1. 2. 3.]
<NDArray 3 @gpu(0)>
In [17]:
(z + 2).exp()  # computed on gpu(0), where z lives
Out[17]:
[ 20.085537  54.59815  148.41316]
<NDArray 3 @gpu(0)>
In [18]:
(z + 2).exp() * y  # both operands are on gpu(0), so the result is too
Out[18]:
[ 20.085537 109.1963   445.2395 ]
<NDArray 3 @gpu(0)>
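MXNet requires all inputs of an operator to live on the same context and never copies across devices implicitly; mixing contexts raises an error instead. A small sketch (the exact error message varies across MXNet versions):

c = nd.array([1, 2, 3])  # on cpu(0)
try:
    c + z  # z is on gpu(0), so the contexts differ
except mx.MXNetError:
    c = c.as_in_context(mx.gpu())  # move c explicitly first
result = c + z  # now both operands are on gpu(0)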
GPU Computing with Gluon
In [19]:
net = nn.Sequential()  # a minimal single-layer network
In [20]:
net.add(nn.Dense(1))  # one dense layer with a single output
In [21]:
net.initialize(ctx=mx.gpu())  # allocate the model parameters on gpu(0)
In [22]:
net(y)  # input and parameters are both on gpu(0)
Out[22]:
In [23]:
net[0].weight.data()  # the weight data also lives on gpu(0)
Out[23]:
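The weight values above come from the random default initializer, but the array itself lives on gpu(0), matching the ctx passed to initialize. To move an already-initialized network to another device, the parameters' context can be reset; a sketch using Gluon's ParameterDict.reset_ctx:

net.collect_params().reset_ctx(mx.cpu())  # move every parameter to the CPU
net(x)  # x lives on cpu(0), so inference now runs there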