多项式拟合
In [1]:
%matplotlib inline
import gluonbook as gb
from mxnet import nd,autograd, gluon
from mxnet.gluon import data as gdata,loss as gloss,nn
生成数据集 $$y = 1.2x - 3.4x^2 + 5.6x^3 + 5 + \epsilon$$ 其中噪声项 $\epsilon$ 服从均值为 0、标准差为 0.1 的正态分布
In [2]:
# Dataset sizes and the ground-truth generating model:
#   y = 1.2*x - 3.4*x^2 + 5.6*x^3 + 5 + noise
n_train, n_test = 100, 100
true_w = [1.2, -3.4, 5.6]
true_b = 5
In [10]:
# Draw one standard-normal scalar feature per sample, as a column vector.
num_samples = n_train + n_test
features = nd.random.normal(shape=(num_samples, 1))
features.shape
Out[10]:
In [11]:
# Expand the scalar feature into [x, x^2, x^3] columns for the cubic model.
features_sq = nd.power(features, 2)
features_cu = nd.power(features, 3)
poly_features = nd.concat(features, features_sq, features_cu)
poly_features.shape
Out[11]:
In [12]:
# Labels from the true cubic polynomial, plus Gaussian noise (std 0.1).
labels = (true_w[0] * poly_features[:, 0]
          + true_w[1] * poly_features[:, 1]
          + true_w[2] * poly_features[:, 2]
          + true_b)
noise = nd.random.normal(scale=0.1, shape=labels.shape)
labels = labels + noise
In [13]:
# Peek at the first two samples: raw feature, polynomial features, label.
features[:2], poly_features[:2], labels[:2]
Out[13]:
定义,训练,测试模型
平方损失函数
In [14]:
# Number of passes over the training data for every experiment below.
num_epochs = 100
# Squared (L2) loss, as announced by the markdown heading above.
loss = gloss.L2Loss()
In [17]:
def fit_and_plot(train_features, test_features, train_labels, test_labels):
    """Fit a linear regression model and plot train/test loss curves.

    Trains a single Dense(1) layer with SGD on (train_features, train_labels),
    evaluating the squared loss on both splits after every epoch, then prints
    the final losses and learned parameters and plots both loss curves on a
    log scale. Relies on module-level `num_epochs`, `loss`, and the imported
    mxnet/gluonbook helpers.

    Parameters are NDArrays: feature matrices of shape (n, d) and label
    vectors of shape (n,).
    """
    net = nn.Sequential()
    net.add(nn.Dense(1))  # one linear output unit; input dim inferred lazily
    net.initialize()
    # Cap the batch size so the 2-sample overfitting experiment still works.
    batch_size = min(10, train_labels.shape[0])
    train_iter = gdata.DataLoader(
        gdata.ArrayDataset(train_features, train_labels),
        batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': 0.01})
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
        # Record the full-dataset losses once per epoch so the curves
        # below have num_epochs points each.
        train_ls.append(loss(net(train_features),
                             train_labels).mean().asscalar())
        test_ls.append(loss(net(test_features),
                            test_labels).mean().asscalar())
    print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])
    # The function name promises a plot: the original gluonbook version
    # visualizes both loss curves on a semilog scale — restore it.
    gb.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('weight:', net[0].weight.data().asnumpy())
    print('bias:', net[0].bias.data().asnumpy())
In [18]:
# Normal fit: cubic features match the data-generating model, so train and
# test losses should both end up low.
fit_and_plot(poly_features[:n_train, :], poly_features[n_train:, :],
labels[:n_train], labels[n_train:])
In [19]:
# Sanity check: (n_train + n_test) rows x 3 polynomial feature columns.
poly_features.shape
Out[19]:
欠拟合
In [21]:
# The raw features have a single (linear) column per sample.
features.shape
Out[21]:
In [20]:
# Underfitting: only the linear feature is given, so the linear model cannot
# capture the cubic relationship — both losses stay high.
fit_and_plot(features[:n_train, :], features[n_train:, :], labels[:n_train],
labels[n_train:])
过拟合
In [22]:
# Overfitting: only two training samples for the cubic model — training loss
# drops while test loss stays large.
fit_and_plot(poly_features[0:2, :], poly_features[n_train:, :], labels[0:2],
labels[n_train:])