  • Autoencoder: a hand-written Learner

    import os

    import torch
    from torch import nn, optim
    from torchvision import transforms, datasets
    from torch.utils.data import DataLoader
    from torchvision.utils import save_image
    import torchvision
    import matplotlib.pyplot as plt


    # Load the MNIST training set.
    def get_data(batch_size):
        # Map pixels from [0, 1] to [-1, 1] so the input distribution is roughly symmetric,
        # which makes training easier to converge and matches the decoder's Tanh output.
        data_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
        train_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=True)
        train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, drop_last=True)
        return train_loader
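
    A quick sanity check is to pull one batch and confirm that, after Normalize((0.5,), (0.5,)), the pixel values land in [-1, 1]. A minimal sketch (the batch size of 4 here is arbitrary):

    loader = get_data(4)
    imgs, _ = next(iter(loader))
    print(imgs.shape)                             # torch.Size([4, 1, 28, 28])
    print(imgs.min().item(), imgs.max().item())   # approximately -1.0 and 1.0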


    # Grab a tiny loader (batch size 2) and display the two images from the first batch.
    train_loader = get_data(2)
    print(len(train_loader))
    for i, data in enumerate(train_loader):
        # Only show the first batch.
        if i > 0:
            break
        inputs, labels = data
        for j in range(len(inputs)):
            print(inputs[j].shape)
            decode_img = inputs[j].squeeze()  # drop the channel dimension of size 1
            print(decode_img.shape)
            # Undo the [-1, 1] normalization before displaying as an 8-bit grayscale image.
            decode_img = (decode_img.cpu().numpy() * 0.5 + 0.5) * 255
            plt.imshow(decode_img.astype('uint8'), cmap='gray')
            plt.show()
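
    Instead of calling plt.imshow once per image, one batch can also be laid out as a single grid with torchvision.utils.make_grid. A small sketch (the batch size of 16 and the 4-column layout are arbitrary; the images are un-normalized back to [0, 1] before display):

    imgs, _ = next(iter(get_data(16)))
    grid = torchvision.utils.make_grid(imgs * 0.5 + 0.5, nrow=4)   # map [-1, 1] back to [0, 1]
    plt.imshow(grid.permute(1, 2, 0).numpy())                      # CxHxW -> HxWxC for imshow
    plt.show()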


    def to_img(x):
        # Map the decoder's Tanh output from [-1, 1] back to [0, 1] and reshape to 1x28x28 images.
        x = (x + 1.) * 0.5
        x = x.clamp(0, 1)
        x = x.view(x.size(0), 1, 28, 28)
        return x
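
    to_img undoes the scaling used in training: values in [-1, 1] come back as [0, 1] and the flat 784-vector becomes a 1x28x28 image. A small check on a random, decoder-like batch:

    fake = torch.tanh(torch.randn(2, 28*28))                   # values in [-1, 1], like the decoder output
    imgs = to_img(fake)
    print(imgs.shape)                                          # torch.Size([2, 1, 28, 28])
    print(imgs.min().item() >= 0, imgs.max().item() <= 1)      # True True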


    class autoencoder(nn.Module):
        def __init__(self):
            super(autoencoder, self).__init__()
            # Encoder: compress the 784-pixel image down to a 3-dimensional code.
            self.encoder = nn.Sequential(nn.Linear(28*28, 128),
                                         nn.ReLU(True),
                                         nn.Linear(128, 64),
                                         nn.ReLU(True),
                                         nn.Linear(64, 12),
                                         nn.ReLU(True),
                                         nn.Linear(12, 3))
            # Decoder: expand the 3-dimensional code back to 784 pixels in [-1, 1] via Tanh.
            self.decoder = nn.Sequential(nn.Linear(3, 12),
                                         nn.ReLU(True),
                                         nn.Linear(12, 64),
                                         nn.ReLU(True),
                                         nn.Linear(64, 128),
                                         nn.ReLU(True),
                                         nn.Linear(128, 28*28),
                                         nn.Tanh())
        def forward(self, x):
            encode = self.encoder(x)
            decode = self.decoder(encode)
            return encode, decode
    print(autoencoder())
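
    A quick forward pass on a dummy batch confirms the bottleneck: 784 inputs are compressed to a 3-dimensional code and then reconstructed back to 784 values. A minimal sketch:

    model = autoencoder()
    x = torch.randn(8, 28*28)
    code, recon = model(x)
    print(code.shape)    # torch.Size([8, 3])
    print(recon.shape)   # torch.Size([8, 784])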


    def main():
        # Hyperparameters.
        batch_size = 128
        lr = 1e-2
        weight_decay = 1e-5
        epoches = 1
        model = autoencoder()
        train_data = get_data(batch_size)
        criterion = nn.MSELoss()
        optimizier = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model.to(device)
        for epoch in range(epoches):
            # Decay the learning rate by 10x at 25% and 50% of the total epochs.
            if epoch in [epoches * 0.25, epoches * 0.5]:
                for param_group in optimizier.param_groups:
                    param_group['lr'] *= 0.1
            for img, _ in train_data:
                # print("img.size(0) =", img.size(0))
                img = img.view(img.size(0), -1)   # flatten to batch_size x 784
                img = img.to(device)              # move to the GPU if one is available
                # print("model(img) =", model(img))
                # The model returns (encode, decode); only the reconstruction is needed for the loss.
                _, output = model(img)
                loss = criterion(output, img)
                # backward
                optimizier.zero_grad()
                loss.backward()
                optimizier.step()
            print("epoch=", epoch, loss.item())
            for param_group in optimizier.param_groups:
                print(param_group['lr'])
            if (epoch + 1) % 5 == 0:
                print("epoch: {}, loss is {}".format(epoch + 1, loss.item()))
                pic = to_img(output.cpu().data)
                if not os.path.exists('./simple_autoencoder'):
                    os.mkdir('./simple_autoencoder')
                save_image(pic, './simple_autoencoder/image_{}.png'.format(epoch + 1))
        # Feed an arbitrary 3-dimensional code through the trained decoder and display the result.
        code = torch.FloatTensor([[1.19, -3.36, 2.06]]).to(device)
        decode = model.decoder(code)
        decode_img = to_img(decode).squeeze()
        decode_img = decode_img.detach().cpu().numpy() * 255
        plt.imshow(decode_img.astype('uint8'), cmap='gray')
        plt.show()
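
    The manual learning-rate decay inside main() (multiplying lr by 0.1 at 25% and 50% of the epochs) can also be written with PyTorch's built-in MultiStepLR scheduler. This is only a sketch of the same idea, not a drop-in replacement for the loop above; the 100-epoch run and the milestone epochs are assumptions for illustration:

    model = autoencoder()
    optimizer = optim.Adam(model.parameters(), lr=1e-2, weight_decay=1e-5)
    # Multiply the learning rate by gamma at each milestone epoch.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[25, 50], gamma=0.1)
    for epoch in range(100):
        # ... one training pass over the data would go here ...
        scheduler.step()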


    main()

    MSELoss demo: elementwise (unreduced) loss

    import torch
    import numpy as np
    # reduction='none' keeps the per-element squared errors instead of averaging them into one scalar.
    loss_fn = torch.nn.MSELoss(reduction='none')
    a = np.array([[1, 2], [3, 4]])
    b = np.array([[2, 3], [4, 5]])
    input = torch.from_numpy(a).float()
    target = torch.from_numpy(b).float()
    loss = loss_fn(input, target)
    print(loss)   # tensor([[1., 1.], [1., 1.]])
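
    With the default reduction the same comparison collapses to a single scalar, which equals the mean of the elementwise matrix above (every squared difference here is 1, so both come out as 1.0):

    loss_mean = torch.nn.MSELoss()(input, target)
    print(loss_mean)       # tensor(1.)
    print(loss.mean())     # tensor(1.)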

     

  • Original post: https://www.cnblogs.com/BlairGrowing/p/15708410.html