  • PyTorch Basics

    Converting between NumPy arrays and torch tensors

    import numpy as np
    import torch
    
    # Create a numpy array.
    x = np.array([[1, 2], [3, 4]])
    
    # Convert the numpy array to a torch tensor.
    y = torch.from_numpy(x)
    
    # Convert the torch tensor to a numpy array.
    z = y.numpy()
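
    torch.from_numpy shares memory with the source array, while torch.tensor() copies it; the following minimal sketch (my own example, not from the original post) shows the difference:
    
    import numpy as np
    import torch
    
    x = np.array([[1, 2], [3, 4]])
    y = torch.from_numpy(x)   # y shares memory with x
    x[0, 0] = 100
    print(y[0, 0])            # tensor(100): the change is visible through the tensor
    
    z = torch.tensor(x)       # torch.tensor() makes a copy instead
    x[0, 0] = -1
    print(z[0, 0])            # tensor(100): the copy does not follow later changes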

    Automatic differentiation (autograd) in PyTorch

    # Create tensors.
    x = torch.tensor(1., requires_grad=True)
    w = torch.tensor(2., requires_grad=True)
    b = torch.tensor(3., requires_grad=True)
    
    # Build a computational graph.
    y = w * x + b    # y = 2 * x + 3
    
    # Compute gradients.
    y.backward() # Backpropagate automatically; a tensor only receives a gradient if it was created with requires_grad=True.
    
    # Print out the gradients.
    print(x.grad)    # x.grad = 2 
    print(w.grad)    # w.grad = 1 
    print(b.grad)    # b.grad = 1 
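
    Gradients accumulate in .grad across repeated backward() calls, which is why training code zeroes them every step; a minimal sketch (my own example) of that behavior:
    
    import torch
    
    x = torch.tensor(1., requires_grad=True)
    w = torch.tensor(2., requires_grad=True)
    
    y = w * x
    y.backward()
    print(w.grad)    # tensor(1.)  (dy/dw = x)
    
    y = w * x        # rebuild the graph and backpropagate again
    y.backward()
    print(w.grad)    # tensor(2.): the new gradient was added to the old one
    
    w.grad.zero_()   # reset the accumulated gradients before the next step
    x.grad.zero_()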
    
    
    import torch.nn as nn
    
    # Create tensors of shape (10, 3) and (10, 2).
    x = torch.randn(10, 3)
    y = torch.randn(10, 2)
    
    # Build a fully connected layer.
    linear = nn.Linear(3, 2)
    print ('w: ', linear.weight)
    print ('b: ', linear.bias)
    
    # Build loss function and optimizer.
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
    
    # Forward pass.
    pred = linear(x)
    
    # Compute loss.
    loss = criterion(pred, y)
    print('loss: ', loss.item())
    
    # Backward pass.
    optimizer.zero_grad()    # Clear the gradients cached in the optimizer first, otherwise they keep accumulating.
    loss.backward()          # Compute the gradients of the loss w.r.t. the parameters.
    
    # Print out the gradients.
    print ('dL/dw: ', linear.weight.grad)
    print ('dL/db: ', linear.bias.grad)
    
    # 1-step gradient descent: update the parameters.
    optimizer.step()
    
    # You can also perform gradient descent at the low level.
    # linear.weight.data.sub_(0.01 * linear.weight.grad.data)
    # linear.bias.data.sub_(0.01 * linear.bias.grad.data)
    
    # Print out the loss after 1-step gradient descent.
    pred = linear(x)
    loss = criterion(pred, y)
    print('loss after 1 step optimization: ', loss.item())
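
    Repeating that forward/loss/backward/step cycle in a loop gives the standard training pattern; here is a self-contained minimal sketch (my own example) built from the same pieces:
    
    import torch
    import torch.nn as nn
    
    x = torch.randn(10, 3)
    y = torch.randn(10, 2)
    
    linear = nn.Linear(3, 2)
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
    
    for step in range(100):
        pred = linear(x)              # forward pass
        loss = criterion(pred, y)     # compute loss
    
        optimizer.zero_grad()         # clear accumulated gradients
        loss.backward()               # backpropagate
        optimizer.step()              # one gradient-descent update
    
        if (step + 1) % 20 == 0:
            print('step {}, loss {:.4f}'.format(step + 1, loss.item()))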

    Loading one of the built-in, preformatted datasets

    import torchvision
    import torchvision.transforms as transforms
    
    # Download and construct CIFAR-10 dataset.
    train_dataset = torchvision.datasets.CIFAR10(root='../../data/',   # keep the data folder next to the Projects folder that holds our own code
                                                 train=True, 
                                                 transform=transforms.ToTensor(),
                                                 download=True)
    
    # Fetch one data pair (read data from disk).
    image, label = train_dataset[0]
    print (image.size())
    print (label)
    
    # Data loader (this provides queues and threads in a very simple way).
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=64, 
                                               shuffle=True)
    
    # When iteration starts, queue and thread start to load data from files.
    data_iter = iter(train_loader)
    
    # Mini-batch images and labels.
    images, labels = next(data_iter)   # use the built-in next(); the iterator's .next() method was removed in newer PyTorch versions
    
    # Actual usage of the data loader is as below.
    for images, labels in train_loader:
        # Training code should be written here.
        pass
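
    For concreteness, here is a minimal sketch of what that loop body usually contains, using a deliberately simple linear classifier as a stand-in model (the model, loss, and learning rate are my own illustrative choices, not part of the original tutorial):
    
    import torch
    import torch.nn as nn
    import torchvision
    import torchvision.transforms as transforms
    
    train_dataset = torchvision.datasets.CIFAR10(root='../../data/',
                                                 train=True,
                                                 transform=transforms.ToTensor(),
                                                 download=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=64,
                                               shuffle=True)
    
    model = nn.Linear(3 * 32 * 32, 10)                 # toy classifier over flattened images
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    
    for images, labels in train_loader:
        images = images.view(images.size(0), -1)       # flatten (N, 3, 32, 32) -> (N, 3072)
        outputs = model(images)
        loss = criterion(outputs, labels)
    
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()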

    Building your own custom dataset

    # You should build your custom dataset as below.
    class CustomDataset(torch.utils.data.Dataset):
        def __init__(self):
            # TODO
            # 1. Initialize file paths or a list of file names. 
            pass
        def __getitem__(self, index):
            # TODO
            # 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
            # 2. Preprocess the data (e.g. torchvision.Transform).
            # 3. Return a data pair (e.g. image and label).
            pass
        def __len__(self):
            # You should change 0 to the total size of your dataset.
            return 0 
    
    # You can then use the prebuilt data loader. 
    custom_dataset = CustomDataset()   # Instantiate your dataset.
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,   # The loader then feeds shuffled mini-batches from it.
                                               batch_size=64, 
                                               shuffle=True)
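
    To make the template concrete, here is a minimal sketch of a custom dataset that simply wraps in-memory tensors (the class name, sizes, and data are made up for illustration):
    
    import torch
    
    class TensorPairDataset(torch.utils.data.Dataset):
        def __init__(self, num_samples=1000):
            # 1. A real dataset would collect file paths here; we just create tensors.
            self.features = torch.randn(num_samples, 3)
            self.labels = torch.randint(0, 2, (num_samples,))
        def __getitem__(self, index):
            # 2./3. Read (and optionally preprocess) one sample, return a data pair.
            return self.features[index], self.labels[index]
        def __len__(self):
            # Total size of the dataset.
            return self.features.size(0)
    
    custom_dataset = TensorPairDataset()
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                               batch_size=64,
                                               shuffle=True)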

    Pretrained models

    # Download and load the pretrained ResNet-18.
    resnet = torchvision.models.resnet18(pretrained=True)
    
    # If you want to finetune only the top layer of the model, set as below.
    for param in resnet.parameters():
        param.requires_grad = False
    
    # Replace the top layer for finetuning.
    resnet.fc = nn.Linear(resnet.fc.in_features, 100)  # 100 is an example. fc is the final fully connected layer; we replace it with a new linear layer that keeps the same input size but outputs the new number of classes.
    
    # Forward pass.
    images = torch.randn(64, 3, 224, 224)  # A random batch of 64 images with 3 channels and height/width 224.
    outputs = resnet(images)
    print (outputs.size())     # (64, 100)
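
    Because the original parameters were frozen above, only the new fc layer still has requires_grad=True, so the optimizer only needs those parameters. A minimal sketch of one finetuning step (the random images/labels, learning rate, and loss choice are illustrative assumptions):
    
    import torch
    import torch.nn as nn
    import torchvision
    
    resnet = torchvision.models.resnet18(pretrained=True)
    for param in resnet.parameters():
        param.requires_grad = False
    resnet.fc = nn.Linear(resnet.fc.in_features, 100)
    
    # Only the parameters of the new top layer get updated.
    optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    
    images = torch.randn(64, 3, 224, 224)     # random stand-ins for a real batch
    labels = torch.randint(0, 100, (64,))
    
    outputs = resnet(images)
    loss = criterion(outputs, labels)
    
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()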

    Saving and loading a model and its parameters

    # Save and load the entire model.
    torch.save(resnet, 'model.ckpt')
    model = torch.load('model.ckpt')
    
    # Save and load only the model parameters (recommended).
    torch.save(resnet.state_dict(), 'params.ckpt')
    resnet.load_state_dict(torch.load('params.ckpt'))
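
    To resume training later (rather than only run inference), a common pattern is to save the model parameters together with the optimizer state and the current epoch in one dictionary; a minimal sketch, with the file name, epoch value, and optimizer as illustrative stand-ins:
    
    import torch
    import torchvision
    
    resnet = torchvision.models.resnet18(pretrained=True)
    optimizer = torch.optim.SGD(resnet.parameters(), lr=0.01)
    epoch = 10                                # hypothetical current epoch
    
    # Save a full training checkpoint.
    torch.save({'epoch': epoch,
                'model_state': resnet.state_dict(),
                'optimizer_state': optimizer.state_dict()}, 'checkpoint.ckpt')
    
    # Restore it later to resume training.
    checkpoint = torch.load('checkpoint.ckpt')
    resnet.load_state_dict(checkpoint['model_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    start_epoch = checkpoint['epoch'] + 1
    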
  • Original article: https://www.cnblogs.com/raiuny/p/13280904.html