  • PyTorch: detecting whether an image contains a person

    This reuses the official PyTorch transfer-learning tutorial code as-is; the only change is swapping the dataset for the train and test folders of the INRIAPerson dataset.

    The code and results are pasted below. Since the code comes straight from the official tutorial, it is not explained in detail here.
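    The tutorial's datasets.ImageFolder expects a person/train/&lt;class&gt; and person/val/&lt;class&gt; directory layout. Here is a minimal preparation sketch, assuming the extracted INRIAPerson archive exposes Train/pos, Train/neg, Test/pos and Test/neg image folders (these paths are assumptions; adjust them to the actual archive layout):

    import os
    import shutil

    SRC = 'INRIAPerson'  # assumed path to the extracted archive
    DST = 'person'       # the data_dir used by the training script below

    # Copy INRIAPerson's Train/Test splits into the train/val layout
    # that datasets.ImageFolder expects (one subfolder per class).
    for src_split, dst_split in [('Train', 'train'), ('Test', 'val')]:
        for cls in ['pos', 'neg']:
            src = os.path.join(SRC, src_split, cls)
            dst = os.path.join(DST, dst_split, cls)
            os.makedirs(dst, exist_ok=True)
            for name in os.listdir(src):
                path = os.path.join(src, name)
                if os.path.isfile(path):  # skip stray subdirectories
                    shutil.copy(path, dst)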

    # License: BSD
    # Author: Sasank Chilamkurthy
    
    from __future__ import print_function, division
    
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    import numpy as np
    import torchvision
    from torchvision import datasets, models, transforms
    import matplotlib.pyplot as plt
    import time
    import os
    import copy
    
    plt.ion()   # interactive mode
    # Data augmentation and normalization for training;
    # just normalization for validation. The mean/std values below are the
    # standard ImageNet statistics expected by torchvision's pretrained models.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    
    data_dir = 'person'  # root folder containing train/ and val/ subfolders, one per class
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                                 shuffle=True, num_workers=4)
                  for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    class_names = image_datasets['train'].classes
    
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def imshow(inp, title=None):
        """Imshow for Tensor."""
        inp = inp.numpy().transpose((1, 2, 0))
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        inp = std * inp + mean
        inp = np.clip(inp, 0, 1)
        plt.imshow(inp)
        if title is not None:
            plt.title(title)
        plt.pause(0.001)  # pause a bit so that plots are updated
    
    
    # Get a batch of training data
    inputs, classes = next(iter(dataloaders['train']))
    
    # Make a grid from batch
    out = torchvision.utils.make_grid(inputs)
    
    imshow(out, title=[class_names[x] for x in classes])

    def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
        since = time.time()
    
        best_model_wts = copy.deepcopy(model.state_dict())
        best_acc = 0.0
    
        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            print('-' * 10)
    
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    model.train()  # Set model to training mode
                else:
                    model.eval()   # Set model to evaluate mode
    
                running_loss = 0.0
                running_corrects = 0
    
                # Iterate over data.
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
    
                    # zero the parameter gradients
                    optimizer.zero_grad()
    
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        loss = criterion(outputs, labels)
    
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
    
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels.data)
    
                # Step the LR scheduler once per epoch, after the optimizer
                # updates (the required ordering since PyTorch 1.1)
                if phase == 'train':
                    scheduler.step()

                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
    
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))
    
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
    
            print()
    
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
    
        # load best model weights
        model.load_state_dict(best_model_wts)
        return model

    def visualize_model(model, num_images=6):
        was_training = model.training
        model.eval()
        images_so_far = 0
        fig = plt.figure()
    
        with torch.no_grad():
            for i, (inputs, labels) in enumerate(dataloaders['val']):
                inputs = inputs.to(device)
                labels = labels.to(device)
    
                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
    
                for j in range(inputs.size()[0]):
                    images_so_far += 1
                    ax = plt.subplot(num_images//2, 2, images_so_far)
                    ax.axis('off')
                    ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                    imshow(inputs.cpu().data[j])
    
                    if images_so_far == num_images:
                        model.train(mode=was_training)
                        return
            model.train(mode=was_training)

    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 2)
    
    model_ft = model_ft.to(device)
    
    criterion = nn.CrossEntropyLoss()
    
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                           num_epochs=25)
    Epoch 0/24
    ----------
    train Loss: 0.4124 Acc: 0.8477
    val Loss: 0.0737 Acc: 0.9744
    
    Epoch 1/24
    ----------
    train Loss: 0.2891 Acc: 0.9023
    val Loss: 0.0836 Acc: 0.9703
    
    Epoch 2/24
    ----------
    train Loss: 0.3094 Acc: 0.9050
    val Loss: 0.0614 Acc: 0.9771
    
    Epoch 3/24
    ----------
    train Loss: 0.2308 Acc: 0.9279
    val Loss: 0.0429 Acc: 0.9865
    
    Epoch 4/24
    ----------
    train Loss: 0.1748 Acc: 0.9498
    val Loss: 0.0331 Acc: 0.9906
    
    Epoch 5/24
    ----------
    train Loss: 0.2252 Acc: 0.9301
    val Loss: 0.0702 Acc: 0.9906
    
    Epoch 6/24
    ----------
    train Loss: 0.1726 Acc: 0.9531
    val Loss: 0.0442 Acc: 0.9852
    
    Epoch 7/24
    ----------
    train Loss: 0.1595 Acc: 0.9536
    val Loss: 0.0359 Acc: 0.9906
    
    Epoch 8/24
    ----------
    train Loss: 0.1310 Acc: 0.9651
    val Loss: 0.0355 Acc: 0.9892
    
    Epoch 9/24
    ----------
    train Loss: 0.1172 Acc: 0.9689
    val Loss: 0.0325 Acc: 0.9906
    
    Epoch 10/24
    ----------
    train Loss: 0.1070 Acc: 0.9733
    val Loss: 0.0515 Acc: 0.9838
    
    Epoch 11/24
    ----------
    train Loss: 0.1304 Acc: 0.9683
    val Loss: 0.0452 Acc: 0.9892
    
    Epoch 12/24
    ----------
    train Loss: 0.1164 Acc: 0.9656
    val Loss: 0.0424 Acc: 0.9892
    
    Epoch 13/24
    ----------
    train Loss: 0.0751 Acc: 0.9809
    val Loss: 0.0396 Acc: 0.9906
    
    Epoch 14/24
    ----------
    train Loss: 0.1091 Acc: 0.9749
    val Loss: 0.0279 Acc: 0.9946
    
    Epoch 15/24
    ----------
    train Loss: 0.0751 Acc: 0.9842
    val Loss: 0.0352 Acc: 0.9906
    
    Epoch 16/24
    ----------
    train Loss: 0.1353 Acc: 0.9705
    val Loss: 0.0413 Acc: 0.9879
    
    Epoch 17/24
    ----------
    train Loss: 0.0957 Acc: 0.9787
    val Loss: 0.0332 Acc: 0.9906
    
    Epoch 18/24
    ----------
    train Loss: 0.1091 Acc: 0.9689
    val Loss: 0.0317 Acc: 0.9906
    
    Epoch 19/24
    ----------
    train Loss: 0.1101 Acc: 0.9700
    val Loss: 0.0402 Acc: 0.9879
    
    Epoch 20/24
    ----------
    train Loss: 0.1133 Acc: 0.9754
    val Loss: 0.0392 Acc: 0.9892
    
    Epoch 21/24
    ----------
    train Loss: 0.0970 Acc: 0.9776
    val Loss: 0.0424 Acc: 0.9865
    
    Epoch 22/24
    ----------
    train Loss: 0.0865 Acc: 0.9814
    val Loss: 0.0348 Acc: 0.9919
    
    Epoch 23/24
    ----------
    train Loss: 0.1319 Acc: 0.9656
    val Loss: 0.0341 Acc: 0.9892
    
    Epoch 24/24
    ----------
    train Loss: 0.0997 Acc: 0.9771
    val Loss: 0.0328 Acc: 0.9906
    
    Training complete in 9m 32s
    Best val Acc: 0.994602
    
    visualize_model(model_ft)
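    Not in the original post, but a natural follow-up: saving the fine-tuned weights so they can be reloaded for inference without retraining (the filename is an arbitrary choice):

    # Save only the learned parameters; reload later with model.load_state_dict(...)
    torch.save(model_ft.state_dict(), 'person_resnet18_ft.pth')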

    model_conv = torchvision.models.resnet18(pretrained=True)
    for param in model_conv.parameters():
        param.requires_grad = False
    
    # Parameters of newly constructed modules have requires_grad=True by default
    num_ftrs = model_conv.fc.in_features
    model_conv.fc = nn.Linear(num_ftrs, 2)
    
    model_conv = model_conv.to(device)
    
    criterion = nn.CrossEntropyLoss()
    
    # Observe that only parameters of the final layer are being optimized, as
    # opposed to before.
    optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
    
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
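
    # Sanity check (not in the original post): with the backbone frozen, only
    # the new fc layer is trainable - 512*2 weights + 2 biases = 1026 parameters.
    trainable = sum(p.numel() for p in model_conv.parameters() if p.requires_grad)
    print('trainable params:', trainable)  # -> 1026
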
    model_conv = train_model(model_conv, criterion, optimizer_conv,
                             exp_lr_scheduler, num_epochs=25)
    Epoch 0/24
    ----------
    train Loss: 0.3994 Acc: 0.8466
    val Loss: 0.2137 Acc: 0.9109
    
    Epoch 1/24
    ----------
    train Loss: 0.2783 Acc: 0.8963
    val Loss: 0.0649 Acc: 0.9744
    
    Epoch 2/24
    ----------
    train Loss: 0.2976 Acc: 0.8870
    val Loss: 0.0577 Acc: 0.9811
    
    Epoch 3/24
    ----------
    train Loss: 0.2873 Acc: 0.9039
    val Loss: 0.0477 Acc: 0.9825
    
    Epoch 4/24
    ----------
    train Loss: 0.3214 Acc: 0.8843
    val Loss: 0.0499 Acc: 0.9798
    
    Epoch 5/24
    ----------
    train Loss: 0.3244 Acc: 0.8772
    val Loss: 0.0483 Acc: 0.9798
    
    Epoch 6/24
    ----------
    train Loss: 0.2855 Acc: 0.8985
    val Loss: 0.0446 Acc: 0.9825
    
    Epoch 7/24
    ----------
    train Loss: 0.2425 Acc: 0.9121
    val Loss: 0.0460 Acc: 0.9798
    
    Epoch 8/24
    ----------
    train Loss: 0.2070 Acc: 0.9219
    val Loss: 0.0390 Acc: 0.9879
    
    Epoch 9/24
    ----------
    train Loss: 0.2189 Acc: 0.9127
    val Loss: 0.0408 Acc: 0.9825
    
    Epoch 10/24
    ----------
    train Loss: 0.2243 Acc: 0.9148
    val Loss: 0.0577 Acc: 0.9825
    
    Epoch 11/24
    ----------
    train Loss: 0.2042 Acc: 0.9236
    val Loss: 0.0519 Acc: 0.9852
    
    Epoch 12/24
    ----------
    train Loss: 0.2425 Acc: 0.9083
    val Loss: 0.0440 Acc: 0.9838
    
    Epoch 13/24
    ----------
    train Loss: 0.2127 Acc: 0.9198
    val Loss: 0.0454 Acc: 0.9865
    
    Epoch 14/24
    ----------
    train Loss: 0.2479 Acc: 0.9045
    val Loss: 0.0551 Acc: 0.9771
    
    Epoch 15/24
    ----------
    train Loss: 0.2562 Acc: 0.8990
    val Loss: 0.0491 Acc: 0.9852
    
    Epoch 16/24
    ----------
    train Loss: 0.2104 Acc: 0.9143
    val Loss: 0.0448 Acc: 0.9852
    
    Epoch 17/24
    ----------
    train Loss: 0.2606 Acc: 0.8974
    val Loss: 0.0480 Acc: 0.9798
    
    Epoch 18/24
    ----------
    train Loss: 0.2474 Acc: 0.9067
    val Loss: 0.0639 Acc: 0.9798
    
    Epoch 19/24
    ----------
    train Loss: 0.2159 Acc: 0.9176
    val Loss: 0.0495 Acc: 0.9852
    
    Epoch 20/24
    ----------
    train Loss: 0.2107 Acc: 0.9170
    val Loss: 0.0482 Acc: 0.9838
    
    Epoch 21/24
    ----------
    train Loss: 0.2128 Acc: 0.9121
    val Loss: 0.0522 Acc: 0.9838
    
    Epoch 22/24
    ----------
    train Loss: 0.2263 Acc: 0.9176
    val Loss: 0.0459 Acc: 0.9852
    
    Epoch 23/24
    ----------
    train Loss: 0.1907 Acc: 0.9329
    val Loss: 0.0460 Acc: 0.9906
    
    Epoch 24/24
    ----------
    train Loss: 0.2302 Acc: 0.9181
    val Loss: 0.0425 Acc: 0.9879
    
    Training complete in 4m 31s
    Best val Acc: 0.990553
    
    visualize_model(model_conv)
    
    plt.ioff()
    plt.show()

    Both approaches work very well: fine-tuning the whole network reaches a best validation accuracy of 0.9946 in about 9.5 minutes, while feature extraction (training only the final layer) reaches 0.9906 in about half that time.
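    To use the trained classifier on a new image, here is a minimal inference sketch (not from the original post; it reuses model_ft, data_transforms, class_names and device from the code above, and the image path is a placeholder):

    from PIL import Image

    def classify_image(img_path, model):
        """Return the predicted class name for a single image."""
        model.eval()
        img = Image.open(img_path).convert('RGB')
        # Apply the same preprocessing as validation, add a batch dimension
        x = data_transforms['val'](img).unsqueeze(0).to(device)  # 1 x 3 x 224 x 224
        with torch.no_grad():
            pred = model(x).argmax(dim=1).item()
        return class_names[pred]

    print(classify_image('some_photo.jpg', model_ft))  # placeholder path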

  • Original article: https://www.cnblogs.com/wzyuan/p/9747302.html