  • 【colab pytorch】Visualizing with tensorboardcolab

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    from torch.utils.data import Dataset, DataLoader
    from torchvision import transforms, utils, datasets
    
    !pip install tensorboardcolab
    from tensorboardcolab import TensorBoardColab
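
    # TensorBoardColab launches TensorBoard inside the Colab VM and exposes it
    # through a public ngrok URL, printed when the instance is created.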
    class Network(nn.Module):
      def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1x28x28 -> 20x24x24
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 20x12x12 -> 50x8x8
        self.fc1 = nn.Linear(4*4*50, 500)     # 50x4x4 flattened after pooling
        self.fc2 = nn.Linear(500, 10)         # 10 MNIST classes

      def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)   # 20x24x24 -> 20x12x12
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)   # 50x8x8 -> 50x4x4
        x = x.view(-1, 4*4*50)      # flatten for the fully connected layers
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
        
    class Config:  
      def __init__(self, **kwargs):
        for key, value in kwargs.items():
          setattr(self, key, value)
    
    
    model_config = Config(
        cuda = True if torch.cuda.is_available() else False,
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
        seed = 2,
        lr = 0.01,
        epochs = 4,
        save_model = False,
        batch_size = 32,
        log_interval = 100
    )
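
    # Config simply copies its keyword arguments onto the instance, so the
    # settings above are available as plain attributes, e.g. model_config.lr.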
        
    class Trainer:
      
      def __init__(self, config):
        
        self.cuda = config.cuda
        self.device = config.device
        self.seed = config.seed
        self.lr = config.lr
        self.epochs = config.epochs
        self.save_model = config.save_model
        self.batch_size = config.batch_size
        self.log_interval = config.log_interval
        
        self.globaliter = 0           # global step counter for TensorBoard
        self.tb = TensorBoardColab()
        
        torch.manual_seed(self.seed)
    
        kwargs = {'num_workers': 1, 'pin_memory': True} if self.cuda else {}

        # standard MNIST normalization statistics (mean, std)
        MNIST_MEAN, MNIST_STD = 0.1307, 0.3081
    
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                         transform=transforms.Compose([
                             transforms.ToTensor(),
                             transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                         ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)
    
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=False, transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((MNIST_MEAN,), (MNIST_STD,))
                             ])),
            batch_size=self.batch_size, shuffle=True, **kwargs)
    
    
        self.model = Network().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
          
          
      def train(self, epoch):
      
        self.model.train()
        for batch_idx, (data, target) in enumerate(self.train_loader):
          
          self.globaliter += 1
          data, target = data.to(self.device), target.to(self.device)
    
          self.optimizer.zero_grad()
          predictions = self.model(data)
    
          loss = F.nll_loss(predictions, target)
          loss.backward()
          self.optimizer.step()
    
          if batch_idx % self.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                      epoch, batch_idx * len(data), len(self.train_loader.dataset),
                      100. * batch_idx / len(self.train_loader), loss.item()))
            self.tb.save_value('Train Loss', 'train_loss', self.globaliter, loss.item())
            
            
      def test(self, epoch):
        self.model.eval()
        test_loss = 0
        correct = 0
    
        with torch.no_grad():
          for data, target in self.test_loader:
            data, target = data.to(self.device), target.to(self.device)
            predictions = self.model(data)
    
            test_loss += F.nll_loss(predictions, target, reduction='sum').item()  # sum up batch loss
            prediction = predictions.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += prediction.eq(target.view_as(prediction)).sum().item()
    
          test_loss /= len(self.test_loader.dataset)
          accuracy = 100. * correct / len(self.test_loader.dataset)
    
          print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
              test_loss, correct, len(self.test_loader.dataset), accuracy))
    
          
    def main():
      
      trainer = Trainer(model_config)
      
      for epoch in range(1, trainer.epochs + 1):
          trainer.train(epoch)
          trainer.test(epoch)
          trainer.tb.flush_line('train_loss')
    
      if trainer.save_model:
          torch.save(trainer.model.state_dict(), "mnist_cnn.pt")
    
      
    main()
    Wait for 8 seconds...
    TensorBoard link:
    http://db797eee.ngrok.io
    Train Epoch: 1 [0/60000 (0%)]    Loss: 2.320306
    Train Epoch: 1 [3200/60000 (5%)]    Loss: 0.881239
    Train Epoch: 1 [6400/60000 (11%)]    Loss: 0.013655
    Train Epoch: 1 [9600/60000 (16%)]    Loss: 0.013620
    Train Epoch: 1 [12800/60000 (21%)]    Loss: 0.225101
    Train Epoch: 1 [16000/60000 (27%)]    Loss: 0.248218
    Train Epoch: 1 [19200/60000 (32%)]    Loss: 0.207354
    Train Epoch: 1 [22400/60000 (37%)]    Loss: 0.139395
    Train Epoch: 1 [25600/60000 (43%)]    Loss: 0.206405
    Train Epoch: 1 [28800/60000 (48%)]    Loss: 0.090241
    Train Epoch: 1 [32000/60000 (53%)]    Loss: 0.216764
    Train Epoch: 1 [35200/60000 (59%)]    Loss: 0.295801
    Train Epoch: 1 [38400/60000 (64%)]    Loss: 0.021000
    Train Epoch: 1 [41600/60000 (69%)]    Loss: 0.050552
    Train Epoch: 1 [44800/60000 (75%)]    Loss: 0.238085
    Train Epoch: 1 [48000/60000 (80%)]    Loss: 0.298676
    Train Epoch: 1 [51200/60000 (85%)]    Loss: 0.301436
    Train Epoch: 1 [54400/60000 (91%)]    Loss: 0.271787
    Train Epoch: 1 [57600/60000 (96%)]    Loss: 0.019811
    
    Test set: Average loss: 0.1088, Accuracy: 9677/10000 (97%)
    
    Train Epoch: 2 [0/60000 (0%)]    Loss: 0.036418
    Train Epoch: 2 [3200/60000 (5%)]    Loss: 0.024196
    Train Epoch: 2 [6400/60000 (11%)]    Loss: 0.029856
    Train Epoch: 2 [9600/60000 (16%)]    Loss: 0.084013
    Train Epoch: 2 [12800/60000 (21%)]    Loss: 0.345446
    Train Epoch: 2 [16000/60000 (27%)]    Loss: 0.453756
    Train Epoch: 2 [19200/60000 (32%)]    Loss: 0.409682
    Train Epoch: 2 [22400/60000 (37%)]    Loss: 0.159656
    Train Epoch: 2 [25600/60000 (43%)]    Loss: 0.009557
    Train Epoch: 2 [28800/60000 (48%)]    Loss: 0.282826
    Train Epoch: 2 [32000/60000 (53%)]    Loss: 0.047159
    Train Epoch: 2 [35200/60000 (59%)]    Loss: 0.379264
    Train Epoch: 2 [38400/60000 (64%)]    Loss: 0.043181
    Train Epoch: 2 [41600/60000 (69%)]    Loss: 0.486660
    Train Epoch: 2 [44800/60000 (75%)]    Loss: 0.108486
    Train Epoch: 2 [48000/60000 (80%)]    Loss: 0.242821
    Train Epoch: 2 [51200/60000 (85%)]    Loss: 0.218120
    Train Epoch: 2 [54400/60000 (91%)]    Loss: 0.381496
    Train Epoch: 2 [57600/60000 (96%)]    Loss: 0.134828
    
    Test set: Average loss: 0.1861, Accuracy: 9496/10000 (95%)
    
    Train Epoch: 3 [0/60000 (0%)]    Loss: 0.081437
    Train Epoch: 3 [3200/60000 (5%)]    Loss: 0.121195
    Train Epoch: 3 [6400/60000 (11%)]    Loss: 0.054902
    Train Epoch: 3 [9600/60000 (16%)]    Loss: 0.031254
    Train Epoch: 3 [12800/60000 (21%)]    Loss: 0.036273
    Train Epoch: 3 [16000/60000 (27%)]    Loss: 0.162744
    Train Epoch: 3 [19200/60000 (32%)]    Loss: 0.028073
    Train Epoch: 3 [22400/60000 (37%)]    Loss: 0.114689
    Train Epoch: 3 [25600/60000 (43%)]    Loss: 0.139724
    Train Epoch: 3 [28800/60000 (48%)]    Loss: 0.353534
    Train Epoch: 3 [32000/60000 (53%)]    Loss: 0.001959
    Train Epoch: 3 [35200/60000 (59%)]    Loss: 0.117742
    Train Epoch: 3 [38400/60000 (64%)]    Loss: 0.024078
    Train Epoch: 3 [41600/60000 (69%)]    Loss: 0.063214
    Train Epoch: 3 [44800/60000 (75%)]    Loss: 0.068128
    Train Epoch: 3 [48000/60000 (80%)]    Loss: 0.055476
    Train Epoch: 3 [51200/60000 (85%)]    Loss: 0.025761
    Train Epoch: 3 [54400/60000 (91%)]    Loss: 0.490388
    Train Epoch: 3 [57600/60000 (96%)]    Loss: 0.275244
    
    Test set: Average loss: 0.1570, Accuracy: 9594/10000 (96%)
    
    Train Epoch: 4 [0/60000 (0%)]    Loss: 0.150237
    Train Epoch: 4 [3200/60000 (5%)]    Loss: 0.049188
    Train Epoch: 4 [6400/60000 (11%)]    Loss: 0.008692
    Train Epoch: 4 [9600/60000 (16%)]    Loss: 0.061360
    Train Epoch: 4 [12800/60000 (21%)]    Loss: 0.004389
    Train Epoch: 4 [16000/60000 (27%)]    Loss: 0.027968
    Train Epoch: 4 [19200/60000 (32%)]    Loss: 0.075881
    Train Epoch: 4 [22400/60000 (37%)]    Loss: 0.074000
    Train Epoch: 4 [25600/60000 (43%)]    Loss: 0.069731
    Train Epoch: 4 [28800/60000 (48%)]    Loss: 0.330368
    Train Epoch: 4 [32000/60000 (53%)]    Loss: 0.393174
    Train Epoch: 4 [35200/60000 (59%)]    Loss: 0.318519
    Train Epoch: 4 [38400/60000 (64%)]    Loss: 0.164669
    Train Epoch: 4 [41600/60000 (69%)]    Loss: 0.161486
    Train Epoch: 4 [44800/60000 (75%)]    Loss: 0.017525
    Train Epoch: 4 [48000/60000 (80%)]    Loss: 0.104918
    Train Epoch: 4 [51200/60000 (85%)]    Loss: 0.000450
    Train Epoch: 4 [54400/60000 (91%)]    Loss: 0.128227
    Train Epoch: 4 [57600/60000 (96%)]    Loss: 0.005374
    
    Test set: Average loss: 0.1227, Accuracy: 9717/10000 (97%)

    The core is the highlighted (red) parts: installing tensorboardcolab, creating the TensorBoardColab() instance in the Trainer, logging with tb.save_value during training, and calling tb.flush_line after each epoch.
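
    Stripped of the MNIST specifics, a minimal sketch of the same tensorboardcolab workflow, using only the calls that appear above (the TensorBoardColab() constructor, save_value, and flush_line):

    !pip install tensorboardcolab
    from tensorboardcolab import TensorBoardColab

    tb = TensorBoardColab()  # starts TensorBoard and prints a public ngrok link

    for step in range(1, 101):
        loss = 1.0 / step  # stand-in for a real training loss
        # save_value(graph_name, line_name, step, value) adds one point to a line
        tb.save_value('Train Loss', 'train_loss', step, loss)

    # push any buffered points for this line to TensorBoard
    tb.flush_line('train_loss')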

  • Original article: https://www.cnblogs.com/xiximayou/p/12470715.html