
PyTorch - TensorBoard, Learning Rate Schedule, Save and Load Model

minhui 2021. 1. 28. 15:11

TensorBoard in PyTorch

 

import os
from glob import glob
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image

seed = 1
lr = 0.001
momentum = 0.5
batch_size = 64
test_batch_size = 64
epochs = 1000
no_cuda = False
log_interval = 100

 

Model

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
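
A quick sanity check of the 4*4*50 flatten size (a minimal sketch): each 5×5 conv trims 4 pixels and each 2×2 max-pool halves the spatial size, so a 28×28 MNIST input shrinks 28 → 24 → 12 → 8 → 4.

dummy = torch.zeros(1, 1, 28, 28)   # one fake grayscale MNIST-sized image
print(Net()(dummy).shape)           # torch.Size([1, 10]) -- ten class log-probabilities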

 

Preprocess

train_paths = glob('../dataset/mnist_png/training/*/*.png')[:1000]
test_paths = glob('../dataset/mnist_png/testing/*/*.png')[:1000]

class MNISTDataset(Dataset):  # renamed so it doesn't shadow torch.utils.data.Dataset
    def __init__(self, data_paths, transform=None):
        self.data_paths = data_paths
        self.transform = transform

    def __len__(self):
        return len(self.data_paths)

    def __getitem__(self, idx):
        path = self.data_paths[idx]
        image = Image.open(path).convert("L")  # grayscale
        # The parent directory name is the digit label; os.path handles
        # both Windows and POSIX path separators.
        label = int(os.path.basename(os.path.dirname(path)))

        if self.transform:
            image = self.transform(image)

        return image, label
        
        
torch.manual_seed(seed)

use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

train_loader = torch.utils.data.DataLoader(
    MNISTDataset(train_paths, 
                 transforms.Compose([
                     transforms.RandomHorizontalFlip(), 
                     transforms.ToTensor(), 
                     transforms.Normalize(
                         mean=[0.406], 
                         std=[0.225])])
                 ),
    batch_size=batch_size, 
    shuffle=True,
    **kwargs
)

test_loader = torch.utils.data.DataLoader(
    MNISTDataset(test_paths,
                 transforms.Compose([
                     transforms.ToTensor(), 
                     transforms.Normalize(
                         mean=[0.406], 
                         std=[0.225])])
                 ),
    batch_size=test_batch_size, 
    shuffle=False,
    **kwargs
)
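
Pulling a single batch is an easy way to confirm the loaders feed Net.forward the shapes it expects (a minimal check, assuming the directory layout above):

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])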

 

Optimization

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)


Training

import torchvision
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
for epoch in range(1, epochs + 1):
    # Train Mode
    model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # https://pytorch.org/docs/stable/nn.html#nll-loss
        loss.backward()
        optimizer.step()

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    
    # Test mode
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    
    accuracy = 100. * correct / len(test_loader.dataset)
    
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
    if epoch == 1:  # the loop starts at 1, so log the sample grid and graph once, on the first epoch
        grid = torchvision.utils.make_grid(data)
        writer.add_image('images', grid, epoch)
        writer.add_graph(model, data)
    writer.add_scalar('Loss/train', loss.item(), epoch)
    writer.add_scalar('Loss/test', test_loss, epoch)
    writer.add_scalar('Accuracy/test', accuracy, epoch)
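
One detail the loop above leaves implicit: flushing and closing the writer guarantees all pending events reach disk, and the dashboard is then launched against the log directory (runs/ by default):

writer.flush()
writer.close()

From a terminal: tensorboard --logdir=runs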


Learning Rate Schedule

Build Model → Data Preprocess → Optimization → Learning Rate Scheduler → Training

from torch.optim.lr_scheduler import ReduceLROnPlateau
# mode='max': the monitored metric (e.g. test accuracy) should increase;
# the LR is multiplied by `factor` when it stops improving for `patience` epochs.
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=0, verbose=True)
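
Unlike most schedulers, ReduceLROnPlateau must be handed the monitored metric explicitly. A minimal sketch of wiring it into the training loop above, using the test accuracy (which is what mode='max' expects):

# At the end of each epoch, after the evaluation pass:
scheduler.step(accuracy)  # multiplies the LR by factor=0.1 when accuracy plateaus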


Save and Load Model

Build Model → Data Preprocess → Optimization → Training → Save Model / Save Entire Model / Save, Load and Resume Training

 

Save Model

save_path = 'model_weight.pt'
torch.save(model.state_dict(), save_path)
model = Net().to(device)
weight_dict = torch.load(save_path)
weight_dict.keys()
# odict_keys(['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias', 'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias'])


weight_dict['conv1.weight'].shape
# torch.Size([20, 1, 5, 5])

model.load_state_dict(weight_dict)
# <All keys matched successfully>

model.eval()
# Net(
#   (conv1): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))
#   (conv2): Conv2d(20, 50, kernel_size=(5, 5), stride=(1, 1))
#   (fc1): Linear(in_features=800, out_features=500, bias=True)
#   (fc2): Linear(in_features=500, out_features=10, bias=True)
# )
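
Weights saved on a GPU machine can be remapped when loading on a CPU-only one; map_location is a standard torch.load argument:

weight_dict = torch.load(save_path, map_location=torch.device('cpu'))
model.load_state_dict(weight_dict)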


Save Entire Model

save_path = 'model.pt'
torch.save(model, save_path)
model = torch.load(save_path) # load Model
model.eval()
# Net(
#  (conv1): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))
#  (conv2): Conv2d(20, 50, kernel_size=(5, 5), stride=(1, 1))
#  (fc1): Linear(in_features=800, out_features=500, bias=True)
#  (fc2): Linear(in_features=500, out_features=10, bias=True)
#)
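
One caveat: torch.save(model, save_path) pickles the whole Net object, so the class definition must be importable under the same name when torch.load is called. Saving only the state_dict, as in the previous section, is the more portable choice.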


Save, Load and Resume Training

checkpoint_path = 'checkpoint.pt'
torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss
            }, checkpoint_path)
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

checkpoint = torch.load(checkpoint_path)
checkpoint.keys()
# dict_keys(['epoch', 'model_state_dict', 'optimizer_state_dict', 'loss'])

model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
optimizer
#SGD (
#Parameter Group 0
#    dampening: 0
#    lr: 0.001
#    momentum: 0.5
#    nesterov: False
#    weight_decay: 0
#)
model
#Net(
#  (conv1): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1))
#  (conv2): Conv2d(20, 50, kernel_size=(5, 5), stride=(1, 1))
#  (fc1): Linear(in_features=800, out_features=500, bias=True)
#  (fc2): Linear(in_features=500, out_features=10, bias=True)
#)
epoch
# 5

loss
# tensor(0.0145, requires_grad=True)
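
From here, training can pick up where it left off. A minimal resume sketch, reusing the train_loader and loss function from the Training section:

model.train()
for epoch in range(checkpoint['epoch'] + 1, epochs + 1):
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()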