How to Build a Flying Robot

Notes on the technologies needed to build robots, with occasional thoughts on the future of robotics.

Studying machine learning (trying out PyTorch)

PyTorch

Very clear references and lectures

fast.ai · Making neural nets uncool again

GitHub - ritchieng/the-incredible-pytorch: The Incredible PyTorch: a curated list of tutorials, papers, projects, communities and more relating to PyTorch.

Practical Deep Learning with PyTorch | Udemy

PyTorch – PyTorch MXNet Caffe2 documentation & applications – ClassCat

GitHub - bharathgs/Awesome-pytorch-list: A comprehensive list of pytorch related content on github,such as different models,implementations,helper libraries,tutorials etc.

PyTorch link collection - Qiita

Installation

From source: GitHub - pytorch/pytorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration
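
Prebuilt binaries can also be installed with pip or conda; the selector on pytorch.org generates the exact command for your OS, package manager, and CUDA version.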

Tutorial

GitHub - pytorch/tutorials

Intro to CNNs with PyTorch | moskomule log

GitHub - yunjey/pytorch-tutorial: PyTorch Tutorial for Deep Learning Researchers

A complete-beginner's introduction to pytorch - Qiita

CNN references

PyTorch-Mini-Tutorials/5_convolutional_net.py at master · vinhkhuc/PyTorch-Mini-Tutorials · GitHub

PyTorch-Tutorial/401_CNN.py at master · MorvanZhou/PyTorch-Tutorial · GitHub

pytorch_tutorial/example1.py at master · soravux/pytorch_tutorial · GitHub

Loading the data

import os
import torch
from torchvision import datasets, transforms
from torch.autograd import Variable

# load data (0.1307 / 0.3081 are the standard MNIST per-pixel mean and std)
data_root = os.path.expanduser('~/.torch/data/mnist')
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(data_root, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), shuffle=True, batch_size=32)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(data_root, train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), shuffle=True, batch_size=32)

for i, (images, labels) in enumerate(train_loader):
    images = Variable(images)
    labels = Variable(labels)
    print(i)
    print(images.data.size())
    print(labels.data.size())

The output of the final iteration:

1874
torch.Size([32, 1, 28, 28])
torch.Size([32])

These lines are, in order:

the index of the last mini-batch (60,000 images / batch size 32 = 1,875 batches, indexed 0 through 1874)

(batch size, channels, height, width) of the image tensor

the shape of the label tensor
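
To double-check these numbers you can query the loader directly. A minimal sketch, reusing the train_loader defined above:

# 60,000 images at batch size 32 -> 1,875 mini-batches
print(len(train_loader))          # 1875
print(len(train_loader.dataset))  # 60000

# peek at a single batch without writing a full loop
images, labels = next(iter(train_loader))
print(images.size())  # torch.Size([32, 1, 28, 28])
print(labels.size())  # torch.Size([32])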

Training: Case 1

pytorch.learning/mnist_conv.py at master · moskomule/pytorch.learning · GitHub

"""
MNIST classification using the nn module
"""

import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable

cuda = torch.cuda.is_available()

# load data
data_root = os.path.expanduser('~/.torch/data/mnist')
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(data_root, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), shuffle=True, batch_size=32)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(data_root, train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), shuffle=True, batch_size=32)


class Net1(nn.Module):
    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=10,
                               kernel_size=5,
                               stride=1)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dense1 = nn.Linear(in_features=320,
                                out_features=50)
        self.dense2 = nn.Linear(50, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.max_pool2d(x, kernel_size=2)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        print(x)  # debug: shape before flattening, (batch, 20, 4, 4)
        x = x.view(-1, 320)
        x = self.dense1(x)
        x = F.relu(x)
        x = self.dense2(x)
        return F.log_softmax(x, dim=1)


# alternative way
class Net2(nn.Module):
    def __init__(self):
        super(Net2, self).__init__()
        self.head = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=10,
                      kernel_size=5, stride=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU())
        self.tail = nn.Sequential(
            nn.Linear(320, 50),
            nn.ReLU(),
            nn.Linear(50, 10))

    def forward(self, x):
        x = self.head(x)
        x = x.view(-1, 320)
        x = self.tail(x)
        return F.log_softmax(x, dim=1)


def train(model, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()  # reset accumulated gradients
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log likelihood loss
        loss.backward()  # backprop
        optimizer.step()
        if batch_idx % 20 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.data[0]))


def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(output, target).data[0]
        pred = output.data.max(1)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()

    test_loss /= len(test_loader)  # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return test_loss


if __name__ == '__main__':
    model = Net1()
    epochs = 10
    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=5e-4)

    losses = []
    for i in range(1, epochs + 1):
        train(model, optimizer, i)
        losses.append(test(model))  # evaluate after every epoch

The value printed by print(x) is [torch.FloatTensor of size 32x20x4x4].

The fully connected layer's 320 input features come from 20 x 4 x 4 = 320. The 4 is the spatial size left after the two conv/pool stages:

(( 28 - ( 5 - 1 )) / 2 - ( 5 - 1 )) / 2 = 4

Each 5x5 convolution (stride 1, no padding) shrinks the side by 4, and each 2x2 max pool halves it: 28 -> 24 -> 12 -> 8 -> 4.
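
The same arithmetic as a reusable helper. A minimal sketch; conv_out is a name introduced here for illustration:

# output size of a convolution: (in + 2*padding - kernel) // stride + 1
def conv_out(size, kernel, stride=1, padding=0):
    return (size + 2 * padding - kernel) // stride + 1

s = conv_out(28, 5)  # conv1, no padding: 24
s = s // 2           # 2x2 max pool: 12
s = conv_out(s, 5)   # conv2: 8
s = s // 2           # 2x2 max pool: 4
print(20 * s * s)    # 320, the in_features of dense1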

Training: Case 2

GitHub - yunjey/pytorch-tutorial: PyTorch Tutorial for Deep Learning Researchers

import torch 
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable


# Hyper Parameters
num_epochs = 10
batch_size = 100
learning_rate = 0.001

# MNIST Dataset
train_dataset = dsets.MNIST(root='./data/',
                            train=True, 
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data/',
                           train=False, 
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, 
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size, 
                                          shuffle=False)

# CNN Model (2 conv layer)
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7*7*32, 10)
        
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        print(out)  # debug: shape before flattening, (batch, 32, 7, 7)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
        
cnn = CNN()


# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images)
        labels = Variable(labels)
        
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        if (i+1) % 100 == 0:
            print ('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f' 
                   %(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))

# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images)
    outputs = cnn(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))

# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl')

print(out) shows

[torch.FloatTensor of size 100x32x7x7]

out.size(0) = 100 is the batch size. Because both convolutions use padding=2, a 5x5 kernel preserves the spatial size here, and only the max pools shrink it: 28 -> 14 -> 7. The flattened feature count is therefore 7*7*32 = 1568, matching nn.Linear(7*7*32, 10).
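
This can be checked with the same conv_out helper sketched in Case 1:

s = conv_out(28, 5, padding=2)  # padding=2 preserves 28
s = s // 2                      # 2x2 max pool: 14
s = conv_out(s, 5, padding=2)   # still 14
s = s // 2                      # 2x2 max pool: 7
print(32 * s * s)               # 1568 = 7*7*32

Also note the loss functions: Case 1 applies F.log_softmax inside the model and trains with F.nll_loss, while Case 2 returns raw scores and uses nn.CrossEntropyLoss, which applies log_softmax internally; the two setups are equivalent.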

Saving and loading a model

Practical Pytorch - Qiita

A lightning introduction to pytorch - Qiita

import torch
from torchvision import models

model = models.resnet50(pretrained=True)
# save only the weights (the state_dict)
torch.save(model.state_dict(), 'weight.pth')

# rebuild the architecture, then load the saved parameters into it
model2 = models.resnet50()
param = torch.load('weight.pth')
model2.load_state_dict(param)
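
Two follow-ups worth knowing. A minimal sketch; model3 and the 'model.pth' filename are illustrative:

# before inference, switch to eval mode so BatchNorm/Dropout behave deterministically
model2.eval()

# torch.save can also pickle the whole module, but that ties the file to the
# exact class definition; saving the state_dict is the more portable approach
torch.save(model, 'model.pth')
model3 = torch.load('model.pth')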

Model Converter

GitHub - ysh329/deep-learning-model-convertor: The convertor/conversion of deep learning models for different deep learning frameworks/softwares.

Segmentation Sample

PSPNet

Exciting! I want to try this!! https://github.com/Lextal/pspnet-pytorch