Comparing TensorFlow 2.0 and PyTorch

TensorFlow 2.0

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import datasets

Hyperparameter

batch_size = 64

learning_rate = 0.001
dropout_rate = 0.7

input_shape = (28, 28, 1)
num_classes = 10

Preprocess

(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()

train_x = train_x[..., tf.newaxis]
test_x = test_x[..., tf.newaxis]

train_x = train_x / 255.
test_x = test_x / 255.
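
The tf.newaxis above adds a channel dimension so the arrays match the (28, 28, 1) input_shape the model below expects, and dividing by 255. scales the pixel values into [0, 1]. A quick sanity check of the resulting shapes (a minimal sketch, not part of the original code):

# expected: (60000, 28, 28, 1) (10000, 28, 28, 1) after adding the channel axis
print(train_x.shape, test_x.shape)
# expected: 0.0 1.0 after scaling
print(train_x.min(), train_x.max())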

Build Model

inputs = layers.Input(input_shape)
net = layers.Conv2D(32, (3, 3), padding='SAME')(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(32, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(dropout_rate)(net)

net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(dropout_rate)(net)

net = layers.Flatten()(net)
net = layers.Dense(512)(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(dropout_rate)(net)
net = layers.Dense(num_classes)(net)
net = layers.Activation('softmax')(net)

model = tf.keras.Model(inputs=inputs, outputs=net, name='Basic_CNN')
# Model is the full model w/o custom layers
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),  # Optimization
              loss='sparse_categorical_crossentropy',             # Loss Function
              metrics=['accuracy'])                               # Metrics / Accuracy
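
The labels in train_y are integer class indices (0-9) rather than one-hot vectors, which is why sparse_categorical_crossentropy is used here. Before training you can optionally print the layer stack and parameter counts (not in the original post):

model.summary()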

Training

model.fit(train_x, train_y,
          batch_size=batch_size,
          shuffle=True)

# Out
Train on 60000 samples
60000/60000 [==============================] - 6s 103us/sample - loss: 0.3794 - accuracy: 0.8767
model.evaluate(test_x, test_y, batch_size=batch_size)
=> [0.06760077927671373, 0.9776]
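
The model.fit call above uses the Keras default of a single epoch. A hedged sketch of a longer run with a held-out validation split; the epochs=5 and validation_split=0.1 values are illustrative assumptions, not from the original post:

model.fit(train_x, train_y,
          batch_size=batch_size,
          epochs=5,               # assumption: train for more than the default single epoch
          validation_split=0.1,   # assumption: hold out 10% of the training data for validation
          shuffle=True)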

PyTorch

import torch

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Library for working with image datasets and transforms
from torchvision import datasets, transforms

Hyperparameter

# Fix the random seed for reproducibility
seed = 1

lr = 0.001
momentum = 0.5

batch_size = 64
test_batch_size = 64

epochs = 5

no_cuda = False
log_interval = 100

Model

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4 * 4 * 50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
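
The 4 * 4 * 50 fed into fc1 follows from the input geometry: a 28x28 image becomes 24x24 after the first 5x5 convolution (no padding), 12x12 after 2x2 max pooling, 8x8 after the second convolution, and 4x4 after the second pooling, with 50 channels. A dummy forward pass confirms the output shape (a sketch, not part of the original):

net = Net()
dummy = torch.zeros(1, 1, 28, 28)  # one fake MNIST-sized image
print(net(dummy).shape)            # torch.Size([1, 10]), one log-probability per class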

Preprocess

torch.manual_seed(seed)

use_cuda = not no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True, **kwargs)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=test_batch_size, shuffle=True, **kwargs)
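
Normalize((0.1307,), (0.3081,)) applies the mean and standard deviation of the MNIST training set. Each batch from train_loader is an image tensor of shape [batch_size, 1, 28, 28] plus a label tensor of shape [batch_size]; a quick check (not in the original code):

data, target = next(iter(train_loader))
print(data.shape, target.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])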

Optimization

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
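
Note that the TensorFlow example above uses Adam while this PyTorch example uses SGD with momentum, so the training curves are not a strict apples-to-apples comparison. To match the optimizers, a minimal alternative (an assumption, not from the original post) would be:

optimizer = optim.Adam(model.parameters(), lr=lr)  # closer to the Keras Adam(learning_rate) setup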

Training

for epoch in range(1, epochs + 1):
    # Train mode
    model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()  # reset gradients to zero before backpropagation
        output = model(data)
        loss = F.nll_loss(output, target)  # https://pytorch.org/docs/stable/nn.html#nll-loss
        loss.backward()   # backpropagate to compute gradients
        optimizer.step()  # update parameters with the computed gradients

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    # Test mode
    model.eval()  # switch layers such as batch norm and dropout to evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():  # disable the autograd engine (no gradient tracking) to reduce memory usage and speed up evaluation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()  # count predictions that match the target

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

Results

Train Epoch: 1 [0/60000 (0%)] Loss: 2.300039
Train Epoch: 1 [6400/60000 (11%)] Loss: 2.239658
Train Epoch: 1 [12800/60000 (21%)] Loss: 2.133953
Train Epoch: 1 [19200/60000 (32%)] Loss: 2.007281
Train Epoch: 1 [25600/60000 (43%)] Loss: 1.656340
Train Epoch: 1 [32000/60000 (53%)] Loss: 1.400340
Train Epoch: 1 [38400/60000 (64%)] Loss: 0.864186
Train Epoch: 1 [44800/60000 (75%)] Loss: 0.613089
Train Epoch: 1 [51200/60000 (85%)] Loss: 0.615066
Train Epoch: 1 [57600/60000 (96%)] Loss: 0.427881

Test set: Average loss: 0.4822, Accuracy: 8651/10000 (87%)

Train Epoch: 2 [0/60000 (0%)] Loss: 0.635855
Train Epoch: 2 [6400/60000 (11%)] Loss: 0.361398
Train Epoch: 2 [12800/60000 (21%)] Loss: 0.457372
Train Epoch: 2 [19200/60000 (32%)] Loss: 0.393982
Train Epoch: 2 [25600/60000 (43%)] Loss: 0.536390
Train Epoch: 2 [32000/60000 (53%)] Loss: 0.258121
Train Epoch: 2 [38400/60000 (64%)] Loss: 0.413061
Train Epoch: 2 [44800/60000 (75%)] Loss: 0.174248
Train Epoch: 2 [51200/60000 (85%)] Loss: 0.222638
Train Epoch: 2 [57600/60000 (96%)] Loss: 0.401988

Test set: Average loss: 0.2956, Accuracy: 9128/10000 (91%)

Train Epoch: 3 [0/60000 (0%)] Loss: 0.425370
Train Epoch: 3 [6400/60000 (11%)] Loss: 0.242881
Train Epoch: 3 [12800/60000 (21%)] Loss: 0.266271
Train Epoch: 3 [19200/60000 (32%)] Loss: 0.344442
Train Epoch: 3 [25600/60000 (43%)] Loss: 0.271553
Train Epoch: 3 [32000/60000 (53%)] Loss: 0.233463
Train Epoch: 3 [38400/60000 (64%)] Loss: 0.206387
Train Epoch: 3 [44800/60000 (75%)] Loss: 0.289864
Train Epoch: 3 [51200/60000 (85%)] Loss: 0.227708
Train Epoch: 3 [57600/60000 (96%)] Loss: 0.266247

Test set: Average loss: 0.2279, Accuracy: 9320/10000 (93%)

Train Epoch: 4 [0/60000 (0%)] Loss: 0.197864
Train Epoch: 4 [6400/60000 (11%)] Loss: 0.334545
Train Epoch: 4 [12800/60000 (21%)] Loss: 0.171417
Train Epoch: 4 [19200/60000 (32%)] Loss: 0.261458
Train Epoch: 4 [25600/60000 (43%)] Loss: 0.129152
Train Epoch: 4 [32000/60000 (53%)] Loss: 0.129057
Train Epoch: 4 [38400/60000 (64%)] Loss: 0.203361
Train Epoch: 4 [44800/60000 (75%)] Loss: 0.304724
Train Epoch: 4 [51200/60000 (85%)] Loss: 0.136878
Train Epoch: 4 [57600/60000 (96%)] Loss: 0.171525

Test set: Average loss: 0.1808, Accuracy: 9482/10000 (95%)

Train Epoch: 5 [0/60000 (0%)] Loss: 0.199721
Train Epoch: 5 [6400/60000 (11%)] Loss: 0.246554
Train Epoch: 5 [12800/60000 (21%)] Loss: 0.217517
Train Epoch: 5 [19200/60000 (32%)] Loss: 0.255912
Train Epoch: 5 [25600/60000 (43%)] Loss: 0.182075
Train Epoch: 5 [32000/60000 (53%)] Loss: 0.296272
Train Epoch: 5 [38400/60000 (64%)] Loss: 0.283726
Train Epoch: 5 [44800/60000 (75%)] Loss: 0.239351
Train Epoch: 5 [51200/60000 (85%)] Loss: 0.293096
Train Epoch: 5 [57600/60000 (96%)] Loss: 0.157650

Test set: Average loss: 0.1533, Accuracy: 9581/10000 (96%)
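
After training you will typically want to persist the learned weights. A minimal sketch; the filename mnist_cnn.pt is an assumption, not from the original post:

# save only the learned parameters (state_dict), the usual PyTorch convention
torch.save(model.state_dict(), "mnist_cnn.pt")

# to reload later:
# model = Net()
# model.load_state_dict(torch.load("mnist_cnn.pt"))
# model.eval()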