From ade56890b77e91d4cfe81ec6837b5329fa344ba1 Mon Sep 17 00:00:00 2001
From: Claudio Scheer
Date: Mon, 6 Apr 2020 01:31:10 -0300
Subject: [PATCH] Correct MLP XOR example

---
 mpl-xor/main.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/mpl-xor/main.py b/mpl-xor/main.py
index 7d93ddf..99ccf65 100644
--- a/mpl-xor/main.py
+++ b/mpl-xor/main.py
@@ -2,23 +2,25 @@ import torch
 import torch.nn as nn
 
 train_data = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.float).cuda()
-train_data_y = torch.tensor([[0], [1], [1], [1]], dtype=torch.float).cuda()
+train_data_y = torch.tensor([[0], [1], [1], [0]], dtype=torch.float).cuda()
 
 
 class MultilayerPerceptron(nn.Module):
-    def __init__(self):
+    def __init__(self, hidden_layer_size):
         super(MultilayerPerceptron, self).__init__()
-        self.layers = nn.Sequential(nn.Linear(2, 2), nn.Sigmoid(), nn.Linear(2, 1))
+        self.layers = nn.Sequential(
+            nn.Linear(2, hidden_layer_size), nn.ReLU(), nn.Linear(hidden_layer_size, 1)
+        )
 
     def forward(self, x):
         return self.layers(x)
 
 
-model = MultilayerPerceptron()
+model = MultilayerPerceptron(5)
 model.cuda()
 
-loss_function = torch.nn.L1Loss()
-optimization_function = torch.optim.SGD(model.parameters(), lr=0.03)
+loss_function = nn.MSELoss()
+optimization_function = torch.optim.Adam(model.parameters(), lr=0.001)
 
 model.train()
--
GitLab
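
The hunk above ends at model.train(); the training loop itself lies outside this diff. For readers following along, a minimal sketch of how the corrected model could be trained and checked, assuming the tensors, model, loss_function, and optimization_function defined in the hunk are in scope. The epoch count and the final printout are illustrative, not taken from the repository.

# Hypothetical continuation of mpl-xor/main.py; not part of the patch above.
for epoch in range(5000):                        # epoch count is illustrative
    optimization_function.zero_grad()            # clear gradients from the previous step
    predictions = model(train_data)              # forward pass on all four XOR inputs
    loss = loss_function(predictions, train_data_y)
    loss.backward()                              # backpropagate the MSE loss
    optimization_function.step()                 # Adam parameter update

model.eval()
with torch.no_grad():
    print(model(train_data).round())             # expected XOR outputs: [[0], [1], [1], [0]]

Rounding the raw outputs is one simple way to read the network's answer here, since MSELoss on a single linear output head produces real-valued predictions rather than class probabilities.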