Fix bug for Windows
@@ -43,43 +43,44 @@ class Net(torch.nn.Module):
         x = self.predict(x)             # linear output
         return x
 
-# different nets
-net_SGD = Net()
-net_Momentum = Net()
-net_RMSprop = Net()
-net_Adam = Net()
-nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
+if __name__ == '__main__':
+    # different nets
+    net_SGD = Net()
+    net_Momentum = Net()
+    net_RMSprop = Net()
+    net_Adam = Net()
+    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
 
-# different optimizers
-opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
-opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
-opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
-opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
-optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
+    # different optimizers
+    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
+    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
+    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
+    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
+    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
 
-loss_func = torch.nn.MSELoss()
-losses_his = [[], [], [], []]   # record loss
+    loss_func = torch.nn.MSELoss()
+    losses_his = [[], [], [], []]   # record loss
 
-# training
-for epoch in range(EPOCH):
-    print('Epoch: ', epoch)
-    for step, (batch_x, batch_y) in enumerate(loader):  # for each training step
-        b_x = Variable(batch_x)
-        b_y = Variable(batch_y)
+    # training
+    for epoch in range(EPOCH):
+        print('Epoch: ', epoch)
+        for step, (batch_x, batch_y) in enumerate(loader):  # for each training step
+            b_x = Variable(batch_x)
+            b_y = Variable(batch_y)
 
-        for net, opt, l_his in zip(nets, optimizers, losses_his):
-            output = net(b_x)               # get output for every net
-            loss = loss_func(output, b_y)   # compute loss for every net
-            opt.zero_grad()                 # clear gradients for next train
-            loss.backward()                 # backpropagation, compute gradients
-            opt.step()                      # apply gradients
-            l_his.append(loss.data[0])      # loss recorder
+            for net, opt, l_his in zip(nets, optimizers, losses_his):
+                output = net(b_x)               # get output for every net
+                loss = loss_func(output, b_y)   # compute loss for every net
+                opt.zero_grad()                 # clear gradients for next train
+                loss.backward()                 # backpropagation, compute gradients
+                opt.step()                      # apply gradients
+                l_his.append(loss.data[0])      # loss recorder
 
-labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
-for i, l_his in enumerate(losses_his):
-    plt.plot(l_his, label=labels[i])
-plt.legend(loc='best')
-plt.xlabel('Steps')
-plt.ylabel('Loss')
-plt.ylim((0, 0.2))
-plt.show()
+    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
+    for i, l_his in enumerate(losses_his):
+        plt.plot(l_his, label=labels[i])
+    plt.legend(loc='best')
+    plt.xlabel('Steps')
+    plt.ylabel('Loss')
+    plt.ylim((0, 0.2))
+    plt.show()
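Why this fixes the Windows bug: DataLoader worker processes are forked on Linux but spawned on Windows, and a spawned worker re-imports the main script. With the training code at module level, every worker re-executed it and tried to start its own DataLoader workers, which fails with a RuntimeError; moving the code under an `if __name__ == '__main__':` guard keeps it from running in the re-imported children. A minimal sketch of the pattern (toy data and hyperparameters are assumed for illustration, not taken from the commit):

import torch
import torch.utils.data as Data

# toy regression data, safe at module level: it only builds tensors
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1 * torch.randn(x.size())
dataset = Data.TensorDataset(x, y)

if __name__ == '__main__':
    # On Windows, each of the num_workers child processes re-imports this
    # module; only the parent process executes this guarded block.
    loader = Data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)
    for batch_x, batch_y in loader:
        pass    # training step goes here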
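Unrelated to the fix itself, two calls date this snippet to an old PyTorch: `Variable` wrappers are no longer needed (tensors carry autograd state since PyTorch 0.4), and reading a scalar loss with `loss.data[0]` now raises an IndexError on a 0-dim tensor; `loss.item()` is the replacement. On a current PyTorch the inner loop would look roughly like this (a sketch, not part of the commit):

for net, opt, l_his in zip(nets, optimizers, losses_his):
    output = net(b_x)                  # b_x is a plain tensor; no Variable wrapper
    loss = loss_func(output, b_y)      # compute loss for this net
    opt.zero_grad()                    # clear gradients from the previous step
    loss.backward()                    # backpropagation, compute gradients
    opt.step()                         # apply gradients
    l_his.append(loss.item())          # loss.item() replaces loss.data[0]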