Merge pull request #106 from changmeen/master

Update 504_batch_normalization.ipynb
This commit is contained in:
Morvan
2022-05-16 11:32:56 +08:00
committed by GitHub

View File

@@ -96,7 +96,7 @@
"test_x = Variable(torch.from_numpy(test_x).float(), volatile=True) # not for computing gradients\n",
"test_y = Variable(torch.from_numpy(test_y).float(), volatile=True)\n",
"\n",
"train_dataset = Data.TensorDataset(data_tensor=train_x, target_tensor=train_y)\n",
"train_dataset = Data.TensorDataset(train_x, train_y)\n",
"train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)\n",
"\n",
"# show data\n",
@@ -274,7 +274,7 @@
" for net, l in zip(nets, losses):\n",
" net.eval() # set eval mode to fix moving_mean and moving_var\n",
" pred, layer_input, pre_act = net(test_x)\n",
" l.append(loss_func(pred, test_y).data[0])\n",
" l.append(loss_func(pred, test_y).data)\n",
" layer_inputs.append(layer_input)\n",
" pre_acts.append(pre_act)\n",
" net.train() # free moving_mean and moving_var\n",