From 8c5754874d3fd61c08f886c26bb050d2360b86bb Mon Sep 17 00:00:00 2001
From: changmeen <73987361+changmeen@users.noreply.github.com>
Date: Sat, 14 May 2022 21:12:49 +0900
Subject: [PATCH] Update 504_batch_normalization.ipynb

Found 2 errors that prevent the notebook from running in Colab.
---
 tutorial-contents-notebooks/504_batch_normalization.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tutorial-contents-notebooks/504_batch_normalization.ipynb b/tutorial-contents-notebooks/504_batch_normalization.ipynb
index 3ef4afb..f8919e4 100644
--- a/tutorial-contents-notebooks/504_batch_normalization.ipynb
+++ b/tutorial-contents-notebooks/504_batch_normalization.ipynb
@@ -96,7 +96,7 @@
     "test_x = Variable(torch.from_numpy(test_x).float(), volatile=True)  # not for computing gradients\n",
     "test_y = Variable(torch.from_numpy(test_y).float(), volatile=True)\n",
     "\n",
-    "train_dataset = Data.TensorDataset(data_tensor=train_x, target_tensor=train_y)\n",
+    "train_dataset = Data.TensorDataset(train_x, train_y)\n",
     "train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)\n",
     "\n",
     "# show data\n",
@@ -274,7 +274,7 @@
     "        for net, l in zip(nets, losses):\n",
     "            net.eval()              # set eval mode to fix moving_mean and moving_var\n",
     "            pred, layer_input, pre_act = net(test_x)\n",
-    "            l.append(loss_func(pred, test_y).data[0])\n",
+    "            l.append(loss_func(pred, test_y).data)\n",
     "            layer_inputs.append(layer_input)\n",
     "            pre_acts.append(pre_act)\n",
     "            net.train()             # free moving_mean and moving_var\n",
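
Both hunks track PyTorch API changes that bite on Colab's preinstalled PyTorch: `TensorDataset` no longer takes `data_tensor=`/`target_tensor=` keyword arguments (it accepts tensors positionally), and a loss is now a 0-dim tensor, so indexing it with `.data[0]` raises an IndexError. A minimal sketch of the updated calls, assuming a recent PyTorch release; the data here is hypothetical and only illustrates the two calls the patch changes:

```python
import torch
import torch.utils.data as Data

# Hypothetical stand-in data, just to exercise the two fixed calls.
train_x = torch.linspace(-7, 10, 2500).unsqueeze(1)
train_y = train_x + 8 * torch.rand(train_x.size())

# TensorDataset now takes the tensors positionally (no data_tensor=/target_tensor=).
train_dataset = Data.TensorDataset(train_x, train_y)
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)

# Losses are 0-dim tensors, so loss.data[0] fails; use .data (as in the patch) or .item().
loss_func = torch.nn.MSELoss()
loss = loss_func(train_x * 0.5, train_y)
print(loss.data)    # 0-dim tensor, matching the patched notebook
print(loss.item())  # plain Python float
```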