diff --git a/tutorial-contents/401_CNN.py b/tutorial-contents/401_CNN.py
index 1f9daea..8016158 100644
--- a/tutorial-contents/401_CNN.py
+++ b/tutorial-contents/401_CNN.py
@@ -65,7 +65,7 @@ class CNN(nn.Module):
                 out_channels=16,            # n_filters
                 kernel_size=5,              # filter size
                 stride=1,                   # filter movement/step
-                padding=2,                  # if want same width and length of this image after con2d, padding=(kernel_size-1)/2 if stride=1
+                padding=2,                  # if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if stride=1
             ),                              # output shape (16, 28, 28)
             nn.ReLU(),                      # activation
             nn.MaxPool2d(kernel_size=2),    # choose max value in 2x2 area, output shape (16, 14, 14)
@@ -115,7 +115,7 @@ for epoch in range(EPOCH):
 
         if step % 50 == 0:
             test_output, last_layer = cnn(test_x)
-            pred_y = torch.max(test_output, 1)[1].data.squeeze().numpy()
+            pred_y = torch.max(test_output, 1)[1].data.numpy()
             accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
             print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
             if HAS_SK:
@@ -129,6 +129,6 @@ plt.ioff()
 
 # print 10 predictions from test data
 test_output, _ = cnn(test_x[:10])
-pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
+pred_y = torch.max(test_output, 1)[1].data.numpy()
 print(pred_y, 'prediction number')
 print(test_y[:10].numpy(), 'real number')
diff --git a/tutorial-contents/402_RNN_classifier.py b/tutorial-contents/402_RNN_classifier.py
index 9ef11f7..3bb8231 100644
--- a/tutorial-contents/402_RNN_classifier.py
+++ b/tutorial-contents/402_RNN_classifier.py
@@ -47,7 +47,7 @@ train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_
 # convert test data into Variable, pick 2000 samples to speed up testing
 test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
 test_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255.   # shape (2000, 28, 28) value in range(0,1)
-test_y = test_data.test_labels.numpy().squeeze()[:2000]    # covert to numpy array
+test_y = test_data.test_labels.numpy()[:2000]    # covert to numpy array
 
 
 class RNN(nn.Module):
@@ -94,13 +94,13 @@ for epoch in range(EPOCH):
 
         if step % 50 == 0:
             test_output = rnn(test_x)                   # (samples, time_step, input_size)
-            pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
+            pred_y = torch.max(test_output, 1)[1].data.numpy()
             accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
             print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)
 
 
 # print 10 predictions from test data
 test_output = rnn(test_x[:10].view(-1, 28, 28))
-pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
+pred_y = torch.max(test_output, 1)[1].data.numpy()
 print(pred_y, 'prediction number')
 print(test_y[:10], 'real number')
diff --git a/tutorial-contents/403_RNN_regressor.py b/tutorial-contents/403_RNN_regressor.py
index c8b19ca..e9d9bae 100644
--- a/tutorial-contents/403_RNN_regressor.py
+++ b/tutorial-contents/403_RNN_regressor.py
@@ -20,8 +20,8 @@ INPUT_SIZE = 1          # rnn input size
 LR = 0.02               # learning rate
 
 # show data
-steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)
-x_np = np.sin(steps)    # float32 for converting torch FloatTensor
+steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)  # float32 for converting torch FloatTensor
+x_np = np.sin(steps)
 y_np = np.cos(steps)
 plt.plot(steps, y_np, 'r-', label='target (cos)')
 plt.plot(steps, x_np, 'b-', label='input (sin)')
@@ -55,7 +55,13 @@ class RNN(nn.Module):
         # instead, for simplicity, you can replace above codes by follows
         # r_out = r_out.view(-1, 32)
         # outs = self.out(r_out)
+        # outs = outs.view(-1, TIME_STEP, 1)
         # return outs, h_state
+
+        # or even simpler, since nn.Linear can accept inputs of any dimension
+        # and returns outputs with same dimension except for the last
+        # outs = self.out(r_out)
+        # return outs
 
 rnn = RNN()
 print(rnn)
@@ -71,8 +77,8 @@ plt.ion()   # continuously plot
 for step in range(100):
     start, end = step * np.pi, (step+1)*np.pi   # time range
     # use sin predicts cos
-    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
-    x_np = np.sin(steps)    # float32 for converting torch FloatTensor
+    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)  # float32 for converting torch FloatTensor
+    x_np = np.sin(steps)
     y_np = np.cos(steps)
 
     x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])    # shape (batch, time_step, input_size)
diff --git a/tutorial-contents/502_GPU.py b/tutorial-contents/502_GPU.py
index 58cd07b..383e5ba 100644
--- a/tutorial-contents/502_GPU.py
+++ b/tutorial-contents/502_GPU.py
@@ -68,7 +68,7 @@ for epoch in range(EPOCH):
             test_output = cnn(test_x)
 
             # !!!!!!!! Change in here !!!!!!!!! #
-            pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()  # move the computation in GPU
+            pred_y = torch.max(test_output, 1)[1].cuda().data  # move the computation in GPU
 
             accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)
             print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
@@ -77,7 +77,7 @@ for epoch in range(EPOCH):
 test_output = cnn(test_x[:10])
 
 # !!!!!!!! Change in here !!!!!!!!! #
-pred_y = torch.max(test_output, 1)[1].cuda().data.squeeze()  # move the computation in GPU
+pred_y = torch.max(test_output, 1)[1].cuda().data  # move the computation in GPU
 
 print(pred_y, 'prediction number')
 print(test_y[:10], 'real number')
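
For reference, the short standalone sketch below (not part of any tutorial file; shapes are illustrative only) shows the two behaviors these changes rely on: torch.max(output, 1)[1] already returns a 1-D tensor of class indices, so the removed .squeeze() calls are redundant for a full batch and would collapse a single-sample result to a 0-D scalar, and nn.Linear operates on the last dimension only while preserving the leading ones, which is what the added comment in 403_RNN_regressor.py points out.

# Hedged sketch, separate from the diff above; assumes a recent PyTorch build.
import torch
import torch.nn as nn

# 1) Class indices from torch.max are already 1-D, so .squeeze() adds nothing
#    and is harmful for a batch of one (it would drop the result to 0-D).
logits = torch.randn(4, 10)                 # (batch, n_classes), e.g. MNIST-style logits
pred_y = torch.max(logits, 1)[1]            # indices, shape (4,)
assert pred_y.shape == (4,)
assert torch.max(logits[:1], 1)[1].squeeze().dim() == 0   # squeezing a 1-sample batch gives a 0-D scalar

# 2) nn.Linear maps only the last dimension and keeps the leading dimensions,
#    so it can be applied directly to an (batch, time_step, hidden_size) RNN output.
linear = nn.Linear(32, 1)
r_out = torch.randn(1, 10, 32)              # (batch, time_step, hidden_size)
outs = linear(r_out)                        # (1, 10, 1) -- leading dims preserved
assert outs.shape == (1, 10, 1)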