diff --git a/tutorial-contents/301_regression.py b/tutorial-contents/301_regression.py
index afed23e..9745082 100644
--- a/tutorial-contents/301_regression.py
+++ b/tutorial-contents/301_regression.py
@@ -41,7 +41,6 @@ optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
 loss_func = torch.nn.MSELoss()  # this is for regression mean squared loss
 
 plt.ion()   # something about plotting
-plt.show()
 
 for t in range(100):
     prediction = net(x)     # input x and predict based on x
diff --git a/tutorial-contents/302_classification.py b/tutorial-contents/302_classification.py
index 1381311..98b8653 100644
--- a/tutorial-contents/302_classification.py
+++ b/tutorial-contents/302_classification.py
@@ -47,7 +47,6 @@ optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
 loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT an one-hotted
 
 plt.ion()   # something about plotting
-plt.show()
 
 for t in range(100):
     out = net(x)     # input x and predict based on x
diff --git a/tutorial-contents/303_build_nn_quickly.py b/tutorial-contents/303_build_nn_quickly.py
index 8e72e7c..8c3c788 100644
--- a/tutorial-contents/303_build_nn_quickly.py
+++ b/tutorial-contents/303_build_nn_quickly.py
@@ -32,4 +32,18 @@ net2 = torch.nn.Sequential(
 )
 
 print(net1)     # net1 architecture
-print(net2)     # net2 architecture
\ No newline at end of file
+"""
+Net (
+  (hidden): Linear (1 -> 10)
+  (predict): Linear (10 -> 1)
+)
+"""
+
+print(net2)     # net2 architecture
+"""
+Sequential (
+  (0): Linear (1 -> 10)
+  (1): ReLU ()
+  (2): Linear (10 -> 1)
+)
+"""
\ No newline at end of file
diff --git a/tutorial-contents/403_RNN_regressor.py b/tutorial-contents/403_RNN_regressor.py
index dd78217..76dab54 100644
--- a/tutorial-contents/403_RNN_regressor.py
+++ b/tutorial-contents/403_RNN_regressor.py
@@ -64,7 +64,6 @@ h_state = None      # for initial hidden state
 
 plt.figure(1, figsize=(12, 5))
 plt.ion()           # continuously plot
-plt.show()
 
 for step in range(60):
     start, end = step * np.pi, (step+1)*np.pi   # time range
diff --git a/tutorial-contents/404_autoencoder.py b/tutorial-contents/404_autoencoder.py
index f9dc956..b229685 100644
--- a/tutorial-contents/404_autoencoder.py
+++ b/tutorial-contents/404_autoencoder.py
@@ -85,7 +85,6 @@ loss_func = nn.MSELoss()
 # initialize figure
 f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
 plt.ion()   # continuously plot
-plt.show()
 
 # original data (first row) for viewing
 view_data = Variable(train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.)
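Note on the 303_build_nn_quickly.py hunk: the docstrings added after each print record the architecture summaries the script emits, so readers can see the two builds are equivalent without running it. A minimal sketch of what produces that output, reconstructed from the tutorial's own Net class (not shown in this hunk); the recorded format is from the old torch 0.1.x these tutorials target, and newer PyTorch releases print e.g. "Linear(in_features=1, out_features=10, bias=True)" instead:

import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        return self.predict(x)          # linear output

net1 = Net(1, 10, 1)
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
)
print(net1)     # submodules named after the attributes set in __init__
print(net2)     # Sequential numbers its layers 0, 1, 2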
diff --git a/tutorial-contents/406_GAN.py b/tutorial-contents/406_GAN.py
index 2489d95..1f57bdc 100644
--- a/tutorial-contents/406_GAN.py
+++ b/tutorial-contents/406_GAN.py
@@ -54,7 +54,7 @@ opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
 opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
 
 plt.ion()   # something about continuous plotting
-plt.show()
+
 for step in range(10000):
     artist_paintings = artist_works()           # real painting from artist
     G_ideas = Variable(torch.randn(BATCH_SIZE, N_IDEAS))    # random ideas
diff --git a/tutorial-contents/406_conditional_GAN.py b/tutorial-contents/406_conditional_GAN.py
index d66e069..8e13ff5 100644
--- a/tutorial-contents/406_conditional_GAN.py
+++ b/tutorial-contents/406_conditional_GAN.py
@@ -57,7 +57,7 @@ opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
 opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
 
 plt.ion()   # something about continuous plotting
-plt.show()
+
 for step in range(10000):
     artist_paintings, labels = artist_works_with_labels()   # real painting, label from artist
     G_ideas = Variable(torch.randn(BATCH_SIZE, N_IDEAS))    # random ideas
diff --git a/tutorial-contents/501_why_torch_dynamic_graph.py b/tutorial-contents/501_why_torch_dynamic_graph.py
index 90b4f3e..761bc54 100644
--- a/tutorial-contents/501_why_torch_dynamic_graph.py
+++ b/tutorial-contents/501_why_torch_dynamic_graph.py
@@ -54,7 +54,6 @@ h_state = None      # for initial hidden state
 
 plt.figure(1, figsize=(12, 5))
 plt.ion()   # continuously plot
-plt.show()
 
 ######################## Below is different #########################
 
diff --git a/tutorial-contents/503_dropout.py b/tutorial-contents/503_dropout.py
index 4f4f23c..5608392 100644
--- a/tutorial-contents/503_dropout.py
+++ b/tutorial-contents/503_dropout.py
@@ -58,7 +58,6 @@ optimizer_drop = torch.optim.Adam(net_dropped.parameters(), lr=0.01)
 loss_func = torch.nn.MSELoss()
 
 plt.ion()   # something about plotting
-plt.show()
 
 for t in range(500):
     pred_ofit = net_overfitting(x)
diff --git a/tutorial-contents/504_batch_normalization.py b/tutorial-contents/504_batch_normalization.py
index 1a19100..2ca061b 100644
--- a/tutorial-contents/504_batch_normalization.py
+++ b/tutorial-contents/504_batch_normalization.py
@@ -48,7 +48,6 @@ train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shu
 # show data
 plt.scatter(train_x.numpy(), train_y.numpy(), c='#FF9359', s=50, alpha=0.2, label='train')
 plt.legend(loc='upper left')
-plt.show()
 
 class Net(nn.Module):
     def __init__(self, batch_normalization=False):
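The recurring change in every file above is dropping the plt.show() that sat between plt.ion() and the training loop. Once interactive mode is on, that call is redundant, and on some matplotlib backends it blocks until the window is closed, so the script never reaches the loop. A minimal sketch of the idiom the scripts rely on; the loop body is a stand-in for each tutorial's own plotting, and the closing ioff/show pair is assumed to match how these scripts finish:

import matplotlib.pyplot as plt
import numpy as np

plt.ion()                                 # interactive mode: draw without blocking
x = np.linspace(0, 2 * np.pi, 100)
for t in range(100):
    plt.cla()                             # clear the previous frame
    plt.plot(x, np.sin(x + t / 10.))      # stand-in for the tutorial's plot calls
    plt.pause(0.05)                       # flush GUI events and redraw
plt.ioff()                                # back to blocking mode
plt.show()                                # keep the final figure on screen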