From 51f1c938f3c99ba6eea772b9493760680e39879d Mon Sep 17 00:00:00 2001
From: Morvan Zhou
Date: Sun, 18 Jun 2017 00:18:42 +1000
Subject: [PATCH] update

---
 tutorial-contents/401_CNN.py                 | 18 +++--------
 tutorial-contents/403_RNN_regressor.py       |  3 +-
 tutorial-contents/404_autoencoder.py         | 24 +++++----------
 tutorial-contents/406_GAN.py                 | 13 +++-----
 tutorial-contents/406_conditional_GAN.py     | 17 ++++-------
 tutorial-contents/503_dropout.py             |  4 +--
 tutorial-contents/504_batch_normalization.py | 32 +++++---------------
 7 files changed, 32 insertions(+), 79 deletions(-)

diff --git a/tutorial-contents/401_CNN.py b/tutorial-contents/401_CNN.py
index ed639fb..00775d0 100644
--- a/tutorial-contents/401_CNN.py
+++ b/tutorial-contents/401_CNN.py
@@ -85,26 +85,16 @@ loss_func = nn.CrossEntropyLoss() # the target label is no
 
 # following function (plot_with_labels) is for visualization, can be ignored if not interested
 from matplotlib import cm
-try:
-    from sklearn.manifold import TSNE
-    HAS_SK = True
-except:
-    HAS_SK = False
-    print('Please install sklearn for layer visualization')
+try: from sklearn.manifold import TSNE; HAS_SK = True
+except: HAS_SK = False; print('Please install sklearn for layer visualization')
 def plot_with_labels(lowDWeights, labels):
     plt.cla()
     X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
     for x, y, s in zip(X, Y, labels):
-        c = cm.rainbow(int(255 * s / 9))
-        plt.text(x, y, s, backgroundcolor=c, fontsize=9)
-    plt.xlim(X.min(), X.max())
-    plt.ylim(Y.min(), Y.max())
-    plt.title('Visualize last layer')
-    plt.show()
-    plt.pause(0.01)
+        c = cm.rainbow(int(255 * s / 9)); plt.text(x, y, s, backgroundcolor=c, fontsize=9)
+    plt.xlim(X.min(), X.max()); plt.ylim(Y.min(), Y.max()); plt.title('Visualize last layer'); plt.show(); plt.pause(0.01)
 
 plt.ion()
-
 # training and testing
 for epoch in range(EPOCH):
     for step, (x, y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader
diff --git a/tutorial-contents/403_RNN_regressor.py b/tutorial-contents/403_RNN_regressor.py
index 76dab54..7a0d018 100644
--- a/tutorial-contents/403_RNN_regressor.py
+++ b/tutorial-contents/403_RNN_regressor.py
@@ -87,8 +87,7 @@ for step in range(60):
     # plotting
     plt.plot(steps, y_np.flatten(), 'r-')
     plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
-    plt.draw()
-    plt.pause(0.05)
+    plt.draw(); plt.pause(0.05)
 
 plt.ioff()
 plt.show()
diff --git a/tutorial-contents/404_autoencoder.py b/tutorial-contents/404_autoencoder.py
index b229685..877dd86 100644
--- a/tutorial-contents/404_autoencoder.py
+++ b/tutorial-contents/404_autoencoder.py
@@ -89,9 +89,7 @@ plt.ion() # continuously plot
 # original data (first row) for viewing
 view_data = Variable(train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.)
 for i in range(N_TEST_IMG):
-    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
-    a[0][i].set_xticks(())
-    a[0][i].set_yticks(())
+    a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray'); a[0][i].set_xticks(()); a[0][i].set_yticks(())
 
 for epoch in range(EPOCH):
     for step, (x, y) in enumerate(train_loader):
@@ -114,10 +112,8 @@ for epoch in range(EPOCH):
             for i in range(N_TEST_IMG):
                 a[1][i].clear()
                 a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
-                a[1][i].set_xticks(())
-                a[1][i].set_yticks(())
-            plt.draw()
-            plt.pause(0.05)
+                a[1][i].set_xticks(()); a[1][i].set_yticks(())
+            plt.draw(); plt.pause(0.05)
 
 plt.ioff()
 plt.show()
@@ -125,16 +121,10 @@ plt.show()
 # visualize in 3D plot
 view_data = Variable(train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.)
 encoded_data, _ = autoencoder(view_data)
-fig = plt.figure(2)
-ax = Axes3D(fig)
-X = encoded_data.data[:, 0].numpy()
-Y = encoded_data.data[:, 1].numpy()
-Z = encoded_data.data[:, 2].numpy()
+fig = plt.figure(2); ax = Axes3D(fig)
+X, Y, Z = encoded_data.data[:, 0].numpy(), encoded_data.data[:, 1].numpy(), encoded_data.data[:, 2].numpy()
 values = train_data.train_labels[:200].numpy()
 for x, y, z, s in zip(X, Y, Z, values):
-    c = cm.rainbow(int(255*s/9))
-    ax.text(x, y, z, s, backgroundcolor=c)
-ax.set_xlim(X.min(), X.max())
-ax.set_ylim(Y.min(), Y.max())
-ax.set_zlim(Z.min(), Z.max())
+    c = cm.rainbow(int(255*s/9)); ax.text(x, y, z, s, backgroundcolor=c)
+ax.set_xlim(X.min(), X.max()); ax.set_ylim(Y.min(), Y.max()); ax.set_zlim(Z.min(), Z.max())
 plt.show()
diff --git a/tutorial-contents/406_GAN.py b/tutorial-contents/406_GAN.py
index 1f57bdc..22ae2a2 100644
--- a/tutorial-contents/406_GAN.py
+++ b/tutorial-contents/406_GAN.py
@@ -25,10 +25,10 @@ ART_COMPONENTS = 15 # it could be total point G can draw in the canvas
 PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
 
 # show our beautiful painting range
-plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
-plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
-plt.legend(loc='upper right')
-plt.show()
+# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
+# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
+# plt.legend(loc='upper right')
+# plt.show()
 
 
 def artist_works(): # painting from the famous artist (real target)
@@ -81,10 +81,7 @@ for step in range(10000):
         plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
         plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 15})
         plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 15})
-        plt.ylim((0, 3))
-        plt.legend(loc='upper right', fontsize=12)
-        plt.draw()
-        plt.pause(0.01)
+        plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.draw();plt.pause(0.01)
 
 plt.ioff()
 plt.show()
\ No newline at end of file
diff --git a/tutorial-contents/406_conditional_GAN.py b/tutorial-contents/406_conditional_GAN.py
index 8e13ff5..b86030a 100644
--- a/tutorial-contents/406_conditional_GAN.py
+++ b/tutorial-contents/406_conditional_GAN.py
@@ -25,10 +25,10 @@ ART_COMPONENTS = 15 # it could be total point G can draw in the canvas
 PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
 
 # show our beautiful painting range
-plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
-plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
-plt.legend(loc='upper right')
-plt.show()
+# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
+# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
+# plt.legend(loc='upper right')
+# plt.show()
 
 
 def artist_works_with_labels(): # painting from the famous artist (real target)
@@ -91,10 +91,7 @@ for step in range(10000):
         plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 15})
         plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 15})
         plt.text(-.5, 1.7, 'Class = %i' % int(labels.data[0, 0]), fontdict={'size': 15})
-        plt.ylim((0, 3))
-        plt.legend(loc='upper right', fontsize=12)
-        plt.draw()
-        plt.pause(0.1)
+        plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.draw();plt.pause(0.1)
 
 plt.ioff()
 plt.show()
@@ -107,6 +104,4 @@ G_paintings = G(G_inputs)
 plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='G painting for upper class',)
 plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + bound[1], c='#74BCFF', lw=3, label='upper bound (class 1)')
 plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + bound[0], c='#FF9359', lw=3, label='lower bound (class 1)')
-plt.ylim((0, 3))
-plt.legend(loc='upper right', fontsize=12)
-plt.show()
\ No newline at end of file
+plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.show()
\ No newline at end of file
diff --git a/tutorial-contents/503_dropout.py b/tutorial-contents/503_dropout.py
index 5608392..88b0351 100644
--- a/tutorial-contents/503_dropout.py
+++ b/tutorial-contents/503_dropout.py
@@ -87,9 +87,7 @@ for t in range(500):
         plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
         plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).data[0], fontdict={'size': 20, 'color': 'red'})
         plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).data[0], fontdict={'size': 20, 'color': 'blue'})
-        plt.legend(loc='upper left')
-        plt.ylim((-2.5, 2.5))
-        plt.pause(0.1)
+        plt.legend(loc='upper left'); plt.ylim((-2.5, 2.5));plt.pause(0.1)
 
         # change back to train mode
         net_overfitting.train()
diff --git a/tutorial-contents/504_batch_normalization.py b/tutorial-contents/504_batch_normalization.py
index 2ca061b..4bcf01d 100644
--- a/tutorial-contents/504_batch_normalization.py
+++ b/tutorial-contents/504_batch_normalization.py
@@ -99,30 +99,17 @@ loss_func = torch.nn.MSELoss()
 f, axs = plt.subplots(4, N_HIDDEN+1, figsize=(10, 5))
 plt.ion() # something about plotting
 plt.show()
-
 def plot_histogram(l_in, l_in_bn, pre_ac, pre_ac_bn):
     for i, (ax_pa, ax_pa_bn, ax, ax_bn) in enumerate(zip(axs[0, :], axs[1, :], axs[2, :], axs[3, :])):
         [a.clear() for a in [ax_pa, ax_pa_bn, ax, ax_bn]]
-        if i == 0:
-            p_range = (-7, 10)
-            the_range = (-7, 10)
-        else:
-            p_range = (-4, 4)
-            the_range = (-1, 1)
+        if i == 0: p_range = (-7, 10);the_range = (-7, 10)
+        else:p_range = (-4, 4);the_range = (-1, 1)
         ax_pa.set_title('L' + str(i))
-        ax_pa.hist(pre_ac[i].data.numpy().ravel(), bins=10, range=p_range, color='#FF9359', alpha=0.5)
-        ax_pa_bn.hist(pre_ac_bn[i].data.numpy().ravel(), bins=10, range=p_range, color='#74BCFF', alpha=0.5)
-        ax.hist(l_in[i].data.numpy().ravel(), bins=10, range=the_range, color='#FF9359')
-        ax_bn.hist(l_in_bn[i].data.numpy().ravel(), bins=10, range=the_range, color='#74BCFF')
-        for a in [ax_pa, ax, ax_pa_bn, ax_bn]:
-            a.set_yticks(())
-            a.set_xticks(())
-        ax_pa_bn.set_xticks(p_range)
-        ax_bn.set_xticks(the_range)
-        axs[0, 0].set_ylabel('PreAct')
-        axs[1, 0].set_ylabel('BN PreAct')
-        axs[2, 0].set_ylabel('Act')
-        axs[3, 0].set_ylabel('BN Act')
+        ax_pa.hist(pre_ac[i].data.numpy().ravel(), bins=10, range=p_range, color='#FF9359', alpha=0.5);ax_pa_bn.hist(pre_ac_bn[i].data.numpy().ravel(), bins=10, range=p_range, color='#74BCFF', alpha=0.5)
+        ax.hist(l_in[i].data.numpy().ravel(), bins=10, range=the_range, color='#FF9359');ax_bn.hist(l_in_bn[i].data.numpy().ravel(), bins=10, range=the_range, color='#74BCFF')
+        for a in [ax_pa, ax, ax_pa_bn, ax_bn]: a.set_yticks(());a.set_xticks(())
+        ax_pa_bn.set_xticks(p_range);ax_bn.set_xticks(the_range)
+        axs[0, 0].set_ylabel('PreAct');axs[1, 0].set_ylabel('BN PreAct');axs[2, 0].set_ylabel('Act');axs[3, 0].set_ylabel('BN Act')
     plt.pause(0.01)
 
 # training
@@ -155,10 +142,7 @@ plt.ioff()
 plt.figure(2)
 plt.plot(losses[0], c='#FF9359', lw=3, label='Original')
 plt.plot(losses[1], c='#74BCFF', lw=3, label='Batch Normalization')
-plt.xlabel('step')
-plt.ylabel('test loss')
-plt.ylim((0, 2000))
-plt.legend(loc='best')
+plt.xlabel('step');plt.ylabel('test loss');plt.ylim((0, 2000));plt.legend(loc='best')
 
 # evaluation
 # set net to eval mode to freeze the parameters in batch normalization layers
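
The recurring idiom this commit compresses onto single lines is matplotlib's interactive-plot loop: plt.ion() before training, plt.draw(); plt.pause() inside the loop to refresh the figure, and plt.ioff(); plt.show() once training ends. Below is a minimal, self-contained sketch of that idiom; it is not part of the patch, and the sine-curve data and step count are invented purely for illustration.

# Sketch only (not from the tutorials): the interactive-plot idiom used throughout this commit.
import numpy as np
import matplotlib.pyplot as plt

plt.ion()                                   # interactive mode: figures update without blocking
x = np.linspace(-1, 1, 100)
for step in range(50):
    y = np.sin(3 * x) + np.random.normal(0., 0.1, x.shape)   # stand-in for a model's prediction
    plt.cla()                               # clear the axes, then redraw this step's curve
    plt.plot(x, y, 'b-', lw=2)
    plt.ylim((-2, 2)); plt.title('step %i' % step)
    plt.draw(); plt.pause(0.05)             # flush the canvas and give the GUI time to repaint
plt.ioff()                                  # back to blocking behaviour
plt.show()                                  # keep the final figure window open

Apart from the two GAN scripts, where the preview plot of the painting range is commented out, the semicolon-joined lines in this diff are behaviourally identical to the multi-line statements they replace; only the formatting of the tutorial code changes.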