diff --git a/tutorial-contents-notebooks/203_activation.ipynb b/tutorial-contents-notebooks/203_activation.ipynb
index a53772b..75331f9 100644
--- a/tutorial-contents-notebooks/203_activation.ipynb
+++ b/tutorial-contents-notebooks/203_activation.ipynb
@@ -59,10 +59,18 @@
    "metadata": {
     "collapsed": true
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "C:\\Users\\morvanzhou\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\torch\\nn\\functional.py:1006: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n  warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\nC:\\Users\\morvanzhou\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\torch\\nn\\functional.py:995: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n  warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n"
+     ]
+    }
+   ],
    "source": [
     "y_relu = F.relu(x).data.numpy()\n",
-    "y_sigmoid = F.sigmoid(x).data.numpy()\n",
+    "y_sigmoid = torch.sigmoid(x).data.numpy()\n",
     "y_tanh = F.tanh(x).data.numpy()\n",
     "y_softplus = F.softplus(x).data.numpy()\n",
     "\n",
diff --git a/tutorial-contents-notebooks/306_optimizer.ipynb b/tutorial-contents-notebooks/306_optimizer.ipynb
index dc831dc..06c7742 100644
--- a/tutorial-contents-notebooks/306_optimizer.ipynb
+++ b/tutorial-contents-notebooks/306_optimizer.ipynb
@@ -249,7 +249,7 @@
     "            opt.zero_grad()                # clear gradients for next train\n",
     "            loss.backward()                # backpropagation, compute gradients\n",
     "            opt.step()                     # apply gradients\n",
-    "            l_his.append(loss.data[0])     # loss recoder\n",
+    "            l_his.append(loss.item())      # loss recorder\n",
     "\n",
     "labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']\n",
     "for i, l_his in enumerate(losses_his):\n",
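
For reference, a minimal sketch of the two API migrations this patch performs (the tensors x and loss below are illustrative, not taken from the notebooks): torch.sigmoid is the non-deprecated equivalent of F.sigmoid, and loss.item() replaces loss.data[0] for reading a scalar loss in PyTorch >= 0.4. Note the recorded stderr above still shows the nn.functional.tanh warning because the F.tanh call is left unchanged.

import torch
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=10)
# torch.sigmoid computes the same values as the deprecated F.sigmoid
assert torch.equal(torch.sigmoid(x), F.sigmoid(x))

# loss is a 0-dim tensor in PyTorch >= 0.4; .item() extracts the Python float,
# whereas the old loss.data[0] indexing raises an error on 0-dim tensors
loss = torch.nn.MSELoss()(torch.zeros(3), torch.ones(3))
print(loss.item())  # -> 1.0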