diff --git a/diffphys-code-sol.ipynb b/diffphys-code-sol.ipynb
index 3d738b2..c2d5061 100644
--- a/diffphys-code-sol.ipynb
+++ b/diffphys-code-sol.ipynb
@@ -784,7 +784,7 @@
    "source": [
     "## Training\n",
     "\n",
-    "For the training, we use a standard Adam optimizer, and run 15 epochs by default. This should be increased for the larger network or to obtain more accurate results. For longer training runs, it would also be beneficial to decrease the learning rate over the course of the epochs, but for simplicity, we'll keep `LR` constant here.\n",
+    "For the training, we use a standard Adam optimizer, and run 5 epochs by default. This is a fairly low number to keep the runtime low. If you have the resources, 10 or 15 epochs will yield more accurate results. For even longer training runs and larger networks, it would also be beneficial to decrease the learning rate over the course of the epochs, but for simplicity, we'll keep `LR` constant here.\n",
     "\n",
     "Optionally, this is also the right point to load a network state to resume training."
    ]
@@ -798,7 +798,7 @@
    "outputs": [],
    "source": [
     "LR = 1e-4\n",
-    "EPOCHS = 15\n",
+    "EPOCHS = 5\n",
     "\n",
     "opt = tf.keras.optimizers.Adam(learning_rate=LR) \n",
     "\n",
@@ -855,7 +855,6 @@
    ],
    "source": [
     "steps = 0\n",
-    "EPOCHS=5 # NT_DEBUG\n",
     "for j in range(EPOCHS): # training\n",
     " dataset.newEpoch(exclude_tail=msteps)\n",
     " if j
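
The updated markdown cell keeps `LR` constant for simplicity and only suggests decaying the learning rate for longer runs with larger networks. A minimal sketch of that suggestion, assuming the notebook's TF/Keras setup; `steps_per_epoch` is a hypothetical placeholder here, not a value from the notebook:

```python
import tensorflow as tf

LR = 1e-4
EPOCHS = 15            # a longer run, where decaying the learning rate pays off
steps_per_epoch = 100  # hypothetical; depends on the dataset and batch setup

# Exponential decay: the learning rate is multiplied by `decay_rate` every
# `decay_steps` optimizer updates (with staircase=True, once per epoch here).
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=LR,
    decay_steps=steps_per_epoch,
    decay_rate=0.9,
    staircase=True)

# A schedule can be passed to Adam in place of the constant learning rate.
opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
```

With `decay_rate=0.9`, the learning rate falls to roughly 0.9^15 ≈ 0.21 of its initial value over 15 epochs; the exact factor is a tuning choice.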