small SoL (Solver-in-the-Loop) cleanup

This commit is contained in:
NT 2021-07-28 20:39:38 +02:00
parent db3a18977e
commit 521f3f3604


@@ -170,7 +170,7 @@
 },
 "outputs": [],
 "source": [
-"# ??? !pip install --upgrade --quiet phiflow\n",
+"!pip install --upgrade --quiet phiflow\n",
 "\n",
 "from phi.tf.flow import *\n",
 "import tensorflow as tf\n",
@@ -369,13 +369,9 @@
 "source": [
 "\n",
 "def to_keras(dens_vel_grid_array, ext_const_channel):\n",
-"    # drop the unused edges of the staggered velocity grid making its dim same to the centered grid's\n",
+"    # align the sides of the staggered velocity grid, making its size the same as the centered grid's\n",
 "    return math.stack(\n",
 "        [\n",
-"            #dens_vel_grid_array[1].vector['x'].x[:-1].values, # u\n",
-"            # ? tf.pad( dens_vel_grid_array[1].vector['x'].values, [(0,0), (0,0), (0,1)] ), \n",
-"            # ? tf.pad( dens_vel_grid_array[1].native(['batch', 'y', 'x']), [(0,0), (0,0), (0,1)] ), \n",
-"            # works! tf.pad( dens_vel_grid_array[1].vector['x'].values.native(['batch', 'y', 'x']), [(0,0), (0,0), (0,1)] ), \n",
 "            math.pad( dens_vel_grid_array[1].vector['x'].values, {'x':(0,1)} , math.extrapolation.ZERO),\n",
 "            dens_vel_grid_array[1].vector['y'].y[:-1].values, # v\n",
 "            math.ones(dens_vel_grid_array[0].shape)*ext_const_channel # Re\n",
@@ -388,8 +384,6 @@
 "        math.stack(\n",
 "            [\n",
 "                math.tensor(tf.pad(tf_tensor[..., 1], [(0,0), (0,1), (0,0)]), math.batch('batch'), math.spatial('y, x')), # v\n",
-"                #math.tensor(tf.pad(tf_tensor[..., 0], [(0,0), (0,0), (0,1)]), math.batch('batch'), math.spatial('y, x')), # u\n",
-"                # NT_DEBUG check\n",
 "                math.tensor( tf_tensor[...,:-1, 0], math.batch('batch'), math.spatial('y, x')), # u\n",
 "            ], math.channel('vector')\n",
 "        )\n",
@@ -447,9 +441,7 @@
 "            self.extConstChannelPerSim = { self.dataSims[i]:[ReNrs[i]] for i in range(num_sims) }\n",
 "        else:\n",
 "            self.dataSims = ['karman-fdt-hires-testset/sim_%06d'%i for i in range(num_sims) ]\n",
-"            #ReNrs = [240000.0, 480000.0, 960000.0, 1920000.0, 3840000.0] \n",
-"            #ReNrs = [120000.0, 240000.0, 480000.0, 960000.0, 1920000.0, 3840000.0, 7680000.0] # new extende\n",
-"            ReNrs = [120000.0, 480000.0, 1920000.0, 7680000.0] # new reduced to 4\n",
+"            ReNrs = [120000.0, 480000.0, 1920000.0, 7680000.0] \n",
 "            self.extConstChannelPerSim = { self.dataSims[i]:[ReNrs[i]] for i in range(num_sims) }\n",
 "\n",
 "        self.dataFrames = [ np.arange(num_frames) for _ in self.dataSims ] \n",
@@ -593,7 +585,7 @@
 "batch_size = 3\n",
 "simsteps = 500\n",
 "\n",
-"dataset = Dataset( data_preloaded=data_preloaded, num_frames=simsteps, num_sims=nsims, batch_size=batch_size )\n"
+"dataset = Dataset( data_preloaded=data_preloaded, num_frames=simsteps, num_sims=nsims, batch_size=batch_size )"
 ]
 },
 {
@@ -694,7 +686,6 @@
 "source": [
 "def training_step(dens_gt, vel_gt, Re, i_step):\n",
 "    with tf.GradientTape() as tape:\n",
-"        #prediction, correction = [], [] # predicted states with correction, inferred velocity corrections\n",
 "        prediction, correction = [ [dens_gt[0],vel_gt[0]] ], [0] # predicted states with correction, inferred velocity corrections\n",
 "\n",
 "        for i in range(msteps):\n",
@@ -1325,8 +1316,6 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"---\n",
-"\n",
 "## Next steps\n",
 "\n",
 "* Modify the training to further reduce the training error. With the _medium_ network you should be able to get the loss down to around 1.\n",