fixed cleanup script
This commit is contained in:
parent
2db1f59d24
commit
1548db1731
File diff suppressed because one or more lines are too long
@@ -8,152 +8,154 @@ pdfMode = True
|
|||||||
|
|
||||||
# Command-line switch: passing any positive integer as the first argument
# enables "WWW mode", which disables the PDF-specific cleanup below.
# (pdfMode defaults to True earlier in this file.)
print(format(sys.argv))
if len(sys.argv) > 1:
    if int(sys.argv[1]) > 0:
        print("WWW mode on")
        pdfMode = False
||||||
|
|
||||||
# Notebooks to clean up, grouped by the framework each one uses.
fileList = [
    "diffphys-code-burgers.ipynb", "diffphys-code-ns.ipynb", "diffphys-code-sol.ipynb", "physicalloss-code.ipynb", # TF
    "bayesian-code.ipynb", "supervised-airfoils.ipynb", # pytorch
    "reinflearn-code.ipynb", # phiflow
    "physgrad-comparison.ipynb", # jax
    "physgrad-code.ipynb", # pip
]

#fileList = [ "physgrad-code.ipynb"] # debug, only 1 file
#fileList = [ "diffphys-code-sol.ipynb" ] # debug
|
||||||
|
|
||||||
|
|
||||||
# main: for each notebook, move the original aside as a .bak<N> backup,
# strip noisy warning lines and whole stderr output entries from the JSON,
# optionally shorten one very long data line (PDF mode), then write the
# cleaned notebook back under its original name.

for fnOut in fileList:
    if not os.path.isfile(fnOut):
        print("Error: "+fnOut+" not found!"); exit(1)

    # create backups: rename the original to <name>bak<N>, scanning for the
    # first free suffix so existing backups are never overwritten
    fn0 = fnOut[:-5] + "bak"
    fn = fn0 + "0"; cnt = 0
    while os.path.isfile(fn):
        #print("Error: "+fn+" already exists!"); exit(1)
        print("Warning: "+fn+" already exists!")
        fn = fn0 + format(cnt); cnt=cnt+1

    print("renaming "+fnOut+ " to "+fn )
    if os.path.isfile(fnOut):
        os.rename(fnOut, fn)
    if not os.path.isfile(fn):
        print("Error: "+fn+" missing!")
        exit(1)

    with open(fn) as file:
        d = json.load(file)

    # remove TF / pytorch warnings, build list of regular expressions to search for
    # double check, redundant with removing stderr cells (cf delE)
    res = []
    res.append( re.compile(r"WARNING:tensorflow:") )
    res.append( re.compile(r"UserWarning:") )
    res.append( re.compile(r"DeprecationWarning:") )
    res.append( re.compile(r"InsecureRequestWarning") ) # for https download
    res.append( re.compile(r"Building wheel") ) # phiflow install, also gives weird unicode characters
    res.append( re.compile(r"warnings.warn") ) # phiflow warnings
    res.append( re.compile(r"WARNING:absl") ) # jax warnings

    res.append( re.compile(r"ERROR: pip") ) # pip dependencies
    res.append( re.compile(r"requires imgaug") ) # pip dependencies
    res.append( re.compile(r"See the documentation of nn.Upsample") ) # pip dependencies

    # shorten data line: "0.008612174447657694, 0.02584669669548606, 0.043136357266407785"
    reD = re.compile(r"\[0.008612174447657694, 0.02584669669548606, 0.043136357266407785.+\]" )
    reDt = "[0.008612174447657694, 0.02584669669548606, 0.043136357266407785 ... ]"

    t="cells"
    okay = 0       # lines checked and kept
    deletes = 0    # modifications made (line removals, entry removals, substitutions)
    for i in range(len(d[t])):
        # only code cells carry outputs / long source data lines
        if d[t][i]["cell_type"]=="code":

            if pdfMode:
                # replace the long number string in the cell source (only for burgers)
                for j in range(len( d[t][i]["source"] )):
                    dsOut = reD.sub( reDt, d[t][i]["source"][j] )
                    d[t][i]["source"][j] = dsOut
                    deletes = deletes+1

            delE = [] # collect whole entries (sections) to delete

            for j in range(len( d[t][i]["outputs"] )):
                # search for error stderr cells: remove the entire output entry
                if d[t][i]["outputs"][j]["output_type"]=="stream":
                    if d[t][i]["outputs"][j]["name"]=="stderr":
                        print("stderr found! len text "+ format(len( d[t][i]["outputs"][j]["text"]) ) +", removing entry "+format(j) )
                        delE.append(j) # remove the whole stderr entry

                # stream outputs: drop individual lines matching any noise regex
                if d[t][i]["outputs"][j]["output_type"]=="stream":
                    dell = [] # collect lines to delete
                    for k in range( len( d[t][i]["outputs"][j]["text"] ) ):
                        nums = []; all_good = True
                        for rr in range(len(res)):
                            nums.append( res[rr].search( d[t][i]["outputs"][j]["text"][k] ) )
                            if nums[-1] is not None:
                                all_good = False # skip!
                        if all_good:
                            okay = okay+1
                        else: # delete line "dell"
                            deletes = deletes+1
                            dell.append(d[t][i]["outputs"][j]["text"][k])

                    for dl in dell:
                        d[t][i]["outputs"][j]["text"].remove(dl)

            # afterwards (potentially remove whole entries); pop in descending
            # index order so earlier pops don't shift the remaining indices
            if len(delE)>0:
                delE.sort(reverse=True)
                for de in delE:
                    d[t][i]["outputs"].pop(de) # remove array element
                    deletes+=1

    if deletes==0:
        # nothing changed: restore the backup under the original name
        print("Warning: Nothing found in "+fn+"!")
        if not os.path.isfile(fnOut):
            os.rename(fn, fnOut)
        else:
            print("Error, both files exist!?")
            exit(1)
    else:
        print(" ... writing "+fnOut )
        with open(fnOut,'w') as fileOut:
            json.dump(d,fileOut, indent=1, sort_keys=True)
|
17
make-pdf.sh
17
make-pdf.sh
@@ -10,7 +10,6 @@ echo
|
|||||||
echo WARNING - still requires one manual quit of first pdf/latex pass, use shift-x to quit
echo

# python binary used for the cleanup step below
PYT=python3

# warning - modifies notebooks!
||||||
@@ -18,9 +17,21 @@ ${PYT} json-cleanup-for-pdf.py
|
|||||||
|
|
||||||
# clean / remove _build dir ?

# GEN!
/Users/thuerey/Library/Python/3.9/bin/jupyter-book build .
xelatex book

exit # sufficient for newer jupyter book versions


# old "pre" GEN
#/Users/thuerey/Library/Python/3.7/bin/jupyter-book build . --builder pdflatex
#/Users/thuerey/Library/Python/3.9/bin/jupyter-book build . --builder pdflatex

# old cleanup
cd _build/latex
#mv book.pdf book-xetex.pdf # not necessary, failed anyway
||||||
|
Loading…
Reference in New Issue
Block a user