diff --git a/intro.md b/intro.md
index 57bd80c..08b950c 100644
--- a/intro.md
+++ b/intro.md
@@ -15,7 +15,7 @@ We are living in an era of rapid transformation. These methods have the potentia
 
 ```{note}
 _What's new in v0.3?_
-This latest edition takes things even further with a major new chapter on generative modeling, covering cutting-edge techniques like denoising, flow-matching, autoregressive learning, physics-integrated constraints, and diffusion-based graph networks. We've also introduced a dedicated section on neural architectures specifically designed for physics simulations. All code examples have been updated to leverage the latest frameworks.
+This latest edition adds a major new chapter on generative modeling, covering powerful techniques like denoising, flow-matching, autoregressive learning, physics-integrated constraints, and diffusion-based graph networks. We've also introduced a dedicated section on neural architectures specifically designed for physics simulations. All code examples have been updated to leverage the latest frameworks.
 ```
 
 ---
diff --git a/overview-optconv.md b/overview-optconv.md
index ba51031..27ee3fc 100644
--- a/overview-optconv.md
+++ b/overview-optconv.md
@@ -62,7 +62,7 @@ In several instances we'll make use of the fundamental theorem of calculus, repe
 $$f(x+\Delta) = f(x) + \int_0^1 \text{d}s ~ f'(x+s \Delta) \Delta \ . $$
 
 In addition, we'll make use of Lipschitz-continuity with constant $\mathcal L$:
-$|f(x+\Delta) + f(x)|\le \mathcal L \Delta$, and the well-known Cauchy-Schwartz inequality:
+$|f(x+\Delta) - f(x)|\le \mathcal L \Delta$, and the well-known Cauchy-Schwarz inequality:
 $ u^T v \le |u| \cdot |v| $.
 
 ## Newton's method
diff --git a/probmodels-diffusion.ipynb b/probmodels-diffusion.ipynb
index 91f9a99..a4c3314 100644
--- a/probmodels-diffusion.ipynb
+++ b/probmodels-diffusion.ipynb
@@ -31,7 +31,7 @@
     "Given a data point $x_0$, we can sample the noisy latent state $x_t$ from the forward Markov chain via\n",
     "\n",
     "$$\n",
-    "  q(x_t|x_0) = \\mathcal{N}(x_t, \\sqrt{\\overline{\\alpha}_t}x_0, (1-\\overline{\\alpha}_t)I)) ,\n",
+    "  q(x_t|x_0) = \\mathcal{N}(\\sqrt{\\overline{\\alpha}_t}x_0, (1-\\overline{\\alpha}_t)I) ,\n",
     "$$\n",
     "\n",
     "with the inverted weights $\\alpha_t = 1 - \\beta_t$ and alphas accumulated for time $t$ denoted by\n",
diff --git a/references.bib b/references.bib
index c13d128..61643f4 100644
--- a/references.bib
+++ b/references.bib
@@ -13,6 +13,14 @@
 
 @STRING{NeurIPS = "Advances in Neural Information Processing Systems"}
 
+@article{braun2025msbg,
+  title = {{Adaptive Phase-Field-FLIP for Very Large Scale Two-Phase Fluid Simulation}},
+  author = {Braun, Bernhard and Bender, Jan and Thuerey, Nils},
+  journal = {{ACM} Transactions on Graphics},
+  volume = {44 (3)},
+  year = {2025},
+  publisher = {ACM},
+}
 @inproceedings{lino2025dgn,
   title={Learning Distributions of Complex Fluid Simulations with Diffusion Graph Networks},