Advanced Introduction to C++, Scientific Computing and Machine Learning




Claudius Gros, WS 2019/20

Institut für theoretische Physik
Goethe-University Frankfurt a.M.

Numerical Integration of Differential Equations

first vs. second order differential equations

classical dynamics - second order

$$ \frac{d^2}{dt^2}\,\Big(\,m\,x(t)\,\Big) \ =\ F(t,x,\dot x), \qquad \ddot x(t) \ =\ f(t,x,\dot x) $$ $$ \begin{array}{rcl} \dot{x} & =& y \\ \dot{y} & = & f(t,x,y) \end{array} $$

first order differential equations

$$ \dot y \ =\ f(t,y) $$

Euler Integration

initial condition

$$ \dot y \ =\ f(t,y),\qquad y(t_0) \ =\ y_0, \qquad t_n \ =\ t_0\,+\,n\,h, \qquad n=0,\, 1,\,2,\, \dots $$

one step algorithm

$$ \frac{y(t+h)-y(t)}{h} \ \approx\ \dot y \ =\ f(t,y),\qquad y(t+h) \ \approx\ y(t) \,+\,h\,f(t,y) $$

$$ y(t_{n+1}) \ \approx\ y(t_n) \,+\,h\,f(t_n,y_n) $$

convergence considerations

Taylor expansion to second order

$$ y(t+h) \ \approx\ y(t)\,+\,h\,\dot y(t)\,+\,\frac{h^2}{2}\,\ddot y(t), \qquad\quad \dot y = f(t,y) $$

with

$$ \ddot y \ =\ {d\over dt} \dot y \ =\ {d\over dt} f(t,y) \ =\ f_t\,+\,f_y\,\dot y $$

and hence

$$ y(t+h)\,-\,y(t) \ =\ \Delta y\ \approx\ h\,\dot y\,+\,\frac{h^2}{2}\,\Big(\,f_t\,+\,f_y\,f\,\big) $$

accuracy to second order

$$ \Delta y\ =\ h\,f\,+\,\frac{h^2}{2}\,\Big(\,f_t\,+\,f_y\,f\,\big) $$

Runge-Kutta Ansatz

$$ \Delta y(t)\ =\ a\,f(t,y)\,+\,b\,f\Big(t+\alpha h,y+\beta h f\Big) $$

Runge-Kutta algorithm - derivation

$$ \begin{array}{lcr} \Delta y(t)& =& a\,f(t,y)\,+\,b\,f\Big(t+\alpha h,y+\beta h f\Big) \\ & =& a\,f\,+\,b\,f\,+\,b\,\Big(f_t \alpha\,+\,f_y f\beta\Big)\,h \\ & :=& h\,f\,+\,\frac{h^2}{2}\,\Big(\,f_t\,+\,f_y\,f\,\big) \end{array} $$ $$ a+b\ =\ h,\qquad\quad \alpha\, b \ =\ \frac{h}{2} \qquad\quad \beta\, b \ =\ \frac{h}{2} $$

Runge-Kutta algorithm

simple Runge-Kutta

$$ y_{n+1}\ =\ y_n\,+\,\frac{h}{2}\ \Big[\, f(t_n,y_n)\,+\,f\big(t_n+h,y_n+hf(t_n,y_n)\big)\, \Big] $$

classical Runge-Kutta algorithm

$$ \Delta y\ =\ \frac{h}{6}\ \Big[\, k_1\,+\,2k_2\,+\,2k_3\,+\,k_4\, \Big] $$

with

$$ k_1 = f(t,y)\,,\qquad k_2 = f\left(t+\frac{h}{2},y+\frac{hk_1}{2}\right) $$

and

$$ k_3 = f\left(t+\frac{h}{2},y+\frac{hk_2}{2}\right)\,,\qquad k_4 = f(t+h,y+hk_3) $$

example: Kepler problem

Newton's equation of motion

$$ m\dot{\vec v}\ =\ -\frac{GMm}{|\vec x|^3}\, \vec x , \qquad\quad \dot{\vec v}\ =\ -\frac{g}{|\vec x|^\alpha}\, \vec x , \qquad\quad \alpha = 3 $$

first order differential equation

$$ \vec y \ =\ (\vec x, \vec v) \ =\ (x,y,\dot x,\dot y) , \qquad\quad \dot{\vec y} \ =\ \vec f(\vec y) $$ $$ \vec f(\vec y) \ =\ \big(\vec v, -g\vec x/|\vec x|^\alpha \big) , \qquad\quad |\vec x| = \sqrt{x^2+y^2} $$

integration of Kepler problem

Runge-Kutta for vector variables

$$ \dot{\vec y} \ =\ \vec f(t,\vec y) $$ $$ \Delta\vec y\ =\ \frac{h}{2}\ \Big(\, \vec f(t,\vec y)\,+\,\vec f\big(t+h,\vec y+h\vec f(t,\vec y)\big)\, \Big) $$

exercise: integration


example Runge Kutta program

van der Pol oscillator
$$ \ddot{x}\,-\,\epsilon(1-x^2)\dot{x}\,+\,x \ =\ 0 \qquad\qquad \begin{array}{|crcl} &\dot{x} &=& v\\ &\dot{v} &=& \epsilon(1-x^2)v -x \end{array} \label{chaos_van_der_pol} $$
Copy Copy to clipboard
Download Download
#include <iostream>
#include <math.h>      // for pow
#include <fstream>     // file streams
using namespace std;
 
const int       nVar  = 2;                // number of dynamical variables (x and v)
const int nTimeSteps  = 9000;             // number of time steps
const double       h  = 0.01;             // Delta t (integration step size)
double vars[nVar];                        // the dynamical variables: vars[0]=x, vars[1]=v
 
// --- 
// --- evaluate the i-th differential equation
// ---
// Right-hand side of the i-th first-order equation of the
// van der Pol oscillator (epsilon = 0.2), evaluated at time t, state x[].
// Slots 2 and 3 are unused placeholders and return zero.
double evaluate(int i, double t, double x[])
 {
 if (i==0)
   return x[1];                                 // x' = v
 if (i==1)
   {
   double damping = 0.2*(1.0-x[0]*x[0]);        // epsilon*(1-x*x)
   return -x[0]+damping*x[1];                   // v' = -x+0.2*(1-x*x)*v
   }
 if ((i==2)||(i==3))
   return 0.0;                                  // unused slots
 std::cout << "throw?  problem in evaluate" << std::endl;
 return 0.0;
 } // end of evaluate()
 
// ---
// --- solve via Runge Kutta; t = last time value; h = time increment
// ---
 
// One classical fourth-order Runge-Kutta step applied in place to the
// global state vars[]:  vars <- vars + (h/6)(k1 + 2 k2 + 2 k3 + k4).
// t = time at the start of the step, h = step size.
void solve(double t, double h)
  {
  double inp[nVar];                     // trial state fed to the diffeqs
  double k1[nVar], k2[nVar], k3[nVar], k4[nVar];
  for (int i=0; i<nVar; i++)            // slope at the start of the interval
    k1[i] = evaluate(i,t,vars);
  for (int i=0; i<nVar; i++)
    inp[i] = vars[i]+k1[i]*h/2;
  for (int i=0; i<nVar; i++)            // first midpoint slope
    k2[i] = evaluate(i,t+h/2,inp);
  for (int i=0; i<nVar; i++)
    inp[i] = vars[i]+k2[i]*h/2;
  for (int i=0; i<nVar; i++)            // second midpoint slope
    k3[i] = evaluate(i,t+h/2,inp);
  for (int i=0; i<nVar; i++)
    inp[i] = vars[i]+k3[i]*h;
  for (int i=0; i<nVar; i++)            // slope at the end of the interval
    k4[i] = evaluate(i,t+h,inp);
  for (int i=0; i<nVar; i++)            // Simpson-like weighted average
    vars[i] = vars[i]+(k1[i]+2*k2[i]+2*k3[i]+k4[i])*h/6;
  } //end of solve
 
// ---
// --- main
// ---
// Driver: integrates the van der Pol oscillator with RK4 over
// nTimeSteps steps of size h, writing "t x v" per line to RungeKutta.out.
// Returns 0 on success (the original returned 1, which signals failure
// to the calling shell).
int main() 
{
 for (int i=0; i<nVar; i++)       // initial conditions
    vars[i] = 0.0;
  vars[0] = 0.1;                  // x(0)
  vars[1] = 0.1;                  // v(0)
//
 ofstream myOutput;
 myOutput.open("RungeKutta.out",ios::out);
 myOutput.setf(ios::fixed,ios::floatfield);  // for formatted output 
 myOutput.precision(3);                      // floating point precision
 for (int iTimes=0;iTimes<nTimeSteps;iTimes++)
   {
   double t = h*iTimes;
   solve(t,h);                    // one RK4 step: vars[] now holds y(t+h)
   myOutput << t << " " << vars[0] << " " << vars[1] << endl;
   }
 myOutput.close();
 return 0;                        // report success to the environment
 }  // end of main()

linear multistep methods



using previous steps

$$ \begin{array}{rcl} \dot y & =& f(t,y) \\ f_j &=& f(t_j,y_j), \qquad\quad t=t_0,\,t_1,\dots,\,t_n \end{array} $$

leap frog scheme

ansatz for three variables

$$ y_n \ =\ \alpha_1 y_{n-1} + \alpha_2 y_{n-2} + h\beta_1 f_{n-1} $$
constant case
$$ y(t)=1, \qquad\quad \dot y=0=f, \qquad\quad \fbox{$\displaystyle\phantom{\large|} 1\ =\ \alpha_1+\alpha_2\phantom{\large|}$} \qquad\quad (*) $$
linear case
$$ y(t)=t, \qquad\quad \dot y=1=f, \qquad\quad \fbox{$\displaystyle\phantom{\large|} t_n\ =\ \alpha_1(t_n-h)+\alpha_2(t_n-2h)+h\beta_1\phantom{\large|}$} \qquad\quad (**) $$
quadratic case
$$ y(t)=t^2, \qquad\quad \dot y=2t=f, \qquad\quad \fbox{$\displaystyle\phantom{\large|} t_n^2\ =\ \alpha_1(t_n-h)^2+\alpha_2(t_n-2h)^2+2h\beta_1(t_n-h)\phantom{\large|}$} \qquad\quad (***) $$
three equations for three unknowns: $\hspace{2ex} (*),\ (**),\ (***)$
$$ \alpha_1 \ =\ 0, \qquad\quad \alpha_2\ =\ 1, \qquad\quad \beta_1\ =\ 2 $$

leap frog recursion

integration as recursion

$$ \mathbf{v}_{n+1} \ =\ A \mathbf{v}_{n} \qquad\quad \mathbf{v}_{n}= \left(\begin{array}{c} y_{n} \\ y_{n-1} \end{array}\right) \qquad\quad A= \left(\begin{array}{cc} 2\kappa h & 1 \\ 1 & 0 \end{array}\right) $$ $$ \mathbf{v}_{n} \ =\ A^n \mathbf{v}_{0} $$

leap frog - stability analysis

$$ \dot y =\ \kappa y,\qquad\quad y(t)\ =\ y_0 e^{\kappa t} $$ $$ \mathbf{v}_{0}=c_+\hat{e}_+ +c_-\hat{e}_- \qquad\quad A\hat{e}_\pm = \lambda_\pm\hat{e}_\pm $$ $$ \fbox{$\displaystyle\phantom{\big|}\displaystyle \mathbf{v}_{n} \ =\ A^n \mathbf{v}_{0} \ =\ c_+(\lambda_+)^n\hat{e}_+ + c_-(\lambda_-)^n\hat{e}_- \phantom{\big|}$} $$

instability of true solution

$$ \fbox{$\displaystyle\phantom{\large|}\qquad \left|\lambda_{-}\right|\ >\ \left|\lambda_{+}\right| \qquad\phantom{\large|}$} \qquad\qquad (\kappa<0) $$
the true solution corresponding to $\ \lambda_+\ $ is unstable

Adams-Bashforth scheme

ansatz for three variables

$$ y_n \ =\ \alpha_1 y_{n-1} + h\beta_1 f_{n-1} + h\beta_2 f_{n-2} $$
constant case
$$ y(t)=1, \qquad\quad \dot y=0=f, \qquad\quad \fbox{$\displaystyle\phantom{\large|} 1\ =\ \alpha_1\phantom{\large|}$} \qquad\quad (*) $$
linear case $\quad y(t)=t,\quad \dot y=1=f$
$$ \fbox{$\displaystyle\phantom{\large|} t_n\ =\ t_n-h+h\beta_1+h\beta_2\phantom{\large|}$} \qquad\qquad \fbox{$\displaystyle\phantom{\large|} 1\ =\ \beta_1+\beta_2\phantom{\large|}$} \qquad\quad (**) $$
quadratic case $\quad y(t)=t^2,\quad \dot y=2t=f$
$$ \fbox{$\displaystyle\phantom{\large|} t_n^2\ =\ (t_n-h)^2+2h\beta_1(t_n-h)+2h(1-\beta_1)(t_n-2h)\phantom{\large|}$} $$

Adams-Bashforth scheme

$$ y_n \ =\ y_{n-1} \,+\,\frac{3h}{2}f_{n-1} \,-\,\frac{h}{2}f_{n-2} $$

Adams-Bashforth - stability analysis

stability

example multi-step program

Copy Copy to clipboard
Download Download
#include <iostream>
#include <stdio.h>    // for printf
#include <math.h>     // for pow
#include <fstream>    // file streams
using namespace std;
 
const int       nVar  = 2;                // number of dynamical variables
const int nTimeSteps  = 800;              // number of time steps
const double       h  = 0.01;             // Delta t (integration step size)
 
double y_0[nVar];                         // the dynamical variables at step n
double y_1[nVar];                         // of (n-1), one step back
double y_2[nVar];                         // of (n-2), two steps back
 
double f_0[nVar];                         // the flow f(y) at step n
double f_1[nVar];                         // of (n-1)
double f_2[nVar];                         // of (n-2)
 
// --- 
// --- standard stuff
// ---
// Reset all global state and flow arrays to zero.
// Bug fix: the original indexed every array with nVar (one past the
// end -- undefined behavior) instead of the loop variable i, so the
// arrays were never actually zeroed.
void zeroGlobalVariables()
 {
 for (int i=0;i<nVar;i++)
   {
   y_0[i] = 0.0;
   f_0[i] = 0.0;
   y_1[i] = 0.0;
   f_1[i] = 0.0;
   y_2[i] = 0.0;
   f_2[i] = 0.0;
   }
 } // end of zeroVariables()
 
// Rotate the time-step history one slot back:
// (n-1) becomes (n-2), and n becomes (n-1).
void shiftGlobalVariables()
 {
 for (int j=0;j<nVar;j++)
   {
   y_2[j] = y_1[j];   // drop the oldest step
   f_2[j] = f_1[j];
   y_1[j] = y_0[j];   // newest becomes previous
   f_1[j] = f_0[j];
   }
 } // end of shiftVariables()
 
// --- 
// --- evaluate the flow : \dot y = f(y)
// ---
// Write the flow f(y) of \dot y = f(y) into f[]. Two example systems
// are provided; exactly one should be active. The original assigned the
// circle flow first and then unconditionally overwrote it (dead stores),
// so the effective system is the exponential decay; the circle flow is
// kept below as a commented-out alternative to toggle by hand.
void evaluate(double y[], double f[])
 {
// f[0] =  y[1];                 // \dot x = y    : for circle
// f[1] = -y[0];                 // \dot y = -x   : for circle
//
 f[0] = -y[0];                 // \dot x = -x   : for exp(-t)
 f[1] =  0.0;
 } // end of evaluate()
 
// ---
// --- main
// ---
// Driver: integrates the test system \dot y = f(y) (see evaluate) with
// three schemes -- forward Euler, leap frog, and Adams-Bashforth --
// writing "t y[0] y[1]" per line to one output file per scheme.
// Returns 0 on success (the original returned 1, which signals failure
// to the calling shell).
int main() 
{
// *** ***************** ***
// *** Euler integration ***
// *** ***************** ***
 zeroGlobalVariables();
 y_0[0] = 1.0;                               // initial condition y(0)=(1,0)
 y_0[1] = 0.0;
//
 ofstream myOutput;
 myOutput.open("integrate_Euler.dat",ios::out);
 myOutput.setf(ios::fixed,ios::floatfield);  // for formatted output 
 myOutput.precision(3);                      // floating point precision
 for (int iTimes=0;iTimes<nTimeSteps;iTimes++)
   {
   double t = h*iTimes;
   myOutput << t << " " << y_0[0] << " " << y_0[1] << endl;
   evaluate(y_0,f_0);
   y_0[0] += f_0[0]*h;                       // y_{n+1} = y_n + h f_n
   y_0[1] += f_0[1]*h;
   }
 myOutput.close();
 
// *** **************** ***
// *** leap frog scheme ***
// *** **************** ***
 zeroGlobalVariables();
 y_2[0] = 1.0;                               // y(0)
 y_2[1] = 0.0;
 evaluate(y_2,f_2);
 y_1[0] = y_2[0] + f_2[0]*h;                 // bootstrap y(h) by one Euler step
 y_1[1] = y_2[1] + f_2[1]*h;
//
 myOutput.open("integrate_leapFrog.dat",ios::out);
 myOutput.setf(ios::fixed,ios::floatfield);  // for formatted output 
 myOutput.precision(3);                      // floating point precision
 for (int iTimes=0;iTimes<nTimeSteps;iTimes++)
   {
   double t = h*iTimes;
   myOutput << t << " " << y_2[0] << " " << y_2[1] << endl;
   evaluate(y_1,f_1);
   y_0[0] = y_2[0] + 2.0*h*f_1[0];           // y_n = y_{n-2} + 2 h f_{n-1}
   y_0[1] = y_2[1] + 2.0*h*f_1[1];
   shiftGlobalVariables();
   }
 myOutput.close();
 
// *** ********************** ***
// *** Adams Bashforth scheme ***
// *** ********************** ***
 zeroGlobalVariables();
 y_2[0] = 1.0;                               // y(0)
 y_2[1] = 0.0;
 evaluate(y_2,f_2);
 y_1[0] = y_2[0] + f_2[0]*h;                 // bootstrap y(h) by one Euler step
 y_1[1] = y_2[1] + f_2[1]*h;
//
 myOutput.open("integrate_Adams.dat",ios::out);
 myOutput.setf(ios::fixed,ios::floatfield);  // for formatted output 
 myOutput.precision(3);                      // floating point precision
 for (int iTimes=0;iTimes<nTimeSteps;iTimes++)
   {
   double t = h*iTimes;
   myOutput << t << " " << y_2[0] << " " << y_2[1] << endl;
   evaluate(y_1,f_1);
   y_0[0] = y_1[0] + 1.5*h*f_1[0] - 0.5*h*f_2[0];  // y_n = y_{n-1} + (3h/2)f_{n-1} - (h/2)f_{n-2}
   y_0[1] = y_1[1] + 1.5*h*f_1[1] - 0.5*h*f_2[1];
   shiftGlobalVariables();
   }
 myOutput.close();
 
 return 0;                                   // report success to the environment
 }  // end of main()

energy conservation


one dimensional Hamiltonian system


energy conservation for N-body systems

$$ \begin{array}{rcl} \dot{\mathbf{r}}_i & =& \mathbf{v}_i \\[0.5ex] m_i\dot{\mathbf{v}}_i & =& -\nabla\sum_{j\ne i}\Phi(\mathbf{r}_i-\mathbf{r}_j) \end{array} \qquad\quad E \ =\ \sum_{i=1}^N\left( \frac{m_i\mathbf{v}_i^2}{2} +\frac{1}{2}\sum_{j\ne i}\Phi(\mathbf{r}_i-\mathbf{r}_j) \right) $$

adaptive error control

controlling accuracy while integrating

$$ \left. \begin{array}{rcccl} y_a & \to & y_b=y(y_a,h) & \to & y(y_b,h) \\ y_a & & \longrightarrow & & y(y_a,2h) \end{array} \ \right\}\qquad \Rightarrow\qquad \Delta=y(y_a,2h)-y(y_b,h) $$

compare error with tolerance

$$ \fbox{$\displaystyle\phantom{\large|} \Delta_{max} \quad \Longleftrightarrow\quad \Delta \ =\ h^\alpha \phi \phantom{\large|}$} $$ $$ \phi \qquad \rightarrow \qquad h\ =\ \big(\Delta_{max}/\phi\big)^{1/\alpha} $$

implicit Runge Kutta schemes

example: backward Euler method

$$ y_{n + 1} \ =\ y_n + h f(t_n + h,\ y_{n + 1}) $$

stiff differential equations

$$ \dot y(t) \,=\, \lambda\big[g(t)-y(t)\big]+\dot g(t) \qquad\qquad \fbox{$\displaystyle\phantom{\large|} y(t)\,=\,\gamma e^{-\lambda t}+g(t) \phantom{\large|}$} \qquad\qquad \gamma\in\mathbb{R} $$