DUBeat  1.0.1
High-order discontinuous Galerkin methods and applications to cardiac electrophysiology
generate_convergence_plots.py
#!/usr/bin/env python3


# Author: Federica Botta <federica.botta@mail.polimi.it>.
# Author: Matteo Calafà <matteo.calafa@mail.polimi.it>.


#
# Generate plots for the convergence analysis on DG schemes.
# N.B. requires pandas and matplotlib.
#

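# Note: the script is assumed to be run from a directory in which the
# tab-separated error table "../errors_Laplace_3D_u.data" is reachable;
# each figure is saved by matplotlib in its default format (PNG) in the
# current working directory.
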
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd


# Read the error table produced by the convergence test (tab-separated),
# keep the four error norms, drop the rows marked with 'x' in the first
# column and convert the remaining entries to floats.
errors = pd.read_csv("../errors_Laplace_3D_u.data", sep='\t')
errors = errors[["l_inf", "l_2", "h_1", "DG"]]
errors = np.array(errors)
errors = errors[errors[:, 0] != 'x', :].astype(float)
n = errors.shape[0]


if n > 0:

    # Mesh sizes for the x-axis: n points from 2^(-2) down to 2^(-n).
    x = np.logspace(-2, -n, num=n, base=2.0)

    # L-infinity error with reference slopes h, h^2, h^3.
    k = errors[0, 0]
    plt.figure()
    ax = plt.axes()
    plt.loglog(x, errors[:, 0], 'tab:blue', marker='o', linestyle='-', label=r'$L^\infty$ error')
    plt.loglog(x, k * np.power(x, 1) / np.power(x[0], 1), 'k', linestyle='-.', label='$h$')
    plt.loglog(x, k * np.power(x, 2) / np.power(x[0], 2), 'k', linestyle='--', label='$h^2$')
    plt.loglog(x, k * np.power(x, 3) / np.power(x[0], 3), 'k', linestyle='dotted', label='$h^3$')
    plt.xlabel('h')
    plt.ylabel(r'$L^\infty$ error')
    plt.legend()
    ax.set_title(r'Convergence test ($L^\infty$ error)')
    plt.savefig("Linf")


    # L2 error with reference slopes h, h^2, h^3.
    k = errors[0, 1]
    plt.figure()
    ax = plt.axes()
    plt.loglog(x, errors[:, 1], 'tab:blue', marker='o', linestyle='-', label='$L^2$ error')
    plt.loglog(x, k * np.power(x, 1) / np.power(x[0], 1), 'k', linestyle='-.', label='$h$')
    plt.loglog(x, k * np.power(x, 2) / np.power(x[0], 2), 'k', linestyle='--', label='$h^2$')
    plt.loglog(x, k * np.power(x, 3) / np.power(x[0], 3), 'k', linestyle='dotted', label='$h^3$')
    plt.xlabel('h')
    plt.ylabel('$L^2$ error')
    plt.legend()
    ax.set_title('Convergence test ($L^2$ error)')
    plt.savefig("L2")


    # H1 error with reference slopes h, h^2, h^3.
    k = errors[0, 2]
    plt.figure()
    ax = plt.axes()
    plt.loglog(x, errors[:, 2], 'tab:blue', marker='o', linestyle='-', label='$H^1$ error')
    plt.loglog(x, k * np.power(x, 1) / np.power(x[0], 1), 'k', linestyle='-.', label='$h$')
    plt.loglog(x, k * np.power(x, 2) / np.power(x[0], 2), 'k', linestyle='--', label='$h^2$')
    plt.loglog(x, k * np.power(x, 3) / np.power(x[0], 3), 'k', linestyle='dotted', label='$h^3$')
    plt.xlabel('h')
    plt.ylabel('$H^1$ error')
    plt.legend()
    ax.set_title('Convergence test ($H^1$ error)')
    plt.savefig("H1")


    # DG error with reference slopes h, h^2, h^3.
    k = errors[0, 3]
    plt.figure()
    ax = plt.axes()
    plt.loglog(x, errors[:, 3], 'tab:blue', marker='o', linestyle='-', label='$DG$ error')
    plt.loglog(x, k * np.power(x, 1) / np.power(x[0], 1), 'k', linestyle='-.', label='$h$')
    plt.loglog(x, k * np.power(x, 2) / np.power(x[0], 2), 'k', linestyle='--', label='$h^2$')
    plt.loglog(x, k * np.power(x, 3) / np.power(x[0], 3), 'k', linestyle='dotted', label='$h^3$')
    plt.xlabel('h')
    plt.ylabel('$DG$ error')
    plt.legend()
    ax.set_title('Convergence test ($DG$ error)')
    plt.savefig("DG")
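

# A minimal companion sketch, assuming the x grid above reflects the mesh
# sizes actually used by the convergence test: estimate the observed order
# of convergence p (error ~ h^p) between consecutive refinements, one column
# per norm, as a quick numerical check of the slopes shown in the plots.
if n > 1:
    rates = np.log(errors[:-1, :] / errors[1:, :]) / np.log(x[:-1] / x[1:])[:, None]
    rates = pd.DataFrame(rates, columns=["l_inf", "l_2", "h_1", "DG"])
    print("Observed convergence orders between consecutive refinements:")
    print(rates.round(2))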