xref: /petsc/src/ksp/pc/impls/sor/sor.tex (revision 79e48321dcc84b4923f846b7ce3e1bcc8997b56a) !
1\documentclass[11pt,english,pdftex]{article}
2\usepackage{hanging} % added DRE
3\usepackage{times}
4\usepackage{url}
5\usepackage[T1]{fontenc}
6\usepackage[latin1]{inputenc}
7\usepackage{geometry}
8\geometry{verbose,letterpaper,tmargin=1.in,bmargin=1.in,lmargin=1.0in,rmargin=1.0in}
9
10\begin{document}
11Consider the matrix problem $ A x = b$, where $A = L + U + D$ with $L$ the strictly lower-triangular part of $A$, $D$ its diagonal, and $U$ its strictly upper-triangular part.
12
13\section*{Notes on SOR Implementation}
14
15
16Symmetric successive over-relaxation as a simple iterative solver can be written as the two-step process
17\[
18x_i^{n+1/2} =  x_i^n + \omega A_{ii}^{-1}( b_i - \sum_{j < i} A_{ij} x_j^{n+1/2} - \sum_{j \ge i} A_{ij} x_j^{n}) = (1 - \omega) x_i^n + \omega A_{ii}^{-1}( b_i - \sum_{j < i} A_{ij} x_j^{n+1/2} - \sum_{j > i} A_{ij} x_j^{n})
19\]
20for $ i=1,2,\ldots,n$, followed by
21\[
22x_i^{n+1} = x_i^{n+1/2} + \omega A_{ii}^{-1}( b_i - \sum_{j \le i} A_{ij} x_j^{n+1/2}  - \sum_{j > i} A_{ij} x_j^{n+1}) = (1 - \omega) x_i^{n+1/2} + \omega A_{ii}^{-1}( b_i - \sum_{j < i} A_{ij} x_j^{n+1/2}  - \sum_{j > i} A_{ij} x_j^{n+1})
23\]
24for $ i=n,n-1,\ldots,1$. It is called over-relaxation because generally $ \omega $ is greater than one, though on occasion under-relaxation with $ \omega < 1$ has the fastest convergence.
25
26To use this as a preconditioner, just start with $x^0 = 0$ to obtain
27\[
28x_i^{1/2} =  \omega A_{ii}^{-1}( b_i - \sum_{j < i} A_{ij} x_j^{1/2})
29\]
30for $ i=1,2,\ldots,n$, followed by
31\[
32x_i = (1 - \omega) x_i^{1/2} + \omega A_{ii}^{-1}( b_i - \sum_{j < i} A_{ij} x_j^{1/2} - \sum_{j > i} A_{ij} x_j)
33\]
34for $ i=n,n-1,\ldots,1$.
35
36Rewriting in matrix form
37\[
38x^{1/2} = \omega (L + D)^{-1} b
39\]
40\[
41x = (1 - \omega) x^{1/2} + \omega (U + D)^{-1}(b - L x^{1/2}) = x^{1/2} + \omega (U+D)^{-1}(b - A x^{1/2}).
42\]
43
44For the SBAIJ matrix format
45\begin{verbatim}
46v  = aa + 1;
47      vj = aj + 1;
48      for (i=0; i<m; i++){
49        nz = ai[i+1] - ai[i] - 1;
50        tmp = - (x[i] = omega*t[i]*aidiag[i]);
51        for (j=0; j<nz; j++) {
52          t[vj[j]] += tmp*v[j];
53        }
54        v  += nz + 1;
55        vj += nz + 1;
56      }
57\end{verbatim}
58the array $t$ starts with the value of $b$ and is updated a column of the matrix at a time to contain the values of $ (b - L x^{1/2})$ that
59are then needed in the upper triangular solve
60\begin{verbatim}
61      v  = aa + ai[m-1] + 1;
62      vj = aj + ai[m-1] + 1;
63      nz = 0;
64      for (i=m-1; i>=0; i--){
65        sum = 0.0;
66        nz2 = ai[i] - ai[i-1] - 1;
67        PETSC_Prefetch(v-nz2-1,0,PETSC_PREFETCH_HINT_NTA);
68        PETSC_Prefetch(vj-nz2-1,0,PETSC_PREFETCH_HINT_NTA);
69        PetscSparseDensePlusDot(sum,x,v,vj,nz);
70        sum = t[i] - sum;
71        x[i] =   (1-omega)*x[i] + omega*sum*aidiag[i];
72        nz  = nz2;
73        v  -= nz + 1;
74        vj -= nz + 1;
75      }
76\end{verbatim}
77Since the values in $ aa[]$ and $ aj[]$ are visited ``backwards'', the prefetch is used to load the needed previous row of matrix values and column indices into cache before they are needed.
78
79For the AIJ format $t$ is updated a row at a time to contain $ (b - Lx^{1/2}).$
80
81
82\section*{Notes on Sequential Eisenstat Implementation}
83
84
85\[
86  x = \omega (L + D)^{-1}b
87\]
88corresponds to the component-wise forward sweep
89\[
90   x_i = \omega D_{ii}^{-1}(b_i - \sum_{j<i} A_{ij} x_j)
91\]
92\[
93   x_i = (D_{ii}/\omega)^{-1}(b_i - \sum_{j<i} A_{ij} x_j)
94\]
95resulting in
96\[
97  x =  (L + D/\omega)^{-1}b
98\]
99
100Rather than using the left preconditioner obtained by applying the two-step process $ (L + D/\omega)^{-1} $ and then $ (U + D/\omega)^{-1} $
101one can apply the two ``halves'' of the preconditioner symmetrically to the system resulting in
102\[
103 (L + D/\omega)^{-1} A (U + D/\omega)^{-1} y = (L + D/\omega)^{-1} b.
104\]
105Then after this system is solved, $ x = (U + D/\omega)^{-1} y$. If an initial guess that is nonzero is supplied then the
106initial guess for $ y$ must be computed via $ y = (U + D/\omega) x$.
107\begin{eqnarray*}
108 (L + D/\omega)^{-1} A (U + D/\omega)^{-1} & =  & (L + D/\omega)^{-1} (L + D + U) (U + D/\omega)^{-1} \\
109  & = &  (L + D/\omega)^{-1} (L + D/\omega + U + D/\omega + D - 2D/\omega) (U + D/\omega)^{-1} \\
110  & = &  (U + D/\omega)^{-1} + (L+D/\omega)^{-1}(I + \frac{\omega - 2}{\omega}D(U + D/\omega)^{-1}).
111\end{eqnarray*}
112
113\end{document}
114