DistributedCholeskySolver.cxx
// Copyright (C) 2010 Marc Duruflé
//
// This file is part of the linear-algebra library Seldon,
// http://seldon.sourceforge.net/.
//
// Seldon is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License as published by the Free
// Software Foundation; either version 2.1 of the License, or (at your option)
// any later version.
//
// Seldon is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
// more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with Seldon. If not, see http://www.gnu.org/licenses/.


#ifndef SELDON_FILE_DISTRIBUTED_CHOLESKY_SOLVER_CXX


namespace Seldon
{

  /*****************************
   * DistributedCholeskySolver *
   *****************************/

  //! Default constructor.
  template <class T>
  DistributedCholeskySolver<T>::DistributedCholeskySolver()
  {
#ifdef SELDON_WITH_MPI
    // initialisation for a sequential matrix
    nodl_scalar_ = 1;
    nb_unknowns_scal_ = 1;
    comm_ = MPI_COMM_SELF;
    ProcSharingRows_ = NULL;
    SharingRowNumbers_ = NULL;
#endif
  }


  //! Performs Cholesky factorization of a sequential matrix A.
  template<class T>
  template<class Prop, class Storage, class Allocator>
  void DistributedCholeskySolver<T>
  ::Factorize(Matrix<T, Prop, Storage, Allocator>& A, bool keep_matrix)
  {
    // we call the sequential function
    SparseCholeskySolver<T>::Factorize(A, keep_matrix);
  }


#ifdef SELDON_WITH_MPI
  //! Performs Cholesky factorization of a distributed matrix A.
  template<class T>
  template<class Prop0, class Storage0, class Allocator0>
  void DistributedCholeskySolver<T>
  ::Factorize(DistributedMatrix<T, Prop0, Storage0, Allocator0>& A,
              bool keep_matrix)
  {
    MPI_Comm& comm = A.GetCommunicator();
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      {
        comm_ = comm;
        SparseCholeskySolver<T>::Factorize(A, keep_matrix);
      }
    else
      {
        // factorisation of a distributed matrix
        this->n = A.GetM();
        comm_ = comm;
        ProcSharingRows_ = &A.GetProcessorSharingRows();
        SharingRowNumbers_ = &A.GetSharingRowNumbers();
        nodl_scalar_ = A.GetNodlScalar();
        nb_unknowns_scal_ = A.GetNbScalarUnknowns();
        bool sym_matrix = IsSymmetricMatrix(A);

        // assembles distributed matrix
        int rank_proc; MPI_Comm_rank(comm, &rank_proc);

        if (this->type_solver != SparseCholeskySolver<T>::PASTIX)
          {
            cout << "Only available for Pastix" << endl;
            abort();
          }

        DistributedMatrix<T, Prop0, Storage0, Allocator0> Bstore;
        DistributedMatrix<T, Prop0, Storage0, Allocator0>* B;
        B = &A;
        if (keep_matrix)
          {
            B = &Bstore;
            Bstore = A;
          }

#ifdef SELDON_WITH_PASTIX
        MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>&>(*this->solver);
        mat_pastix.SetCholeskyFacto(true);
#else
        cout << "Recompile Seldon with Pastix" << endl;
        abort();
#endif

        bool sym_pattern = true, reorder_num = false;
        Prop0 sym;
        if (this->solver->UseInteger8())
          {
            Vector<int64_t> Ptr, IndRow;
            Vector<T> Val;

            AssembleDistributed(*B, sym, comm, global_col_numbers,
                                local_col_numbers,
                                Ptr, IndRow, Val, sym_pattern, reorder_num);

            // factorizes the matrix
            this->solver->FactorizeDistributedMatrix(comm, Ptr, IndRow, Val,
                                                     global_col_numbers, sym_matrix, reorder_num);
          }
        else
          {
            Vector<long> Ptr; Vector<int> IndRow;
            Vector<T> Val;

            AssembleDistributed(*B, sym, comm, global_col_numbers,
                                local_col_numbers,
                                Ptr, IndRow, Val, sym_pattern, reorder_num);

            // factorizes the matrix
            this->solver->FactorizeDistributedMatrix(comm, Ptr, IndRow, Val,
                                                     global_col_numbers, sym_matrix, reorder_num);
          }
      }
  }


  //! assembles the vector (adds values of shared rows)
  template<class T> template<class T2>
  void DistributedCholeskySolver<T>::AssembleVec(Vector<T2>& X) const
  {
    AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                   comm_, nodl_scalar_, nb_unknowns_scal_, 20);
  }


  //! assembles the matrix column by column (adds values of shared rows)
  template<class T> template<class T2>
  void DistributedCholeskySolver<T>::AssembleVec(Matrix<T2, General, ColMajor>& A) const
  {
    int nrhs = A.GetN();
    Vector<T2> X;

    for (int k = 0; k < nrhs; k++)
      {
        X.SetData(A.GetM(), &A(0, k));
        AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                       comm_, nodl_scalar_, nb_unknowns_scal_, 21);

        X.Nullify();
      }
  }
#endif


  //! solves L x = b or L^T x = b (L being the computed Cholesky factor)
  template<class T> template<class T1>
  void DistributedCholeskySolver<T>::Solve(const SeldonTranspose& trans,
                                           Vector<T1>& x_solution, bool assemble)
  {

#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      SparseCholeskySolver<T>::Solve(trans, x_solution);
    else
      {
        // extracting right hand side (we remove overlapped dofs)
        int n = local_col_numbers.GetM();
        Vector<T1> x_sol_extract(n);
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_sol_extract(i) = x_solution(local_col_numbers(i));

#ifdef SELDON_WITH_PASTIX
        MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>&>(*this->solver);

        mat_pastix.SolveDistributed(comm, trans, x_sol_extract, global_col_numbers);
#else
        cout << "Recompile Seldon with Pastix" << endl;
        abort();
#endif

        x_solution.Zero();
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_solution(local_col_numbers(i)) = x_sol_extract(i);

        // adding overlapped components
        if (assemble)
          this->AssembleVec(x_solution);
      }
#else
    SparseCholeskySolver<T>::Solve(trans, x_solution);
#endif
  }


  //! computes L x or L^T x (the result overwrites x_solution)
  template<class T> template<class T1>
  void DistributedCholeskySolver<T>::Mlt(const SeldonTranspose& trans,
                                         Vector<T1>& x_solution, bool assemble)
  {
#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      SparseCholeskySolver<T>::Mlt(trans, x_solution);
    else
      {
        // extracting right hand side (we remove overlapped dofs)
        int n = local_col_numbers.GetM();
        Vector<T1> x_sol_extract(n);
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_sol_extract(i) = x_solution(local_col_numbers(i));

#ifdef SELDON_WITH_PASTIX
        MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>&>(*this->solver);

        mat_pastix.MltDistributed(comm, trans, x_sol_extract, global_col_numbers);
#else
        cout << "Recompile Seldon with Pastix" << endl;
        abort();
#endif

        x_solution.Zero();
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_solution(local_col_numbers(i)) = x_sol_extract(i);

        // adding overlapped components
        if (assemble)
          this->AssembleVec(x_solution);
      }
#else
    SparseCholeskySolver<T>::Mlt(trans, x_solution);
#endif
  }

} // namespace Seldon.


#define SELDON_FILE_DISTRIBUTED_CHOLESKY_SOLVER_CXX
#endif
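
// Usage sketch (illustrative only, not part of the Seldon sources): one
// possible calling sequence for DistributedCholeskySolver, assuming Seldon
// is built with SELDON_WITH_MPI and SELDON_WITH_PASTIX, and that the
// distributed matrix has already been filled and given its MPI sharing
// information. The matrix type below is just an example choice.
//
//   DistributedMatrix<double, Symmetric, ArrayRowSymSparse> A;
//   // ... set the local rows of A and its distributed structure ...
//
//   DistributedCholeskySolver<double> solver;
//   solver.Factorize(A, false);          // distributed Cholesky (PaStiX), A not kept
//
//   Vector<double> b_local(A.GetM());
//   b_local.Fill(1.0);                   // local part of the right-hand side
//
//   // A = L L^T, so solving A x = b amounts to L y = b, then L^T x = y
//   solver.Solve(SeldonNoTrans, b_local);
//   solver.Solve(SeldonTrans, b_local);  // b_local now holds the local solution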