// DistributedSolver.cxx

#ifndef SELDON_FILE_DISTRIBUTED_SOLVER_CXX

#include "DistributedSolver.hxx"

namespace Seldon
{

  //! Default constructor
  template <class T>
  SparseDistributedSolver<T>::SparseDistributedSolver()
    : SparseDirectSolver<T>()
  {
    diagonal_scaling_left = false;
    diagonal_scaling_right = false;

    this->refine_solution = true;

#ifdef SELDON_WITH_MPI
    // initialisation for a sequential matrix
    nodl_scalar_ = 1;
    nb_unknowns_scal_ = 1;
    comm_ = MPI_COMM_SELF;
    ProcSharingRows_ = NULL;
    SharingRowNumbers_ = NULL;
#endif
  }


  //! Changes the level of displayed messages
  template <class T>
  void SparseDistributedSolver<T>::SetPrintLevel(int print_level)
  {
    // the most verbose level is tested first so that every branch is reachable
    if (print_level >= 6)
      this->ShowFullHistory();
    else if (print_level >= 2)
      this->ShowMessages();
    else
      this->HideMessages();
  }


  //! Clears LU matrices
  template <class T>
  void SparseDistributedSolver<T>::Clear()
  {
    SparseDirectSolver<T>::Clear();

    diagonal_scale_left.Clear();
    diagonal_scale_right.Clear();
  }


  //! scales matrix with 1 / \sum |a_{i,j}| (rows and columns)
  template<class T> template<class MatrixSparse>
  void SparseDistributedSolver<T>::ScaleMatrixRowCol(MatrixSparse& A)
  {
    int n = A.GetM();
    diagonal_scaling_left = true;
    diagonal_scaling_right = true;

    GetRowColSum(diagonal_scale_left, diagonal_scale_right, A);

    // forming D^-1 A C^-1 where d_i = \sum_j | a_ij |
    // and c_j = \sum_i | a_ij |
    for (int i = 0; i < n; i++)
      {
        if (diagonal_scale_left(i) != 0)
          diagonal_scale_left(i) = abs(1.0/diagonal_scale_left(i));
        else
          diagonal_scale_left(i) = 1.0;

        if (diagonal_scale_right(i) != 0)
          diagonal_scale_right(i) = abs(1.0/diagonal_scale_right(i));
        else
          diagonal_scale_right(i) = 1.0;
      }

    ScaleMatrix(A, diagonal_scale_left, diagonal_scale_right);
  }


  //! factorisation of matrix A (sequential case)
  template<class T>
  template<class Prop, class Storage, class Allocator>
  void SparseDistributedSolver<T>
  ::Factorize(Matrix<T, Prop, Storage, Allocator>& A,
              bool keep_matrix, bool scale_matrix)
  {
    diagonal_scaling_left = false;
    diagonal_scaling_right = false;
    if (scale_matrix)
      ScaleMatrixRowCol(A);

    // then factorizing the modified matrix
    SparseDirectSolver<T>::Factorize(A, keep_matrix);
  }

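
  /* Usage sketch (illustrative, not part of the library): sequential
     factorization with row/column scaling enabled. The value type double and
     the storage ArrayRowSparse are arbitrary choices for this example.

       SparseDistributedSolver<double> solver;
       Matrix<double, General, ArrayRowSparse> A;
       // ... fill A ...
       solver.Factorize(A, false, true);  // keep_matrix = false, scale_matrix = true

       Vector<double> x, b;
       // ... fill b ...
       solver.Solve(x, b);  // the right hand side and the solution are rescaled
                            // internally, so x solves the original system A x = b
  */
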

  //! Performs the analysis of the matrix before numerical factorization
  template<class T>
  template<class Prop, class Storage, class Allocator>
  void SparseDistributedSolver<T>
  ::PerformAnalysis(Matrix<T, Prop, Storage, Allocator>& A)
  {
    SparseDirectSolver<T>::PerformAnalysis(A);
  }


  //! Performs the numerical factorization
  template<class T>
  template<class Prop, class Storage, class Allocator>
  void SparseDistributedSolver<T>
  ::PerformFactorization(Matrix<T, Prop, Storage, Allocator>& A,
                         bool scale_matrix)
  {
    diagonal_scaling_left = false;
    diagonal_scaling_right = false;
    if (scale_matrix)
      ScaleMatrixRowCol(A);

    // then factorizing the modified matrix
    SparseDirectSolver<T>::PerformFactorization(A);
  }

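
  /* Usage sketch (illustrative, not part of the library): separating the
     symbolic analysis from the numerical factorization, so that matrices
     sharing the same sparsity pattern can be refactorized cheaply.

       SparseDistributedSolver<double> solver;
       Matrix<double, General, ArrayRowSparse> A;
       // ... fill A ...
       solver.PerformAnalysis(A);              // ordering / symbolic step only
       solver.PerformFactorization(A, false);  // numerical factorization, no scaling

       // later, with new values but the same pattern in A :
       solver.PerformFactorization(A, false);
  */
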
#ifdef SELDON_WITH_MPI
  //! factorisation of a distributed matrix
  template<class T>
  template<class Prop0, class Storage0, class Allocator0>
  void SparseDistributedSolver<T>
  ::Factorize(DistributedMatrix<T, Prop0, Storage0, Allocator0>& A,
              bool keep_matrix, bool scale_matrix)
  {
    diagonal_scaling_left = false;
    diagonal_scaling_right = false;
    if (scale_matrix)
      ScaleMatrixRowCol(A);

    MPI_Comm& comm = A.GetCommunicator();
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      {
        comm_ = comm;
        SparseDirectSolver<T>::Factorize(A, keep_matrix);
      }
    else
      {
        // factorisation of a distributed matrix
        this->n = A.GetM();
        comm_ = comm;
        ProcSharingRows_ = &A.GetProcessorSharingRows();
        SharingRowNumbers_ = &A.GetSharingRowNumbers();
        nodl_scalar_ = A.GetNodlScalar();
        nb_unknowns_scal_ = A.GetNbScalarUnknowns();
        bool sym_matrix = IsSymmetricMatrix(A);

        DistributedMatrix<T, Prop0, Storage0, Allocator0> Bstore;
        DistributedMatrix<T, Prop0, Storage0, Allocator0>* B;
        B = &A;
        if (keep_matrix)
          {
            B = &Bstore;
            Bstore = A;
          }

        // assembles distributed matrix
        int rank_proc; MPI_Comm_rank(comm, &rank_proc);
        if ((this->print_level >= 1) && (rank_proc == 0))
          cout << "Assembling the distributed matrix..." << endl;

        if (this->solver->UseInteger8())
          {
            Vector<int64_t> Ptr, IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->PASTIX)
              reorder_num = true;

            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(*B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(*B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Factorizing the distributed matrix..." << endl;

            // factorizes the matrix
            this->FactorizeDistributed(comm, Ptr, IndRow, Val,
                                       global_col_numbers, sym_matrix, reorder_num);
          }
        else
          {
            Vector<long> Ptr; Vector<int> IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->PASTIX)
              reorder_num = true;

            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(*B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(*B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Factorizing the distributed matrix..." << endl;

            // factorizes the matrix
            this->FactorizeDistributed(comm, Ptr, IndRow, Val,
                                       global_col_numbers, sym_matrix, reorder_num);
          }
      }
  }

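
  /* Usage sketch (illustrative, not part of the library): factorization of a
     matrix distributed over several MPI processes. The construction of the
     DistributedMatrix (communicator, global row numbers, rows shared between
     processors) is assumed to be done elsewhere; types are illustrative.

       DistributedMatrix<double, General, ArrayRowSparse> A;
       // ... set the communicator and shared rows, then fill the local part of A ...

       SparseDistributedSolver<double> solver;
       solver.Factorize(A, false, false);  // assembles and factorizes the distributed matrix

       Vector<double> x, b;
       // ... fill the local right hand side b ...
       solver.Solve(x, b);  // the solution is assembled over rows shared between processors
  */
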
  //! Performs the analysis of a distributed matrix before numerical factorization
  template<class T>
  template<class Prop0, class Storage0, class Allocator0>
  void SparseDistributedSolver<T>
  ::PerformAnalysis(DistributedMatrix<T, Prop0, Storage0, Allocator0>& A)
  {
    MPI_Comm& comm = A.GetCommunicator();
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      {
        comm_ = comm;
        SparseDirectSolver<T>::PerformAnalysis(A);
      }
    else
      {
        // analysis of a distributed matrix
        this->n = A.GetM();
        comm_ = comm;
        ProcSharingRows_ = &A.GetProcessorSharingRows();
        SharingRowNumbers_ = &A.GetSharingRowNumbers();
        nodl_scalar_ = A.GetNodlScalar();
        nb_unknowns_scal_ = A.GetNbScalarUnknowns();
        bool sym_matrix = IsSymmetricMatrix(A);

        // assembles distributed matrix
        int rank_proc; MPI_Comm_rank(comm, &rank_proc);
        if ((this->print_level >= 1) && (rank_proc == 0))
          cout << "Assembling the distributed matrix..." << endl;

        DistributedMatrix<T, Prop0, Storage0, Allocator0> B; B = A;
        if (this->solver->UseInteger8())
          {
            Vector<int64_t> Ptr, IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Analysing the distributed matrix..." << endl;

            // analyses the matrix
            this->PerformAnalysisDistributed(comm, Ptr, IndRow, Val,
                                             global_col_numbers, sym_matrix, reorder_num);
          }
        else
          {
            Vector<long> Ptr; Vector<int> IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(B, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Analysing the distributed matrix..." << endl;

            // analyses the matrix
            this->PerformAnalysisDistributed(comm, Ptr, IndRow, Val,
                                             global_col_numbers, sym_matrix, reorder_num);
          }

      }
  }

  //! Performs the numerical factorization of a distributed matrix
  template<class T>
  template<class Prop0, class Storage0, class Allocator0>
  void SparseDistributedSolver<T>
  ::PerformFactorization(DistributedMatrix<T, Prop0, Storage0, Allocator0>& A,
                         bool scale_matrix)
  {
    diagonal_scaling_left = false;
    diagonal_scaling_right = false;
    if (scale_matrix)
      ScaleMatrixRowCol(A);

    MPI_Comm& comm = A.GetCommunicator();
    int nb_proc, rank_proc;
    MPI_Comm_size(comm, &nb_proc); MPI_Comm_rank(comm, &rank_proc);
    if (nb_proc == 1)
      {
        comm_ = comm;
        SparseDirectSolver<T>::PerformFactorization(A);
      }
    else
      {
        // factorisation of a distributed matrix
        bool sym_matrix = IsSymmetricMatrix(A);

        // assembles distributed matrix
        if ((this->print_level >= 1) && (rank_proc == 0))
          cout << "Assembling the distributed matrix..." << endl;

        if (this->solver->UseInteger8())
          {
            Vector<int64_t> Ptr, IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(A, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(A, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Factorizing the distributed matrix..." << endl;

            // factorizes the matrix
            this->PerformFactorizationDistributed(comm, Ptr, IndRow, Val,
                                                  global_col_numbers, sym_matrix, reorder_num);
          }
        else
          {
            Vector<long> Ptr; Vector<int> IndRow;
            Vector<T> Val;
            bool sym_pattern = false;
            if (this->type_solver == this->PASTIX)
              sym_pattern = true;

            bool reorder_num = false;
            if (this->type_solver == this->SUPERLU)
              {
                General sym;
                reorder_num = true;
                AssembleDistributed(A, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }
            else
              {
                Prop0 sym;
                AssembleDistributed(A, sym, comm, global_col_numbers,
                                    local_col_numbers,
                                    Ptr, IndRow, Val, sym_pattern, reorder_num);
              }

            if ((this->print_level >= 1) && (rank_proc == 0))
              cout << "Factorizing the distributed matrix..." << endl;

            // factorizes the matrix
            this->PerformFactorizationDistributed(comm, Ptr, IndRow, Val,
                                                  global_col_numbers, sym_matrix, reorder_num);
          }

      }
  }

  //! assembles a distributed vector (sums the values of rows shared between processors)
  template<class T> template<class T2>
  void SparseDistributedSolver<T>::AssembleVec(Vector<T2>& X) const
  {
    AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                   comm_, nodl_scalar_, nb_unknowns_scal_, 20);
  }

  //! assembles a matrix of right hand sides, column by column
  template<class T> template<class T2>
  void SparseDistributedSolver<T>::AssembleVec(Matrix<T2, General, ColMajor>& A) const
  {
    int nrhs = A.GetN();
    Vector<T2> X;

    for (int k = 0; k < nrhs; k++)
      {
        X.SetData(A.GetM(), &A(0, k));
        AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                       comm_, nodl_scalar_, nb_unknowns_scal_, 21);

        X.Nullify();
      }
  }
#endif


  //! solution of linear system Ax = b by using LU factorization (with scaling)
  /*!
    \param[out] x_solution solution x
    \param[in] b_rhs right hand side b
  */
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::Solve(Vector<T1>& x_solution,
                                         const Vector<T1>& b_rhs)
  {
    Copy(b_rhs, x_solution);
    Solve(x_solution);
  }


  //! solution of linear system Ax = b by using LU factorization (with scaling)
  /*!
    \param[in,out] x_solution on input the right hand side b, on output the solution x
  */
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::Solve(Vector<T1>& x_solution)
  {
    Solve(SeldonNoTrans, x_solution);
  }


  //! solution of linear system A^T x = b by using LU factorization (with scaling)
  /*!
    \param[in,out] x_solution on input the right hand side b, on output the solution x
  */
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::TransSolve(Vector<T1>& x_solution)
  {
    Solve(SeldonTrans, x_solution);
  }


  //! solution of linear system op(A) x = b (x_solution is overwritten with the solution)
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::Solve(const SeldonTranspose& trans,
                                         Vector<T1>& x_solution, bool assemble)
  {
    if (diagonal_scaling_left && trans.NoTrans())
      for (int i = 0; i < x_solution.GetM(); i++)
        x_solution(i) *= diagonal_scale_left(i);

    if (diagonal_scaling_right && trans.Trans())
      for (int i = 0; i < x_solution.GetM(); i++)
        x_solution(i) *= diagonal_scale_right(i);

    // SolveLU or SolveLU_Distributed is used to handle the case of solving
    // a complex right hand side with a real matrix
#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      SolveLU(trans, *this, x_solution);
    else
      {
        // extracting right hand side (we remove overlapped dofs)
        int n = local_col_numbers.GetM();
        Vector<T1> x_sol_extract(n);
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_sol_extract(i) = x_solution(local_col_numbers(i));

        SolveLU_Distributed(comm, trans, *this,
                            x_sol_extract, global_col_numbers);

        x_solution.Zero();
        for (int i = 0; i < local_col_numbers.GetM(); i++)
          x_solution(local_col_numbers(i)) = x_sol_extract(i);

        // adding overlapped components
        if (assemble)
          this->AssembleVec(x_solution);
      }
#else
    SolveLU(trans, *this, x_solution);
#endif

    if (diagonal_scaling_right && trans.NoTrans())
      for (int i = 0; i < x_solution.GetM(); i++)
        x_solution(i) *= diagonal_scale_right(i);

    if (diagonal_scaling_left && trans.Trans())
      for (int i = 0; i < x_solution.GetM(); i++)
        x_solution(i) *= diagonal_scale_left(i);
  }

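
  /* Usage sketch (illustrative, not part of the library): the in-place overloads
     solve either A x = b or A^T x = b on a vector that initially contains the
     right hand side. As noted in the comment above, SolveLU / SolveLU_Distributed
     also accept a complex right hand side with a real factorized matrix.
     Here solver is a SparseDistributedSolver<double> already factorized.

       Vector<double> x;
       // ... fill x with the right hand side ...
       // each call below overwrites x with the corresponding solution :
       solver.Solve(x);                       // A x = b
       // solver.TransSolve(x);               // A^T x = b
       // solver.Solve(SeldonTrans, x, true); // same as TransSolve, assembling shared rows
  */
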


  //! solution of linear system A x = b with multiple right hand sides
  /*!
    \param[in,out] x_solution on input the right hand sides, on output the solutions
  */
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::Solve(Matrix<T1, General, ColMajor>& x_solution)
  {
    Solve(SeldonNoTrans, x_solution);
  }


  //! solution of linear system A^T x = b with multiple right hand sides
  /*!
    \param[in,out] x_solution on input the right hand sides, on output the solutions
  */
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::TransSolve(Matrix<T1, General, ColMajor>& x_solution)
  {
    Solve(SeldonTrans, x_solution);
  }


  //! solution of linear system op(A) x = b with multiple right hand sides
  template<class T> template<class T1>
  void SparseDistributedSolver<T>::Solve(const SeldonTranspose& trans,
                                         Matrix<T1, General, ColMajor>& x_solution)
  {
    if (diagonal_scaling_left && trans.NoTrans())
      ScaleLeftMatrix(x_solution, diagonal_scale_left);

    if (diagonal_scaling_right && trans.Trans())
      ScaleLeftMatrix(x_solution, diagonal_scale_right);

#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    if (nb_proc == 1)
      SolveLU(trans, *this, x_solution);
    else
      {
        // extracting right hand side (we remove overlapped dofs)
        int n = local_col_numbers.GetM();
        int N = x_solution.GetM(), nrhs = x_solution.GetN();
        Matrix<T1, General, ColMajor> x_sol_extract(n, nrhs);
        for (int k = 0; k < nrhs; k++)
          for (int i = 0; i < local_col_numbers.GetM(); i++)
            x_sol_extract(i, k) = x_solution(local_col_numbers(i), k);

        x_solution.Clear();
        SolveLU_Distributed(comm, trans, *this,
                            x_sol_extract, global_col_numbers);

        x_solution.Reallocate(N, nrhs);
        x_solution.Zero();
        for (int k = 0; k < nrhs; k++)
          for (int i = 0; i < local_col_numbers.GetM(); i++)
            x_solution(local_col_numbers(i), k) = x_sol_extract(i, k);

        // adding overlapped components
        this->AssembleVec(x_solution);
      }
#else
    SolveLU(trans, *this, x_solution);
#endif

    if (diagonal_scaling_right && trans.NoTrans())
      ScaleLeftMatrix(x_solution, diagonal_scale_right);

    if (diagonal_scaling_left && trans.Trans())
      ScaleLeftMatrix(x_solution, diagonal_scale_left);
  }

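
  /* Usage sketch (illustrative, not part of the library): solving several right
     hand sides at once; each column of the ColMajor matrix is one right hand
     side and is overwritten with the corresponding solution. Again, solver is a
     SparseDistributedSolver<double> already factorized.

       int n = ...;  // local number of rows
       int nrhs = 3;
       Matrix<double, General, ColMajor> X(n, nrhs);
       // ... fill each column of X with a right hand side ...
       solver.Solve(X);          // A X = B, column by column
       // solver.TransSolve(X);  // A^T X = B
  */
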


  //! Computation of a Schur complement
  /*!
    \param[in,out] mat_direct sparse matrix to factorize
    \param[in] num row numbers kept in the Schur complement
    \param[out] mat_schur dense Schur complement
  */
  template<class T> template<class MatrixSparse, class MatrixFull>
  void SparseDistributedSolver<T>
  ::GetSchurComplement(MatrixSparse& mat_direct, const IVect& num,
                       MatrixFull& mat_schur)
  {
    if (this->type_solver == this->MUMPS)
      {
#ifdef SELDON_WITH_MUMPS
        MatrixMumps<T>& mat_mumps =
          dynamic_cast<MatrixMumps<T>& >(*this->solver);

        GetSchurMatrix(mat_direct, mat_mumps, num, mat_schur);
#else
        cout << "Recompile Montjoie with Mumps."
             << " The Schur complement cannot be computed otherwise." << endl;
        abort();
#endif
      }
    else
      {
        cout << "The Schur complement is only implemented"
             << " with the Mumps solver." << endl;
        abort();
      }
  }


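  /* Usage sketch (illustrative, not part of the library): Schur complement on a
     subset of unknowns with the Mumps backend. It assumes Seldon was compiled
     with SELDON_WITH_MUMPS and that Mumps is selected through the usual
     SparseDirectSolver selection method (SelectDirectSolver); types and sizes
     below are arbitrary.

       SparseDistributedSolver<double> solver;
       solver.SelectDirectSolver(solver.MUMPS);

       Matrix<double, Symmetric, ArrayRowSymSparse> A;
       // ... fill A ...
       IVect num;
       // ... fill num with the row numbers kept in the Schur complement ...

       Matrix<double, General, ColMajor> mat_schur(num.GetM(), num.GetM());
       solver.GetSchurComplement(A, num, mat_schur);
  */
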
  //! returns in bytes the memory used by the direct solver
  template<class T>
  size_t SparseDistributedSolver<T>::GetMemorySize() const
  {
    size_t taille = diagonal_scale_left.GetMemorySize()
      + diagonal_scale_right.GetMemorySize();

    taille += this->permut.GetMemorySize();
    taille += this->solver->GetMemorySize();

    return taille;
  }

} // namespace Seldon

#define SELDON_FILE_DISTRIBUTED_SOLVER_CXX
#endif