CppNoddy  0.92
DistributedMatrix.h
/// \file DistributedMatrix.h
/// A matrix class that constructs a SPARSE/DISTRIBUTED matrix
/// using PETSc

#if defined(PETSC_D) || defined(PETSC_Z)

#ifndef DISTRIBUTEDMATRIX_H
#define DISTRIBUTEDMATRIX_H

#include <Exceptions.h>
#include <DenseVector.h>
#include <SparseVector.h>
#include "petsc.h"

namespace CppNoddy {

  /// A matrix class that constructs a SPARSE/DISTRIBUTED matrix
  /// using PETSc's parallel AIJ (MATMPIAIJ) storage.
  template <typename _Type>
  class DistributedMatrix {

  public:

    /// Construct a distributed matrix of a given global size
    /// \param rows The number of rows in the matrix
    /// \param cols The number of columns in the matrix
    /// \param nd The number of non-zero entries per row in the (on-processor) diagonal block
    /// \param od The number of non-zero entries per row in the off-diagonal block
    DistributedMatrix(const PetscInt& rows, const PetscInt& cols,
                      const PetscInt& nd, const PetscInt& od) {
      /* "diagonal" vs "off-diagonal" is as defined in the PETSc documentation:
         it depends on the division of the matrix across processes, and any
         columns outside the locally owned range [row_start, row_end) count
         as off-diagonal. */
#if defined(PETSC_D) || defined(PETSC_Z)
      int flag(0);
      MPI_Initialized(&flag);
      if(flag != 1) {
        std::string problem;
        problem = "DistributedMatrix<> needs PETSc and therefore you must call \n";
        problem += "PetscSession before instantiating the object.\n";
        throw ExceptionRuntime(problem);
      }
#else // unreachable here: the whole header is guarded by the same test
      std::string problem;
      problem = "DistributedMatrix<> needs PETSc.\n";
      throw ExceptionRuntime(problem);
#endif

      // set A to be a rows x cols matrix
      MatCreate(PETSC_COMM_WORLD, &m_A);
      MatSetType(m_A, MATMPIAIJ);
      // let PETSc decide the local row/col split, but specify the global number of rows/cols
      MatSetSizes(m_A, PETSC_DECIDE, PETSC_DECIDE, rows, cols);
      /* getting the preallocation "right" is key to decent performance */
      MatMPIAIJSetPreallocation(m_A, nd, NULL, od, NULL);
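      /* Illustrative note (not in the original source): for a tridiagonal
         matrix split into contiguous row blocks, each owned row has at most
         3 non-zeros in the locally owned column block and at most 2 outside
         it, so nd = 3, od = 2 is a safe preallocation. Under-estimating
         forces PETSc to reallocate during assembly, which is expensive. */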
      // add any command line configuration
      MatSetFromOptions(m_A);
      MatSetUp(m_A);

      // store the rank/size in the object
      MPI_Comm_size(MPI_COMM_WORLD, &m_size);
      MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);

#if defined(DEBUG)
      PetscPrintf(PETSC_COMM_WORLD, "[DEBUG] Creating a distributed matrix\n");
      PetscInt row_start, row_end;
      MatGetOwnershipRange(m_A, &row_start, &row_end);
      PetscSynchronizedPrintf(PETSC_COMM_WORLD, "[DEBUG] Rank = %D Start_row = %D End_row = %D \n", m_rank, row_start, row_end-1);
      PetscSynchronizedFlush(PETSC_COMM_WORLD, PETSC_STDOUT);
#endif
    }

    /// D-tor, make sure we clean up PETSc objects on exit
    ~DistributedMatrix() {
      // MatDestroy is collective and must be called on all processes
      MatDestroy(&m_A);
    }

    /// \return A pointer to the PETSc Mat member data
    Mat* get_pMat() {
      return &m_A;
    }

    /// Zero all entries of the matrix, retaining the sparsity pattern
    void blank() {
      MatZeroEntries(m_A);
    }

    /// Set an element of the matrix (equivalent to set_elt)
    /// \param row The row of the element to set
    /// \param col The column of the element to set
    /// \param value The value to be put into element (row,col)
    void operator()(const PetscInt& row, const PetscInt& col, const PetscScalar& value) {
      set_elt(row, col, value);
    }

    /// Set an element of the matrix, provided the row lies in this
    /// processor's ownership range; otherwise the call is ignored
    /// \param row The row of the element to set
    /// \param col The column of the element to set
    /// \param value The value to be put into element (row,col)
    void set_elt(const PetscInt& row, const PetscInt& col, const PetscScalar& value) {
      PetscInt row_start, row_end;
      MatGetOwnershipRange(m_A, &row_start, &row_end);
      if ((row >= row_start) && (row < row_end)) {
        MatSetValues(m_A, 1, &row, 1, &col, &value, INSERT_VALUES);
      }
    }

    /// Set a row of the matrix, provided the row lies in this
    /// processor's ownership range; otherwise the call is ignored
    /// \param row The row of the elements to set
    /// \param cols The vector of column indices to set
    /// \param values The vector of values to be put in the above columns
    void set_row(const PetscInt& row, const DenseVector<int>& cols, const DenseVector<_Type>& values) {
      PetscInt row_start, row_end;
      MatGetOwnershipRange(m_A, &row_start, &row_end);
      if ((row >= row_start) && (row < row_end)) {
        PetscInt nnz_cols = cols.size();
        MatSetValues(m_A, 1, &row, nnz_cols, &cols[0], &values[0], INSERT_VALUES);
      }
    }

    /// Assemble the matrix; this is collective and must be called
    /// on all processes once all the entries have been set
    void final_assembly() {
      MatAssemblyBegin(m_A, MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(m_A, MAT_FINAL_ASSEMBLY);
    }

    /// View the matrix on stdout
    void view() {
      PetscInt row_start, row_end;
      MatGetOwnershipRange(m_A, &row_start, &row_end);
      PetscPrintf(PETSC_COMM_WORLD, "Matrix view to stdout:\n");
      PetscSynchronizedPrintf(PETSC_COMM_WORLD, "Processor rank = %D, start row = %D end row = %D \n", m_rank, row_start, row_end-1);
      PetscSynchronizedPrintf(PETSC_COMM_WORLD, " Diagonal block is a %Dx%D square.\n", row_end-row_start, row_end-row_start);
      PetscSynchronizedFlush(PETSC_COMM_WORLD, PETSC_STDOUT);
      MatView(m_A, PETSC_VIEWER_STDOUT_WORLD);
    }

  private:

    // PETSc matrix
    Mat m_A;

    // processor rank and total number of processors
    PetscMPIInt m_rank, m_size;

  }; // END CLASS
} // end namespace

#endif // include guard

#endif // check for PETSC_D or PETSC_Z
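
The sketch below shows one way this class might be driven. It is illustrative only: it assumes a PETSC_D (real, double-precision) build, that the PetscSession prerequisite named in the constructor's error message can be constructed from (argc, argv), and that DenseVector supports push_back; everything else uses only the methods defined above.

#include "PetscSession.h"   // assumed header name for the session helper
#include "DistributedMatrix.h"
#include "DenseVector.h"

using namespace CppNoddy;

int main(int argc, char* argv[]) {
  // PetscSession must exist before any DistributedMatrix is constructed
  // (per the constructor's error message); this call signature is assumed.
  PetscSession session(argc, argv);

  const PetscInt n = 10;
  // tridiagonal 10x10 (1D Laplacian): at most 3 entries per row in the
  // diagonal block, at most 2 in the off-diagonal block
  DistributedMatrix<double> A(n, n, 3, 2);

  // every rank loops over every row; set_row ignores rows it does not own
  for (PetscInt row = 0; row < n; ++row) {
    DenseVector<int> cols;     // push_back is assumed to exist
    DenseVector<double> vals;
    if (row > 0) { cols.push_back(row - 1); vals.push_back(-1.0); }
    cols.push_back(row);
    vals.push_back(2.0);
    if (row < n - 1) { cols.push_back(row + 1); vals.push_back(-1.0); }
    A.set_row(row, cols, vals);
  }

  A.final_assembly();  // collective MatAssemblyBegin/End
  A.view();            // dump ownership ranges and the matrix to stdout
  return 0;
}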