Cholesky factorization (CF) needs to access matrix elements along both rows and columns. For a dense matrix this is trivial, but for a sparse matrix the addressing is always the biggest problem. My brute-force implementation is much slower than MKL's dcsrilu0.
CF is a highly sequential algorithm: it processes the matrix row by row, and the first step has to update on the order of n*n/2 entries of the trailing submatrix. Every later step touches a smaller submatrix, so the steps get faster and faster.
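For reference, here is the same three-phase scheme in dense form (a minimal CPU sketch of my own, not from any library); step k touches roughly (n-k)*(n-k)/2 entries, which is why the early steps dominate:

#include <cmath>

// Dense, in-place, right-looking Cholesky on the lower triangle.
// Step k mirrors Phase1/Phase2/Phase3 of the CUDA code below.
void cholesky_dense(double *A, int n)   // A is n x n, row-major
{
    for (int k = 0; k < n; ++ k)
    {
        double d = std::sqrt(A[k * n + k]);
        for (int i = k + 1; i < n; ++ i)      // Phase1: scale column k
            A[i * n + k] /= d;
        for (int j = k + 1; j < n; ++ j)      // Phase2: trailing-submatrix update
            for (int i = j; i < n; ++ i)
                A[i * n + j] -= A[i * n + k] * A[j * n + k];
        A[k * n + k] = d;                     // Phase3: write back the diagonal
    }
}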
In fact I considered several methods, as follows:
- Gathering and scattering. The bottleneck is the loop in Phase2, which has to access both a column and a row; gather/scatter could simplify the memory access, but that is essentially what the CPU version already does (a sketch follows this list).
- Keeping a transposed copy of the matrix, so that column access becomes contiguous; but the two copies have to be synchronized at the end of every step, which is still an overhead.
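A minimal sketch of the gathering idea (a hypothetical kernel, not part of the code below): pull column k into a dense scratch vector once per step, so Phase2 can read A(i, k) and A(j, k) with plain indexed loads instead of a CSR search:

// Hypothetical gather kernel, one block per row: copy column k of the
// CSR matrix into a dense scratch vector, writing 0 where row i has no
// entry in column k.
__global__ void GatherColumn(const int k, const double *vals, const int *rows,
                             const int *cols, double *colk)
{
    int i = blockIdx.x;
    double v = 0.0;
    for (int p = rows[i]; p < rows[i + 1]; ++ p)
    {
        if (cols[p] == k)
        {
            v = vals[p];
            break;
        }
    }
    colk[i] = v;   // Phase2 would then read colk[i] and colk[j] directly
}

It would be launched with one block per row after Phase1 of step k; since Phase2 only writes entries in columns j > k, nothing needs to be scattered back for this particular use. Still, as noted above, this is essentially what the CPU version does anyway.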
Here is the code. It works, but it is slow; feel free to improve it yourself. I switched back to the CPU, to hell with the GPU.
#include <iostream>

// Look up A(r, c) in a CSR matrix whose column indices are sorted
// ascending within each row. Returns 0 and stores the value in *v if
// the entry exists, -1 if c lies before the first stored column (or is
// missing), and 1 if c lies past the last stored column of row r.
__device__ int ReadCSR(const double *vals, const int *rows, const int *cols, const int r, const int c, double *v)
{
    int i, j;

    i = rows[r];
    j = rows[r + 1];

    i = cols[i];       // first stored column index of row r
    j = cols[j - 1];   // last stored column index of row r

    if (c < i)
    {
        return -1;
    }

    if (c > j)
    {
        return 1;
    }

    for (i = rows[r]; i < rows[r + 1]; ++ i)
    {
        j = cols[i];
        if (j == c)
        {
            *v = vals[i];
            return 0;
        }
    }

    return -1;
}

// Overwrite A(r, c) in place. If (r, c) is not part of the sparsity
// pattern the write is silently dropped; no fill-in is ever inserted.
__device__ void WriteCSR(double *vals, const int *rows, const int *cols, const int r, const int c, const double v)
{
    int i, j;
    for (i = rows[r]; i < rows[r + 1]; ++ i)
    {
        j = cols[i];
        if (j == c)
        {
            vals[i] = v;
        }
    }
}

// Step k, phase 1: scale column k below the diagonal,
// A(i, k) /= sqrt(A(k, k)). One block per row i; the diagonal itself
// is not overwritten until Phase3, so every block reads the same A(k, k).
__global__ void Phase1(const int k, double *vals, const int *rows, const int *cols)
{
    int i = blockIdx.x;
    if (i > k)
    {
        double Akk = 0.0;
        ReadCSR(vals, rows, cols, k, k, &Akk);
        Akk = sqrt(Akk);

        double Aik = 0.0;
        if (ReadCSR(vals, rows, cols, i, k, &Aik) == 0)
        {
            WriteCSR(vals, rows, cols, i, k, Aik / Akk);
        }
    }
}

// Step k, phase 2: trailing-submatrix update,
// A(i, j) -= A(i, k) * A(j, k) for j > k, i >= j. One block per column j.
// This inner loop over i is the bottleneck discussed above: every
// iteration pays for up to three CSR searches.
__global__ void Phase2(const int k, double *vals, const int *rows, const int *cols)
{
    int j = blockIdx.x;
    int r = gridDim.x;   // launched with one block per row, so r == numRow

    if (j > k)
    {
        for (int i = j; i < r; ++ i)
        {
            double Aij = 0.0;
            int a = ReadCSR(vals, rows, cols, i, j, &Aij);
            if (a == 0)
            {
                double Aik = 0.0, Ajk = 0.0;
                ReadCSR(vals, rows, cols, i, k, &Aik);
                ReadCSR(vals, rows, cols, j, k, &Ajk);
                WriteCSR(vals, rows, cols, i, j, Aij - Aik * Ajk);
            }
            else if (a > 0)
            {
                break;   // column j lies past the last stored column of row i
            }
        }
    }
}

// Step k, phase 3: finish the pivot, A(k, k) = sqrt(A(k, k)).
// Runs last because Phase1 reads the unmodified A(k, k).
__global__ void Phase3(const int k, double *vals, const int *rows, const int *cols)
{
    double Akk = 0.0;
    ReadCSR(vals, rows, cols, k, k, &Akk);
    Akk = sqrt(Akk);
    WriteCSR(vals, rows, cols, k, k, Akk);
}

// Host driver: one factorization step per loop iteration. All three
// pointers must point to device memory. Launches on the default stream
// run in order, so Phase1 -> Phase2 -> Phase3 is implicitly serialized.
void test(const int numRow, double *vals, const int *rows, const int *cols)
{
    for (int k = 0; k < numRow; ++ k)
    {
        std::cout << k << std::endl;
        Phase1<<<numRow, 1>>>(k, vals, rows, cols);
        Phase2<<<numRow, 1>>>(k, vals, rows, cols);
        Phase3<<<1, 1>>>(k, vals, rows, cols);
    }
    cudaDeviceSynchronize();
}

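For completeness, a small driver showing how test() is meant to be called (the 3x3 matrix and all host-side names here are my own, just a sanity-check sketch):

// Hypothetical driver, assumed to sit in the same .cu file as the code
// above. Factors the lower triangle of a small SPD matrix:
//     [ 4       ]
//     [ 2  5    ]
//     [ 0  3  6 ]
int main()
{
    const int n = 3;
    double h_vals[] = { 4.0, 2.0, 5.0, 3.0, 6.0 };
    int    h_rows[] = { 0, 1, 3, 5 };
    int    h_cols[] = { 0, 0, 1, 1, 2 };

    double *d_vals;
    int *d_rows, *d_cols;
    cudaMalloc(&d_vals, sizeof(h_vals));
    cudaMalloc(&d_rows, sizeof(h_rows));
    cudaMalloc(&d_cols, sizeof(h_cols));
    cudaMemcpy(d_vals, h_vals, sizeof(h_vals), cudaMemcpyHostToDevice);
    cudaMemcpy(d_rows, h_rows, sizeof(h_rows), cudaMemcpyHostToDevice);
    cudaMemcpy(d_cols, h_cols, sizeof(h_cols), cudaMemcpyHostToDevice);

    test(n, d_vals, d_rows, d_cols);

    cudaMemcpy(h_vals, d_vals, sizeof(h_vals), cudaMemcpyDeviceToHost);
    for (double v : h_vals)
        std::cout << v << " ";   // expected: 2 1 2 1.5 1.93649
    std::cout << std::endl;

    cudaFree(d_vals);
    cudaFree(d_rows);
    cudaFree(d_cols);
    return 0;
}
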
Apr. 24
Found a paper that uses ELLPACK-R instead of CSR. I will give it a try.
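Roughly, the ELLPACK-R layout looks like this (struct and function names are my own sketch of the format, not the paper's code): each row's nonzeros are padded to the longest row and stored column-major, with a separate array of true row lengths so each thread can stop early:

// ELLPACK-R layout sketch (hypothetical names). vals/cols are
// n x maxRowLen arrays stored column-major, so entry p of row i sits at
// index p * n + i: consecutive threads reading the same p make coalesced
// accesses. rl[i] is the actual number of nonzeros in row i.
struct EllpackR
{
    int     n;          // number of rows
    int     maxRowLen;  // padded row length
    double *vals;       // size n * maxRowLen, column-major
    int    *cols;       // size n * maxRowLen, column-major
    int    *rl;         // size n: actual nonzeros per row
};

// Example device-side lookup, similar contract to ReadCSR above:
// returns 0 and stores the value in *v if A(r, c) exists, -1 otherwise.
__device__ int ReadEll(const EllpackR m, const int r, const int c, double *v)
{
    for (int p = 0; p < m.rl[r]; ++ p)
    {
        int j = m.cols[p * m.n + r];
        if (j == c)
        {
            *v = m.vals[p * m.n + r];
            return 0;
        }
        if (j > c)
        {
            break;   // assuming sorted column indices within each row
        }
    }
    return -1;
}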