Lines Matching refs:a
28 Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
31 if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) { //some OpenCL SDKs have issues with buffers of size 0
36 if (a->compressedrow.use) {
43 row_buffer.raw_resize(dummy, a->compressedrow.nrows + 1);
44 for (PetscInt i = 0; i <= a->compressedrow.nrows; ++i) row_buffer.set(i, (a->compressedrow.i)[i]);
47 row_indices.raw_resize(dummy, a->compressedrow.nrows);
48 for (PetscInt i = 0; i < a->compressedrow.nrows; ++i) row_indices.set(i, (a->compressedrow.rindex)[i]);
51 col_buffer.raw_resize(dummy, a->nz);
52 for (PetscInt i = 0; i < a->nz; ++i) col_buffer.set(i, (a->j)[i]);
54 viennaclstruct->compressed_mat->set(row_buffer.get(), row_indices.get(), col_buffer.get(), a->a, A->rmap->n, A->cmap->n, a->compressedrow.nrows, a->nz);
55 PetscCall(PetscLogCpuToGpu(((2 * a->compressedrow.nrows) + 1 + a->nz) * sizeof(PetscInt) + (a->nz) * sizeof(PetscScalar)));
64 for (PetscInt i = 0; i <= A->rmap->n; ++i) row_buffer.set(i, (a->i)[i]);
67 col_buffer.raw_resize(dummy, a->nz);
68 for (PetscInt i = 0; i < a->nz; ++i) col_buffer.set(i, (a->j)[i]);
70 viennaclstruct->mat->set(row_buffer.get(), col_buffer.get(), a->a, A->rmap->n, A->cmap->n, a->nz);
71 PetscCall(PetscLogCpuToGpu(((A->rmap->n + 1) + a->nz) * sizeof(PetscInt) + (a->nz) * sizeof(PetscScalar)));
101 Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
108 PetscCheck(!a->compressedrow.use, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "ViennaCL: Cannot handle row compression for GPU matrices");
110 a->nz = Agpu->nnz();
111 a->maxnz = a->nz; /* Since we allocate exactly the right amount */
113 if (a->free_a) PetscCall(PetscShmgetDeallocateArray((void **)a->a));
114 if (a->free_ij) PetscCall(PetscShmgetDeallocateArray((void **)a->j));
115 if (a->free_ij) PetscCall(PetscShmgetDeallocateArray((void **)a->i));
116 PetscCall(PetscShmgetAllocateArray(a->nz, sizeof(PetscScalar), (void **)&a->a));
117 PetscCall(PetscShmgetAllocateArray(a->nz, sizeof(PetscInt), (void **)&a->j));
118 PetscCall(PetscShmgetAllocateArray(m + 1, sizeof(PetscInt), (void **)&a->i));
119 a->free_a = PETSC_TRUE;
120 a->free_ij = PETSC_TRUE;
123 PetscCall(PetscFree(a->imax));
124 PetscCall(PetscFree(a->ilen));
125 PetscCall(PetscMalloc1(m, &a->imax));
126 PetscCall(PetscMalloc1(m, &a->ilen));
134 (a->i)[0] = row_buffer[0];
136 (a->i)[i + 1] = row_buffer[i + 1];
137 a->imax[i] = a->ilen[i] = a->i[i + 1] - a->i[i]; //Set imax[] and ilen[] arrays at the same time as i[] for better cache reuse
144 for (PetscInt i = 0; i < (PetscInt)Agpu->nnz(); ++i) (a->j)[i] = col_buffer[i];
147 viennacl::backend::memory_read(Agpu->handle(), 0, sizeof(PetscScalar) * Agpu->nnz(), a->a);
159 PetscCheck(!a->compressedrow.use, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "ViennaCL: Cannot handle row compression for GPU matrices");
161 viennacl::backend::memory_read(viennaclstruct->mat->handle(), 0, sizeof(PetscScalar) * viennaclstruct->mat->nnz(), a->a);
163 viennacl::backend::memory_read(Agpu->handle(), 0, sizeof(PetscScalar) * Agpu->nnz(), a->a);
175 Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
183 if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) {
188 if (a->compressedrow.use) {
200 PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt));
209 Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
217 if (A->rmap->n > 0 && A->cmap->n > 0 && a->nz) {
223 if (a->compressedrow.use) *viennaclstruct->tempvec = viennacl::linalg::prod(*viennaclstruct->compressed_mat, *xgpu);
236 PetscCall(PetscLogGpuFlops(2.0 * a->nz));
253 MatCreateSeqAIJViennaCL - Creates a sparse matrix in `MATSEQAIJVIENNACL` (compressed row) format
362 *array = ((Mat_SeqAIJ *)A->data)->a;
378 *array = ((Mat_SeqAIJ *)A->data)->a;
393 *array = ((Mat_SeqAIJ *)A->data)->a;
407 Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data;
411 if (flg && a->inode.size_csr) {
412 a->inode.use = PETSC_TRUE;
414 a->inode.use = PETSC_FALSE;
423 PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps)));
431 a->ops->getarray = MatSeqAIJGetArray_SeqAIJViennaCL;
432 a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJViennaCL;
433 a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJViennaCL;
434 a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJViennaCL;
435 a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJViennaCL;
436 a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJViennaCL;
483 + -mat_type aijviennacl - sets the matrix type to `MATSEQAIJVIENNACL` during a call to `MatSetFromOptions()`
484 . -mat_viennacl_storage_format csr - sets the storage format of matrices for `MatMult()` during a call to `MatSetFromOptions()`.
485 - -mat_viennacl_mult_storage_format csr - sets the storage format of matrices for `MatMult()` during a call to `MatSetFromOptions()`.