// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative

//! A Ceed Operator defines the finite/spectral element operator associated to a
//! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions,
//! Ceed Bases, and Ceed QFunctions.
20 21 use crate::prelude::*; 22 23 // ----------------------------------------------------------------------------- 24 // CeedOperator context wrapper 25 // ----------------------------------------------------------------------------- 26 #[derive(Debug)] 27 pub(crate) struct OperatorCore<'a> { 28 pub(crate) ceed: &'a crate::Ceed, 29 ptr: bind_ceed::CeedOperator, 30 } 31 32 #[derive(Debug)] 33 pub struct Operator<'a> { 34 op_core: OperatorCore<'a>, 35 } 36 37 #[derive(Debug)] 38 pub struct CompositeOperator<'a> { 39 op_core: OperatorCore<'a>, 40 } 41 42 // ----------------------------------------------------------------------------- 43 // Destructor 44 // ----------------------------------------------------------------------------- 45 impl<'a> Drop for OperatorCore<'a> { 46 fn drop(&mut self) { 47 unsafe { 48 bind_ceed::CeedOperatorDestroy(&mut self.ptr); 49 } 50 } 51 } 52 53 // ----------------------------------------------------------------------------- 54 // Display 55 // ----------------------------------------------------------------------------- 56 impl<'a> fmt::Display for OperatorCore<'a> { 57 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 58 let mut ptr = std::ptr::null_mut(); 59 let mut sizeloc = crate::MAX_BUFFER_LENGTH; 60 let cstring = unsafe { 61 let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc); 62 bind_ceed::CeedOperatorView(self.ptr, file); 63 bind_ceed::fclose(file); 64 CString::from_raw(ptr) 65 }; 66 cstring.to_string_lossy().fmt(f) 67 } 68 } 69 70 /// View an Operator 71 /// 72 /// ``` 73 /// # use libceed::prelude::*; 74 /// # fn main() -> Result<(), libceed::CeedError> { 75 /// # let ceed = libceed::Ceed::default_init(); 76 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 77 /// 78 /// // Operator field arguments 79 /// let ne = 3; 80 /// let q = 4 as usize; 81 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 82 /// for i in 0..ne { 83 /// ind[2 * i + 0] = i as i32; 84 /// ind[2 * i + 1] = (i + 1) as i32; 85 /// } 
86 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 87 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 88 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 89 /// 90 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 91 /// 92 /// // Operator fields 93 /// let op = ceed 94 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 95 /// .field("dx", &r, &b, VectorOpt::Active)? 96 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 97 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 98 /// 99 /// println!("{}", op); 100 /// # Ok(()) 101 /// # } 102 /// ``` 103 impl<'a> fmt::Display for Operator<'a> { 104 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 105 self.op_core.fmt(f) 106 } 107 } 108 109 /// View a composite Operator 110 /// 111 /// ``` 112 /// # use libceed::prelude::*; 113 /// # fn main() -> Result<(), libceed::CeedError> { 114 /// # let ceed = libceed::Ceed::default_init(); 115 /// 116 /// // Sub operator field arguments 117 /// let ne = 3; 118 /// let q = 4 as usize; 119 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 120 /// for i in 0..ne { 121 /// ind[2 * i + 0] = i as i32; 122 /// ind[2 * i + 1] = (i + 1) as i32; 123 /// } 124 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 125 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 126 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 127 /// 128 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 129 /// 130 /// let qdata_mass = ceed.vector(q * ne)?; 131 /// let qdata_diff = ceed.vector(q * ne)?; 132 /// 133 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 134 /// let op_mass = ceed 135 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 136 /// .field("u", &r, &b, VectorOpt::Active)? 137 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 
138 /// .field("v", &r, &b, VectorOpt::Active)?; 139 /// 140 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 141 /// let op_diff = ceed 142 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 143 /// .field("du", &r, &b, VectorOpt::Active)? 144 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 145 /// .field("dv", &r, &b, VectorOpt::Active)?; 146 /// 147 /// let op = ceed 148 /// .composite_operator()? 149 /// .sub_operator(&op_mass)? 150 /// .sub_operator(&op_diff)?; 151 /// 152 /// println!("{}", op); 153 /// # Ok(()) 154 /// # } 155 /// ``` 156 impl<'a> fmt::Display for CompositeOperator<'a> { 157 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 158 self.op_core.fmt(f) 159 } 160 } 161 162 // ----------------------------------------------------------------------------- 163 // Core functionality 164 // ----------------------------------------------------------------------------- 165 impl<'a> OperatorCore<'a> { 166 // Common implementations 167 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 168 let ierr = unsafe { 169 bind_ceed::CeedOperatorApply( 170 self.ptr, 171 input.ptr, 172 output.ptr, 173 bind_ceed::CEED_REQUEST_IMMEDIATE, 174 ) 175 }; 176 self.ceed.check_error(ierr) 177 } 178 179 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 180 let ierr = unsafe { 181 bind_ceed::CeedOperatorApplyAdd( 182 self.ptr, 183 input.ptr, 184 output.ptr, 185 bind_ceed::CEED_REQUEST_IMMEDIATE, 186 ) 187 }; 188 self.ceed.check_error(ierr) 189 } 190 191 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 192 let ierr = unsafe { 193 bind_ceed::CeedOperatorLinearAssembleDiagonal( 194 self.ptr, 195 assembled.ptr, 196 bind_ceed::CEED_REQUEST_IMMEDIATE, 197 ) 198 }; 199 self.ceed.check_error(ierr) 200 } 201 202 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 203 let ierr = unsafe { 204 
bind_ceed::CeedOperatorLinearAssembleAddDiagonal( 205 self.ptr, 206 assembled.ptr, 207 bind_ceed::CEED_REQUEST_IMMEDIATE, 208 ) 209 }; 210 self.ceed.check_error(ierr) 211 } 212 213 pub fn linear_assemble_point_block_diagonal( 214 &self, 215 assembled: &mut Vector, 216 ) -> crate::Result<i32> { 217 let ierr = unsafe { 218 bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal( 219 self.ptr, 220 assembled.ptr, 221 bind_ceed::CEED_REQUEST_IMMEDIATE, 222 ) 223 }; 224 self.ceed.check_error(ierr) 225 } 226 227 pub fn linear_assemble_add_point_block_diagonal( 228 &self, 229 assembled: &mut Vector, 230 ) -> crate::Result<i32> { 231 let ierr = unsafe { 232 bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal( 233 self.ptr, 234 assembled.ptr, 235 bind_ceed::CEED_REQUEST_IMMEDIATE, 236 ) 237 }; 238 self.ceed.check_error(ierr) 239 } 240 } 241 242 // ----------------------------------------------------------------------------- 243 // Operator 244 // ----------------------------------------------------------------------------- 245 impl<'a> Operator<'a> { 246 // Constructor 247 pub fn create<'b>( 248 ceed: &'a crate::Ceed, 249 qf: impl Into<QFunctionOpt<'b>>, 250 dqf: impl Into<QFunctionOpt<'b>>, 251 dqfT: impl Into<QFunctionOpt<'b>>, 252 ) -> crate::Result<Self> { 253 let mut ptr = std::ptr::null_mut(); 254 let ierr = unsafe { 255 bind_ceed::CeedOperatorCreate( 256 ceed.ptr, 257 qf.into().to_raw(), 258 dqf.into().to_raw(), 259 dqfT.into().to_raw(), 260 &mut ptr, 261 ) 262 }; 263 ceed.check_error(ierr)?; 264 Ok(Self { 265 op_core: OperatorCore { ceed, ptr }, 266 }) 267 } 268 269 fn from_raw(ceed: &'a crate::Ceed, ptr: bind_ceed::CeedOperator) -> crate::Result<Self> { 270 Ok(Self { 271 op_core: OperatorCore { ceed, ptr }, 272 }) 273 } 274 275 /// Apply Operator to a vector 276 /// 277 /// * `input` - Input Vector 278 /// * `output` - Output Vector 279 /// 280 /// ``` 281 /// # use libceed::prelude::*; 282 /// # fn main() -> Result<(), libceed::CeedError> { 283 /// # let 
ceed = libceed::Ceed::default_init(); 284 /// let ne = 4; 285 /// let p = 3; 286 /// let q = 4; 287 /// let ndofs = p * ne - ne + 1; 288 /// 289 /// // Vectors 290 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 291 /// let mut qdata = ceed.vector(ne * q)?; 292 /// qdata.set_value(0.0); 293 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 294 /// let mut v = ceed.vector(ndofs)?; 295 /// v.set_value(0.0); 296 /// 297 /// // Restrictions 298 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 299 /// for i in 0..ne { 300 /// indx[2 * i + 0] = i as i32; 301 /// indx[2 * i + 1] = (i + 1) as i32; 302 /// } 303 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 304 /// let mut indu: Vec<i32> = vec![0; p * ne]; 305 /// for i in 0..ne { 306 /// indu[p * i + 0] = i as i32; 307 /// indu[p * i + 1] = (i + 1) as i32; 308 /// indu[p * i + 2] = (i + 2) as i32; 309 /// } 310 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 311 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 312 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 313 /// 314 /// // Bases 315 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 316 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 317 /// 318 /// // Build quadrature data 319 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 320 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 321 /// .field("dx", &rx, &bx, VectorOpt::Active)? 322 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 323 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 324 /// .apply(&x, &mut qdata)?; 325 /// 326 /// // Mass operator 327 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 328 /// let op_mass = ceed 329 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 330 /// .field("u", &ru, &bu, VectorOpt::Active)? 
331 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 332 /// .field("v", &ru, &bu, VectorOpt::Active)?; 333 /// 334 /// v.set_value(0.0); 335 /// op_mass.apply(&u, &mut v)?; 336 /// 337 /// // Check 338 /// let sum: Scalar = v.view().iter().sum(); 339 /// assert!( 340 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 341 /// "Incorrect interval length computed" 342 /// ); 343 /// # Ok(()) 344 /// # } 345 /// ``` 346 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 347 self.op_core.apply(input, output) 348 } 349 350 /// Apply Operator to a vector and add result to output vector 351 /// 352 /// * `input` - Input Vector 353 /// * `output` - Output Vector 354 /// 355 /// ``` 356 /// # use libceed::prelude::*; 357 /// # fn main() -> Result<(), libceed::CeedError> { 358 /// # let ceed = libceed::Ceed::default_init(); 359 /// let ne = 4; 360 /// let p = 3; 361 /// let q = 4; 362 /// let ndofs = p * ne - ne + 1; 363 /// 364 /// // Vectors 365 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 366 /// let mut qdata = ceed.vector(ne * q)?; 367 /// qdata.set_value(0.0); 368 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 369 /// let mut v = ceed.vector(ndofs)?; 370 /// 371 /// // Restrictions 372 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 373 /// for i in 0..ne { 374 /// indx[2 * i + 0] = i as i32; 375 /// indx[2 * i + 1] = (i + 1) as i32; 376 /// } 377 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 378 /// let mut indu: Vec<i32> = vec![0; p * ne]; 379 /// for i in 0..ne { 380 /// indu[p * i + 0] = i as i32; 381 /// indu[p * i + 1] = (i + 1) as i32; 382 /// indu[p * i + 2] = (i + 2) as i32; 383 /// } 384 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 385 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 386 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 387 /// 388 /// // Bases 389 /// let bx = 
ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 390 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 391 /// 392 /// // Build quadrature data 393 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 394 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 395 /// .field("dx", &rx, &bx, VectorOpt::Active)? 396 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 397 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 398 /// .apply(&x, &mut qdata)?; 399 /// 400 /// // Mass operator 401 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 402 /// let op_mass = ceed 403 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 404 /// .field("u", &ru, &bu, VectorOpt::Active)? 405 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 406 /// .field("v", &ru, &bu, VectorOpt::Active)?; 407 /// 408 /// v.set_value(1.0); 409 /// op_mass.apply_add(&u, &mut v)?; 410 /// 411 /// // Check 412 /// let sum: Scalar = v.view().iter().sum(); 413 /// assert!( 414 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 415 /// "Incorrect interval length computed and added" 416 /// ); 417 /// # Ok(()) 418 /// # } 419 /// ``` 420 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 421 self.op_core.apply_add(input, output) 422 } 423 424 /// Provide a field to a Operator for use by its QFunction 425 /// 426 /// * `fieldname` - Name of the field (to be matched with the name used by 427 /// the QFunction) 428 /// * `r` - ElemRestriction 429 /// * `b` - Basis in which the field resides or `BasisCollocated` if 430 /// collocated with quadrature points 431 /// * `v` - Vector to be used by Operator or `VectorActive` if field 432 /// is active or `VectorNone` if using `Weight` with the 433 /// QFunction 434 /// 435 /// 436 /// ``` 437 /// # use libceed::prelude::*; 438 /// # fn main() -> Result<(), libceed::CeedError> { 439 
/// # let ceed = libceed::Ceed::default_init(); 440 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 441 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 442 /// 443 /// // Operator field arguments 444 /// let ne = 3; 445 /// let q = 4; 446 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 447 /// for i in 0..ne { 448 /// ind[2 * i + 0] = i as i32; 449 /// ind[2 * i + 1] = (i + 1) as i32; 450 /// } 451 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 452 /// 453 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 454 /// 455 /// // Operator field 456 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 457 /// # Ok(()) 458 /// # } 459 /// ``` 460 #[allow(unused_mut)] 461 pub fn field<'b>( 462 mut self, 463 fieldname: &str, 464 r: impl Into<ElemRestrictionOpt<'b>>, 465 b: impl Into<BasisOpt<'b>>, 466 v: impl Into<VectorOpt<'b>>, 467 ) -> crate::Result<Self> { 468 let fieldname = CString::new(fieldname).expect("CString::new failed"); 469 let fieldname = fieldname.as_ptr() as *const i8; 470 let ierr = unsafe { 471 bind_ceed::CeedOperatorSetField( 472 self.op_core.ptr, 473 fieldname, 474 r.into().to_raw(), 475 b.into().to_raw(), 476 v.into().to_raw(), 477 ) 478 }; 479 self.op_core.ceed.check_error(ierr)?; 480 Ok(self) 481 } 482 483 /// Assemble the diagonal of a square linear Operator 484 /// 485 /// This overwrites a Vector with the diagonal of a linear Operator. 486 /// 487 /// Note: Currently only non-composite Operators with a single field and 488 /// composite Operators with single field sub-operators are supported. 
    ///
    /// * `op` - Operator to assemble QFunction
    /// * `assembled` - Vector to store assembled Operator diagonal
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
538 /// .apply(&x, &mut qdata)?; 539 /// 540 /// // Mass operator 541 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 542 /// let op_mass = ceed 543 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 544 /// .field("u", &ru, &bu, VectorOpt::Active)? 545 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 546 /// .field("v", &ru, &bu, VectorOpt::Active)?; 547 /// 548 /// // Diagonal 549 /// let mut diag = ceed.vector(ndofs)?; 550 /// diag.set_value(0.0); 551 /// op_mass.linear_assemble_diagonal(&mut diag)?; 552 /// 553 /// // Manual diagonal computation 554 /// let mut true_diag = ceed.vector(ndofs)?; 555 /// for i in 0..ndofs { 556 /// u.set_value(0.0); 557 /// { 558 /// let mut u_array = u.view_mut(); 559 /// u_array[i] = 1.; 560 /// } 561 /// 562 /// op_mass.apply(&u, &mut v)?; 563 /// 564 /// { 565 /// let v_array = v.view_mut(); 566 /// let mut true_array = true_diag.view_mut(); 567 /// true_array[i] = v_array[i]; 568 /// } 569 /// } 570 /// 571 /// // Check 572 /// diag.view() 573 /// .iter() 574 /// .zip(true_diag.view().iter()) 575 /// .for_each(|(computed, actual)| { 576 /// assert!( 577 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 578 /// "Diagonal entry incorrect" 579 /// ); 580 /// }); 581 /// # Ok(()) 582 /// # } 583 /// ``` 584 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 585 self.op_core.linear_assemble_diagonal(assembled) 586 } 587 588 /// Assemble the diagonal of a square linear Operator 589 /// 590 /// This sums into a Vector with the diagonal of a linear Operator. 591 /// 592 /// Note: Currently only non-composite Operators with a single field and 593 /// composite Operators with single field sub-operators are supported. 
    ///
    /// * `op` - Operator to assemble QFunction
    /// * `assembled` - Vector to store assembled Operator diagonal
    ///
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
644 /// .apply(&x, &mut qdata)?; 645 /// 646 /// // Mass operator 647 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 648 /// let op_mass = ceed 649 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 650 /// .field("u", &ru, &bu, VectorOpt::Active)? 651 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 652 /// .field("v", &ru, &bu, VectorOpt::Active)?; 653 /// 654 /// // Diagonal 655 /// let mut diag = ceed.vector(ndofs)?; 656 /// diag.set_value(1.0); 657 /// op_mass.linear_assemble_add_diagonal(&mut diag)?; 658 /// 659 /// // Manual diagonal computation 660 /// let mut true_diag = ceed.vector(ndofs)?; 661 /// for i in 0..ndofs { 662 /// u.set_value(0.0); 663 /// { 664 /// let mut u_array = u.view_mut(); 665 /// u_array[i] = 1.; 666 /// } 667 /// 668 /// op_mass.apply(&u, &mut v)?; 669 /// 670 /// { 671 /// let v_array = v.view_mut(); 672 /// let mut true_array = true_diag.view_mut(); 673 /// true_array[i] = v_array[i] + 1.0; 674 /// } 675 /// } 676 /// 677 /// // Check 678 /// diag.view() 679 /// .iter() 680 /// .zip(true_diag.view().iter()) 681 /// .for_each(|(computed, actual)| { 682 /// assert!( 683 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 684 /// "Diagonal entry incorrect" 685 /// ); 686 /// }); 687 /// # Ok(()) 688 /// # } 689 /// ``` 690 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 691 self.op_core.linear_assemble_add_diagonal(assembled) 692 } 693 694 /// Assemble the point block diagonal of a square linear Operator 695 /// 696 /// This overwrites a Vector with the point block diagonal of a linear 697 /// Operator. 698 /// 699 /// Note: Currently only non-composite Operators with a single field and 700 /// composite Operators with single field sub-operators are supported. 
    ///
    /// * `op` - Operator to assemble QFunction
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                   diagonal, provided in row-major form with an
    ///                   `ncomp * ncomp` block at each node. The dimensions of
    ///                   this vector are derived from the active vector for
    ///                   the CeedOperator. The array has shape
    ///                   `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
783 /// .field("v", &ru, &bu, VectorOpt::Active)?; 784 /// 785 /// // Diagonal 786 /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?; 787 /// diag.set_value(0.0); 788 /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?; 789 /// 790 /// // Manual diagonal computation 791 /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?; 792 /// for i in 0..ndofs { 793 /// for j in 0..ncomp { 794 /// u.set_value(0.0); 795 /// { 796 /// let mut u_array = u.view_mut(); 797 /// u_array[i + j * ndofs] = 1.; 798 /// } 799 /// 800 /// op_mass.apply(&u, &mut v)?; 801 /// 802 /// { 803 /// let v_array = v.view_mut(); 804 /// let mut true_array = true_diag.view_mut(); 805 /// for k in 0..ncomp { 806 /// true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; 807 /// } 808 /// } 809 /// } 810 /// } 811 /// 812 /// // Check 813 /// diag.view() 814 /// .iter() 815 /// .zip(true_diag.view().iter()) 816 /// .for_each(|(computed, actual)| { 817 /// assert!( 818 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 819 /// "Diagonal entry incorrect" 820 /// ); 821 /// }); 822 /// # Ok(()) 823 /// # } 824 /// ``` 825 pub fn linear_assemble_point_block_diagonal( 826 &self, 827 assembled: &mut Vector, 828 ) -> crate::Result<i32> { 829 self.op_core.linear_assemble_point_block_diagonal(assembled) 830 } 831 832 /// Assemble the point block diagonal of a square linear Operator 833 /// 834 /// This sums into a Vector with the point block diagonal of a linear 835 /// Operator. 836 /// 837 /// Note: Currently only non-composite Operators with a single field and 838 /// composite Operators with single field sub-operators are supported. 839 /// 840 /// * `op` - Operator to assemble QFunction 841 /// * `assembled` - Vector to store assembled CeedOperator point block 842 /// diagonal, provided in row-major form with an 843 /// `ncomp * ncomp` block at each node. The dimensions of 844 /// this vector are derived from the active vector for 845 /// the CeedOperator. 
The array has shape
    ///                 `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(1.0);
    /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0);
    ///         {
    ///             let mut u_array = u.view_mut();
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view_mut();
    ///             let mut true_array = true_diag.view_mut();
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()
    ///     .iter()
    ///     .zip(true_diag.view().iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Delegate to the shared operator core; the core routine sums the
        // point block diagonal into `assembled` (see doc example above).
        self.op_core
            .linear_assemble_add_point_block_diagonal(assembled)
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator, creating the prolongation basis from the fine and
    /// coarse grid interpolation
    ///
    /// * `p_mult_fine`  - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse`  - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i 
as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    ///
    /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_coarse {
    ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_coarse = ceed.elem_restriction(
    ///     ne,
    ///     p_coarse,
    ///     1,
    ///     1,
    ///     ndofs_coarse,
    ///     MemType::Host,
    ///     &indu_coarse,
    /// )?;
    ///
    /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_fine {
    ///         indu_fine[p_fine * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
    /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass_fine = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let (op_mass_coarse, op_prolong, op_restrict) =
    ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
    ) -> crate::Result<(Operator, Operator, Operator)> {
        // Out-pointers populated by libCEED on success.
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreate(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        // Check the FFI status before wrapping the raw pointers.
        self.op_core.ceed.check_error(ierr)?;
        let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
        let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
        let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator with a tensor basis for the active basis
    ///
    /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse`   - Coarse grid restriction
    /// * `basis_coarse`  - Coarse grid active vector basis
    /// * `interp_c_to_f` - Matrix for coarse to fine
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    ///
    /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_coarse {
    ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_coarse = ceed.elem_restriction(
    ///     ne,
    ///     p_coarse,
    ///     1,
    ///     1,
    ///     ndofs_coarse,
    ///     MemType::Host,
    ///     &indu_coarse,
    /// )?;
    ///
    /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_fine {
    ///         indu_fine[p_fine * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
    /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass_fine = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
    /// {
    ///     let mut coarse = ceed.vector(p_coarse)?;
    ///     let mut fine = ceed.vector(p_fine)?;
    ///     let basis_c_to_f =
    ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
    ///     for i in 0..p_coarse {
    ///         coarse.set_value(0.0);
    ///         {
    ///             let mut array = coarse.view_mut();
    ///             array[i] = 1.;
    ///         }
    ///         basis_c_to_f.apply(
    ///             1,
    ///             TransposeMode::NoTranspose,
    ///             EvalMode::Interp,
    ///             &coarse,
    ///             &mut fine,
    ///         )?;
    ///         let array = fine.view();
    ///         for j in 0..p_fine {
    ///             interp_c_to_f[j * p_coarse + i] = array[j];
    ///         }
    ///     }
    /// }
    /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1(
    ///     &multiplicity,
    ///     &ru_coarse,
    ///     &bu_coarse,
    ///     &interp_c_to_f,
    /// )?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() 
< 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level_tensor_H1(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
        interp_c_to_f: &[Scalar],
    ) -> crate::Result<(Operator, Operator, Operator)> {
        // `&[Scalar]` instead of `&Vec<Scalar>`: call sites passing `&vec`
        // still compile via deref coercion, and this matches the sibling
        // `create_multigrid_level_H1` signature and the doc bullet name.
        // Out-pointers populated by libCEED on success.
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                interp_c_to_f.as_ptr(),
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        // Check the FFI status before wrapping the raw pointers.
        self.op_core.ceed.check_error(ierr)?;
        let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
        let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
        let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator with a non-tensor basis for the active basis
    ///
    /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse`   - Coarse grid restriction
    /// * `basis_coarse`  - Coarse grid active vector basis
    /// * `interp_c_to_f` - Matrix for coarse to fine
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 
1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    ///
    /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_coarse {
    ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_coarse = ceed.elem_restriction(
    ///     ne,
    ///     p_coarse,
    ///     1,
    ///     1,
    ///     ndofs_coarse,
    ///     MemType::Host,
    ///     &indu_coarse,
    /// )?;
    ///
    /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_fine {
    ///         indu_fine[p_fine * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
    /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass_fine = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
    /// {
    ///     let mut coarse = ceed.vector(p_coarse)?;
    ///     let mut fine = ceed.vector(p_fine)?;
    ///     let basis_c_to_f =
    ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
    ///     for i in 0..p_coarse {
    ///         coarse.set_value(0.0);
    ///         {
    ///             let mut array = coarse.view_mut();
    ///             array[i] = 1.;
    ///         }
    ///         basis_c_to_f.apply(
    ///             1,
    ///             TransposeMode::NoTranspose,
    ///             EvalMode::Interp,
    ///             &coarse,
    ///             &mut fine,
    ///         )?;
    ///         let array = fine.view();
    ///         for j in 0..p_fine {
    ///             interp_c_to_f[j * p_coarse + i] = array[j];
    ///         }
    ///     }
    /// }
    /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1(
    ///     &multiplicity,
    ///     &ru_coarse,
    ///     &bu_coarse,
    ///     &interp_c_to_f,
    /// )?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level_H1(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
        interp_c_to_f: &[Scalar],
    ) -> crate::Result<(Operator, Operator, Operator)> {
        // Parameter renamed from `interpCtoF` to snake case to match the doc
        // bullet and example; parameter names are not part of the Rust call
        // interface, so callers are unaffected.
        // Out-pointers populated by libCEED on success.
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreateH1(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                interp_c_to_f.as_ptr(),
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        // Check the FFI status before wrapping the raw pointers.
        self.op_core.ceed.check_error(ierr)?;
        let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
        let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
        let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }
}

// 
-----------------------------------------------------------------------------
// Composite Operator
// -----------------------------------------------------------------------------
impl<'a> CompositeOperator<'a> {
    // Constructor
    /// Create a Composite Operator on the given Ceed context
    ///
    /// * `ceed` - Ceed context used to create the CeedCompositeOperator
    pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> {
        // Out-pointer populated by libCEED on success.
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore { ceed, ptr },
        })
    }

    /// Apply Operator to a vector
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_mass)?;
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_diff)?;
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("du", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
    ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let op_composite = ceed
    ///     .composite_operator()?
    ///     .sub_operator(&op_mass)?
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(0.0);
    /// op_composite.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegate to the shared operator core.
        self.op_core.apply(input, output)
    }

    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_mass)?;
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_diff)?;
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("du", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
    ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let op_composite = ceed
    ///     .composite_operator()?
    ///     .sub_operator(&op_mass)?
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(1.0);
    /// op_composite.apply_add(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view().iter().sum();
    /// assert!(
    ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegate to the shared operator core.
        self.op_core.apply_add(input, output)
    }

    /// Add a sub-Operator to a Composite Operator
    ///
    /// * `subop` - Sub-Operator
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> Result<(), libceed::CeedError> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let mut op = ceed.composite_operator()?;
    ///
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?;
    /// op = op.sub_operator(&op_mass)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?;
    /// op = op.sub_operator(&op_diff)?;
    /// # Ok(())
    /// # }
    /// ```
    // `mut self` is kept for builder-style chaining even though the binding
    // itself is not mutated, hence the allow.
    #[allow(unused_mut)]
    pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
        let ierr =
            unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
        self.op_core.ceed.check_error(ierr)?;
        Ok(self)
    }

    /// Assemble the diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
1756 /// 1757 /// * `op` - Operator to assemble QFunction 1758 /// * `assembled` - Vector to store assembled Operator diagonal 1759 pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1760 self.op_core.linear_assemble_add_diagonal(assembled) 1761 } 1762 1763 /// Assemble the point block diagonal of a square linear Operator 1764 /// 1765 /// This overwrites a Vector with the point block diagonal of a linear 1766 /// Operator. 1767 /// 1768 /// Note: Currently only non-composite Operators with a single field and 1769 /// composite Operators with single field sub-operators are supported. 1770 /// 1771 /// * `op` - Operator to assemble QFunction 1772 /// * `assembled` - Vector to store assembled Operator diagonal 1773 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1774 self.op_core.linear_assemble_add_diagonal(assembled) 1775 } 1776 1777 /// Assemble the diagonal of a square linear Operator 1778 /// 1779 /// This overwrites a Vector with the diagonal of a linear Operator. 1780 /// 1781 /// Note: Currently only non-composite Operators with a single field and 1782 /// composite Operators with single field sub-operators are supported. 1783 /// 1784 /// * `op` - Operator to assemble QFunction 1785 /// * `assembled` - Vector to store assembled CeedOperator point block 1786 /// diagonal, provided in row-major form with an 1787 /// `ncomp * ncomp` block at each node. The dimensions of 1788 /// this vector are derived from the active vector for 1789 /// the CeedOperator. The array has shape 1790 /// `[nodes, component out, component in]`. 1791 pub fn linear_assemble_point_block_diagonal( 1792 &self, 1793 assembled: &mut Vector, 1794 ) -> crate::Result<i32> { 1795 self.op_core.linear_assemble_add_diagonal(assembled) 1796 } 1797 1798 /// Assemble the diagonal of a square linear Operator 1799 /// 1800 /// This sums into a Vector with the diagonal of a linear Operator. 
1801 /// 1802 /// Note: Currently only non-composite Operators with a single field and 1803 /// composite Operators with single field sub-operators are supported. 1804 /// 1805 /// * `op` - Operator to assemble QFunction 1806 /// * `assembled` - Vector to store assembled CeedOperator point block 1807 /// diagonal, provided in row-major form with an 1808 /// `ncomp * ncomp` block at each node. The dimensions of 1809 /// this vector are derived from the active vector for 1810 /// the CeedOperator. The array has shape 1811 /// `[nodes, component out, component in]`. 1812 pub fn linear_assemble_add_point_block_diagonal( 1813 &self, 1814 assembled: &mut Vector, 1815 ) -> crate::Result<i32> { 1816 self.op_core.linear_assemble_add_diagonal(assembled) 1817 } 1818 } 1819 1820 // ----------------------------------------------------------------------------- 1821