1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at 2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights 3 // reserved. See files LICENSE and NOTICE for details. 4 // 5 // This file is part of CEED, a collection of benchmarks, miniapps, software 6 // libraries and APIs for efficient high-order finite element and spectral 7 // element discretizations for exascale applications. For more information and 8 // source code availability see http://github.com/ceed. 9 // 10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, 11 // a collaborative effort of two U.S. Department of Energy organizations (Office 12 // of Science and the National Nuclear Security Administration) responsible for 13 // the planning and preparation of a capable exascale ecosystem, including 14 // software, applications, hardware, advanced system engineering and early 15 // testbed platforms, in support of the nation's exascale computing imperative 16 17 //! A Ceed Operator defines the finite/spectral element operator associated to a 18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions, 19 //! Ceed Bases, and Ceed QFunctions. 
20 21 use crate::prelude::*; 22 23 // ----------------------------------------------------------------------------- 24 // CeedOperator Field context wrapper 25 // ----------------------------------------------------------------------------- 26 #[derive(Debug)] 27 pub struct OperatorField<'a> { 28 pub(crate) ptr: bind_ceed::CeedOperatorField, 29 _lifeline: PhantomData<&'a ()>, 30 } 31 32 // ----------------------------------------------------------------------------- 33 // Implementations 34 // ----------------------------------------------------------------------------- 35 impl<'a> OperatorField<'a> { 36 /// Get the name of an OperatorField 37 /// 38 /// ``` 39 /// # use libceed::prelude::*; 40 /// # fn main() -> libceed::Result<()> { 41 /// # let ceed = libceed::Ceed::default_init(); 42 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 43 /// 44 /// // Operator field arguments 45 /// let ne = 3; 46 /// let q = 4 as usize; 47 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 48 /// for i in 0..ne { 49 /// ind[2 * i + 0] = i as i32; 50 /// ind[2 * i + 1] = (i + 1) as i32; 51 /// } 52 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 53 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 54 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 55 /// 56 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 57 /// 58 /// // Operator fields 59 /// let op = ceed 60 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 61 /// .field("dx", &r, &b, VectorOpt::Active)? 62 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 
63 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 64 /// 65 /// let inputs = op.inputs()?; 66 /// 67 /// assert_eq!(inputs[0].name(), "dx", "Incorrect input name"); 68 /// assert_eq!(inputs[1].name(), "weights", "Incorrect input name"); 69 /// # Ok(()) 70 /// # } 71 /// ``` 72 pub fn name(&self) -> &str { 73 let mut name_ptr: *mut std::os::raw::c_char = std::ptr::null_mut(); 74 unsafe { 75 bind_ceed::CeedOperatorFieldGetName(self.ptr, &mut name_ptr); 76 } 77 unsafe { CStr::from_ptr(name_ptr) }.to_str().unwrap() 78 } 79 80 /// Get the ElemRestriction of an OperatorField 81 /// 82 /// ``` 83 /// # use libceed::prelude::*; 84 /// # fn main() -> libceed::Result<()> { 85 /// # let ceed = libceed::Ceed::default_init(); 86 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 87 /// 88 /// // Operator field arguments 89 /// let ne = 3; 90 /// let q = 4 as usize; 91 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 92 /// for i in 0..ne { 93 /// ind[2 * i + 0] = i as i32; 94 /// ind[2 * i + 1] = (i + 1) as i32; 95 /// } 96 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 97 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 98 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 99 /// 100 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 101 /// 102 /// // Operator fields 103 /// let op = ceed 104 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 105 /// .field("dx", &r, &b, VectorOpt::Active)? 106 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 
107 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 108 /// 109 /// let inputs = op.inputs()?; 110 /// 111 /// assert!( 112 /// inputs[0].elem_restriction().is_some(), 113 /// "Incorrect field ElemRestriction" 114 /// ); 115 /// if let ElemRestrictionOpt::Some(r) = inputs[0].elem_restriction() { 116 /// assert_eq!( 117 /// r.num_elements(), 118 /// ne, 119 /// "Incorrect field ElemRestriction number of elements" 120 /// ); 121 /// } 122 /// 123 /// assert!( 124 /// inputs[1].elem_restriction().is_none(), 125 /// "Incorrect field ElemRestriction" 126 /// ); 127 /// # Ok(()) 128 /// # } 129 /// ``` 130 pub fn elem_restriction(&self) -> ElemRestrictionOpt { 131 let mut ptr = std::ptr::null_mut(); 132 unsafe { 133 bind_ceed::CeedOperatorFieldGetElemRestriction(self.ptr, &mut ptr); 134 } 135 if ptr == unsafe { bind_ceed::CEED_ELEMRESTRICTION_NONE } { 136 ElemRestrictionOpt::None 137 } else { 138 let slice = unsafe { 139 std::slice::from_raw_parts( 140 &ptr as *const bind_ceed::CeedElemRestriction as *const crate::ElemRestriction, 141 1 as usize, 142 ) 143 }; 144 ElemRestrictionOpt::Some(&slice[0]) 145 } 146 } 147 148 /// Get the Basis of an OperatorField 149 /// 150 /// ``` 151 /// # use libceed::prelude::*; 152 /// # fn main() -> libceed::Result<()> { 153 /// # let ceed = libceed::Ceed::default_init(); 154 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 155 /// 156 /// // Operator field arguments 157 /// let ne = 3; 158 /// let q = 4 as usize; 159 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 160 /// for i in 0..ne { 161 /// ind[2 * i + 0] = i as i32; 162 /// ind[2 * i + 1] = (i + 1) as i32; 163 /// } 164 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 165 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 166 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 167 /// 168 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 169 /// 170 /// // Operator fields 171 
/// let op = ceed 172 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 173 /// .field("dx", &r, &b, VectorOpt::Active)? 174 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 175 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 176 /// 177 /// let inputs = op.inputs()?; 178 /// 179 /// assert!(inputs[0].basis().is_some(), "Incorrect field Basis"); 180 /// if let BasisOpt::Some(b) = inputs[0].basis() { 181 /// assert_eq!( 182 /// b.num_quadrature_points(), 183 /// q, 184 /// "Incorrect field Basis number of quadrature points" 185 /// ); 186 /// } 187 /// assert!(inputs[1].basis().is_some(), "Incorrect field Basis"); 188 /// if let BasisOpt::Some(b) = inputs[0].basis() { 189 /// assert_eq!( 190 /// b.num_quadrature_points(), 191 /// q, 192 /// "Incorrect field Basis number of quadrature points" 193 /// ); 194 /// } 195 /// 196 /// let outputs = op.outputs()?; 197 /// 198 /// assert!(outputs[0].basis().is_collocated(), "Incorrect field Basis"); 199 /// # Ok(()) 200 /// # } 201 /// ``` 202 pub fn basis(&self) -> BasisOpt { 203 let mut ptr = std::ptr::null_mut(); 204 unsafe { 205 bind_ceed::CeedOperatorFieldGetBasis(self.ptr, &mut ptr); 206 } 207 if ptr == unsafe { bind_ceed::CEED_BASIS_COLLOCATED } { 208 BasisOpt::Collocated 209 } else { 210 let slice = unsafe { 211 std::slice::from_raw_parts( 212 &ptr as *const bind_ceed::CeedBasis as *const crate::Basis, 213 1 as usize, 214 ) 215 }; 216 BasisOpt::Some(&slice[0]) 217 } 218 } 219 220 /// Get the Vector of an OperatorField 221 /// 222 /// ``` 223 /// # use libceed::prelude::*; 224 /// # fn main() -> libceed::Result<()> { 225 /// # let ceed = libceed::Ceed::default_init(); 226 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 227 /// 228 /// // Operator field arguments 229 /// let ne = 3; 230 /// let q = 4 as usize; 231 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 232 /// for i in 0..ne { 233 /// ind[2 * i + 0] = i as i32; 234 /// ind[2 * i + 1] = (i + 1) as 
i32; 235 /// } 236 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 237 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 238 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 239 /// 240 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 241 /// 242 /// // Operator fields 243 /// let op = ceed 244 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 245 /// .field("dx", &r, &b, VectorOpt::Active)? 246 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 247 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 248 /// 249 /// let inputs = op.inputs()?; 250 /// 251 /// assert!(inputs[0].vector().is_active(), "Incorrect field Vector"); 252 /// assert!(inputs[1].vector().is_none(), "Incorrect field Vector"); 253 /// # Ok(()) 254 /// # } 255 /// ``` 256 pub fn vector(&self) -> VectorOpt { 257 let mut ptr = std::ptr::null_mut(); 258 unsafe { 259 bind_ceed::CeedOperatorFieldGetVector(self.ptr, &mut ptr); 260 } 261 if ptr == unsafe { bind_ceed::CEED_VECTOR_ACTIVE } { 262 VectorOpt::Active 263 } else if ptr == unsafe { bind_ceed::CEED_VECTOR_NONE } { 264 VectorOpt::None 265 } else { 266 let slice = unsafe { 267 std::slice::from_raw_parts( 268 &ptr as *const bind_ceed::CeedVector as *const crate::Vector, 269 1 as usize, 270 ) 271 }; 272 VectorOpt::Some(&slice[0]) 273 } 274 } 275 } 276 277 // ----------------------------------------------------------------------------- 278 // CeedOperator context wrapper 279 // ----------------------------------------------------------------------------- 280 #[derive(Debug)] 281 pub(crate) struct OperatorCore<'a> { 282 ptr: bind_ceed::CeedOperator, 283 _lifeline: PhantomData<&'a ()>, 284 } 285 286 #[derive(Debug)] 287 pub struct Operator<'a> { 288 op_core: OperatorCore<'a>, 289 } 290 291 #[derive(Debug)] 292 pub struct CompositeOperator<'a> { 293 op_core: OperatorCore<'a>, 294 } 295 296 // 
-----------------------------------------------------------------------------
// Destructor
// -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    fn drop(&mut self) {
        // Release the underlying C handle when the wrapper is dropped. The
        // destructor takes `&mut self.ptr` so the C library can clear the
        // handle; any error code it returns is discarded here.
        unsafe {
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}

// -----------------------------------------------------------------------------
// Display
// -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Capture the C library's textual view in memory: open_memstream
        // yields a FILE* whose contents are placed in a C-allocated buffer at
        // `ptr` once fclose flushes it. CString::from_raw then takes
        // ownership of that buffer so it is freed on the Rust side.
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        let cstring = unsafe {
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            CString::from_raw(ptr)
        };
        // to_string_lossy tolerates any non-UTF-8 bytes in the C output.
        cstring.to_string_lossy().fmt(f)
    }
}

/// View an Operator
///
/// ```
/// # use libceed::prelude::*;
/// # fn main() -> libceed::Result<()> {
/// # let ceed = libceed::Ceed::default_init();
/// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
///
/// // Operator field arguments
/// let ne = 3;
/// let q = 4 as usize;
/// let mut ind: Vec<i32> = vec![0; 2 * ne];
/// for i in 0..ne {
///     ind[2 * i + 0] = i as i32;
///     ind[2 * i + 1] = (i + 1) as i32;
/// }
/// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
/// let strides: [i32; 3] = [1, q as i32, q as i32];
/// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
///
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
///
/// // Operator fields
/// let op = ceed
///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("dx", &r, &b, VectorOpt::Active)?
///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
///
/// println!("{}", op);
/// # Ok(())
/// # }
/// ```
impl<'a> fmt::Display for Operator<'a> {
    // Delegates to the shared OperatorCore Display implementation above.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.op_core.fmt(f)
    }
}

/// View a composite Operator
///
/// ```
/// # use libceed::prelude::*;
/// # fn main() -> libceed::Result<()> {
/// # let ceed = libceed::Ceed::default_init();
///
/// // Sub operator field arguments
/// let ne = 3;
/// let q = 4 as usize;
/// let mut ind: Vec<i32> = vec![0; 2 * ne];
/// for i in 0..ne {
///     ind[2 * i + 0] = i as i32;
///     ind[2 * i + 1] = (i + 1) as i32;
/// }
/// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
/// let strides: [i32; 3] = [1, q as i32, q as i32];
/// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
///
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
///
/// let qdata_mass = ceed.vector(q * ne)?;
/// let qdata_diff = ceed.vector(q * ne)?;
///
/// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
/// let op_mass = ceed
///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("u", &r, &b, VectorOpt::Active)?
///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
///     .field("v", &r, &b, VectorOpt::Active)?;
///
/// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
/// let op_diff = ceed
///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("du", &r, &b, VectorOpt::Active)?
///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
///     .field("dv", &r, &b, VectorOpt::Active)?;
///
/// let op = ceed
///     .composite_operator()?
///     .sub_operator(&op_mass)?
///     .sub_operator(&op_diff)?;
///
/// println!("{}", op);
/// # Ok(())
/// # }
/// ```
impl<'a> fmt::Display for CompositeOperator<'a> {
    // Delegates to the shared OperatorCore Display implementation above.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.op_core.fmt(f)
    }
}

// -----------------------------------------------------------------------------
// Core functionality
// -----------------------------------------------------------------------------
impl<'a> OperatorCore<'a> {
    // Error handling
    // Translate a raw libceed error code into the crate's Result, using the
    // Ceed context attached to this operator for the error message.
    #[doc(hidden)]
    fn check_error(&self, ierr: i32) -> crate::Result<i32> {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr);
        }
        crate::check_error(ptr, ierr)
    }

    // Common implementations
    /// Apply the operator: output = op(input).
    /// CEED_REQUEST_IMMEDIATE makes the call complete before returning.
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApply(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Apply the operator, accumulating into output: output += op(input).
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApplyAdd(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator into `assembled` (overwrites).
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator, adding into `assembled`.
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point-block diagonal into `assembled` (overwrites).
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point-block diagonal, adding into `assembled`.
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }
}

// -----------------------------------------------------------------------------
// Operator
// -----------------------------------------------------------------------------
impl<'a> Operator<'a> {
    // Constructor
    /// Create an Operator for the given QFunction.
    ///
    /// * `ceed` - Ceed context the operator belongs to
    /// * `qf`   - QFunction applied at quadrature points (or None)
    /// * `dqf`  - presumably the derivative QFunction, per the C API — the
    ///            `Opt::None` variant is passed in every example here
    /// * `dqfT` - presumably the transpose-derivative QFunction, per the C
    ///            API naming — TODO confirm against CeedOperatorCreate docs
    pub fn create<'b>(
        ceed: &'a crate::Ceed,
        qf: impl Into<QFunctionOpt<'b>>,
        dqf: impl Into<QFunctionOpt<'b>>,
        dqfT: impl Into<QFunctionOpt<'b>>,
    ) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorCreate(
                ceed.ptr,
                qf.into().to_raw(),
                dqf.into().to_raw(),
                dqfT.into().to_raw(),
                &mut ptr,
            )
        };
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }

    // Wrap an already-created raw handle. Infallible today; the Result
    // return keeps the signature parallel with `create`.
    fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }

    /// Apply Operator to a vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5,
1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// v.set_value(0.0);
    /// op_mass.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Overwrites `output`; delegate to the shared operator core.
        self.op_core.apply(input, output)
    }

    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
    /// let mut v = ceed.vector(ndofs)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// v.set_value(1.0);
    /// op_mass.apply_add(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed and added"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Accumulates into `output`; delegate to the shared operator core.
        self.op_core.apply_add(input, output)
    }

    /// Provide a field to a Operator for use by its QFunction
    ///
    /// * `fieldname` - Name of the field (to be matched with the name used by
    ///                   the QFunction)
    /// * `r`         - ElemRestriction
    /// * `b`         - Basis in which the field resides or `BasisCollocated` if
    ///                   collocated with quadrature points
    /// * `v`         - Vector to be used by Operator or `VectorActive` if field
    ///                   is active or `VectorNone` if using `Weight` with the
    ///                   QFunction
    ///
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf =
ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator field
    /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
    /// # Ok(())
    /// # }
    /// ```
    #[allow(unused_mut)]
    pub fn field<'b>(
        mut self,
        fieldname: &str,
        r: impl Into<ElemRestrictionOpt<'b>>,
        b: impl Into<BasisOpt<'b>>,
        v: impl Into<VectorOpt<'b>>,
    ) -> crate::Result<Self> {
        // Keep the CString alive across the FFI call. `as_ptr()` already
        // yields `*const c_char`; the previous hard-coded `as *const i8` cast
        // failed to compile on targets where c_char is u8 (e.g. aarch64).
        let fieldname = CString::new(fieldname).expect("CString::new failed");
        let ierr = unsafe {
            bind_ceed::CeedOperatorSetField(
                self.op_core.ptr,
                fieldname.as_ptr(),
                r.into().to_raw(),
                b.into().to_raw(),
                v.into().to_raw(),
            )
        };
        self.op_core.check_error(ierr)?;
        Ok(self)
    }

    /// Get a slice of Operator inputs
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 774 /// 775 /// // Operator fields 776 /// let op = ceed 777 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 778 /// .field("dx", &r, &b, VectorOpt::Active)? 779 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 780 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 781 /// 782 /// let inputs = op.inputs()?; 783 /// 784 /// assert_eq!(inputs.len(), 2, "Incorrect inputs array"); 785 /// # Ok(()) 786 /// # } 787 /// ``` 788 pub fn inputs(&self) -> crate::Result<&[crate::OperatorField]> { 789 // Get array of raw C pointers for inputs 790 let mut num_inputs = 0; 791 let mut inputs_ptr = std::ptr::null_mut(); 792 let ierr = unsafe { 793 bind_ceed::CeedOperatorGetFields( 794 self.op_core.ptr, 795 &mut num_inputs, 796 &mut inputs_ptr, 797 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 798 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 799 ) 800 }; 801 self.op_core.check_error(ierr)?; 802 // Convert raw C pointers to fixed length slice 803 let inputs_slice = unsafe { 804 std::slice::from_raw_parts( 805 inputs_ptr as *const crate::OperatorField, 806 num_inputs as usize, 807 ) 808 }; 809 Ok(inputs_slice) 810 } 811 812 /// Get a slice of Operator outputs 813 /// 814 /// ``` 815 /// # use libceed::prelude::*; 816 /// # fn main() -> libceed::Result<()> { 817 /// # let ceed = libceed::Ceed::default_init(); 818 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 819 /// 820 /// // Operator field arguments 821 /// let ne = 3; 822 /// let q = 4 as usize; 823 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 824 /// for i in 0..ne { 825 /// ind[2 * i + 0] = i as i32; 826 /// ind[2 * i + 1] = (i + 1) as i32; 827 /// } 828 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 829 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 830 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 831 /// 
832 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 833 /// 834 /// // Operator fields 835 /// let op = ceed 836 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 837 /// .field("dx", &r, &b, VectorOpt::Active)? 838 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 839 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 840 /// 841 /// let outputs = op.outputs()?; 842 /// 843 /// assert_eq!(outputs.len(), 1, "Incorrect outputs array"); 844 /// # Ok(()) 845 /// # } 846 /// ``` 847 pub fn outputs(&self) -> crate::Result<&[crate::OperatorField]> { 848 // Get array of raw C pointers for outputs 849 let mut num_outputs = 0; 850 let mut outputs_ptr = std::ptr::null_mut(); 851 let ierr = unsafe { 852 bind_ceed::CeedOperatorGetFields( 853 self.op_core.ptr, 854 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 855 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 856 &mut num_outputs, 857 &mut outputs_ptr, 858 ) 859 }; 860 self.op_core.check_error(ierr)?; 861 // Convert raw C pointers to fixed length slice 862 let outputs_slice = unsafe { 863 std::slice::from_raw_parts( 864 outputs_ptr as *const crate::OperatorField, 865 num_outputs as usize, 866 ) 867 }; 868 Ok(outputs_slice) 869 } 870 871 /// Get the number of elements associated with an Operator 872 /// 873 /// 874 /// ``` 875 /// # use libceed::prelude::*; 876 /// # fn main() -> libceed::Result<()> { 877 /// # let ceed = libceed::Ceed::default_init(); 878 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 879 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 880 /// 881 /// // Operator field arguments 882 /// let ne = 3; 883 /// let q = 4; 884 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 885 /// for i in 0..ne { 886 /// ind[2 * i + 0] = i as i32; 887 /// ind[2 * i + 1] = (i + 1) as i32; 888 /// } 889 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 890 /// 891 /// let b 
= ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 892 /// 893 /// // Operator field 894 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 895 /// 896 /// // Check number of elements 897 /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements"); 898 /// # Ok(()) 899 /// # } 900 /// ``` 901 pub fn num_elements(&self) -> usize { 902 let mut nelem = 0; 903 unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) }; 904 usize::try_from(nelem).unwrap() 905 } 906 907 /// Get the number of quadrature points associated with each element of 908 /// an Operator 909 /// 910 /// 911 /// ``` 912 /// # use libceed::prelude::*; 913 /// # fn main() -> libceed::Result<()> { 914 /// # let ceed = libceed::Ceed::default_init(); 915 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 916 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 917 /// 918 /// // Operator field arguments 919 /// let ne = 3; 920 /// let q = 4; 921 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 922 /// for i in 0..ne { 923 /// ind[2 * i + 0] = i as i32; 924 /// ind[2 * i + 1] = (i + 1) as i32; 925 /// } 926 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 927 /// 928 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 929 /// 930 /// // Operator field 931 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 932 /// 933 /// // Check number of quadrature points 934 /// assert_eq!( 935 /// op.num_quadrature_points(), 936 /// q, 937 /// "Incorrect number of quadrature points" 938 /// ); 939 /// # Ok(()) 940 /// # } 941 /// ``` 942 pub fn num_quadrature_points(&self) -> usize { 943 let mut Q = 0; 944 unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) }; 945 usize::try_from(Q).unwrap() 946 } 947 948 /// Assemble the diagonal of a square linear Operator 949 /// 950 /// This overwrites a Vector with the diagonal of a linear Operator. 
951 /// 952 /// Note: Currently only non-composite Operators with a single field and 953 /// composite Operators with single field sub-operators are supported. 954 /// 955 /// * `op` - Operator to assemble QFunction 956 /// * `assembled` - Vector to store assembled Operator diagonal 957 /// 958 /// ``` 959 /// # use libceed::prelude::*; 960 /// # fn main() -> libceed::Result<()> { 961 /// # let ceed = libceed::Ceed::default_init(); 962 /// let ne = 4; 963 /// let p = 3; 964 /// let q = 4; 965 /// let ndofs = p * ne - ne + 1; 966 /// 967 /// // Vectors 968 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 969 /// let mut qdata = ceed.vector(ne * q)?; 970 /// qdata.set_value(0.0); 971 /// let mut u = ceed.vector(ndofs)?; 972 /// u.set_value(1.0); 973 /// let mut v = ceed.vector(ndofs)?; 974 /// v.set_value(0.0); 975 /// 976 /// // Restrictions 977 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 978 /// for i in 0..ne { 979 /// indx[2 * i + 0] = i as i32; 980 /// indx[2 * i + 1] = (i + 1) as i32; 981 /// } 982 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 983 /// let mut indu: Vec<i32> = vec![0; p * ne]; 984 /// for i in 0..ne { 985 /// indu[p * i + 0] = (2 * i) as i32; 986 /// indu[p * i + 1] = (2 * i + 1) as i32; 987 /// indu[p * i + 2] = (2 * i + 2) as i32; 988 /// } 989 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 990 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 991 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 992 /// 993 /// // Bases 994 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 995 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 996 /// 997 /// // Build quadrature data 998 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 999 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1000 /// .field("dx", &rx, &bx, VectorOpt::Active)? 
1001 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1002 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1003 /// .apply(&x, &mut qdata)?; 1004 /// 1005 /// // Mass operator 1006 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1007 /// let op_mass = ceed 1008 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1009 /// .field("u", &ru, &bu, VectorOpt::Active)? 1010 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1011 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1012 /// 1013 /// // Diagonal 1014 /// let mut diag = ceed.vector(ndofs)?; 1015 /// diag.set_value(0.0); 1016 /// op_mass.linear_assemble_diagonal(&mut diag)?; 1017 /// 1018 /// // Manual diagonal computation 1019 /// let mut true_diag = ceed.vector(ndofs)?; 1020 /// for i in 0..ndofs { 1021 /// u.set_value(0.0); 1022 /// { 1023 /// let mut u_array = u.view_mut()?; 1024 /// u_array[i] = 1.; 1025 /// } 1026 /// 1027 /// op_mass.apply(&u, &mut v)?; 1028 /// 1029 /// { 1030 /// let v_array = v.view_mut()?; 1031 /// let mut true_array = true_diag.view_mut()?; 1032 /// true_array[i] = v_array[i]; 1033 /// } 1034 /// } 1035 /// 1036 /// // Check 1037 /// diag.view()? 1038 /// .iter() 1039 /// .zip(true_diag.view()?.iter()) 1040 /// .for_each(|(computed, actual)| { 1041 /// assert!( 1042 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1043 /// "Diagonal entry incorrect" 1044 /// ); 1045 /// }); 1046 /// # Ok(()) 1047 /// # } 1048 /// ``` 1049 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1050 self.op_core.linear_assemble_diagonal(assembled) 1051 } 1052 1053 /// Assemble the diagonal of a square linear Operator 1054 /// 1055 /// This sums into a Vector with the diagonal of a linear Operator. 1056 /// 1057 /// Note: Currently only non-composite Operators with a single field and 1058 /// composite Operators with single field sub-operators are supported. 
1059 /// 1060 /// * `op` - Operator to assemble QFunction 1061 /// * `assembled` - Vector to store assembled Operator diagonal 1062 /// 1063 /// 1064 /// ``` 1065 /// # use libceed::prelude::*; 1066 /// # fn main() -> libceed::Result<()> { 1067 /// # let ceed = libceed::Ceed::default_init(); 1068 /// let ne = 4; 1069 /// let p = 3; 1070 /// let q = 4; 1071 /// let ndofs = p * ne - ne + 1; 1072 /// 1073 /// // Vectors 1074 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1075 /// let mut qdata = ceed.vector(ne * q)?; 1076 /// qdata.set_value(0.0); 1077 /// let mut u = ceed.vector(ndofs)?; 1078 /// u.set_value(1.0); 1079 /// let mut v = ceed.vector(ndofs)?; 1080 /// v.set_value(0.0); 1081 /// 1082 /// // Restrictions 1083 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1084 /// for i in 0..ne { 1085 /// indx[2 * i + 0] = i as i32; 1086 /// indx[2 * i + 1] = (i + 1) as i32; 1087 /// } 1088 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1089 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1090 /// for i in 0..ne { 1091 /// indu[p * i + 0] = (2 * i) as i32; 1092 /// indu[p * i + 1] = (2 * i + 1) as i32; 1093 /// indu[p * i + 2] = (2 * i + 2) as i32; 1094 /// } 1095 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 1096 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1097 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1098 /// 1099 /// // Bases 1100 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1101 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 1102 /// 1103 /// // Build quadrature data 1104 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1105 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1106 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1107 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 
1108 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1109 /// .apply(&x, &mut qdata)?; 1110 /// 1111 /// // Mass operator 1112 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1113 /// let op_mass = ceed 1114 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1115 /// .field("u", &ru, &bu, VectorOpt::Active)? 1116 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1117 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1118 /// 1119 /// // Diagonal 1120 /// let mut diag = ceed.vector(ndofs)?; 1121 /// diag.set_value(1.0); 1122 /// op_mass.linear_assemble_add_diagonal(&mut diag)?; 1123 /// 1124 /// // Manual diagonal computation 1125 /// let mut true_diag = ceed.vector(ndofs)?; 1126 /// for i in 0..ndofs { 1127 /// u.set_value(0.0); 1128 /// { 1129 /// let mut u_array = u.view_mut()?; 1130 /// u_array[i] = 1.; 1131 /// } 1132 /// 1133 /// op_mass.apply(&u, &mut v)?; 1134 /// 1135 /// { 1136 /// let v_array = v.view_mut()?; 1137 /// let mut true_array = true_diag.view_mut()?; 1138 /// true_array[i] = v_array[i] + 1.0; 1139 /// } 1140 /// } 1141 /// 1142 /// // Check 1143 /// diag.view()? 1144 /// .iter() 1145 /// .zip(true_diag.view()?.iter()) 1146 /// .for_each(|(computed, actual)| { 1147 /// assert!( 1148 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1149 /// "Diagonal entry incorrect" 1150 /// ); 1151 /// }); 1152 /// # Ok(()) 1153 /// # } 1154 /// ``` 1155 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1156 self.op_core.linear_assemble_add_diagonal(assembled) 1157 } 1158 1159 /// Assemble the point block diagonal of a square linear Operator 1160 /// 1161 /// This overwrites a Vector with the point block diagonal of a linear 1162 /// Operator. 1163 /// 1164 /// Note: Currently only non-composite Operators with a single field and 1165 /// composite Operators with single field sub-operators are supported. 
1166 /// 1167 /// * `op` - Operator to assemble QFunction 1168 /// * `assembled` - Vector to store assembled CeedOperator point block 1169 /// diagonal, provided in row-major form with an 1170 /// `ncomp * ncomp` block at each node. The dimensions of 1171 /// this vector are derived from the active vector for 1172 /// the CeedOperator. The array has shape 1173 /// `[nodes, component out, component in]`. 1174 /// 1175 /// ``` 1176 /// # use libceed::prelude::*; 1177 /// # fn main() -> libceed::Result<()> { 1178 /// # let ceed = libceed::Ceed::default_init(); 1179 /// let ne = 4; 1180 /// let p = 3; 1181 /// let q = 4; 1182 /// let ncomp = 2; 1183 /// let ndofs = p * ne - ne + 1; 1184 /// 1185 /// // Vectors 1186 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1187 /// let mut qdata = ceed.vector(ne * q)?; 1188 /// qdata.set_value(0.0); 1189 /// let mut u = ceed.vector(ncomp * ndofs)?; 1190 /// u.set_value(1.0); 1191 /// let mut v = ceed.vector(ncomp * ndofs)?; 1192 /// v.set_value(0.0); 1193 /// 1194 /// // Restrictions 1195 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1196 /// for i in 0..ne { 1197 /// indx[2 * i + 0] = i as i32; 1198 /// indx[2 * i + 1] = (i + 1) as i32; 1199 /// } 1200 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1201 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1202 /// for i in 0..ne { 1203 /// indu[p * i + 0] = (2 * i) as i32; 1204 /// indu[p * i + 1] = (2 * i + 1) as i32; 1205 /// indu[p * i + 2] = (2 * i + 2) as i32; 1206 /// } 1207 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1208 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1209 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1210 /// 1211 /// // Bases 1212 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1213 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1214 /// 1215 /// // Build quadrature data 
1216 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1217 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1218 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1219 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1220 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1221 /// .apply(&x, &mut qdata)?; 1222 /// 1223 /// // Mass operator 1224 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1225 /// // Number of quadrature points 1226 /// let q = qdata.len(); 1227 /// 1228 /// // Iterate over quadrature points 1229 /// for i in 0..q { 1230 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1231 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1232 /// } 1233 /// 1234 /// // Return clean error code 1235 /// 0 1236 /// }; 1237 /// 1238 /// let qf_mass = ceed 1239 /// .q_function_interior(1, Box::new(mass_2_comp))? 1240 /// .input("u", 2, EvalMode::Interp)? 1241 /// .input("qdata", 1, EvalMode::None)? 1242 /// .output("v", 2, EvalMode::Interp)?; 1243 /// 1244 /// let op_mass = ceed 1245 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1246 /// .field("u", &ru, &bu, VectorOpt::Active)? 1247 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
1248 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1249 /// 1250 /// // Diagonal 1251 /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?; 1252 /// diag.set_value(0.0); 1253 /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?; 1254 /// 1255 /// // Manual diagonal computation 1256 /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?; 1257 /// for i in 0..ndofs { 1258 /// for j in 0..ncomp { 1259 /// u.set_value(0.0); 1260 /// { 1261 /// let mut u_array = u.view_mut()?; 1262 /// u_array[i + j * ndofs] = 1.; 1263 /// } 1264 /// 1265 /// op_mass.apply(&u, &mut v)?; 1266 /// 1267 /// { 1268 /// let v_array = v.view_mut()?; 1269 /// let mut true_array = true_diag.view_mut()?; 1270 /// for k in 0..ncomp { 1271 /// true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; 1272 /// } 1273 /// } 1274 /// } 1275 /// } 1276 /// 1277 /// // Check 1278 /// diag.view()? 1279 /// .iter() 1280 /// .zip(true_diag.view()?.iter()) 1281 /// .for_each(|(computed, actual)| { 1282 /// assert!( 1283 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1284 /// "Diagonal entry incorrect" 1285 /// ); 1286 /// }); 1287 /// # Ok(()) 1288 /// # } 1289 /// ``` 1290 pub fn linear_assemble_point_block_diagonal( 1291 &self, 1292 assembled: &mut Vector, 1293 ) -> crate::Result<i32> { 1294 self.op_core.linear_assemble_point_block_diagonal(assembled) 1295 } 1296 1297 /// Assemble the point block diagonal of a square linear Operator 1298 /// 1299 /// This sums into a Vector with the point block diagonal of a linear 1300 /// Operator. 1301 /// 1302 /// Note: Currently only non-composite Operators with a single field and 1303 /// composite Operators with single field sub-operators are supported. 1304 /// 1305 /// * `op` - Operator to assemble QFunction 1306 /// * `assembled` - Vector to store assembled CeedOperator point block 1307 /// diagonal, provided in row-major form with an 1308 /// `ncomp * ncomp` block at each node. 
The dimensions of 1309 /// this vector are derived from the active vector for 1310 /// the CeedOperator. The array has shape 1311 /// `[nodes, component out, component in]`. 1312 /// 1313 /// ``` 1314 /// # use libceed::prelude::*; 1315 /// # fn main() -> libceed::Result<()> { 1316 /// # let ceed = libceed::Ceed::default_init(); 1317 /// let ne = 4; 1318 /// let p = 3; 1319 /// let q = 4; 1320 /// let ncomp = 2; 1321 /// let ndofs = p * ne - ne + 1; 1322 /// 1323 /// // Vectors 1324 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1325 /// let mut qdata = ceed.vector(ne * q)?; 1326 /// qdata.set_value(0.0); 1327 /// let mut u = ceed.vector(ncomp * ndofs)?; 1328 /// u.set_value(1.0); 1329 /// let mut v = ceed.vector(ncomp * ndofs)?; 1330 /// v.set_value(0.0); 1331 /// 1332 /// // Restrictions 1333 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1334 /// for i in 0..ne { 1335 /// indx[2 * i + 0] = i as i32; 1336 /// indx[2 * i + 1] = (i + 1) as i32; 1337 /// } 1338 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1339 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1340 /// for i in 0..ne { 1341 /// indu[p * i + 0] = (2 * i) as i32; 1342 /// indu[p * i + 1] = (2 * i + 1) as i32; 1343 /// indu[p * i + 2] = (2 * i + 2) as i32; 1344 /// } 1345 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1346 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1347 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1348 /// 1349 /// // Bases 1350 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1351 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1352 /// 1353 /// // Build quadrature data 1354 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1355 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1356 /// .field("dx", &rx, &bx, VectorOpt::Active)? 
1357 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1358 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1359 /// .apply(&x, &mut qdata)?; 1360 /// 1361 /// // Mass operator 1362 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1363 /// // Number of quadrature points 1364 /// let q = qdata.len(); 1365 /// 1366 /// // Iterate over quadrature points 1367 /// for i in 0..q { 1368 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1369 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1370 /// } 1371 /// 1372 /// // Return clean error code 1373 /// 0 1374 /// }; 1375 /// 1376 /// let qf_mass = ceed 1377 /// .q_function_interior(1, Box::new(mass_2_comp))? 1378 /// .input("u", 2, EvalMode::Interp)? 1379 /// .input("qdata", 1, EvalMode::None)? 1380 /// .output("v", 2, EvalMode::Interp)?; 1381 /// 1382 /// let op_mass = ceed 1383 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1384 /// .field("u", &ru, &bu, VectorOpt::Active)? 1385 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1386 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1387 /// 1388 /// // Diagonal 1389 /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?; 1390 /// diag.set_value(1.0); 1391 /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?; 1392 /// 1393 /// // Manual diagonal computation 1394 /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?; 1395 /// for i in 0..ndofs { 1396 /// for j in 0..ncomp { 1397 /// u.set_value(0.0); 1398 /// { 1399 /// let mut u_array = u.view_mut()?; 1400 /// u_array[i + j * ndofs] = 1.; 1401 /// } 1402 /// 1403 /// op_mass.apply(&u, &mut v)?; 1404 /// 1405 /// { 1406 /// let v_array = v.view_mut()?; 1407 /// let mut true_array = true_diag.view_mut()?; 1408 /// for k in 0..ncomp { 1409 /// true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; 1410 /// } 1411 /// } 1412 /// } 1413 /// } 1414 /// 1415 /// // Check 1416 /// diag.view()? 
1417 /// .iter() 1418 /// .zip(true_diag.view()?.iter()) 1419 /// .for_each(|(computed, actual)| { 1420 /// assert!( 1421 /// (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON, 1422 /// "Diagonal entry incorrect" 1423 /// ); 1424 /// }); 1425 /// # Ok(()) 1426 /// # } 1427 /// ``` 1428 pub fn linear_assemble_add_point_block_diagonal( 1429 &self, 1430 assembled: &mut Vector, 1431 ) -> crate::Result<i32> { 1432 self.op_core 1433 .linear_assemble_add_point_block_diagonal(assembled) 1434 } 1435 1436 /// Create a multigrid coarse Operator and level transfer Operators for a 1437 /// given Operator, creating the prolongation basis from the fine and 1438 /// coarse grid interpolation 1439 /// 1440 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1441 /// * `rstr_coarse` - Coarse grid restriction 1442 /// * `basis_coarse` - Coarse grid active vector basis 1443 /// 1444 /// ``` 1445 /// # use libceed::prelude::*; 1446 /// # fn main() -> libceed::Result<()> { 1447 /// # let ceed = libceed::Ceed::default_init(); 1448 /// let ne = 15; 1449 /// let p_coarse = 3; 1450 /// let p_fine = 5; 1451 /// let q = 6; 1452 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1453 /// let ndofs_fine = p_fine * ne - ne + 1; 1454 /// 1455 /// // Vectors 1456 /// let x_array = (0..ne + 1) 1457 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1458 /// .collect::<Vec<Scalar>>(); 1459 /// let x = ceed.vector_from_slice(&x_array)?; 1460 /// let mut qdata = ceed.vector(ne * q)?; 1461 /// qdata.set_value(0.0); 1462 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1463 /// u_coarse.set_value(1.0); 1464 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1465 /// u_fine.set_value(1.0); 1466 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1467 /// v_coarse.set_value(0.0); 1468 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1469 /// v_fine.set_value(0.0); 1470 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1471 /// multiplicity.set_value(1.0); 1472 /// 1473 /// // 
Restrictions 1474 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1475 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1476 /// 1477 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1478 /// for i in 0..ne { 1479 /// indx[2 * i + 0] = i as i32; 1480 /// indx[2 * i + 1] = (i + 1) as i32; 1481 /// } 1482 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1483 /// 1484 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1485 /// for i in 0..ne { 1486 /// for j in 0..p_coarse { 1487 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1488 /// } 1489 /// } 1490 /// let ru_coarse = ceed.elem_restriction( 1491 /// ne, 1492 /// p_coarse, 1493 /// 1, 1494 /// 1, 1495 /// ndofs_coarse, 1496 /// MemType::Host, 1497 /// &indu_coarse, 1498 /// )?; 1499 /// 1500 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1501 /// for i in 0..ne { 1502 /// for j in 0..p_fine { 1503 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1504 /// } 1505 /// } 1506 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1507 /// 1508 /// // Bases 1509 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1510 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1511 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1512 /// 1513 /// // Build quadrature data 1514 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1515 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1516 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1517 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1518 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 
1519 /// .apply(&x, &mut qdata)?; 1520 /// 1521 /// // Mass operator 1522 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1523 /// let op_mass_fine = ceed 1524 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1525 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1526 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1527 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1528 /// 1529 /// // Multigrid setup 1530 /// let (op_mass_coarse, op_prolong, op_restrict) = 1531 /// op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?; 1532 /// 1533 /// // Coarse problem 1534 /// u_coarse.set_value(1.0); 1535 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1536 /// 1537 /// // Check 1538 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1539 /// assert!( 1540 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1541 /// "Incorrect interval length computed" 1542 /// ); 1543 /// 1544 /// // Prolong 1545 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1546 /// 1547 /// // Fine problem 1548 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1549 /// 1550 /// // Check 1551 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1552 /// assert!( 1553 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1554 /// "Incorrect interval length computed" 1555 /// ); 1556 /// 1557 /// // Restrict 1558 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1559 /// 1560 /// // Check 1561 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1562 /// assert!( 1563 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1564 /// "Incorrect interval length computed" 1565 /// ); 1566 /// # Ok(()) 1567 /// # } 1568 /// ``` 1569 pub fn create_multigrid_level( 1570 &self, 1571 p_mult_fine: &Vector, 1572 rstr_coarse: &ElemRestriction, 1573 basis_coarse: &Basis, 1574 ) -> crate::Result<(Operator, Operator, Operator)> { 1575 let mut ptr_coarse = std::ptr::null_mut(); 1576 let mut ptr_prolong = std::ptr::null_mut(); 1577 let mut ptr_restrict = 
std::ptr::null_mut(); 1578 let ierr = unsafe { 1579 bind_ceed::CeedOperatorMultigridLevelCreate( 1580 self.op_core.ptr, 1581 p_mult_fine.ptr, 1582 rstr_coarse.ptr, 1583 basis_coarse.ptr, 1584 &mut ptr_coarse, 1585 &mut ptr_prolong, 1586 &mut ptr_restrict, 1587 ) 1588 }; 1589 self.op_core.check_error(ierr)?; 1590 let op_coarse = Operator::from_raw(ptr_coarse)?; 1591 let op_prolong = Operator::from_raw(ptr_prolong)?; 1592 let op_restrict = Operator::from_raw(ptr_restrict)?; 1593 Ok((op_coarse, op_prolong, op_restrict)) 1594 } 1595 1596 /// Create a multigrid coarse Operator and level transfer Operators for a 1597 /// given Operator with a tensor basis for the active basis 1598 /// 1599 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1600 /// * `rstr_coarse` - Coarse grid restriction 1601 /// * `basis_coarse` - Coarse grid active vector basis 1602 /// * `interp_c_to_f` - Matrix for coarse to fine 1603 /// 1604 /// ``` 1605 /// # use libceed::prelude::*; 1606 /// # fn main() -> libceed::Result<()> { 1607 /// # let ceed = libceed::Ceed::default_init(); 1608 /// let ne = 15; 1609 /// let p_coarse = 3; 1610 /// let p_fine = 5; 1611 /// let q = 6; 1612 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1613 /// let ndofs_fine = p_fine * ne - ne + 1; 1614 /// 1615 /// // Vectors 1616 /// let x_array = (0..ne + 1) 1617 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1618 /// .collect::<Vec<Scalar>>(); 1619 /// let x = ceed.vector_from_slice(&x_array)?; 1620 /// let mut qdata = ceed.vector(ne * q)?; 1621 /// qdata.set_value(0.0); 1622 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1623 /// u_coarse.set_value(1.0); 1624 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1625 /// u_fine.set_value(1.0); 1626 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1627 /// v_coarse.set_value(0.0); 1628 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1629 /// v_fine.set_value(0.0); 1630 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1631 /// 
multiplicity.set_value(1.0); 1632 /// 1633 /// // Restrictions 1634 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1635 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1636 /// 1637 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1638 /// for i in 0..ne { 1639 /// indx[2 * i + 0] = i as i32; 1640 /// indx[2 * i + 1] = (i + 1) as i32; 1641 /// } 1642 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1643 /// 1644 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1645 /// for i in 0..ne { 1646 /// for j in 0..p_coarse { 1647 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1648 /// } 1649 /// } 1650 /// let ru_coarse = ceed.elem_restriction( 1651 /// ne, 1652 /// p_coarse, 1653 /// 1, 1654 /// 1, 1655 /// ndofs_coarse, 1656 /// MemType::Host, 1657 /// &indu_coarse, 1658 /// )?; 1659 /// 1660 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1661 /// for i in 0..ne { 1662 /// for j in 0..p_fine { 1663 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1664 /// } 1665 /// } 1666 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1667 /// 1668 /// // Bases 1669 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1670 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1671 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1672 /// 1673 /// // Build quadrature data 1674 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1675 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1676 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1677 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1678 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 
1679 /// .apply(&x, &mut qdata)?; 1680 /// 1681 /// // Mass operator 1682 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1683 /// let op_mass_fine = ceed 1684 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1685 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1686 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1687 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1688 /// 1689 /// // Multigrid setup 1690 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1691 /// { 1692 /// let mut coarse = ceed.vector(p_coarse)?; 1693 /// let mut fine = ceed.vector(p_fine)?; 1694 /// let basis_c_to_f = 1695 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1696 /// for i in 0..p_coarse { 1697 /// coarse.set_value(0.0); 1698 /// { 1699 /// let mut array = coarse.view_mut()?; 1700 /// array[i] = 1.; 1701 /// } 1702 /// basis_c_to_f.apply( 1703 /// 1, 1704 /// TransposeMode::NoTranspose, 1705 /// EvalMode::Interp, 1706 /// &coarse, 1707 /// &mut fine, 1708 /// )?; 1709 /// let array = fine.view()?; 1710 /// for j in 0..p_fine { 1711 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1712 /// } 1713 /// } 1714 /// } 1715 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1( 1716 /// &multiplicity, 1717 /// &ru_coarse, 1718 /// &bu_coarse, 1719 /// &interp_c_to_f, 1720 /// )?; 1721 /// 1722 /// // Coarse problem 1723 /// u_coarse.set_value(1.0); 1724 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1725 /// 1726 /// // Check 1727 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1728 /// assert!( 1729 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1730 /// "Incorrect interval length computed" 1731 /// ); 1732 /// 1733 /// // Prolong 1734 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1735 /// 1736 /// // Fine problem 1737 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1738 /// 1739 /// // Check 1740 /// let sum: Scalar = 
v_fine.view()?.iter().sum(); 1741 /// assert!( 1742 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1743 /// "Incorrect interval length computed" 1744 /// ); 1745 /// 1746 /// // Restrict 1747 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1748 /// 1749 /// // Check 1750 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1751 /// assert!( 1752 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1753 /// "Incorrect interval length computed" 1754 /// ); 1755 /// # Ok(()) 1756 /// # } 1757 /// ``` 1758 pub fn create_multigrid_level_tensor_H1( 1759 &self, 1760 p_mult_fine: &Vector, 1761 rstr_coarse: &ElemRestriction, 1762 basis_coarse: &Basis, 1763 interpCtoF: &Vec<Scalar>, 1764 ) -> crate::Result<(Operator, Operator, Operator)> { 1765 let mut ptr_coarse = std::ptr::null_mut(); 1766 let mut ptr_prolong = std::ptr::null_mut(); 1767 let mut ptr_restrict = std::ptr::null_mut(); 1768 let ierr = unsafe { 1769 bind_ceed::CeedOperatorMultigridLevelCreateTensorH1( 1770 self.op_core.ptr, 1771 p_mult_fine.ptr, 1772 rstr_coarse.ptr, 1773 basis_coarse.ptr, 1774 interpCtoF.as_ptr(), 1775 &mut ptr_coarse, 1776 &mut ptr_prolong, 1777 &mut ptr_restrict, 1778 ) 1779 }; 1780 self.op_core.check_error(ierr)?; 1781 let op_coarse = Operator::from_raw(ptr_coarse)?; 1782 let op_prolong = Operator::from_raw(ptr_prolong)?; 1783 let op_restrict = Operator::from_raw(ptr_restrict)?; 1784 Ok((op_coarse, op_prolong, op_restrict)) 1785 } 1786 1787 /// Create a multigrid coarse Operator and level transfer Operators for a 1788 /// given Operator with a non-tensor basis for the active basis 1789 /// 1790 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1791 /// * `rstr_coarse` - Coarse grid restriction 1792 /// * `basis_coarse` - Coarse grid active vector basis 1793 /// * `interp_c_to_f` - Matrix for coarse to fine 1794 /// 1795 /// ``` 1796 /// # use libceed::prelude::*; 1797 /// # fn main() -> libceed::Result<()> { 1798 /// # let ceed = libceed::Ceed::default_init(); 1799 /// let 
ne = 15; 1800 /// let p_coarse = 3; 1801 /// let p_fine = 5; 1802 /// let q = 6; 1803 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1804 /// let ndofs_fine = p_fine * ne - ne + 1; 1805 /// 1806 /// // Vectors 1807 /// let x_array = (0..ne + 1) 1808 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1809 /// .collect::<Vec<Scalar>>(); 1810 /// let x = ceed.vector_from_slice(&x_array)?; 1811 /// let mut qdata = ceed.vector(ne * q)?; 1812 /// qdata.set_value(0.0); 1813 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1814 /// u_coarse.set_value(1.0); 1815 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1816 /// u_fine.set_value(1.0); 1817 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1818 /// v_coarse.set_value(0.0); 1819 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1820 /// v_fine.set_value(0.0); 1821 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1822 /// multiplicity.set_value(1.0); 1823 /// 1824 /// // Restrictions 1825 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1826 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1827 /// 1828 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1829 /// for i in 0..ne { 1830 /// indx[2 * i + 0] = i as i32; 1831 /// indx[2 * i + 1] = (i + 1) as i32; 1832 /// } 1833 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1834 /// 1835 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1836 /// for i in 0..ne { 1837 /// for j in 0..p_coarse { 1838 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1839 /// } 1840 /// } 1841 /// let ru_coarse = ceed.elem_restriction( 1842 /// ne, 1843 /// p_coarse, 1844 /// 1, 1845 /// 1, 1846 /// ndofs_coarse, 1847 /// MemType::Host, 1848 /// &indu_coarse, 1849 /// )?; 1850 /// 1851 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1852 /// for i in 0..ne { 1853 /// for j in 0..p_fine { 1854 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1855 /// } 1856 /// } 1857 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 
1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1858 /// 1859 /// // Bases 1860 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1861 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1862 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1863 /// 1864 /// // Build quadrature data 1865 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1866 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1867 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1868 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1869 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1870 /// .apply(&x, &mut qdata)?; 1871 /// 1872 /// // Mass operator 1873 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1874 /// let op_mass_fine = ceed 1875 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1876 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1877 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
1878 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1879 /// 1880 /// // Multigrid setup 1881 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1882 /// { 1883 /// let mut coarse = ceed.vector(p_coarse)?; 1884 /// let mut fine = ceed.vector(p_fine)?; 1885 /// let basis_c_to_f = 1886 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1887 /// for i in 0..p_coarse { 1888 /// coarse.set_value(0.0); 1889 /// { 1890 /// let mut array = coarse.view_mut()?; 1891 /// array[i] = 1.; 1892 /// } 1893 /// basis_c_to_f.apply( 1894 /// 1, 1895 /// TransposeMode::NoTranspose, 1896 /// EvalMode::Interp, 1897 /// &coarse, 1898 /// &mut fine, 1899 /// )?; 1900 /// let array = fine.view()?; 1901 /// for j in 0..p_fine { 1902 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1903 /// } 1904 /// } 1905 /// } 1906 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1( 1907 /// &multiplicity, 1908 /// &ru_coarse, 1909 /// &bu_coarse, 1910 /// &interp_c_to_f, 1911 /// )?; 1912 /// 1913 /// // Coarse problem 1914 /// u_coarse.set_value(1.0); 1915 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1916 /// 1917 /// // Check 1918 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1919 /// assert!( 1920 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1921 /// "Incorrect interval length computed" 1922 /// ); 1923 /// 1924 /// // Prolong 1925 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1926 /// 1927 /// // Fine problem 1928 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1929 /// 1930 /// // Check 1931 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1932 /// assert!( 1933 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1934 /// "Incorrect interval length computed" 1935 /// ); 1936 /// 1937 /// // Restrict 1938 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1939 /// 1940 /// // Check 1941 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1942 /// assert!( 1943 /// (sum - 2.0).abs() < 
10.0 * libceed::EPSILON, 1944 /// "Incorrect interval length computed" 1945 /// ); 1946 /// # Ok(()) 1947 /// # } 1948 /// ``` 1949 pub fn create_multigrid_level_H1( 1950 &self, 1951 p_mult_fine: &Vector, 1952 rstr_coarse: &ElemRestriction, 1953 basis_coarse: &Basis, 1954 interpCtoF: &[Scalar], 1955 ) -> crate::Result<(Operator, Operator, Operator)> { 1956 let mut ptr_coarse = std::ptr::null_mut(); 1957 let mut ptr_prolong = std::ptr::null_mut(); 1958 let mut ptr_restrict = std::ptr::null_mut(); 1959 let ierr = unsafe { 1960 bind_ceed::CeedOperatorMultigridLevelCreateH1( 1961 self.op_core.ptr, 1962 p_mult_fine.ptr, 1963 rstr_coarse.ptr, 1964 basis_coarse.ptr, 1965 interpCtoF.as_ptr(), 1966 &mut ptr_coarse, 1967 &mut ptr_prolong, 1968 &mut ptr_restrict, 1969 ) 1970 }; 1971 self.op_core.check_error(ierr)?; 1972 let op_coarse = Operator::from_raw(ptr_coarse)?; 1973 let op_prolong = Operator::from_raw(ptr_prolong)?; 1974 let op_restrict = Operator::from_raw(ptr_restrict)?; 1975 Ok((op_coarse, op_prolong, op_restrict)) 1976 } 1977 } 1978 1979 // ----------------------------------------------------------------------------- 1980 // Composite Operator 1981 // ----------------------------------------------------------------------------- 1982 impl<'a> CompositeOperator<'a> { 1983 // Constructor 1984 pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> { 1985 let mut ptr = std::ptr::null_mut(); 1986 let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) }; 1987 ceed.check_error(ierr)?; 1988 Ok(Self { 1989 op_core: OperatorCore { 1990 ptr, 1991 _lifeline: PhantomData, 1992 }, 1993 }) 1994 } 1995 1996 /// Apply Operator to a vector 1997 /// 1998 /// * `input` - Input Vector 1999 /// * `output` - Output Vector 2000 /// 2001 /// ``` 2002 /// # use libceed::prelude::*; 2003 /// # fn main() -> libceed::Result<()> { 2004 /// # let ceed = libceed::Ceed::default_init(); 2005 /// let ne = 4; 2006 /// let p = 3; 2007 /// let q = 4; 2008 /// let ndofs = p * 
ne - ne + 1; 2009 /// 2010 /// // Vectors 2011 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 2012 /// let mut qdata_mass = ceed.vector(ne * q)?; 2013 /// qdata_mass.set_value(0.0); 2014 /// let mut qdata_diff = ceed.vector(ne * q)?; 2015 /// qdata_diff.set_value(0.0); 2016 /// let mut u = ceed.vector(ndofs)?; 2017 /// u.set_value(1.0); 2018 /// let mut v = ceed.vector(ndofs)?; 2019 /// v.set_value(0.0); 2020 /// 2021 /// // Restrictions 2022 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2023 /// for i in 0..ne { 2024 /// indx[2 * i + 0] = i as i32; 2025 /// indx[2 * i + 1] = (i + 1) as i32; 2026 /// } 2027 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2028 /// let mut indu: Vec<i32> = vec![0; p * ne]; 2029 /// for i in 0..ne { 2030 /// indu[p * i + 0] = i as i32; 2031 /// indu[p * i + 1] = (i + 1) as i32; 2032 /// indu[p * i + 2] = (i + 2) as i32; 2033 /// } 2034 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 2035 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2036 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2037 /// 2038 /// // Bases 2039 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2040 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 2041 /// 2042 /// // Build quadrature data 2043 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2044 /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2045 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2046 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2047 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2048 /// .apply(&x, &mut qdata_mass)?; 2049 /// 2050 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2051 /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2052 /// .field("dx", &rx, &bx, VectorOpt::Active)? 
2053 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2054 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2055 /// .apply(&x, &mut qdata_diff)?; 2056 /// 2057 /// // Application operator 2058 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2059 /// let op_mass = ceed 2060 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2061 /// .field("u", &ru, &bu, VectorOpt::Active)? 2062 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2063 /// .field("v", &ru, &bu, VectorOpt::Active)?; 2064 /// 2065 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2066 /// let op_diff = ceed 2067 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2068 /// .field("du", &ru, &bu, VectorOpt::Active)? 2069 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2070 /// .field("dv", &ru, &bu, VectorOpt::Active)?; 2071 /// 2072 /// let op_composite = ceed 2073 /// .composite_operator()? 2074 /// .sub_operator(&op_mass)? 
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(0.0);
    /// op_composite.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Overwrites `output` with the composite Operator action (contrast
        // with apply_add below, which sums into it); delegates to the shared
        // OperatorCore wrapper.
        self.op_core.apply(input, output)
    }

    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq =
ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2134 /// 2135 /// // Bases 2136 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2137 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 2138 /// 2139 /// // Build quadrature data 2140 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2141 /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2142 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2143 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2144 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2145 /// .apply(&x, &mut qdata_mass)?; 2146 /// 2147 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2148 /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2149 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2150 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2151 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2152 /// .apply(&x, &mut qdata_diff)?; 2153 /// 2154 /// // Application operator 2155 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2156 /// let op_mass = ceed 2157 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2158 /// .field("u", &ru, &bu, VectorOpt::Active)? 2159 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2160 /// .field("v", &ru, &bu, VectorOpt::Active)?; 2161 /// 2162 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2163 /// let op_diff = ceed 2164 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2165 /// .field("du", &ru, &bu, VectorOpt::Active)? 2166 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2167 /// .field("dv", &ru, &bu, VectorOpt::Active)?; 2168 /// 2169 /// let op_composite = ceed 2170 /// .composite_operator()? 2171 /// .sub_operator(&op_mass)? 
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(1.0);
    /// op_composite.apply_add(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Sums the composite Operator action into `output` rather than
        // overwriting it; delegates to the shared OperatorCore wrapper.
        self.op_core.apply_add(input, output)
    }

    /// Add a sub-Operator to a Composite Operator
    ///
    /// * `subop` - Sub-Operator
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let mut op = ceed.composite_operator()?;
    ///
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?;
    /// op = op.sub_operator(&op_mass)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?;
    /// op = op.sub_operator(&op_diff)?;
    /// # Ok(())
    /// # }
    /// ```
    // `mut self` keeps the builder-style, chainable signature; no local
    // mutation is currently needed, hence the allow.
    #[allow(unused_mut)]
    pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
        let ierr =
            unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
        self.op_core.check_error(ierr)?;
        Ok(self)
    }

    /// Assemble the diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
2224 /// 2225 /// * `op` - Operator to assemble QFunction 2226 /// * `assembled` - Vector to store assembled Operator diagonal 2227 pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2228 self.op_core.linear_assemble_add_diagonal(assembled) 2229 } 2230 2231 /// Assemble the point block diagonal of a square linear Operator 2232 /// 2233 /// This overwrites a Vector with the point block diagonal of a linear 2234 /// Operator. 2235 /// 2236 /// Note: Currently only non-composite Operators with a single field and 2237 /// composite Operators with single field sub-operators are supported. 2238 /// 2239 /// * `op` - Operator to assemble QFunction 2240 /// * `assembled` - Vector to store assembled Operator diagonal 2241 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2242 self.op_core.linear_assemble_add_diagonal(assembled) 2243 } 2244 2245 /// Assemble the diagonal of a square linear Operator 2246 /// 2247 /// This overwrites a Vector with the diagonal of a linear Operator. 2248 /// 2249 /// Note: Currently only non-composite Operators with a single field and 2250 /// composite Operators with single field sub-operators are supported. 2251 /// 2252 /// * `op` - Operator to assemble QFunction 2253 /// * `assembled` - Vector to store assembled CeedOperator point block 2254 /// diagonal, provided in row-major form with an 2255 /// `ncomp * ncomp` block at each node. The dimensions of 2256 /// this vector are derived from the active vector for 2257 /// the CeedOperator. The array has shape 2258 /// `[nodes, component out, component in]`. 2259 pub fn linear_assemble_point_block_diagonal( 2260 &self, 2261 assembled: &mut Vector, 2262 ) -> crate::Result<i32> { 2263 self.op_core.linear_assemble_add_diagonal(assembled) 2264 } 2265 2266 /// Assemble the diagonal of a square linear Operator 2267 /// 2268 /// This sums into a Vector with the diagonal of a linear Operator. 
2269 /// 2270 /// Note: Currently only non-composite Operators with a single field and 2271 /// composite Operators with single field sub-operators are supported. 2272 /// 2273 /// * `op` - Operator to assemble QFunction 2274 /// * `assembled` - Vector to store assembled CeedOperator point block 2275 /// diagonal, provided in row-major form with an 2276 /// `ncomp * ncomp` block at each node. The dimensions of 2277 /// this vector are derived from the active vector for 2278 /// the CeedOperator. The array has shape 2279 /// `[nodes, component out, component in]`. 2280 pub fn linear_assemble_add_point_block_diagonal( 2281 &self, 2282 assembled: &mut Vector, 2283 ) -> crate::Result<i32> { 2284 self.op_core.linear_assemble_add_diagonal(assembled) 2285 } 2286 } 2287 2288 // ----------------------------------------------------------------------------- 2289