1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at 2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights 3 // reserved. See files LICENSE and NOTICE for details. 4 // 5 // This file is part of CEED, a collection of benchmarks, miniapps, software 6 // libraries and APIs for efficient high-order finite element and spectral 7 // element discretizations for exascale applications. For more information and 8 // source code availability see http://github.com/ceed. 9 // 10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, 11 // a collaborative effort of two U.S. Department of Energy organizations (Office 12 // of Science and the National Nuclear Security Administration) responsible for 13 // the planning and preparation of a capable exascale ecosystem, including 14 // software, applications, hardware, advanced system engineering and early 15 // testbed platforms, in support of the nation's exascale computing imperative 16 17 //! A Ceed Operator defines the finite/spectral element operator associated to a 18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions, 19 //! Ceed Bases, and Ceed QFunctions. 
use crate::prelude::*;

// -----------------------------------------------------------------------------
// CeedOperator Field context wrapper
// -----------------------------------------------------------------------------
// Thin wrapper over a raw `bind_ceed::CeedOperatorField` handle. The
// `PhantomData` lifetime parameter ties the wrapper to the Operator the field
// came from, so a field handle cannot outlive its Operator.
#[derive(Debug)]
pub struct OperatorField<'a> {
    pub(crate) ptr: bind_ceed::CeedOperatorField,
    _lifeline: PhantomData<&'a ()>,
}

// -----------------------------------------------------------------------------
// Implementations
// -----------------------------------------------------------------------------
impl<'a> OperatorField<'a> {
    /// Get the name of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert_eq!(inputs[0].name(), "dx", "Incorrect input name");
    /// assert_eq!(inputs[1].name(), "weights", "Incorrect input name");
    /// # Ok(())
    /// # }
    /// ```
    pub fn name(&self) -> &str {
        let mut name_ptr: *mut std::os::raw::c_char = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorFieldGetName(self.ptr, &mut name_ptr);
        }
        // The C library owns the returned string; we only borrow it here.
        // NOTE(review): `to_str().unwrap()` assumes the field name is valid
        // UTF-8 — confirm the C side guarantees this.
        unsafe { CStr::from_ptr(name_ptr) }.to_str().unwrap()
    }

    /// Get the ElemRestriction of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(
    ///     inputs[0].elem_restriction().is_some(),
    ///     "Incorrect field ElemRestriction"
    /// );
    /// if let ElemRestrictionOpt::Some(r) = inputs[0].elem_restriction() {
    ///     assert_eq!(
    ///         r.num_elements(),
    ///         ne,
    ///         "Incorrect field ElemRestriction number of elements"
    ///     );
    /// }
    ///
    /// assert!(
    ///     inputs[1].elem_restriction().is_none(),
    ///     "Incorrect field ElemRestriction"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn elem_restriction(&self) -> ElemRestrictionOpt {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorFieldGetElemRestriction(self.ptr, &mut ptr);
        }
        // The library-owned sentinel marks "no restriction" for this field.
        if ptr == unsafe { bind_ceed::CEED_ELEMRESTRICTION_NONE } {
            ElemRestrictionOpt::None
        } else {
            // Reinterpret the address of the raw handle as a one-element slice
            // of the wrapper type so a borrowed wrapper can be handed out
            // without taking ownership of the C object.
            // NOTE(review): the slice is built from the address of the local
            // `ptr`, yet the returned reference escapes this function — verify
            // the lifetime of `&slice[0]` is actually sound and consider
            // restructuring (same pattern in `basis()` and `vector()` below).
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedElemRestriction as *const crate::ElemRestriction,
                    1 as usize,
                )
            };
            ElemRestrictionOpt::Some(&slice[0])
        }
    }

    /// Get the Basis of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(inputs[0].basis().is_some(), "Incorrect field Basis");
    /// if let BasisOpt::Some(b) = inputs[0].basis() {
    ///     assert_eq!(
    ///         b.num_quadrature_points(),
    ///         q,
    ///         "Incorrect field Basis number of quadrature points"
    ///     );
    /// }
    /// assert!(inputs[1].basis().is_some(), "Incorrect field Basis");
    /// if let BasisOpt::Some(b) = inputs[1].basis() {
    ///     assert_eq!(
    ///         b.num_quadrature_points(),
    ///         q,
    ///         "Incorrect field Basis number of quadrature points"
    ///     );
    /// }
    ///
    /// let outputs = op.outputs()?;
    ///
    /// assert!(outputs[0].basis().is_collocated(), "Incorrect field Basis");
    /// # Ok(())
    /// # }
    /// ```
    pub fn basis(&self) -> BasisOpt {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorFieldGetBasis(self.ptr, &mut ptr);
        }
        // Collocated fields carry the library-owned sentinel basis.
        if ptr == unsafe { bind_ceed::CEED_BASIS_COLLOCATED } {
            BasisOpt::Collocated
        } else {
            // Same borrowed-wrapper trick as `elem_restriction()`; see the
            // NOTE(review) there about the escaping reference.
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedBasis as *const crate::Basis,
                    1 as usize,
                )
            };
            BasisOpt::Some(&slice[0])
        }
    }

    /// Get the Vector of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(inputs[0].vector().is_active(), "Incorrect field Vector");
    /// assert!(inputs[1].vector().is_none(), "Incorrect field Vector");
    /// # Ok(())
    /// # }
    /// ```
    pub fn vector(&self) -> VectorOpt {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorFieldGetVector(self.ptr, &mut ptr);
        }
        // Two library-owned sentinels: "active" (user supplies the vector at
        // apply time) and "none" (no vector, e.g. quadrature weights).
        if ptr == unsafe { bind_ceed::CEED_VECTOR_ACTIVE } {
            VectorOpt::Active
        } else if ptr == unsafe { bind_ceed::CEED_VECTOR_NONE } {
            VectorOpt::None
        } else {
            // Same borrowed-wrapper trick as `elem_restriction()`; see the
            // NOTE(review) there about the escaping reference.
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedVector as *const crate::Vector,
                    1 as usize,
                )
            };
            VectorOpt::Some(&slice[0])
        }
    }
}

// -----------------------------------------------------------------------------
// CeedOperator context wrapper
// -----------------------------------------------------------------------------
// Shared core owning the raw `CeedOperator` handle; both `Operator` and
// `CompositeOperator` wrap one of these, and the Drop impl below destroys the
// underlying C object.
#[derive(Debug)]
pub(crate) struct OperatorCore<'a> {
    ptr: bind_ceed::CeedOperator,
    _lifeline: PhantomData<&'a ()>,
}

#[derive(Debug)]
pub struct Operator<'a> {
    op_core: OperatorCore<'a>,
}

#[derive(Debug)]
pub struct CompositeOperator<'a> {
    op_core: OperatorCore<'a>,
}

// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Destructor
// -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    fn drop(&mut self) {
        // Release the underlying C operator object.
        unsafe {
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}

// -----------------------------------------------------------------------------
// Display
// -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        // Route the C library's FILE*-based view function through an in-memory
        // stream, then take ownership of the resulting buffer as a CString so
        // it is freed when we are done.
        let cstring = unsafe {
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            CString::from_raw(ptr)
        };
        cstring.to_string_lossy().fmt(f)
    }
}

/// View an Operator
///
/// ```
/// # use libceed::prelude::*;
/// # fn main() -> libceed::Result<()> {
/// # let ceed = libceed::Ceed::default_init();
/// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
///
/// // Operator field arguments
/// let ne = 3;
/// let q = 4 as usize;
/// let mut ind: Vec<i32> = vec![0; 2 * ne];
/// for i in 0..ne {
///     ind[2 * i + 0] = i as i32;
///     ind[2 * i + 1] = (i + 1) as i32;
/// }
/// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
/// let strides: [i32; 3] = [1, q as i32, q as i32];
/// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
///
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
///
/// // Operator fields
/// let op = ceed
///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("dx", &r, &b, VectorOpt::Active)?
///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
///
/// println!("{}", op);
/// # Ok(())
/// # }
/// ```
impl<'a> fmt::Display for Operator<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.op_core.fmt(f)
    }
}

/// View a composite Operator
///
/// ```
/// # use libceed::prelude::*;
/// # fn main() -> libceed::Result<()> {
/// # let ceed = libceed::Ceed::default_init();
///
/// // Sub operator field arguments
/// let ne = 3;
/// let q = 4 as usize;
/// let mut ind: Vec<i32> = vec![0; 2 * ne];
/// for i in 0..ne {
///     ind[2 * i + 0] = i as i32;
///     ind[2 * i + 1] = (i + 1) as i32;
/// }
/// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
/// let strides: [i32; 3] = [1, q as i32, q as i32];
/// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
///
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
///
/// let qdata_mass = ceed.vector(q * ne)?;
/// let qdata_diff = ceed.vector(q * ne)?;
///
/// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
/// let op_mass = ceed
///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("u", &r, &b, VectorOpt::Active)?
///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
///     .field("v", &r, &b, VectorOpt::Active)?;
///
/// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
/// let op_diff = ceed
///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
///     .field("du", &r, &b, VectorOpt::Active)?
///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
///     .field("dv", &r, &b, VectorOpt::Active)?;
///
/// let op = ceed
///     .composite_operator()?
///     .sub_operator(&op_mass)?
///     .sub_operator(&op_diff)?;
///
/// println!("{}", op);
/// # Ok(())
/// # }
/// ```
impl<'a> fmt::Display for CompositeOperator<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.op_core.fmt(f)
    }
}

// -----------------------------------------------------------------------------
// Core functionality
// -----------------------------------------------------------------------------
impl<'a> OperatorCore<'a> {
    // Error handling
    // Translate a raw return code into a crate Result, using the parent Ceed
    // context to recover the error message.
    #[doc(hidden)]
    fn check_error(&self, ierr: i32) -> crate::Result<i32> {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr);
        }
        crate::check_error(ptr, ierr)
    }

    // Common implementations
    // Ask the C library whether the operator is fully configured.
    pub fn check(&self) -> crate::Result<i32> {
        let ierr = unsafe { bind_ceed::CeedOperatorCheckReady(self.ptr) };
        self.check_error(ierr)
    }

    // Apply the operator, overwriting `output`; blocks until complete
    // (CEED_REQUEST_IMMEDIATE).
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApply(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    // Apply the operator, summing into `output` instead of overwriting it.
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApplyAdd(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    // Overwrite `assembled` with the diagonal of the (square, linear) operator.
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    // Sum the diagonal of the operator into `assembled`.
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    // Overwrite `assembled` with the point-block diagonal of the operator.
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    // Sum the point-block diagonal of the operator into `assembled`.
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }
}

// -----------------------------------------------------------------------------
// Operator
// -----------------------------------------------------------------------------
impl<'a> Operator<'a> {
    // Constructor
    // `qf` applies the action; `dqf`/`dqfT` are the optional derivative
    // QFunctions (or QFunctionOpt::None).
    pub fn create<'b>(
        ceed: &'a crate::Ceed,
        qf: impl Into<QFunctionOpt<'b>>,
        dqf: impl Into<QFunctionOpt<'b>>,
        dqfT: impl Into<QFunctionOpt<'b>>,
    ) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorCreate(
                ceed.ptr,
                qf.into().to_raw(),
                dqf.into().to_raw(),
                dqfT.into().to_raw(),
                &mut ptr,
            )
        };
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }

    // Wrap an already-created raw handle; takes ownership (Drop will destroy).
    fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }

    /// Apply Operator to a vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
561 /// let q = 4; 562 /// let ndofs = p * ne - ne + 1; 563 /// 564 /// // Vectors 565 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 566 /// let mut qdata = ceed.vector(ne * q)?; 567 /// qdata.set_value(0.0); 568 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 569 /// let mut v = ceed.vector(ndofs)?; 570 /// v.set_value(0.0); 571 /// 572 /// // Restrictions 573 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 574 /// for i in 0..ne { 575 /// indx[2 * i + 0] = i as i32; 576 /// indx[2 * i + 1] = (i + 1) as i32; 577 /// } 578 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 579 /// let mut indu: Vec<i32> = vec![0; p * ne]; 580 /// for i in 0..ne { 581 /// indu[p * i + 0] = i as i32; 582 /// indu[p * i + 1] = (i + 1) as i32; 583 /// indu[p * i + 2] = (i + 2) as i32; 584 /// } 585 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 586 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 587 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 588 /// 589 /// // Bases 590 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 591 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 592 /// 593 /// // Build quadrature data 594 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 595 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 596 /// .field("dx", &rx, &bx, VectorOpt::Active)? 597 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 598 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 599 /// .apply(&x, &mut qdata)?; 600 /// 601 /// // Mass operator 602 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 603 /// let op_mass = ceed 604 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 605 /// .field("u", &ru, &bu, VectorOpt::Active)? 606 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
607 /// .field("v", &ru, &bu, VectorOpt::Active)?; 608 /// 609 /// v.set_value(0.0); 610 /// op_mass.apply(&u, &mut v)?; 611 /// 612 /// // Check 613 /// let sum: Scalar = v.view()?.iter().sum(); 614 /// assert!( 615 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 616 /// "Incorrect interval length computed" 617 /// ); 618 /// # Ok(()) 619 /// # } 620 /// ``` 621 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 622 self.op_core.apply(input, output) 623 } 624 625 /// Apply Operator to a vector and add result to output vector 626 /// 627 /// * `input` - Input Vector 628 /// * `output` - Output Vector 629 /// 630 /// ``` 631 /// # use libceed::prelude::*; 632 /// # fn main() -> libceed::Result<()> { 633 /// # let ceed = libceed::Ceed::default_init(); 634 /// let ne = 4; 635 /// let p = 3; 636 /// let q = 4; 637 /// let ndofs = p * ne - ne + 1; 638 /// 639 /// // Vectors 640 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 641 /// let mut qdata = ceed.vector(ne * q)?; 642 /// qdata.set_value(0.0); 643 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 644 /// let mut v = ceed.vector(ndofs)?; 645 /// 646 /// // Restrictions 647 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 648 /// for i in 0..ne { 649 /// indx[2 * i + 0] = i as i32; 650 /// indx[2 * i + 1] = (i + 1) as i32; 651 /// } 652 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 653 /// let mut indu: Vec<i32> = vec![0; p * ne]; 654 /// for i in 0..ne { 655 /// indu[p * i + 0] = i as i32; 656 /// indu[p * i + 1] = (i + 1) as i32; 657 /// indu[p * i + 2] = (i + 2) as i32; 658 /// } 659 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 660 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 661 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 662 /// 663 /// // Bases 664 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 665 /// let bu = 
ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 666 /// 667 /// // Build quadrature data 668 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 669 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 670 /// .field("dx", &rx, &bx, VectorOpt::Active)? 671 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 672 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 673 /// .apply(&x, &mut qdata)?; 674 /// 675 /// // Mass operator 676 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 677 /// let op_mass = ceed 678 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 679 /// .field("u", &ru, &bu, VectorOpt::Active)? 680 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 681 /// .field("v", &ru, &bu, VectorOpt::Active)?; 682 /// 683 /// v.set_value(1.0); 684 /// op_mass.apply_add(&u, &mut v)?; 685 /// 686 /// // Check 687 /// let sum: Scalar = v.view()?.iter().sum(); 688 /// assert!( 689 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 690 /// "Incorrect interval length computed and added" 691 /// ); 692 /// # Ok(()) 693 /// # } 694 /// ``` 695 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 696 self.op_core.apply_add(input, output) 697 } 698 699 /// Provide a field to a Operator for use by its QFunction 700 /// 701 /// * `fieldname` - Name of the field (to be matched with the name used by 702 /// the QFunction) 703 /// * `r` - ElemRestriction 704 /// * `b` - Basis in which the field resides or `BasisCollocated` if 705 /// collocated with quadrature points 706 /// * `v` - Vector to be used by Operator or `VectorActive` if field 707 /// is active or `VectorNone` if using `Weight` with the 708 /// QFunction 709 /// 710 /// 711 /// ``` 712 /// # use libceed::prelude::*; 713 /// # fn main() -> libceed::Result<()> { 714 /// # let ceed = libceed::Ceed::default_init(); 715 /// let qf = 
ceed.q_function_interior_by_name("Mass1DBuild")?; 716 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 717 /// 718 /// // Operator field arguments 719 /// let ne = 3; 720 /// let q = 4; 721 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 722 /// for i in 0..ne { 723 /// ind[2 * i + 0] = i as i32; 724 /// ind[2 * i + 1] = (i + 1) as i32; 725 /// } 726 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 727 /// 728 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 729 /// 730 /// // Operator field 731 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 732 /// # Ok(()) 733 /// # } 734 /// ``` 735 #[allow(unused_mut)] 736 pub fn field<'b>( 737 mut self, 738 fieldname: &str, 739 r: impl Into<ElemRestrictionOpt<'b>>, 740 b: impl Into<BasisOpt<'b>>, 741 v: impl Into<VectorOpt<'b>>, 742 ) -> crate::Result<Self> { 743 let fieldname = CString::new(fieldname).expect("CString::new failed"); 744 let fieldname = fieldname.as_ptr() as *const i8; 745 let ierr = unsafe { 746 bind_ceed::CeedOperatorSetField( 747 self.op_core.ptr, 748 fieldname, 749 r.into().to_raw(), 750 b.into().to_raw(), 751 v.into().to_raw(), 752 ) 753 }; 754 self.op_core.check_error(ierr)?; 755 Ok(self) 756 } 757 758 /// Get a slice of Operator inputs 759 /// 760 /// ``` 761 /// # use libceed::prelude::*; 762 /// # fn main() -> libceed::Result<()> { 763 /// # let ceed = libceed::Ceed::default_init(); 764 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 765 /// 766 /// // Operator field arguments 767 /// let ne = 3; 768 /// let q = 4 as usize; 769 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 770 /// for i in 0..ne { 771 /// ind[2 * i + 0] = i as i32; 772 /// ind[2 * i + 1] = (i + 1) as i32; 773 /// } 774 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 775 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 776 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 777 /// 778 
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 779 /// 780 /// // Operator fields 781 /// let op = ceed 782 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 783 /// .field("dx", &r, &b, VectorOpt::Active)? 784 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 785 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 786 /// 787 /// let inputs = op.inputs()?; 788 /// 789 /// assert_eq!(inputs.len(), 2, "Incorrect inputs array"); 790 /// # Ok(()) 791 /// # } 792 /// ``` 793 pub fn inputs(&self) -> crate::Result<&[crate::OperatorField]> { 794 // Get array of raw C pointers for inputs 795 let mut num_inputs = 0; 796 let mut inputs_ptr = std::ptr::null_mut(); 797 let ierr = unsafe { 798 bind_ceed::CeedOperatorGetFields( 799 self.op_core.ptr, 800 &mut num_inputs, 801 &mut inputs_ptr, 802 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 803 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 804 ) 805 }; 806 self.op_core.check_error(ierr)?; 807 // Convert raw C pointers to fixed length slice 808 let inputs_slice = unsafe { 809 std::slice::from_raw_parts( 810 inputs_ptr as *const crate::OperatorField, 811 num_inputs as usize, 812 ) 813 }; 814 Ok(inputs_slice) 815 } 816 817 /// Get a slice of Operator outputs 818 /// 819 /// ``` 820 /// # use libceed::prelude::*; 821 /// # fn main() -> libceed::Result<()> { 822 /// # let ceed = libceed::Ceed::default_init(); 823 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 824 /// 825 /// // Operator field arguments 826 /// let ne = 3; 827 /// let q = 4 as usize; 828 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 829 /// for i in 0..ne { 830 /// ind[2 * i + 0] = i as i32; 831 /// ind[2 * i + 1] = (i + 1) as i32; 832 /// } 833 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 834 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 835 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 836 /// 
837 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 838 /// 839 /// // Operator fields 840 /// let op = ceed 841 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 842 /// .field("dx", &r, &b, VectorOpt::Active)? 843 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 844 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 845 /// 846 /// let outputs = op.outputs()?; 847 /// 848 /// assert_eq!(outputs.len(), 1, "Incorrect outputs array"); 849 /// # Ok(()) 850 /// # } 851 /// ``` 852 pub fn outputs(&self) -> crate::Result<&[crate::OperatorField]> { 853 // Get array of raw C pointers for outputs 854 let mut num_outputs = 0; 855 let mut outputs_ptr = std::ptr::null_mut(); 856 let ierr = unsafe { 857 bind_ceed::CeedOperatorGetFields( 858 self.op_core.ptr, 859 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 860 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 861 &mut num_outputs, 862 &mut outputs_ptr, 863 ) 864 }; 865 self.op_core.check_error(ierr)?; 866 // Convert raw C pointers to fixed length slice 867 let outputs_slice = unsafe { 868 std::slice::from_raw_parts( 869 outputs_ptr as *const crate::OperatorField, 870 num_outputs as usize, 871 ) 872 }; 873 Ok(outputs_slice) 874 } 875 876 /// Check if Operator is setup correctly 877 /// 878 /// ``` 879 /// # use libceed::prelude::*; 880 /// # fn main() -> libceed::Result<()> { 881 /// # let ceed = libceed::Ceed::default_init(); 882 /// let ne = 4; 883 /// let p = 3; 884 /// let q = 4; 885 /// let ndofs = p * ne - ne + 1; 886 /// 887 /// // Restrictions 888 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 889 /// for i in 0..ne { 890 /// indx[2 * i + 0] = i as i32; 891 /// indx[2 * i + 1] = (i + 1) as i32; 892 /// } 893 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 894 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 895 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 896 /// 897 
/// // Bases 898 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 899 /// 900 /// // Build quadrature data 901 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 902 /// let op_build = ceed 903 /// .operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 904 /// .field("dx", &rx, &bx, VectorOpt::Active)? 905 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 906 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 907 /// 908 /// // Check 909 /// assert!(op_build.check().is_ok(), "Operator setup incorrectly"); 910 /// # Ok(()) 911 /// # } 912 /// ``` 913 pub fn check(self) -> crate::Result<Self> { 914 self.op_core.check()?; 915 Ok(self) 916 } 917 918 /// Get the number of elements associated with an Operator 919 /// 920 /// 921 /// ``` 922 /// # use libceed::prelude::*; 923 /// # fn main() -> libceed::Result<()> { 924 /// # let ceed = libceed::Ceed::default_init(); 925 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 926 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 927 /// 928 /// // Operator field arguments 929 /// let ne = 3; 930 /// let q = 4; 931 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 932 /// for i in 0..ne { 933 /// ind[2 * i + 0] = i as i32; 934 /// ind[2 * i + 1] = (i + 1) as i32; 935 /// } 936 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 937 /// 938 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 939 /// 940 /// // Operator field 941 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 942 /// 943 /// // Check number of elements 944 /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements"); 945 /// # Ok(()) 946 /// # } 947 /// ``` 948 pub fn num_elements(&self) -> usize { 949 let mut nelem = 0; 950 unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) }; 951 usize::try_from(nelem).unwrap() 952 } 953 954 /// Get the number of 
quadrature points associated with each element of 955 /// an Operator 956 /// 957 /// 958 /// ``` 959 /// # use libceed::prelude::*; 960 /// # fn main() -> libceed::Result<()> { 961 /// # let ceed = libceed::Ceed::default_init(); 962 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 963 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 964 /// 965 /// // Operator field arguments 966 /// let ne = 3; 967 /// let q = 4; 968 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 969 /// for i in 0..ne { 970 /// ind[2 * i + 0] = i as i32; 971 /// ind[2 * i + 1] = (i + 1) as i32; 972 /// } 973 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 974 /// 975 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 976 /// 977 /// // Operator field 978 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 979 /// 980 /// // Check number of quadrature points 981 /// assert_eq!( 982 /// op.num_quadrature_points(), 983 /// q, 984 /// "Incorrect number of quadrature points" 985 /// ); 986 /// # Ok(()) 987 /// # } 988 /// ``` 989 pub fn num_quadrature_points(&self) -> usize { 990 let mut Q = 0; 991 unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) }; 992 usize::try_from(Q).unwrap() 993 } 994 995 /// Assemble the diagonal of a square linear Operator 996 /// 997 /// This overwrites a Vector with the diagonal of a linear Operator. 998 /// 999 /// Note: Currently only non-composite Operators with a single field and 1000 /// composite Operators with single field sub-operators are supported. 
    ///
    /// * `assembled` - Vector overwritten with the assembled Operator diagonal
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(0.0);
    /// op_mass.linear_assemble_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0);
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view_mut()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i];
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper: the shared operator core performs the assembly.
        self.op_core.linear_assemble_diagonal(assembled)
    }

    /// Assemble the diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector the assembled Operator diagonal is summed into
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(1.0);
    /// op_mass.linear_assemble_add_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0);
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view_mut()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i] + 1.0;
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper: the shared operator core performs the assembly.
        self.op_core.linear_assemble_add_diagonal(assembled)
    }

    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                 diagonal, provided in row-major form with an
    ///                 `ncomp * ncomp` block at each node. The dimensions of
    ///                 this vector are derived from the active vector for
    ///                 the CeedOperator. The array has shape
    ///                 `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(0.0);
    /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0);
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view_mut()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: the shared operator core performs the assembly.
        self.op_core.linear_assemble_point_block_diagonal(assembled)
    }

    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                 diagonal, provided in row-major form with an
    ///                 `ncomp * ncomp` block at each node.
    ///                 The dimensions of
    ///                 this vector are derived from the active vector for
    ///                 the CeedOperator. The array has shape
    ///                 `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(1.0);
    /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0);
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view_mut()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: the shared operator core performs the assembly.
        self.op_core
            .linear_assemble_add_point_block_diagonal(assembled)
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator, creating the prolongation basis from the fine and
    /// coarse grid interpolation
    ///
    /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse` - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    ///
    /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_coarse {
    ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_coarse = ceed.elem_restriction(
    ///     ne,
    ///     p_coarse,
    ///     1,
    ///     1,
    ///     ndofs_coarse,
    ///     MemType::Host,
    ///     &indu_coarse,
    /// )?;
    ///
    /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_fine {
    ///         indu_fine[p_fine * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
    /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass_fine = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let (op_mass_coarse, op_prolong, op_restrict) =
    ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
    ) -> crate::Result<(Operator, Operator, Operator)> {
        // Out-pointers filled by the C library on success.
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreate(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        self.op_core.check_error(ierr)?;
        // Wrap the raw handles so Rust owns (and will destroy) them.
        let op_coarse = Operator::from_raw(ptr_coarse)?;
        let op_prolong = Operator::from_raw(ptr_prolong)?;
        let op_restrict = Operator::from_raw(ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator with a tensor basis for the active basis
    ///
    /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse` - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    /// * `interp_c_to_f` - Matrix for coarse to fine
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
multiplicity.set_value(1.0); 1679 /// 1680 /// // Restrictions 1681 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1682 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1683 /// 1684 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1685 /// for i in 0..ne { 1686 /// indx[2 * i + 0] = i as i32; 1687 /// indx[2 * i + 1] = (i + 1) as i32; 1688 /// } 1689 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1690 /// 1691 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1692 /// for i in 0..ne { 1693 /// for j in 0..p_coarse { 1694 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1695 /// } 1696 /// } 1697 /// let ru_coarse = ceed.elem_restriction( 1698 /// ne, 1699 /// p_coarse, 1700 /// 1, 1701 /// 1, 1702 /// ndofs_coarse, 1703 /// MemType::Host, 1704 /// &indu_coarse, 1705 /// )?; 1706 /// 1707 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1708 /// for i in 0..ne { 1709 /// for j in 0..p_fine { 1710 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1711 /// } 1712 /// } 1713 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1714 /// 1715 /// // Bases 1716 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1717 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1718 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1719 /// 1720 /// // Build quadrature data 1721 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1722 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1723 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1724 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1725 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 
1726 /// .apply(&x, &mut qdata)?; 1727 /// 1728 /// // Mass operator 1729 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1730 /// let op_mass_fine = ceed 1731 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1732 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1733 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1734 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1735 /// 1736 /// // Multigrid setup 1737 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1738 /// { 1739 /// let mut coarse = ceed.vector(p_coarse)?; 1740 /// let mut fine = ceed.vector(p_fine)?; 1741 /// let basis_c_to_f = 1742 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1743 /// for i in 0..p_coarse { 1744 /// coarse.set_value(0.0); 1745 /// { 1746 /// let mut array = coarse.view_mut()?; 1747 /// array[i] = 1.; 1748 /// } 1749 /// basis_c_to_f.apply( 1750 /// 1, 1751 /// TransposeMode::NoTranspose, 1752 /// EvalMode::Interp, 1753 /// &coarse, 1754 /// &mut fine, 1755 /// )?; 1756 /// let array = fine.view()?; 1757 /// for j in 0..p_fine { 1758 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1759 /// } 1760 /// } 1761 /// } 1762 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1( 1763 /// &multiplicity, 1764 /// &ru_coarse, 1765 /// &bu_coarse, 1766 /// &interp_c_to_f, 1767 /// )?; 1768 /// 1769 /// // Coarse problem 1770 /// u_coarse.set_value(1.0); 1771 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1772 /// 1773 /// // Check 1774 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1775 /// assert!( 1776 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1777 /// "Incorrect interval length computed" 1778 /// ); 1779 /// 1780 /// // Prolong 1781 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1782 /// 1783 /// // Fine problem 1784 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1785 /// 1786 /// // Check 1787 /// let sum: Scalar = 
v_fine.view()?.iter().sum(); 1788 /// assert!( 1789 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1790 /// "Incorrect interval length computed" 1791 /// ); 1792 /// 1793 /// // Restrict 1794 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1795 /// 1796 /// // Check 1797 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1798 /// assert!( 1799 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1800 /// "Incorrect interval length computed" 1801 /// ); 1802 /// # Ok(()) 1803 /// # } 1804 /// ``` 1805 pub fn create_multigrid_level_tensor_H1( 1806 &self, 1807 p_mult_fine: &Vector, 1808 rstr_coarse: &ElemRestriction, 1809 basis_coarse: &Basis, 1810 interpCtoF: &Vec<Scalar>, 1811 ) -> crate::Result<(Operator, Operator, Operator)> { 1812 let mut ptr_coarse = std::ptr::null_mut(); 1813 let mut ptr_prolong = std::ptr::null_mut(); 1814 let mut ptr_restrict = std::ptr::null_mut(); 1815 let ierr = unsafe { 1816 bind_ceed::CeedOperatorMultigridLevelCreateTensorH1( 1817 self.op_core.ptr, 1818 p_mult_fine.ptr, 1819 rstr_coarse.ptr, 1820 basis_coarse.ptr, 1821 interpCtoF.as_ptr(), 1822 &mut ptr_coarse, 1823 &mut ptr_prolong, 1824 &mut ptr_restrict, 1825 ) 1826 }; 1827 self.op_core.check_error(ierr)?; 1828 let op_coarse = Operator::from_raw(ptr_coarse)?; 1829 let op_prolong = Operator::from_raw(ptr_prolong)?; 1830 let op_restrict = Operator::from_raw(ptr_restrict)?; 1831 Ok((op_coarse, op_prolong, op_restrict)) 1832 } 1833 1834 /// Create a multigrid coarse Operator and level transfer Operators for a 1835 /// given Operator with a non-tensor basis for the active basis 1836 /// 1837 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1838 /// * `rstr_coarse` - Coarse grid restriction 1839 /// * `basis_coarse` - Coarse grid active vector basis 1840 /// * `interp_c_to_f` - Matrix for coarse to fine 1841 /// 1842 /// ``` 1843 /// # use libceed::prelude::*; 1844 /// # fn main() -> libceed::Result<()> { 1845 /// # let ceed = libceed::Ceed::default_init(); 1846 /// let 
ne = 15; 1847 /// let p_coarse = 3; 1848 /// let p_fine = 5; 1849 /// let q = 6; 1850 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1851 /// let ndofs_fine = p_fine * ne - ne + 1; 1852 /// 1853 /// // Vectors 1854 /// let x_array = (0..ne + 1) 1855 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1856 /// .collect::<Vec<Scalar>>(); 1857 /// let x = ceed.vector_from_slice(&x_array)?; 1858 /// let mut qdata = ceed.vector(ne * q)?; 1859 /// qdata.set_value(0.0); 1860 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1861 /// u_coarse.set_value(1.0); 1862 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1863 /// u_fine.set_value(1.0); 1864 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1865 /// v_coarse.set_value(0.0); 1866 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1867 /// v_fine.set_value(0.0); 1868 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1869 /// multiplicity.set_value(1.0); 1870 /// 1871 /// // Restrictions 1872 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1873 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1874 /// 1875 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1876 /// for i in 0..ne { 1877 /// indx[2 * i + 0] = i as i32; 1878 /// indx[2 * i + 1] = (i + 1) as i32; 1879 /// } 1880 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1881 /// 1882 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1883 /// for i in 0..ne { 1884 /// for j in 0..p_coarse { 1885 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1886 /// } 1887 /// } 1888 /// let ru_coarse = ceed.elem_restriction( 1889 /// ne, 1890 /// p_coarse, 1891 /// 1, 1892 /// 1, 1893 /// ndofs_coarse, 1894 /// MemType::Host, 1895 /// &indu_coarse, 1896 /// )?; 1897 /// 1898 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1899 /// for i in 0..ne { 1900 /// for j in 0..p_fine { 1901 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1902 /// } 1903 /// } 1904 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 
1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1905 /// 1906 /// // Bases 1907 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1908 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1909 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1910 /// 1911 /// // Build quadrature data 1912 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1913 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1914 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1915 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1916 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1917 /// .apply(&x, &mut qdata)?; 1918 /// 1919 /// // Mass operator 1920 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1921 /// let op_mass_fine = ceed 1922 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1923 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1924 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
1925 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1926 /// 1927 /// // Multigrid setup 1928 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1929 /// { 1930 /// let mut coarse = ceed.vector(p_coarse)?; 1931 /// let mut fine = ceed.vector(p_fine)?; 1932 /// let basis_c_to_f = 1933 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1934 /// for i in 0..p_coarse { 1935 /// coarse.set_value(0.0); 1936 /// { 1937 /// let mut array = coarse.view_mut()?; 1938 /// array[i] = 1.; 1939 /// } 1940 /// basis_c_to_f.apply( 1941 /// 1, 1942 /// TransposeMode::NoTranspose, 1943 /// EvalMode::Interp, 1944 /// &coarse, 1945 /// &mut fine, 1946 /// )?; 1947 /// let array = fine.view()?; 1948 /// for j in 0..p_fine { 1949 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1950 /// } 1951 /// } 1952 /// } 1953 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1( 1954 /// &multiplicity, 1955 /// &ru_coarse, 1956 /// &bu_coarse, 1957 /// &interp_c_to_f, 1958 /// )?; 1959 /// 1960 /// // Coarse problem 1961 /// u_coarse.set_value(1.0); 1962 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1963 /// 1964 /// // Check 1965 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1966 /// assert!( 1967 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1968 /// "Incorrect interval length computed" 1969 /// ); 1970 /// 1971 /// // Prolong 1972 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1973 /// 1974 /// // Fine problem 1975 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1976 /// 1977 /// // Check 1978 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1979 /// assert!( 1980 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1981 /// "Incorrect interval length computed" 1982 /// ); 1983 /// 1984 /// // Restrict 1985 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1986 /// 1987 /// // Check 1988 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1989 /// assert!( 1990 /// (sum - 2.0).abs() < 
10.0 * libceed::EPSILON, 1991 /// "Incorrect interval length computed" 1992 /// ); 1993 /// # Ok(()) 1994 /// # } 1995 /// ``` 1996 pub fn create_multigrid_level_H1( 1997 &self, 1998 p_mult_fine: &Vector, 1999 rstr_coarse: &ElemRestriction, 2000 basis_coarse: &Basis, 2001 interpCtoF: &[Scalar], 2002 ) -> crate::Result<(Operator, Operator, Operator)> { 2003 let mut ptr_coarse = std::ptr::null_mut(); 2004 let mut ptr_prolong = std::ptr::null_mut(); 2005 let mut ptr_restrict = std::ptr::null_mut(); 2006 let ierr = unsafe { 2007 bind_ceed::CeedOperatorMultigridLevelCreateH1( 2008 self.op_core.ptr, 2009 p_mult_fine.ptr, 2010 rstr_coarse.ptr, 2011 basis_coarse.ptr, 2012 interpCtoF.as_ptr(), 2013 &mut ptr_coarse, 2014 &mut ptr_prolong, 2015 &mut ptr_restrict, 2016 ) 2017 }; 2018 self.op_core.check_error(ierr)?; 2019 let op_coarse = Operator::from_raw(ptr_coarse)?; 2020 let op_prolong = Operator::from_raw(ptr_prolong)?; 2021 let op_restrict = Operator::from_raw(ptr_restrict)?; 2022 Ok((op_coarse, op_prolong, op_restrict)) 2023 } 2024 } 2025 2026 // ----------------------------------------------------------------------------- 2027 // Composite Operator 2028 // ----------------------------------------------------------------------------- 2029 impl<'a> CompositeOperator<'a> { 2030 // Constructor 2031 pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> { 2032 let mut ptr = std::ptr::null_mut(); 2033 let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) }; 2034 ceed.check_error(ierr)?; 2035 Ok(Self { 2036 op_core: OperatorCore { 2037 ptr, 2038 _lifeline: PhantomData, 2039 }, 2040 }) 2041 } 2042 2043 /// Apply Operator to a vector 2044 /// 2045 /// * `input` - Input Vector 2046 /// * `output` - Output Vector 2047 /// 2048 /// ``` 2049 /// # use libceed::prelude::*; 2050 /// # fn main() -> libceed::Result<()> { 2051 /// # let ceed = libceed::Ceed::default_init(); 2052 /// let ne = 4; 2053 /// let p = 3; 2054 /// let q = 4; 2055 /// let ndofs = p * 
ne - ne + 1; 2056 /// 2057 /// // Vectors 2058 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 2059 /// let mut qdata_mass = ceed.vector(ne * q)?; 2060 /// qdata_mass.set_value(0.0); 2061 /// let mut qdata_diff = ceed.vector(ne * q)?; 2062 /// qdata_diff.set_value(0.0); 2063 /// let mut u = ceed.vector(ndofs)?; 2064 /// u.set_value(1.0); 2065 /// let mut v = ceed.vector(ndofs)?; 2066 /// v.set_value(0.0); 2067 /// 2068 /// // Restrictions 2069 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2070 /// for i in 0..ne { 2071 /// indx[2 * i + 0] = i as i32; 2072 /// indx[2 * i + 1] = (i + 1) as i32; 2073 /// } 2074 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2075 /// let mut indu: Vec<i32> = vec![0; p * ne]; 2076 /// for i in 0..ne { 2077 /// indu[p * i + 0] = i as i32; 2078 /// indu[p * i + 1] = (i + 1) as i32; 2079 /// indu[p * i + 2] = (i + 2) as i32; 2080 /// } 2081 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 2082 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2083 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2084 /// 2085 /// // Bases 2086 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2087 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 2088 /// 2089 /// // Build quadrature data 2090 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2091 /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2092 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2093 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2094 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2095 /// .apply(&x, &mut qdata_mass)?; 2096 /// 2097 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2098 /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2099 /// .field("dx", &rx, &bx, VectorOpt::Active)? 
2100 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2101 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2102 /// .apply(&x, &mut qdata_diff)?; 2103 /// 2104 /// // Application operator 2105 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2106 /// let op_mass = ceed 2107 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2108 /// .field("u", &ru, &bu, VectorOpt::Active)? 2109 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2110 /// .field("v", &ru, &bu, VectorOpt::Active)?; 2111 /// 2112 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2113 /// let op_diff = ceed 2114 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2115 /// .field("du", &ru, &bu, VectorOpt::Active)? 2116 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2117 /// .field("dv", &ru, &bu, VectorOpt::Active)?; 2118 /// 2119 /// let op_composite = ceed 2120 /// .composite_operator()? 2121 /// .sub_operator(&op_mass)? 
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(0.0);
    /// op_composite.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegate to the shared operator core; `output` is overwritten with
        // the result (contrast with `apply_add`, which sums into it).
        self.op_core.apply(input, output)
    }

    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_mass)?;
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_diff)?;
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("du", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
    ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let op_composite = ceed
    ///     .composite_operator()?
    ///     .sub_operator(&op_mass)?
2219 /// .sub_operator(&op_diff)?; 2220 /// 2221 /// v.set_value(1.0); 2222 /// op_composite.apply_add(&u, &mut v)?; 2223 /// 2224 /// // Check 2225 /// let sum: Scalar = v.view()?.iter().sum(); 2226 /// assert!( 2227 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 2228 /// "Incorrect interval length computed" 2229 /// ); 2230 /// # Ok(()) 2231 /// # } 2232 /// ``` 2233 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 2234 self.op_core.apply_add(input, output) 2235 } 2236 2237 /// Add a sub-Operator to a Composite Operator 2238 /// 2239 /// * `subop` - Sub-Operator 2240 /// 2241 /// ``` 2242 /// # use libceed::prelude::*; 2243 /// # fn main() -> libceed::Result<()> { 2244 /// # let ceed = libceed::Ceed::default_init(); 2245 /// let mut op = ceed.composite_operator()?; 2246 /// 2247 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2248 /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?; 2249 /// op = op.sub_operator(&op_mass)?; 2250 /// 2251 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2252 /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?; 2253 /// op = op.sub_operator(&op_diff)?; 2254 /// # Ok(()) 2255 /// # } 2256 /// ``` 2257 #[allow(unused_mut)] 2258 pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> { 2259 let ierr = 2260 unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) }; 2261 self.op_core.check_error(ierr)?; 2262 Ok(self) 2263 } 2264 2265 /// Check if CompositeOperator is setup correctly 2266 /// 2267 /// ``` 2268 /// # use libceed::prelude::*; 2269 /// # fn main() -> libceed::Result<()> { 2270 /// # let ceed = libceed::Ceed::default_init(); 2271 /// let ne = 4; 2272 /// let p = 3; 2273 /// let q = 4; 2274 /// let ndofs = p * ne - ne + 1; 2275 /// 2276 /// // Restrictions 2277 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2278 /// for i in 
0..ne { 2279 /// indx[2 * i + 0] = i as i32; 2280 /// indx[2 * i + 1] = (i + 1) as i32; 2281 /// } 2282 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2283 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2284 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2285 /// 2286 /// // Bases 2287 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2288 /// 2289 /// // Build quadrature data 2290 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2291 /// let op_build_mass = ceed 2292 /// .operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2293 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2294 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2295 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2296 /// 2297 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2298 /// let op_build_diff = ceed 2299 /// .operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2300 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2301 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2302 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2303 /// 2304 /// let op_build = ceed 2305 /// .composite_operator()? 2306 /// .sub_operator(&op_build_mass)? 2307 /// .sub_operator(&op_build_diff)?; 2308 /// 2309 /// // Check 2310 /// assert!(op_build.check().is_ok(), "Operator setup incorrectly"); 2311 /// # Ok(()) 2312 /// # } 2313 /// ``` 2314 pub fn check(self) -> crate::Result<Self> { 2315 self.op_core.check()?; 2316 Ok(self) 2317 } 2318 2319 /// Assemble the diagonal of a square linear Operator 2320 /// 2321 /// This overwrites a Vector with the diagonal of a linear Operator. 2322 /// 2323 /// Note: Currently only non-composite Operators with a single field and 2324 /// composite Operators with single field sub-operators are supported. 
2325 /// 2326 /// * `op` - Operator to assemble QFunction 2327 /// * `assembled` - Vector to store assembled Operator diagonal 2328 pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2329 self.op_core.linear_assemble_add_diagonal(assembled) 2330 } 2331 2332 /// Assemble the point block diagonal of a square linear Operator 2333 /// 2334 /// This overwrites a Vector with the point block diagonal of a linear 2335 /// Operator. 2336 /// 2337 /// Note: Currently only non-composite Operators with a single field and 2338 /// composite Operators with single field sub-operators are supported. 2339 /// 2340 /// * `op` - Operator to assemble QFunction 2341 /// * `assembled` - Vector to store assembled Operator diagonal 2342 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2343 self.op_core.linear_assemble_add_diagonal(assembled) 2344 } 2345 2346 /// Assemble the diagonal of a square linear Operator 2347 /// 2348 /// This overwrites a Vector with the diagonal of a linear Operator. 2349 /// 2350 /// Note: Currently only non-composite Operators with a single field and 2351 /// composite Operators with single field sub-operators are supported. 2352 /// 2353 /// * `op` - Operator to assemble QFunction 2354 /// * `assembled` - Vector to store assembled CeedOperator point block 2355 /// diagonal, provided in row-major form with an 2356 /// `ncomp * ncomp` block at each node. The dimensions of 2357 /// this vector are derived from the active vector for 2358 /// the CeedOperator. The array has shape 2359 /// `[nodes, component out, component in]`. 2360 pub fn linear_assemble_point_block_diagonal( 2361 &self, 2362 assembled: &mut Vector, 2363 ) -> crate::Result<i32> { 2364 self.op_core.linear_assemble_add_diagonal(assembled) 2365 } 2366 2367 /// Assemble the diagonal of a square linear Operator 2368 /// 2369 /// This sums into a Vector with the diagonal of a linear Operator. 
2370 /// 2371 /// Note: Currently only non-composite Operators with a single field and 2372 /// composite Operators with single field sub-operators are supported. 2373 /// 2374 /// * `op` - Operator to assemble QFunction 2375 /// * `assembled` - Vector to store assembled CeedOperator point block 2376 /// diagonal, provided in row-major form with an 2377 /// `ncomp * ncomp` block at each node. The dimensions of 2378 /// this vector are derived from the active vector for 2379 /// the CeedOperator. The array has shape 2380 /// `[nodes, component out, component in]`. 2381 pub fn linear_assemble_add_point_block_diagonal( 2382 &self, 2383 assembled: &mut Vector, 2384 ) -> crate::Result<i32> { 2385 self.op_core.linear_assemble_add_diagonal(assembled) 2386 } 2387 } 2388 2389 // ----------------------------------------------------------------------------- 2390