1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors. 2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. 3 // 4 // SPDX-License-Identifier: BSD-2-Clause 5 // 6 // This file is part of CEED: http://github.com/ceed 7 8 //! A Ceed Operator defines the finite/spectral element operator associated to a 9 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions, 10 //! Ceed Bases, and Ceed QFunctions. 11 12 use crate::prelude::*; 13 14 // ----------------------------------------------------------------------------- 15 // Operator Field context wrapper 16 // ----------------------------------------------------------------------------- 17 #[derive(Debug)] 18 pub struct OperatorField<'a> { 19 pub(crate) ptr: bind_ceed::CeedOperatorField, 20 _lifeline: PhantomData<&'a ()>, 21 } 22 23 // ----------------------------------------------------------------------------- 24 // Implementations 25 // ----------------------------------------------------------------------------- 26 impl<'a> OperatorField<'a> { 27 /// Get the name of an OperatorField 28 /// 29 /// ``` 30 /// # use libceed::prelude::*; 31 /// # fn main() -> libceed::Result<()> { 32 /// # let ceed = libceed::Ceed::default_init(); 33 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 34 /// 35 /// // Operator field arguments 36 /// let ne = 3; 37 /// let q = 4 as usize; 38 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 39 /// for i in 0..ne { 40 /// ind[2 * i + 0] = i as i32; 41 /// ind[2 * i + 1] = (i + 1) as i32; 42 /// } 43 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 44 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 45 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 46 /// 47 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 48 /// 49 /// // Operator fields 50 /// let op = ceed 51 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 52 /// .field("dx", &r, &b, VectorOpt::Active)? 53 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 54 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 55 /// 56 /// let inputs = op.inputs()?; 57 /// 58 /// assert_eq!(inputs[0].name(), "dx", "Incorrect input name"); 59 /// assert_eq!(inputs[1].name(), "weights", "Incorrect input name"); 60 /// # Ok(()) 61 /// # } 62 /// ``` 63 pub fn name(&self) -> &str { 64 let mut name_ptr: *mut std::os::raw::c_char = std::ptr::null_mut(); 65 unsafe { 66 bind_ceed::CeedOperatorFieldGetName(self.ptr, &mut name_ptr); 67 } 68 unsafe { CStr::from_ptr(name_ptr) }.to_str().unwrap() 69 } 70 71 /// Get the ElemRestriction of an OperatorField 72 /// 73 /// ``` 74 /// # use libceed::prelude::*; 75 /// # fn main() -> libceed::Result<()> { 76 /// # let ceed = libceed::Ceed::default_init(); 77 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 78 /// 79 /// // Operator field arguments 80 /// let ne = 3; 81 /// let q = 4 as usize; 82 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 83 /// for i in 0..ne { 84 /// ind[2 * i + 0] = i as i32; 85 /// ind[2 * i + 1] = (i + 1) as i32; 86 /// } 87 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 88 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 89 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 90 /// 91 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 92 /// 93 /// // Operator fields 94 /// let op = ceed 95 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 96 /// .field("dx", &r, &b, VectorOpt::Active)? 97 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 98 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 99 /// 100 /// let inputs = op.inputs()?; 101 /// 102 /// assert!( 103 /// inputs[0].elem_restriction().is_some(), 104 /// "Incorrect field ElemRestriction" 105 /// ); 106 /// if let ElemRestrictionOpt::Some(r) = inputs[0].elem_restriction() { 107 /// assert_eq!( 108 /// r.num_elements(), 109 /// ne, 110 /// "Incorrect field ElemRestriction number of elements" 111 /// ); 112 /// } 113 /// 114 /// assert!( 115 /// inputs[1].elem_restriction().is_none(), 116 /// "Incorrect field ElemRestriction" 117 /// ); 118 /// # Ok(()) 119 /// # } 120 /// ``` 121 pub fn elem_restriction(&self) -> ElemRestrictionOpt { 122 let mut ptr = std::ptr::null_mut(); 123 unsafe { 124 bind_ceed::CeedOperatorFieldGetElemRestriction(self.ptr, &mut ptr); 125 } 126 if ptr == unsafe { bind_ceed::CEED_ELEMRESTRICTION_NONE } { 127 ElemRestrictionOpt::None 128 } else { 129 let slice = unsafe { 130 std::slice::from_raw_parts( 131 &ptr as *const bind_ceed::CeedElemRestriction as *const crate::ElemRestriction, 132 1 as usize, 133 ) 134 }; 135 ElemRestrictionOpt::Some(&slice[0]) 136 } 137 } 138 139 /// Get the Basis of an OperatorField 140 /// 141 /// ``` 142 /// # use libceed::prelude::*; 143 /// # fn main() -> libceed::Result<()> { 144 /// # let ceed = libceed::Ceed::default_init(); 145 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 146 /// 147 /// // Operator field arguments 148 /// let ne = 3; 149 /// let q = 4 as usize; 150 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 151 /// for i in 0..ne { 152 /// ind[2 * i + 0] = i as i32; 153 /// ind[2 * i + 1] = (i + 1) as i32; 154 /// } 155 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 156 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 157 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 158 /// 159 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 160 /// 161 /// // Operator fields 162 /// let op = ceed 163 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 164 /// .field("dx", &r, &b, VectorOpt::Active)? 165 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 166 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 167 /// 168 /// let inputs = op.inputs()?; 169 /// 170 /// assert!(inputs[0].basis().is_some(), "Incorrect field Basis"); 171 /// if let BasisOpt::Some(b) = inputs[0].basis() { 172 /// assert_eq!( 173 /// b.num_quadrature_points(), 174 /// q, 175 /// "Incorrect field Basis number of quadrature points" 176 /// ); 177 /// } 178 /// assert!(inputs[1].basis().is_some(), "Incorrect field Basis"); 179 /// if let BasisOpt::Some(b) = inputs[0].basis() { 180 /// assert_eq!( 181 /// b.num_quadrature_points(), 182 /// q, 183 /// "Incorrect field Basis number of quadrature points" 184 /// ); 185 /// } 186 /// 187 /// let outputs = op.outputs()?; 188 /// 189 /// assert!(outputs[0].basis().is_collocated(), "Incorrect field Basis"); 190 /// # Ok(()) 191 /// # } 192 /// ``` 193 pub fn basis(&self) -> BasisOpt { 194 let mut ptr = std::ptr::null_mut(); 195 unsafe { 196 bind_ceed::CeedOperatorFieldGetBasis(self.ptr, &mut ptr); 197 } 198 if ptr == unsafe { bind_ceed::CEED_BASIS_COLLOCATED } { 199 BasisOpt::Collocated 200 } else { 201 let slice = unsafe { 202 std::slice::from_raw_parts( 203 &ptr as *const bind_ceed::CeedBasis as *const crate::Basis, 204 1 as usize, 205 ) 206 }; 207 BasisOpt::Some(&slice[0]) 208 } 209 } 210 211 /// Get the Vector of an OperatorField 212 /// 213 /// ``` 214 /// # use libceed::prelude::*; 215 /// # fn main() -> libceed::Result<()> { 216 /// # let ceed = libceed::Ceed::default_init(); 217 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 218 /// 219 /// // Operator field arguments 220 /// let ne = 3; 221 /// let q = 4 as usize; 222 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 223 /// for i in 0..ne { 224 /// ind[2 * i + 0] = i as i32; 225 /// ind[2 * i + 1] = (i + 1) as i32; 226 /// } 227 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 228 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 229 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 230 /// 231 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 232 /// 233 /// // Operator fields 234 /// let op = ceed 235 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 236 /// .field("dx", &r, &b, VectorOpt::Active)? 237 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 238 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 239 /// 240 /// let inputs = op.inputs()?; 241 /// 242 /// assert!(inputs[0].vector().is_active(), "Incorrect field Vector"); 243 /// assert!(inputs[1].vector().is_none(), "Incorrect field Vector"); 244 /// # Ok(()) 245 /// # } 246 /// ``` 247 pub fn vector(&self) -> VectorOpt { 248 let mut ptr = std::ptr::null_mut(); 249 unsafe { 250 bind_ceed::CeedOperatorFieldGetVector(self.ptr, &mut ptr); 251 } 252 if ptr == unsafe { bind_ceed::CEED_VECTOR_ACTIVE } { 253 VectorOpt::Active 254 } else if ptr == unsafe { bind_ceed::CEED_VECTOR_NONE } { 255 VectorOpt::None 256 } else { 257 let slice = unsafe { 258 std::slice::from_raw_parts( 259 &ptr as *const bind_ceed::CeedVector as *const crate::Vector, 260 1 as usize, 261 ) 262 }; 263 VectorOpt::Some(&slice[0]) 264 } 265 } 266 } 267 268 // ----------------------------------------------------------------------------- 269 // Operator context wrapper 270 // ----------------------------------------------------------------------------- 271 #[derive(Debug)] 272 pub(crate) struct OperatorCore<'a> { 273 ptr: bind_ceed::CeedOperator, 274 _lifeline: PhantomData<&'a ()>, 275 } 276 277 #[derive(Debug)] 278 pub struct Operator<'a> { 279 op_core: OperatorCore<'a>, 280 } 281 282 #[derive(Debug)] 283 pub struct CompositeOperator<'a> { 284 op_core: OperatorCore<'a>, 285 } 286 287 // ----------------------------------------------------------------------------- 288 // Destructor 289 // ----------------------------------------------------------------------------- 290 impl<'a> Drop for OperatorCore<'a> { 291 fn drop(&mut self) { 292 unsafe { 293 bind_ceed::CeedOperatorDestroy(&mut self.ptr); 294 } 295 } 296 } 297 298 // ----------------------------------------------------------------------------- 299 // Display 300 // ----------------------------------------------------------------------------- 301 impl<'a> fmt::Display for OperatorCore<'a> { 302 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 303 let mut ptr = std::ptr::null_mut(); 304 let mut sizeloc = crate::MAX_BUFFER_LENGTH; 305 let cstring = unsafe { 306 let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc); 307 bind_ceed::CeedOperatorView(self.ptr, file); 308 bind_ceed::fclose(file); 309 CString::from_raw(ptr) 310 }; 311 cstring.to_string_lossy().fmt(f) 312 } 313 } 314 315 /// View an Operator 316 /// 317 /// ``` 318 /// # use libceed::prelude::*; 319 /// # fn main() -> libceed::Result<()> { 320 /// # let ceed = libceed::Ceed::default_init(); 321 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 322 /// 323 /// // Operator field arguments 324 /// let ne = 3; 325 /// let q = 4 as usize; 326 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 327 /// for i in 0..ne { 328 /// ind[2 * i + 0] = i as i32; 329 /// ind[2 * i + 1] = (i + 1) as i32; 330 /// } 331 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 332 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 333 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 334 /// 335 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 336 /// 337 /// // Operator fields 338 /// let op = ceed 339 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 340 /// .name("mass")? 341 /// .field("dx", &r, &b, VectorOpt::Active)? 342 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 343 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 344 /// 345 /// println!("{}", op); 346 /// # Ok(()) 347 /// # } 348 /// ``` 349 impl<'a> fmt::Display for Operator<'a> { 350 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 351 self.op_core.fmt(f) 352 } 353 } 354 355 /// View a composite Operator 356 /// 357 /// ``` 358 /// # use libceed::prelude::*; 359 /// # fn main() -> libceed::Result<()> { 360 /// # let ceed = libceed::Ceed::default_init(); 361 /// 362 /// // Sub operator field arguments 363 /// let ne = 3; 364 /// let q = 4 as usize; 365 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 366 /// for i in 0..ne { 367 /// ind[2 * i + 0] = i as i32; 368 /// ind[2 * i + 1] = (i + 1) as i32; 369 /// } 370 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 371 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 372 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 373 /// 374 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 375 /// 376 /// let qdata_mass = ceed.vector(q * ne)?; 377 /// let qdata_diff = ceed.vector(q * ne)?; 378 /// 379 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 380 /// let op_mass = ceed 381 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 382 /// .name("Mass term")? 383 /// .field("u", &r, &b, VectorOpt::Active)? 384 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 385 /// .field("v", &r, &b, VectorOpt::Active)?; 386 /// 387 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 388 /// let op_diff = ceed 389 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 390 /// .name("Poisson term")? 391 /// .field("du", &r, &b, VectorOpt::Active)? 392 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 393 /// .field("dv", &r, &b, VectorOpt::Active)?; 394 /// 395 /// let op = ceed 396 /// .composite_operator()? 397 /// .name("Screened Poisson")? 398 /// .sub_operator(&op_mass)? 399 /// .sub_operator(&op_diff)?; 400 /// 401 /// println!("{}", op); 402 /// # Ok(()) 403 /// # } 404 /// ``` 405 impl<'a> fmt::Display for CompositeOperator<'a> { 406 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 407 self.op_core.fmt(f) 408 } 409 } 410 411 // ----------------------------------------------------------------------------- 412 // Core functionality 413 // ----------------------------------------------------------------------------- 414 impl<'a> OperatorCore<'a> { 415 // Error handling 416 #[doc(hidden)] 417 fn check_error(&self, ierr: i32) -> crate::Result<i32> { 418 let mut ptr = std::ptr::null_mut(); 419 unsafe { 420 bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr); 421 } 422 crate::check_error(ptr, ierr) 423 } 424 425 // Common implementations 426 pub fn check(&self) -> crate::Result<i32> { 427 let ierr = unsafe { bind_ceed::CeedOperatorCheckReady(self.ptr) }; 428 self.check_error(ierr) 429 } 430 431 pub fn name(&self, name: &str) -> crate::Result<i32> { 432 let name_c = CString::new(name).expect("CString::new failed"); 433 let ierr = unsafe { bind_ceed::CeedOperatorSetName(self.ptr, name_c.as_ptr()) }; 434 self.check_error(ierr) 435 } 436 437 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 438 let ierr = unsafe { 439 bind_ceed::CeedOperatorApply( 440 self.ptr, 441 input.ptr, 442 output.ptr, 443 bind_ceed::CEED_REQUEST_IMMEDIATE, 444 ) 445 }; 446 self.check_error(ierr) 447 } 448 449 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 450 let ierr = unsafe { 451 bind_ceed::CeedOperatorApplyAdd( 452 self.ptr, 453 input.ptr, 454 output.ptr, 455 bind_ceed::CEED_REQUEST_IMMEDIATE, 456 ) 457 }; 458 self.check_error(ierr) 459 } 460 461 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 462 let ierr = unsafe { 463 bind_ceed::CeedOperatorLinearAssembleDiagonal( 464 self.ptr, 465 assembled.ptr, 466 bind_ceed::CEED_REQUEST_IMMEDIATE, 467 ) 468 }; 469 self.check_error(ierr) 470 } 471 472 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 473 let ierr = unsafe { 474 bind_ceed::CeedOperatorLinearAssembleAddDiagonal( 475 self.ptr, 476 assembled.ptr, 477 bind_ceed::CEED_REQUEST_IMMEDIATE, 478 ) 479 }; 480 self.check_error(ierr) 481 } 482 483 pub fn linear_assemble_point_block_diagonal( 484 &self, 485 assembled: &mut Vector, 486 ) -> crate::Result<i32> { 487 let ierr = unsafe { 488 bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal( 489 self.ptr, 490 assembled.ptr, 491 bind_ceed::CEED_REQUEST_IMMEDIATE, 492 ) 493 }; 494 self.check_error(ierr) 495 } 496 497 pub fn linear_assemble_add_point_block_diagonal( 498 &self, 499 assembled: &mut Vector, 500 ) -> crate::Result<i32> { 501 let ierr = unsafe { 502 bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal( 503 self.ptr, 504 assembled.ptr, 505 bind_ceed::CEED_REQUEST_IMMEDIATE, 506 ) 507 }; 508 self.check_error(ierr) 509 } 510 } 511 512 // ----------------------------------------------------------------------------- 513 // Operator 514 // ----------------------------------------------------------------------------- 515 impl<'a> Operator<'a> { 516 // Constructor 517 pub fn create<'b>( 518 ceed: &crate::Ceed, 519 qf: impl Into<QFunctionOpt<'b>>, 520 dqf: impl Into<QFunctionOpt<'b>>, 521 dqfT: impl Into<QFunctionOpt<'b>>, 522 ) -> crate::Result<Self> { 523 let mut ptr = std::ptr::null_mut(); 524 let ierr = unsafe { 525 bind_ceed::CeedOperatorCreate( 526 ceed.ptr, 527 qf.into().to_raw(), 528 dqf.into().to_raw(), 529 dqfT.into().to_raw(), 530 &mut ptr, 531 ) 532 }; 533 ceed.check_error(ierr)?; 534 Ok(Self { 535 op_core: OperatorCore { 536 ptr, 537 _lifeline: PhantomData, 538 }, 539 }) 540 } 541 542 fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> { 543 Ok(Self { 544 op_core: OperatorCore { 545 ptr, 546 _lifeline: PhantomData, 547 }, 548 }) 549 } 550 551 /// Set name for Operator printing 552 /// 553 /// * 'name' - Name to set 554 /// 555 /// ``` 556 /// # use libceed::prelude::*; 557 /// # fn main() -> libceed::Result<()> { 558 /// # let ceed = libceed::Ceed::default_init(); 559 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 560 /// 561 /// // Operator field arguments 562 /// let ne = 3; 563 /// let q = 4 as usize; 564 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 565 /// for i in 0..ne { 566 /// ind[2 * i + 0] = i as i32; 567 /// ind[2 * i + 1] = (i + 1) as i32; 568 /// } 569 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 570 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 571 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 572 /// 573 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 574 /// 575 /// // Operator fields 576 /// let op = ceed 577 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 578 /// .name("mass")? 579 /// .field("dx", &r, &b, VectorOpt::Active)? 580 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 581 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 582 /// # Ok(()) 583 /// # } 584 /// ``` 585 #[allow(unused_mut)] 586 pub fn name(mut self, name: &str) -> crate::Result<Self> { 587 self.op_core.name(name)?; 588 Ok(self) 589 } 590 591 /// Apply Operator to a vector 592 /// 593 /// * `input` - Input Vector 594 /// * `output` - Output Vector 595 /// 596 /// ``` 597 /// # use libceed::prelude::*; 598 /// # fn main() -> libceed::Result<()> { 599 /// # let ceed = libceed::Ceed::default_init(); 600 /// let ne = 4; 601 /// let p = 3; 602 /// let q = 4; 603 /// let ndofs = p * ne - ne + 1; 604 /// 605 /// // Vectors 606 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 607 /// let mut qdata = ceed.vector(ne * q)?; 608 /// qdata.set_value(0.0); 609 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 610 /// let mut v = ceed.vector(ndofs)?; 611 /// v.set_value(0.0); 612 /// 613 /// // Restrictions 614 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 615 /// for i in 0..ne { 616 /// indx[2 * i + 0] = i as i32; 617 /// indx[2 * i + 1] = (i + 1) as i32; 618 /// } 619 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 620 /// let mut indu: Vec<i32> = vec![0; p * ne]; 621 /// for i in 0..ne { 622 /// indu[p * i + 0] = i as i32; 623 /// indu[p * i + 1] = (i + 1) as i32; 624 /// indu[p * i + 2] = (i + 2) as i32; 625 /// } 626 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 627 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 628 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 629 /// 630 /// // Bases 631 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 632 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 633 /// 634 /// // Build quadrature data 635 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 636 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 637 /// .field("dx", &rx, &bx, VectorOpt::Active)? 638 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 639 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 640 /// .apply(&x, &mut qdata)?; 641 /// 642 /// // Mass operator 643 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 644 /// let op_mass = ceed 645 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 646 /// .field("u", &ru, &bu, VectorOpt::Active)? 647 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 648 /// .field("v", &ru, &bu, VectorOpt::Active)?; 649 /// 650 /// v.set_value(0.0); 651 /// op_mass.apply(&u, &mut v)?; 652 /// 653 /// // Check 654 /// let sum: Scalar = v.view()?.iter().sum(); 655 /// assert!( 656 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 657 /// "Incorrect interval length computed" 658 /// ); 659 /// # Ok(()) 660 /// # } 661 /// ``` 662 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 663 self.op_core.apply(input, output) 664 } 665 666 /// Apply Operator to a vector and add result to output vector 667 /// 668 /// * `input` - Input Vector 669 /// * `output` - Output Vector 670 /// 671 /// ``` 672 /// # use libceed::prelude::*; 673 /// # fn main() -> libceed::Result<()> { 674 /// # let ceed = libceed::Ceed::default_init(); 675 /// let ne = 4; 676 /// let p = 3; 677 /// let q = 4; 678 /// let ndofs = p * ne - ne + 1; 679 /// 680 /// // Vectors 681 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 682 /// let mut qdata = ceed.vector(ne * q)?; 683 /// qdata.set_value(0.0); 684 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 685 /// let mut v = ceed.vector(ndofs)?; 686 /// 687 /// // Restrictions 688 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 689 /// for i in 0..ne { 690 /// indx[2 * i + 0] = i as i32; 691 /// indx[2 * i + 1] = (i + 1) as i32; 692 /// } 693 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 694 /// let mut indu: Vec<i32> = vec![0; p * ne]; 695 /// for i in 0..ne { 696 /// indu[p * i + 0] = i as i32; 697 /// indu[p * i + 1] = (i + 1) as i32; 698 /// indu[p * i + 2] = (i + 2) as i32; 699 /// } 700 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 701 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 702 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 703 /// 704 /// // Bases 705 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 706 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 707 /// 708 /// // Build quadrature data 709 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 710 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 711 /// .field("dx", &rx, &bx, VectorOpt::Active)? 712 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 713 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 714 /// .apply(&x, &mut qdata)?; 715 /// 716 /// // Mass operator 717 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 718 /// let op_mass = ceed 719 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 720 /// .field("u", &ru, &bu, VectorOpt::Active)? 721 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 722 /// .field("v", &ru, &bu, VectorOpt::Active)?; 723 /// 724 /// v.set_value(1.0); 725 /// op_mass.apply_add(&u, &mut v)?; 726 /// 727 /// // Check 728 /// let sum: Scalar = v.view()?.iter().sum(); 729 /// assert!( 730 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 731 /// "Incorrect interval length computed and added" 732 /// ); 733 /// # Ok(()) 734 /// # } 735 /// ``` 736 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 737 self.op_core.apply_add(input, output) 738 } 739 740 /// Provide a field to a Operator for use by its QFunction 741 /// 742 /// * `fieldname` - Name of the field (to be matched with the name used by 743 /// the QFunction) 744 /// * `r` - ElemRestriction 745 /// * `b` - Basis in which the field resides or `BasisCollocated` if 746 /// collocated with quadrature points 747 /// * `v` - Vector to be used by Operator or `VectorActive` if field 748 /// is active or `VectorNone` if using `Weight` with the 749 /// QFunction 750 /// 751 /// 752 /// ``` 753 /// # use libceed::prelude::*; 754 /// # fn main() -> libceed::Result<()> { 755 /// # let ceed = libceed::Ceed::default_init(); 756 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 757 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 758 /// 759 /// // Operator field arguments 760 /// let ne = 3; 761 /// let q = 4; 762 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 763 /// for i in 0..ne { 764 /// ind[2 * i + 0] = i as i32; 765 /// ind[2 * i + 1] = (i + 1) as i32; 766 /// } 767 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 768 /// 769 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 770 /// 771 /// // Operator field 772 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 773 /// # Ok(()) 774 /// # } 775 /// ``` 776 #[allow(unused_mut)] 777 pub fn field<'b>( 778 mut self, 779 fieldname: &str, 780 r: impl Into<ElemRestrictionOpt<'b>>, 781 b: impl Into<BasisOpt<'b>>, 782 v: impl Into<VectorOpt<'b>>, 783 ) -> crate::Result<Self> { 784 let fieldname = CString::new(fieldname).expect("CString::new failed"); 785 let fieldname = fieldname.as_ptr() as *const i8; 786 let ierr = unsafe { 787 bind_ceed::CeedOperatorSetField( 788 self.op_core.ptr, 789 fieldname, 790 r.into().to_raw(), 791 b.into().to_raw(), 792 v.into().to_raw(), 793 ) 794 }; 795 self.op_core.check_error(ierr)?; 796 Ok(self) 797 } 798 799 /// Get a slice of Operator inputs 800 /// 801 /// ``` 802 /// # use libceed::prelude::*; 803 /// # fn main() -> libceed::Result<()> { 804 /// # let ceed = libceed::Ceed::default_init(); 805 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 806 /// 807 /// // Operator field arguments 808 /// let ne = 3; 809 /// let q = 4 as usize; 810 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 811 /// for i in 0..ne { 812 /// ind[2 * i + 0] = i as i32; 813 /// ind[2 * i + 1] = (i + 1) as i32; 814 /// } 815 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 816 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 817 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 818 /// 819 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 820 /// 821 /// // Operator fields 822 /// let op = ceed 823 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 824 /// .field("dx", &r, &b, VectorOpt::Active)? 825 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 826 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 827 /// 828 /// let inputs = op.inputs()?; 829 /// 830 /// assert_eq!(inputs.len(), 2, "Incorrect inputs array"); 831 /// # Ok(()) 832 /// # } 833 /// ``` 834 pub fn inputs(&self) -> crate::Result<&[crate::OperatorField]> { 835 // Get array of raw C pointers for inputs 836 let mut num_inputs = 0; 837 let mut inputs_ptr = std::ptr::null_mut(); 838 let ierr = unsafe { 839 bind_ceed::CeedOperatorGetFields( 840 self.op_core.ptr, 841 &mut num_inputs, 842 &mut inputs_ptr, 843 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 844 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 845 ) 846 }; 847 self.op_core.check_error(ierr)?; 848 // Convert raw C pointers to fixed length slice 849 let inputs_slice = unsafe { 850 std::slice::from_raw_parts( 851 inputs_ptr as *const crate::OperatorField, 852 num_inputs as usize, 853 ) 854 }; 855 Ok(inputs_slice) 856 } 857 858 /// Get a slice of Operator outputs 859 /// 860 /// ``` 861 /// # use libceed::prelude::*; 862 /// # fn main() -> libceed::Result<()> { 863 /// # let ceed = libceed::Ceed::default_init(); 864 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 865 /// 866 /// // Operator field arguments 867 /// let ne = 3; 868 /// let q = 4 as usize; 869 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 870 /// for i in 0..ne { 871 /// ind[2 * i + 0] = i as i32; 872 /// ind[2 * i + 1] = (i + 1) as i32; 873 /// } 874 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 875 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 876 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 877 /// 878 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 879 /// 880 /// // Operator fields 881 /// let op = ceed 882 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 883 /// .field("dx", &r, &b, VectorOpt::Active)? 884 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 885 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 886 /// 887 /// let outputs = op.outputs()?; 888 /// 889 /// assert_eq!(outputs.len(), 1, "Incorrect outputs array"); 890 /// # Ok(()) 891 /// # } 892 /// ``` 893 pub fn outputs(&self) -> crate::Result<&[crate::OperatorField]> { 894 // Get array of raw C pointers for outputs 895 let mut num_outputs = 0; 896 let mut outputs_ptr = std::ptr::null_mut(); 897 let ierr = unsafe { 898 bind_ceed::CeedOperatorGetFields( 899 self.op_core.ptr, 900 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 901 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 902 &mut num_outputs, 903 &mut outputs_ptr, 904 ) 905 }; 906 self.op_core.check_error(ierr)?; 907 // Convert raw C pointers to fixed length slice 908 let outputs_slice = unsafe { 909 std::slice::from_raw_parts( 910 outputs_ptr as *const crate::OperatorField, 911 num_outputs as usize, 912 ) 913 }; 914 Ok(outputs_slice) 915 } 916 917 /// Check if Operator is setup correctly 918 /// 919 /// ``` 920 /// # use libceed::prelude::*; 921 /// # fn main() -> libceed::Result<()> { 922 /// # let ceed = libceed::Ceed::default_init(); 923 /// let ne = 4; 924 /// let p = 3; 925 /// let q = 4; 926 /// let ndofs = p * ne - ne + 1; 927 /// 928 /// // Restrictions 929 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 930 /// for i in 0..ne { 931 /// indx[2 * i + 0] = i as i32; 932 /// indx[2 * i + 1] = (i + 1) as i32; 933 /// } 934 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 935 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 936 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 937 /// 938 /// // Bases 939 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 940 /// 941 /// // Build quadrature data 942 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 943 /// let op_build = ceed 944 /// .operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 945 /// .field("dx", &rx, &bx, VectorOpt::Active)? 946 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 947 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 948 /// 949 /// // Check 950 /// assert!(op_build.check().is_ok(), "Operator setup incorrectly"); 951 /// # Ok(()) 952 /// # } 953 /// ``` 954 pub fn check(self) -> crate::Result<Self> { 955 self.op_core.check()?; 956 Ok(self) 957 } 958 959 /// Get the number of elements associated with an Operator 960 /// 961 /// 962 /// ``` 963 /// # use libceed::prelude::*; 964 /// # fn main() -> libceed::Result<()> { 965 /// # let ceed = libceed::Ceed::default_init(); 966 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 967 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 968 /// 969 /// // Operator field arguments 970 /// let ne = 3; 971 /// let q = 4; 972 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 973 /// for i in 0..ne { 974 /// ind[2 * i + 0] = i as i32; 975 /// ind[2 * i + 1] = (i + 1) as i32; 976 /// } 977 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 978 /// 979 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 980 /// 981 /// // Operator field 982 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 983 /// 984 /// // Check number of elements 985 /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements"); 986 /// # Ok(()) 987 /// # } 988 /// ``` 989 pub fn num_elements(&self) -> usize { 990 let mut nelem = 0; 991 unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) }; 992 usize::try_from(nelem).unwrap() 993 } 994 995 /// Get the number of quadrature points associated with each element of 996 /// an Operator 997 /// 998 /// 999 /// ``` 1000 /// # use libceed::prelude::*; 1001 /// # fn main() -> libceed::Result<()> { 1002 /// # let ceed = libceed::Ceed::default_init(); 1003 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 1004 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 1005 /// 1006 /// // Operator field arguments 1007 /// let ne = 3; 1008 /// let q = 4; 1009 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 1010 /// for i in 0..ne { 1011 /// ind[2 * i + 0] = i as i32; 1012 /// ind[2 * i + 1] = (i + 1) as i32; 1013 /// } 1014 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 1015 /// 1016 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1017 /// 1018 /// // Operator field 1019 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 1020 /// 1021 /// // Check number of quadrature points 1022 /// assert_eq!( 1023 /// op.num_quadrature_points(), 1024 /// q, 1025 /// "Incorrect number of quadrature points" 1026 /// ); 1027 /// # Ok(()) 1028 /// # } 1029 /// ``` 1030 pub fn num_quadrature_points(&self) -> usize { 1031 let mut Q = 0; 1032 unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) }; 1033 usize::try_from(Q).unwrap() 1034 } 1035 1036 /// Assemble the diagonal of a square linear Operator 1037 /// 1038 /// This overwrites a Vector with the diagonal of a linear Operator. 1039 /// 1040 /// Note: Currently only non-composite Operators with a single field and 1041 /// composite Operators with single field sub-operators are supported. 1042 /// 1043 /// * `op` - Operator to assemble QFunction 1044 /// * `assembled` - Vector to store assembled Operator diagonal 1045 /// 1046 /// ``` 1047 /// # use libceed::prelude::*; 1048 /// # fn main() -> libceed::Result<()> { 1049 /// # let ceed = libceed::Ceed::default_init(); 1050 /// let ne = 4; 1051 /// let p = 3; 1052 /// let q = 4; 1053 /// let ndofs = p * ne - ne + 1; 1054 /// 1055 /// // Vectors 1056 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1057 /// let mut qdata = ceed.vector(ne * q)?; 1058 /// qdata.set_value(0.0); 1059 /// let mut u = ceed.vector(ndofs)?; 1060 /// u.set_value(1.0); 1061 /// let mut v = ceed.vector(ndofs)?; 1062 /// v.set_value(0.0); 1063 /// 1064 /// // Restrictions 1065 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1066 /// for i in 0..ne { 1067 /// indx[2 * i + 0] = i as i32; 1068 /// indx[2 * i + 1] = (i + 1) as i32; 1069 /// } 1070 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1071 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1072 /// for i in 0..ne { 1073 /// indu[p * i + 0] = (2 * i) as i32; 1074 /// indu[p * i + 1] = (2 * i + 1) as i32; 1075 /// indu[p * i + 2] = (2 * i + 2) as i32; 1076 /// } 1077 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 1078 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1079 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1080 /// 1081 /// // Bases 1082 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1083 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 1084 /// 1085 /// // Build quadrature data 1086 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1087 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1088 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1089 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1090 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1091 /// .apply(&x, &mut qdata)?; 1092 /// 1093 /// // Mass operator 1094 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1095 /// let op_mass = ceed 1096 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1097 /// .field("u", &ru, &bu, VectorOpt::Active)? 1098 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1099 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1100 /// 1101 /// // Diagonal 1102 /// let mut diag = ceed.vector(ndofs)?; 1103 /// diag.set_value(0.0); 1104 /// op_mass.linear_assemble_diagonal(&mut diag)?; 1105 /// 1106 /// // Manual diagonal computation 1107 /// let mut true_diag = ceed.vector(ndofs)?; 1108 /// true_diag.set_value(0.0)?; 1109 /// for i in 0..ndofs { 1110 /// u.set_value(0.0); 1111 /// { 1112 /// let mut u_array = u.view_mut()?; 1113 /// u_array[i] = 1.; 1114 /// } 1115 /// 1116 /// op_mass.apply(&u, &mut v)?; 1117 /// 1118 /// { 1119 /// let v_array = v.view()?; 1120 /// let mut true_array = true_diag.view_mut()?; 1121 /// true_array[i] = v_array[i]; 1122 /// } 1123 /// } 1124 /// 1125 /// // Check 1126 /// diag.view()? 1127 /// .iter() 1128 /// .zip(true_diag.view()?.iter()) 1129 /// .for_each(|(computed, actual)| { 1130 /// assert!( 1131 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1132 /// "Diagonal entry incorrect" 1133 /// ); 1134 /// }); 1135 /// # Ok(()) 1136 /// # } 1137 /// ``` 1138 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1139 self.op_core.linear_assemble_diagonal(assembled) 1140 } 1141 1142 /// Assemble the diagonal of a square linear Operator 1143 /// 1144 /// This sums into a Vector with the diagonal of a linear Operator. 1145 /// 1146 /// Note: Currently only non-composite Operators with a single field and 1147 /// composite Operators with single field sub-operators are supported. 1148 /// 1149 /// * `op` - Operator to assemble QFunction 1150 /// * `assembled` - Vector to store assembled Operator diagonal 1151 /// 1152 /// 1153 /// ``` 1154 /// # use libceed::prelude::*; 1155 /// # fn main() -> libceed::Result<()> { 1156 /// # let ceed = libceed::Ceed::default_init(); 1157 /// let ne = 4; 1158 /// let p = 3; 1159 /// let q = 4; 1160 /// let ndofs = p * ne - ne + 1; 1161 /// 1162 /// // Vectors 1163 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1164 /// let mut qdata = ceed.vector(ne * q)?; 1165 /// qdata.set_value(0.0); 1166 /// let mut u = ceed.vector(ndofs)?; 1167 /// u.set_value(1.0); 1168 /// let mut v = ceed.vector(ndofs)?; 1169 /// v.set_value(0.0); 1170 /// 1171 /// // Restrictions 1172 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1173 /// for i in 0..ne { 1174 /// indx[2 * i + 0] = i as i32; 1175 /// indx[2 * i + 1] = (i + 1) as i32; 1176 /// } 1177 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1178 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1179 /// for i in 0..ne { 1180 /// indu[p * i + 0] = (2 * i) as i32; 1181 /// indu[p * i + 1] = (2 * i + 1) as i32; 1182 /// indu[p * i + 2] = (2 * i + 2) as i32; 1183 /// } 1184 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 1185 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1186 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1187 /// 1188 /// // Bases 1189 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1190 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 1191 /// 1192 /// // Build quadrature data 1193 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1194 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1195 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1196 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1197 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1198 /// .apply(&x, &mut qdata)?; 1199 /// 1200 /// // Mass operator 1201 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1202 /// let op_mass = ceed 1203 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1204 /// .field("u", &ru, &bu, VectorOpt::Active)? 1205 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1206 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1207 /// 1208 /// // Diagonal 1209 /// let mut diag = ceed.vector(ndofs)?; 1210 /// diag.set_value(1.0); 1211 /// op_mass.linear_assemble_add_diagonal(&mut diag)?; 1212 /// 1213 /// // Manual diagonal computation 1214 /// let mut true_diag = ceed.vector(ndofs)?; 1215 /// true_diag.set_value(0.0)?; 1216 /// for i in 0..ndofs { 1217 /// u.set_value(0.0); 1218 /// { 1219 /// let mut u_array = u.view_mut()?; 1220 /// u_array[i] = 1.; 1221 /// } 1222 /// 1223 /// op_mass.apply(&u, &mut v)?; 1224 /// 1225 /// { 1226 /// let v_array = v.view()?; 1227 /// let mut true_array = true_diag.view_mut()?; 1228 /// true_array[i] = v_array[i] + 1.0; 1229 /// } 1230 /// } 1231 /// 1232 /// // Check 1233 /// diag.view()? 1234 /// .iter() 1235 /// .zip(true_diag.view()?.iter()) 1236 /// .for_each(|(computed, actual)| { 1237 /// assert!( 1238 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1239 /// "Diagonal entry incorrect" 1240 /// ); 1241 /// }); 1242 /// # Ok(()) 1243 /// # } 1244 /// ``` 1245 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 1246 self.op_core.linear_assemble_add_diagonal(assembled) 1247 } 1248 1249 /// Assemble the point block diagonal of a square linear Operator 1250 /// 1251 /// This overwrites a Vector with the point block diagonal of a linear 1252 /// Operator. 1253 /// 1254 /// Note: Currently only non-composite Operators with a single field and 1255 /// composite Operators with single field sub-operators are supported. 1256 /// 1257 /// * `op` - Operator to assemble QFunction 1258 /// * `assembled` - Vector to store assembled CeedOperator point block 1259 /// diagonal, provided in row-major form with an 1260 /// `ncomp * ncomp` block at each node. The dimensions of 1261 /// this vector are derived from the active vector for 1262 /// the CeedOperator. The array has shape 1263 /// `[nodes, component out, component in]`. 1264 /// 1265 /// ``` 1266 /// # use libceed::prelude::*; 1267 /// # fn main() -> libceed::Result<()> { 1268 /// # let ceed = libceed::Ceed::default_init(); 1269 /// let ne = 4; 1270 /// let p = 3; 1271 /// let q = 4; 1272 /// let ncomp = 2; 1273 /// let ndofs = p * ne - ne + 1; 1274 /// 1275 /// // Vectors 1276 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1277 /// let mut qdata = ceed.vector(ne * q)?; 1278 /// qdata.set_value(0.0); 1279 /// let mut u = ceed.vector(ncomp * ndofs)?; 1280 /// u.set_value(1.0); 1281 /// let mut v = ceed.vector(ncomp * ndofs)?; 1282 /// v.set_value(0.0); 1283 /// 1284 /// // Restrictions 1285 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1286 /// for i in 0..ne { 1287 /// indx[2 * i + 0] = i as i32; 1288 /// indx[2 * i + 1] = (i + 1) as i32; 1289 /// } 1290 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1291 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1292 /// for i in 0..ne { 1293 /// indu[p * i + 0] = (2 * i) as i32; 1294 /// indu[p * i + 1] = (2 * i + 1) as i32; 1295 /// indu[p * i + 2] = (2 * i + 2) as i32; 1296 /// } 1297 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1298 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1299 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1300 /// 1301 /// // Bases 1302 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1303 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1304 /// 1305 /// // Build quadrature data 1306 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1307 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1308 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1309 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1310 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1311 /// .apply(&x, &mut qdata)?; 1312 /// 1313 /// // Mass operator 1314 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1315 /// // Number of quadrature points 1316 /// let q = qdata.len(); 1317 /// 1318 /// // Iterate over quadrature points 1319 /// for i in 0..q { 1320 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1321 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1322 /// } 1323 /// 1324 /// // Return clean error code 1325 /// 0 1326 /// }; 1327 /// 1328 /// let qf_mass = ceed 1329 /// .q_function_interior(1, Box::new(mass_2_comp))? 1330 /// .input("u", 2, EvalMode::Interp)? 1331 /// .input("qdata", 1, EvalMode::None)? 1332 /// .output("v", 2, EvalMode::Interp)?; 1333 /// 1334 /// let op_mass = ceed 1335 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1336 /// .field("u", &ru, &bu, VectorOpt::Active)? 1337 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1338 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1339 /// 1340 /// // Diagonal 1341 /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?; 1342 /// diag.set_value(0.0); 1343 /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?; 1344 /// 1345 /// // Manual diagonal computation 1346 /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?; 1347 /// true_diag.set_value(0.0)?; 1348 /// for i in 0..ndofs { 1349 /// for j in 0..ncomp { 1350 /// u.set_value(0.0); 1351 /// { 1352 /// let mut u_array = u.view_mut()?; 1353 /// u_array[i + j * ndofs] = 1.; 1354 /// } 1355 /// 1356 /// op_mass.apply(&u, &mut v)?; 1357 /// 1358 /// { 1359 /// let v_array = v.view()?; 1360 /// let mut true_array = true_diag.view_mut()?; 1361 /// for k in 0..ncomp { 1362 /// true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; 1363 /// } 1364 /// } 1365 /// } 1366 /// } 1367 /// 1368 /// // Check 1369 /// diag.view()? 1370 /// .iter() 1371 /// .zip(true_diag.view()?.iter()) 1372 /// .for_each(|(computed, actual)| { 1373 /// assert!( 1374 /// (*computed - *actual).abs() < 10.0 * libceed::EPSILON, 1375 /// "Diagonal entry incorrect" 1376 /// ); 1377 /// }); 1378 /// # Ok(()) 1379 /// # } 1380 /// ``` 1381 pub fn linear_assemble_point_block_diagonal( 1382 &self, 1383 assembled: &mut Vector, 1384 ) -> crate::Result<i32> { 1385 self.op_core.linear_assemble_point_block_diagonal(assembled) 1386 } 1387 1388 /// Assemble the point block diagonal of a square linear Operator 1389 /// 1390 /// This sums into a Vector with the point block diagonal of a linear 1391 /// Operator. 1392 /// 1393 /// Note: Currently only non-composite Operators with a single field and 1394 /// composite Operators with single field sub-operators are supported. 1395 /// 1396 /// * `op` - Operator to assemble QFunction 1397 /// * `assembled` - Vector to store assembled CeedOperator point block 1398 /// diagonal, provided in row-major form with an 1399 /// `ncomp * ncomp` block at each node. The dimensions of 1400 /// this vector are derived from the active vector for 1401 /// the CeedOperator. The array has shape 1402 /// `[nodes, component out, component in]`. 1403 /// 1404 /// ``` 1405 /// # use libceed::prelude::*; 1406 /// # fn main() -> libceed::Result<()> { 1407 /// # let ceed = libceed::Ceed::default_init(); 1408 /// let ne = 4; 1409 /// let p = 3; 1410 /// let q = 4; 1411 /// let ncomp = 2; 1412 /// let ndofs = p * ne - ne + 1; 1413 /// 1414 /// // Vectors 1415 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1416 /// let mut qdata = ceed.vector(ne * q)?; 1417 /// qdata.set_value(0.0); 1418 /// let mut u = ceed.vector(ncomp * ndofs)?; 1419 /// u.set_value(1.0); 1420 /// let mut v = ceed.vector(ncomp * ndofs)?; 1421 /// v.set_value(0.0); 1422 /// 1423 /// // Restrictions 1424 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1425 /// for i in 0..ne { 1426 /// indx[2 * i + 0] = i as i32; 1427 /// indx[2 * i + 1] = (i + 1) as i32; 1428 /// } 1429 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1430 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1431 /// for i in 0..ne { 1432 /// indu[p * i + 0] = (2 * i) as i32; 1433 /// indu[p * i + 1] = (2 * i + 1) as i32; 1434 /// indu[p * i + 2] = (2 * i + 2) as i32; 1435 /// } 1436 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1437 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1438 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1439 /// 1440 /// // Bases 1441 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1442 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1443 /// 1444 /// // Build quadrature data 1445 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1446 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1447 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1448 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1449 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1450 /// .apply(&x, &mut qdata)?; 1451 /// 1452 /// // Mass operator 1453 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1454 /// // Number of quadrature points 1455 /// let q = qdata.len(); 1456 /// 1457 /// // Iterate over quadrature points 1458 /// for i in 0..q { 1459 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1460 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1461 /// } 1462 /// 1463 /// // Return clean error code 1464 /// 0 1465 /// }; 1466 /// 1467 /// let qf_mass = ceed 1468 /// .q_function_interior(1, Box::new(mass_2_comp))? 1469 /// .input("u", 2, EvalMode::Interp)? 1470 /// .input("qdata", 1, EvalMode::None)? 1471 /// .output("v", 2, EvalMode::Interp)?; 1472 /// 1473 /// let op_mass = ceed 1474 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1475 /// .field("u", &ru, &bu, VectorOpt::Active)? 1476 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1477 /// .field("v", &ru, &bu, VectorOpt::Active)?; 1478 /// 1479 /// // Diagonal 1480 /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?; 1481 /// diag.set_value(1.0); 1482 /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?; 1483 /// 1484 /// // Manual diagonal computation 1485 /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?; 1486 /// true_diag.set_value(0.0)?; 1487 /// for i in 0..ndofs { 1488 /// for j in 0..ncomp { 1489 /// u.set_value(0.0); 1490 /// { 1491 /// let mut u_array = u.view_mut()?; 1492 /// u_array[i + j * ndofs] = 1.; 1493 /// } 1494 /// 1495 /// op_mass.apply(&u, &mut v)?; 1496 /// 1497 /// { 1498 /// let v_array = v.view()?; 1499 /// let mut true_array = true_diag.view_mut()?; 1500 /// for k in 0..ncomp { 1501 /// true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs]; 1502 /// } 1503 /// } 1504 /// } 1505 /// } 1506 /// 1507 /// // Check 1508 /// diag.view()? 1509 /// .iter() 1510 /// .zip(true_diag.view()?.iter()) 1511 /// .for_each(|(computed, actual)| { 1512 /// assert!( 1513 /// (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON, 1514 /// "Diagonal entry incorrect" 1515 /// ); 1516 /// }); 1517 /// # Ok(()) 1518 /// # } 1519 /// ``` 1520 pub fn linear_assemble_add_point_block_diagonal( 1521 &self, 1522 assembled: &mut Vector, 1523 ) -> crate::Result<i32> { 1524 self.op_core 1525 .linear_assemble_add_point_block_diagonal(assembled) 1526 } 1527 1528 /// Create a multigrid coarse Operator and level transfer Operators for a 1529 /// given Operator, creating the prolongation basis from the fine and 1530 /// coarse grid interpolation 1531 /// 1532 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1533 /// * `rstr_coarse` - Coarse grid restriction 1534 /// * `basis_coarse` - Coarse grid active vector basis 1535 /// 1536 /// ``` 1537 /// # use libceed::prelude::*; 1538 /// # fn main() -> libceed::Result<()> { 1539 /// # let ceed = libceed::Ceed::default_init(); 1540 /// let ne = 15; 1541 /// let p_coarse = 3; 1542 /// let p_fine = 5; 1543 /// let q = 6; 1544 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1545 /// let ndofs_fine = p_fine * ne - ne + 1; 1546 /// 1547 /// // Vectors 1548 /// let x_array = (0..ne + 1) 1549 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1550 /// .collect::<Vec<Scalar>>(); 1551 /// let x = ceed.vector_from_slice(&x_array)?; 1552 /// let mut qdata = ceed.vector(ne * q)?; 1553 /// qdata.set_value(0.0); 1554 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1555 /// u_coarse.set_value(1.0); 1556 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1557 /// u_fine.set_value(1.0); 1558 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1559 /// v_coarse.set_value(0.0); 1560 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1561 /// v_fine.set_value(0.0); 1562 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1563 /// multiplicity.set_value(1.0); 1564 /// 1565 /// // Restrictions 1566 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1567 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1568 /// 1569 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1570 /// for i in 0..ne { 1571 /// indx[2 * i + 0] = i as i32; 1572 /// indx[2 * i + 1] = (i + 1) as i32; 1573 /// } 1574 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1575 /// 1576 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1577 /// for i in 0..ne { 1578 /// for j in 0..p_coarse { 1579 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1580 /// } 1581 /// } 1582 /// let ru_coarse = ceed.elem_restriction( 1583 /// ne, 1584 /// p_coarse, 1585 /// 1, 1586 /// 1, 1587 /// ndofs_coarse, 1588 /// MemType::Host, 1589 /// &indu_coarse, 1590 /// )?; 1591 /// 1592 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1593 /// for i in 0..ne { 1594 /// for j in 0..p_fine { 1595 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1596 /// } 1597 /// } 1598 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1599 /// 1600 /// // Bases 1601 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1602 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1603 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1604 /// 1605 /// // Build quadrature data 1606 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1607 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1608 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1609 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1610 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1611 /// .apply(&x, &mut qdata)?; 1612 /// 1613 /// // Mass operator 1614 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1615 /// let op_mass_fine = ceed 1616 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1617 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1618 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1619 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1620 /// 1621 /// // Multigrid setup 1622 /// let (op_mass_coarse, op_prolong, op_restrict) = 1623 /// op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?; 1624 /// 1625 /// // Coarse problem 1626 /// u_coarse.set_value(1.0); 1627 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1628 /// 1629 /// // Check 1630 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1631 /// assert!( 1632 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1633 /// "Incorrect interval length computed" 1634 /// ); 1635 /// 1636 /// // Prolong 1637 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1638 /// 1639 /// // Fine problem 1640 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1641 /// 1642 /// // Check 1643 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1644 /// assert!( 1645 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1646 /// "Incorrect interval length computed" 1647 /// ); 1648 /// 1649 /// // Restrict 1650 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1651 /// 1652 /// // Check 1653 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1654 /// assert!( 1655 /// (sum - 2.0).abs() < 50.0 * libceed::EPSILON, 1656 /// "Incorrect interval length computed" 1657 /// ); 1658 /// # Ok(()) 1659 /// # } 1660 /// ``` 1661 pub fn create_multigrid_level<'b>( 1662 &self, 1663 p_mult_fine: &Vector, 1664 rstr_coarse: &ElemRestriction, 1665 basis_coarse: &Basis, 1666 ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> { 1667 let mut ptr_coarse = std::ptr::null_mut(); 1668 let mut ptr_prolong = std::ptr::null_mut(); 1669 let mut ptr_restrict = std::ptr::null_mut(); 1670 let ierr = unsafe { 1671 bind_ceed::CeedOperatorMultigridLevelCreate( 1672 self.op_core.ptr, 1673 p_mult_fine.ptr, 1674 rstr_coarse.ptr, 1675 basis_coarse.ptr, 1676 &mut ptr_coarse, 1677 &mut ptr_prolong, 1678 &mut ptr_restrict, 1679 ) 1680 }; 1681 self.op_core.check_error(ierr)?; 1682 let op_coarse = Operator::from_raw(ptr_coarse)?; 1683 let op_prolong = Operator::from_raw(ptr_prolong)?; 1684 let op_restrict = Operator::from_raw(ptr_restrict)?; 1685 Ok((op_coarse, op_prolong, op_restrict)) 1686 } 1687 1688 /// Create a multigrid coarse Operator and level transfer Operators for a 1689 /// given Operator with a tensor basis for the active basis 1690 /// 1691 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1692 /// * `rstr_coarse` - Coarse grid restriction 1693 /// * `basis_coarse` - Coarse grid active vector basis 1694 /// * `interp_c_to_f` - Matrix for coarse to fine 1695 /// 1696 /// ``` 1697 /// # use libceed::prelude::*; 1698 /// # fn main() -> libceed::Result<()> { 1699 /// # let ceed = libceed::Ceed::default_init(); 1700 /// let ne = 15; 1701 /// let p_coarse = 3; 1702 /// let p_fine = 5; 1703 /// let q = 6; 1704 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1705 /// let ndofs_fine = p_fine * ne - ne + 1; 1706 /// 1707 /// // Vectors 1708 /// let x_array = (0..ne + 1) 1709 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1710 /// .collect::<Vec<Scalar>>(); 1711 /// let x = ceed.vector_from_slice(&x_array)?; 1712 /// let mut qdata = ceed.vector(ne * q)?; 1713 /// qdata.set_value(0.0); 1714 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1715 /// u_coarse.set_value(1.0); 1716 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1717 /// u_fine.set_value(1.0); 1718 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1719 /// v_coarse.set_value(0.0); 1720 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1721 /// v_fine.set_value(0.0); 1722 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1723 /// multiplicity.set_value(1.0); 1724 /// 1725 /// // Restrictions 1726 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1727 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1728 /// 1729 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1730 /// for i in 0..ne { 1731 /// indx[2 * i + 0] = i as i32; 1732 /// indx[2 * i + 1] = (i + 1) as i32; 1733 /// } 1734 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1735 /// 1736 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1737 /// for i in 0..ne { 1738 /// for j in 0..p_coarse { 1739 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1740 /// } 1741 /// } 1742 /// let ru_coarse = ceed.elem_restriction( 1743 /// ne, 1744 /// p_coarse, 1745 /// 1, 1746 /// 1, 1747 /// ndofs_coarse, 1748 /// MemType::Host, 1749 /// &indu_coarse, 1750 /// )?; 1751 /// 1752 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1753 /// for i in 0..ne { 1754 /// for j in 0..p_fine { 1755 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1756 /// } 1757 /// } 1758 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1759 /// 1760 /// // Bases 1761 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1762 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1763 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1764 /// 1765 /// // Build quadrature data 1766 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1767 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1768 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1769 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1770 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1771 /// .apply(&x, &mut qdata)?; 1772 /// 1773 /// // Mass operator 1774 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1775 /// let op_mass_fine = ceed 1776 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1777 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1778 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1779 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1780 /// 1781 /// // Multigrid setup 1782 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1783 /// { 1784 /// let mut coarse = ceed.vector(p_coarse)?; 1785 /// let mut fine = ceed.vector(p_fine)?; 1786 /// let basis_c_to_f = 1787 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1788 /// for i in 0..p_coarse { 1789 /// coarse.set_value(0.0); 1790 /// { 1791 /// let mut array = coarse.view_mut()?; 1792 /// array[i] = 1.; 1793 /// } 1794 /// basis_c_to_f.apply( 1795 /// 1, 1796 /// TransposeMode::NoTranspose, 1797 /// EvalMode::Interp, 1798 /// &coarse, 1799 /// &mut fine, 1800 /// )?; 1801 /// let array = fine.view()?; 1802 /// for j in 0..p_fine { 1803 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1804 /// } 1805 /// } 1806 /// } 1807 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1( 1808 /// &multiplicity, 1809 /// &ru_coarse, 1810 /// &bu_coarse, 1811 /// &interp_c_to_f, 1812 /// )?; 1813 /// 1814 /// // Coarse problem 1815 /// u_coarse.set_value(1.0); 1816 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1817 /// 1818 /// // Check 1819 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1820 /// assert!( 1821 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1822 /// "Incorrect interval length computed" 1823 /// ); 1824 /// 1825 /// // Prolong 1826 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1827 /// 1828 /// // Fine problem 1829 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1830 /// 1831 /// // Check 1832 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1833 /// assert!( 1834 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1835 /// "Incorrect interval length computed" 1836 /// ); 1837 /// 1838 /// // Restrict 1839 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1840 /// 1841 /// // Check 1842 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1843 /// assert!( 1844 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1845 /// "Incorrect interval length computed" 1846 /// ); 1847 /// # Ok(()) 1848 /// # } 1849 /// ``` 1850 pub fn create_multigrid_level_tensor_H1<'b>( 1851 &self, 1852 p_mult_fine: &Vector, 1853 rstr_coarse: &ElemRestriction, 1854 basis_coarse: &Basis, 1855 interpCtoF: &Vec<Scalar>, 1856 ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> { 1857 let mut ptr_coarse = std::ptr::null_mut(); 1858 let mut ptr_prolong = std::ptr::null_mut(); 1859 let mut ptr_restrict = std::ptr::null_mut(); 1860 let ierr = unsafe { 1861 bind_ceed::CeedOperatorMultigridLevelCreateTensorH1( 1862 self.op_core.ptr, 1863 p_mult_fine.ptr, 1864 rstr_coarse.ptr, 1865 basis_coarse.ptr, 1866 interpCtoF.as_ptr(), 1867 &mut ptr_coarse, 1868 &mut ptr_prolong, 1869 &mut ptr_restrict, 1870 ) 1871 }; 1872 self.op_core.check_error(ierr)?; 1873 let op_coarse = Operator::from_raw(ptr_coarse)?; 1874 let op_prolong = Operator::from_raw(ptr_prolong)?; 1875 let op_restrict = Operator::from_raw(ptr_restrict)?; 1876 Ok((op_coarse, op_prolong, op_restrict)) 1877 } 1878 1879 /// Create a multigrid coarse Operator and level transfer Operators for a 1880 /// given Operator with a non-tensor basis for the active basis 1881 /// 1882 /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter 1883 /// * `rstr_coarse` - Coarse grid restriction 1884 /// * `basis_coarse` - Coarse grid active vector basis 1885 /// * `interp_c_to_f` - Matrix for coarse to fine 1886 /// 1887 /// ``` 1888 /// # use libceed::prelude::*; 1889 /// # fn main() -> libceed::Result<()> { 1890 /// # let ceed = libceed::Ceed::default_init(); 1891 /// let ne = 15; 1892 /// let p_coarse = 3; 1893 /// let p_fine = 5; 1894 /// let q = 6; 1895 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1896 /// let ndofs_fine = p_fine * ne - ne + 1; 1897 /// 1898 /// // Vectors 1899 /// let x_array = (0..ne + 1) 1900 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1901 /// .collect::<Vec<Scalar>>(); 1902 /// let x = ceed.vector_from_slice(&x_array)?; 1903 /// let mut qdata = ceed.vector(ne * q)?; 1904 /// qdata.set_value(0.0); 1905 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1906 /// u_coarse.set_value(1.0); 1907 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1908 /// u_fine.set_value(1.0); 1909 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1910 /// v_coarse.set_value(0.0); 1911 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1912 /// v_fine.set_value(0.0); 1913 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1914 /// multiplicity.set_value(1.0); 1915 /// 1916 /// // Restrictions 1917 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1918 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1919 /// 1920 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1921 /// for i in 0..ne { 1922 /// indx[2 * i + 0] = i as i32; 1923 /// indx[2 * i + 1] = (i + 1) as i32; 1924 /// } 1925 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1926 /// 1927 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1928 /// for i in 0..ne { 1929 /// for j in 0..p_coarse { 1930 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1931 /// } 1932 /// } 1933 /// let ru_coarse = ceed.elem_restriction( 1934 /// ne, 1935 /// p_coarse, 1936 /// 1, 1937 /// 1, 1938 /// ndofs_coarse, 1939 /// MemType::Host, 1940 /// &indu_coarse, 1941 /// )?; 1942 /// 1943 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1944 /// for i in 0..ne { 1945 /// for j in 0..p_fine { 1946 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1947 /// } 1948 /// } 1949 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1950 /// 1951 /// // Bases 1952 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1953 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1954 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1955 /// 1956 /// // Build quadrature data 1957 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1958 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1959 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1960 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1961 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1962 /// .apply(&x, &mut qdata)?; 1963 /// 1964 /// // Mass operator 1965 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1966 /// let op_mass_fine = ceed 1967 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1968 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1969 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 1970 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1971 /// 1972 /// // Multigrid setup 1973 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1974 /// { 1975 /// let mut coarse = ceed.vector(p_coarse)?; 1976 /// let mut fine = ceed.vector(p_fine)?; 1977 /// let basis_c_to_f = 1978 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1979 /// for i in 0..p_coarse { 1980 /// coarse.set_value(0.0); 1981 /// { 1982 /// let mut array = coarse.view_mut()?; 1983 /// array[i] = 1.; 1984 /// } 1985 /// basis_c_to_f.apply( 1986 /// 1, 1987 /// TransposeMode::NoTranspose, 1988 /// EvalMode::Interp, 1989 /// &coarse, 1990 /// &mut fine, 1991 /// )?; 1992 /// let array = fine.view()?; 1993 /// for j in 0..p_fine { 1994 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1995 /// } 1996 /// } 1997 /// } 1998 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1( 1999 /// &multiplicity, 2000 /// &ru_coarse, 2001 /// &bu_coarse, 2002 /// &interp_c_to_f, 2003 /// )?; 2004 /// 2005 /// // Coarse problem 2006 /// u_coarse.set_value(1.0); 2007 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 2008 /// 2009 /// // Check 2010 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 2011 /// assert!( 2012 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 2013 /// "Incorrect interval length computed" 2014 /// ); 2015 /// 2016 /// // Prolong 2017 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 2018 /// 2019 /// // Fine problem 2020 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 2021 /// 2022 /// // Check 2023 /// let sum: Scalar = v_fine.view()?.iter().sum(); 2024 /// assert!( 2025 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 2026 /// "Incorrect interval length computed" 2027 /// ); 2028 /// 2029 /// // Restrict 2030 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 2031 /// 2032 /// // Check 2033 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 2034 /// assert!( 2035 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 2036 /// "Incorrect interval length computed" 2037 /// ); 2038 /// # Ok(()) 2039 /// # } 2040 /// ``` 2041 pub fn create_multigrid_level_H1<'b>( 2042 &self, 2043 p_mult_fine: &Vector, 2044 rstr_coarse: &ElemRestriction, 2045 basis_coarse: &Basis, 2046 interpCtoF: &[Scalar], 2047 ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> { 2048 let mut ptr_coarse = std::ptr::null_mut(); 2049 let mut ptr_prolong = std::ptr::null_mut(); 2050 let mut ptr_restrict = std::ptr::null_mut(); 2051 let ierr = unsafe { 2052 bind_ceed::CeedOperatorMultigridLevelCreateH1( 2053 self.op_core.ptr, 2054 p_mult_fine.ptr, 2055 rstr_coarse.ptr, 2056 basis_coarse.ptr, 2057 interpCtoF.as_ptr(), 2058 &mut ptr_coarse, 2059 &mut ptr_prolong, 2060 &mut ptr_restrict, 2061 ) 2062 }; 2063 self.op_core.check_error(ierr)?; 2064 let op_coarse = Operator::from_raw(ptr_coarse)?; 2065 let op_prolong = Operator::from_raw(ptr_prolong)?; 2066 let op_restrict = Operator::from_raw(ptr_restrict)?; 2067 Ok((op_coarse, op_prolong, op_restrict)) 2068 } 2069 } 2070 2071 // ----------------------------------------------------------------------------- 2072 // Composite Operator 2073 // ----------------------------------------------------------------------------- 2074 impl<'a> CompositeOperator<'a> { 2075 // Constructor 2076 pub fn create(ceed: &crate::Ceed) -> crate::Result<Self> { 2077 let mut ptr = std::ptr::null_mut(); 2078 let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) }; 2079 ceed.check_error(ierr)?; 2080 Ok(Self { 2081 op_core: OperatorCore { 2082 ptr, 2083 _lifeline: PhantomData, 2084 }, 2085 }) 2086 } 2087 2088 /// Set name for CompositeOperator printing 2089 /// 2090 /// * 'name' - Name to set 2091 /// 2092 /// ``` 2093 /// # use libceed::prelude::*; 2094 /// # fn main() -> libceed::Result<()> { 2095 /// # let ceed = libceed::Ceed::default_init(); 2096 /// 2097 /// // Sub operator field arguments 2098 /// let ne = 3; 2099 /// let q = 4 as usize; 2100 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 2101 /// for i in 0..ne { 2102 /// ind[2 * i + 0] = i as i32; 2103 /// ind[2 * i + 1] = (i + 1) as i32; 2104 /// } 2105 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 2106 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2107 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 2108 /// 2109 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2110 /// 2111 /// let qdata_mass = ceed.vector(q * ne)?; 2112 /// let qdata_diff = ceed.vector(q * ne)?; 2113 /// 2114 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2115 /// let op_mass = ceed 2116 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2117 /// .name("Mass term")? 2118 /// .field("u", &r, &b, VectorOpt::Active)? 2119 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2120 /// .field("v", &r, &b, VectorOpt::Active)?; 2121 /// 2122 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2123 /// let op_diff = ceed 2124 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2125 /// .name("Poisson term")? 2126 /// .field("du", &r, &b, VectorOpt::Active)? 2127 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2128 /// .field("dv", &r, &b, VectorOpt::Active)?; 2129 /// 2130 /// let op = ceed 2131 /// .composite_operator()? 2132 /// .name("Screened Poisson")? 2133 /// .sub_operator(&op_mass)? 2134 /// .sub_operator(&op_diff)?; 2135 /// # Ok(()) 2136 /// # } 2137 /// ``` 2138 #[allow(unused_mut)] 2139 pub fn name(mut self, name: &str) -> crate::Result<Self> { 2140 self.op_core.name(name)?; 2141 Ok(self) 2142 } 2143 2144 /// Apply Operator to a vector 2145 /// 2146 /// * `input` - Inpuht Vector 2147 /// * `output` - Output Vector 2148 /// 2149 /// ``` 2150 /// # use libceed::prelude::*; 2151 /// # fn main() -> libceed::Result<()> { 2152 /// # let ceed = libceed::Ceed::default_init(); 2153 /// let ne = 4; 2154 /// let p = 3; 2155 /// let q = 4; 2156 /// let ndofs = p * ne - ne + 1; 2157 /// 2158 /// // Vectors 2159 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 2160 /// let mut qdata_mass = ceed.vector(ne * q)?; 2161 /// qdata_mass.set_value(0.0); 2162 /// let mut qdata_diff = ceed.vector(ne * q)?; 2163 /// qdata_diff.set_value(0.0); 2164 /// let mut u = ceed.vector(ndofs)?; 2165 /// u.set_value(1.0); 2166 /// let mut v = ceed.vector(ndofs)?; 2167 /// v.set_value(0.0); 2168 /// 2169 /// // Restrictions 2170 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2171 /// for i in 0..ne { 2172 /// indx[2 * i + 0] = i as i32; 2173 /// indx[2 * i + 1] = (i + 1) as i32; 2174 /// } 2175 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2176 /// let mut indu: Vec<i32> = vec![0; p * ne]; 2177 /// for i in 0..ne { 2178 /// indu[p * i + 0] = i as i32; 2179 /// indu[p * i + 1] = (i + 1) as i32; 2180 /// indu[p * i + 2] = (i + 2) as i32; 2181 /// } 2182 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 2183 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2184 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2185 /// 2186 /// // Bases 2187 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2188 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 2189 /// 2190 /// // Build quadrature data 2191 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2192 /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2193 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2194 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2195 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2196 /// .apply(&x, &mut qdata_mass)?; 2197 /// 2198 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2199 /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2200 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2201 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2202 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2203 /// .apply(&x, &mut qdata_diff)?; 2204 /// 2205 /// // Application operator 2206 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2207 /// let op_mass = ceed 2208 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2209 /// .field("u", &ru, &bu, VectorOpt::Active)? 2210 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2211 /// .field("v", &ru, &bu, VectorOpt::Active)?; 2212 /// 2213 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2214 /// let op_diff = ceed 2215 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2216 /// .field("du", &ru, &bu, VectorOpt::Active)? 2217 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2218 /// .field("dv", &ru, &bu, VectorOpt::Active)?; 2219 /// 2220 /// let op_composite = ceed 2221 /// .composite_operator()? 2222 /// .sub_operator(&op_mass)? 2223 /// .sub_operator(&op_diff)?; 2224 /// 2225 /// v.set_value(0.0); 2226 /// op_composite.apply(&u, &mut v)?; 2227 /// 2228 /// // Check 2229 /// let sum: Scalar = v.view()?.iter().sum(); 2230 /// assert!( 2231 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 2232 /// "Incorrect interval length computed" 2233 /// ); 2234 /// # Ok(()) 2235 /// # } 2236 /// ``` 2237 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 2238 self.op_core.apply(input, output) 2239 } 2240 2241 /// Apply Operator to a vector and add result to output vector 2242 /// 2243 /// * `input` - Input Vector 2244 /// * `output` - Output Vector 2245 /// 2246 /// ``` 2247 /// # use libceed::prelude::*; 2248 /// # fn main() -> libceed::Result<()> { 2249 /// # let ceed = libceed::Ceed::default_init(); 2250 /// let ne = 4; 2251 /// let p = 3; 2252 /// let q = 4; 2253 /// let ndofs = p * ne - ne + 1; 2254 /// 2255 /// // Vectors 2256 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 2257 /// let mut qdata_mass = ceed.vector(ne * q)?; 2258 /// qdata_mass.set_value(0.0); 2259 /// let mut qdata_diff = ceed.vector(ne * q)?; 2260 /// qdata_diff.set_value(0.0); 2261 /// let mut u = ceed.vector(ndofs)?; 2262 /// u.set_value(1.0); 2263 /// let mut v = ceed.vector(ndofs)?; 2264 /// v.set_value(0.0); 2265 /// 2266 /// // Restrictions 2267 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2268 /// for i in 0..ne { 2269 /// indx[2 * i + 0] = i as i32; 2270 /// indx[2 * i + 1] = (i + 1) as i32; 2271 /// } 2272 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2273 /// let mut indu: Vec<i32> = vec![0; p * ne]; 2274 /// for i in 0..ne { 2275 /// indu[p * i + 0] = i as i32; 2276 /// indu[p * i + 1] = (i + 1) as i32; 2277 /// indu[p * i + 2] = (i + 2) as i32; 2278 /// } 2279 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 2280 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2281 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2282 /// 2283 /// // Bases 2284 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2285 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 2286 /// 2287 /// // Build quadrature data 2288 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2289 /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2290 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2291 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2292 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2293 /// .apply(&x, &mut qdata_mass)?; 2294 /// 2295 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2296 /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2297 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2298 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2299 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 2300 /// .apply(&x, &mut qdata_diff)?; 2301 /// 2302 /// // Application operator 2303 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2304 /// let op_mass = ceed 2305 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 2306 /// .field("u", &ru, &bu, VectorOpt::Active)? 2307 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 2308 /// .field("v", &ru, &bu, VectorOpt::Active)?; 2309 /// 2310 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2311 /// let op_diff = ceed 2312 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 2313 /// .field("du", &ru, &bu, VectorOpt::Active)? 2314 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 2315 /// .field("dv", &ru, &bu, VectorOpt::Active)?; 2316 /// 2317 /// let op_composite = ceed 2318 /// .composite_operator()? 2319 /// .sub_operator(&op_mass)? 2320 /// .sub_operator(&op_diff)?; 2321 /// 2322 /// v.set_value(1.0); 2323 /// op_composite.apply_add(&u, &mut v)?; 2324 /// 2325 /// // Check 2326 /// let sum: Scalar = v.view()?.iter().sum(); 2327 /// assert!( 2328 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 2329 /// "Incorrect interval length computed" 2330 /// ); 2331 /// # Ok(()) 2332 /// # } 2333 /// ``` 2334 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 2335 self.op_core.apply_add(input, output) 2336 } 2337 2338 /// Add a sub-Operator to a Composite Operator 2339 /// 2340 /// * `subop` - Sub-Operator 2341 /// 2342 /// ``` 2343 /// # use libceed::prelude::*; 2344 /// # fn main() -> libceed::Result<()> { 2345 /// # let ceed = libceed::Ceed::default_init(); 2346 /// let mut op = ceed.composite_operator()?; 2347 /// 2348 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2349 /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?; 2350 /// op = op.sub_operator(&op_mass)?; 2351 /// 2352 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2353 /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?; 2354 /// op = op.sub_operator(&op_diff)?; 2355 /// # Ok(()) 2356 /// # } 2357 /// ``` 2358 #[allow(unused_mut)] 2359 pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> { 2360 let ierr = 2361 unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) }; 2362 self.op_core.check_error(ierr)?; 2363 Ok(self) 2364 } 2365 2366 /// Check if CompositeOperator is setup correctly 2367 /// 2368 /// ``` 2369 /// # use libceed::prelude::*; 2370 /// # fn main() -> libceed::Result<()> { 2371 /// # let ceed = libceed::Ceed::default_init(); 2372 /// let ne = 4; 2373 /// let p = 3; 2374 /// let q = 4; 2375 /// let ndofs = p * ne - ne + 1; 2376 /// 2377 /// // Restrictions 2378 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2379 /// for i in 0..ne { 2380 /// indx[2 * i + 0] = i as i32; 2381 /// indx[2 * i + 1] = (i + 1) as i32; 2382 /// } 2383 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2384 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2385 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2386 /// 2387 /// // Bases 2388 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2389 /// 2390 /// // Build quadrature data 2391 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2392 /// let op_build_mass = ceed 2393 /// .operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2394 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2395 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2396 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2397 /// 2398 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2399 /// let op_build_diff = ceed 2400 /// .operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2401 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2402 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2403 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2404 /// 2405 /// let op_build = ceed 2406 /// .composite_operator()? 2407 /// .sub_operator(&op_build_mass)? 2408 /// .sub_operator(&op_build_diff)?; 2409 /// 2410 /// // Check 2411 /// assert!(op_build.check().is_ok(), "Operator setup incorrectly"); 2412 /// # Ok(()) 2413 /// # } 2414 /// ``` 2415 pub fn check(self) -> crate::Result<Self> { 2416 self.op_core.check()?; 2417 Ok(self) 2418 } 2419 2420 /// Assemble the diagonal of a square linear Operator 2421 /// 2422 /// This overwrites a Vector with the diagonal of a linear Operator. 2423 /// 2424 /// Note: Currently only non-composite Operators with a single field and 2425 /// composite Operators with single field sub-operators are supported. 2426 /// 2427 /// * `op` - Operator to assemble QFunction 2428 /// * `assembled` - Vector to store assembled Operator diagonal 2429 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2430 self.op_core.linear_assemble_add_diagonal(assembled) 2431 } 2432 2433 /// Assemble the point block diagonal of a square linear Operator 2434 /// 2435 /// This overwrites a Vector with the point block diagonal of a linear 2436 /// Operator. 2437 /// 2438 /// Note: Currently only non-composite Operators with a single field and 2439 /// composite Operators with single field sub-operators are supported. 2440 /// 2441 /// * `op` - Operator to assemble QFunction 2442 /// * `assembled` - Vector to store assembled Operator diagonal 2443 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2444 self.op_core.linear_assemble_add_diagonal(assembled) 2445 } 2446 2447 /// Assemble the diagonal of a square linear Operator 2448 /// 2449 /// This overwrites a Vector with the diagonal of a linear Operator. 2450 /// 2451 /// Note: Currently only non-composite Operators with a single field and 2452 /// composite Operators with single field sub-operators are supported. 2453 /// 2454 /// * `op` - Operator to assemble QFunction 2455 /// * `assembled` - Vector to store assembled CeedOperator point block 2456 /// diagonal, provided in row-major form with an 2457 /// `ncomp * ncomp` block at each node. The dimensions of 2458 /// this vector are derived from the active vector for 2459 /// the CeedOperator. The array has shape 2460 /// `[nodes, component out, component in]`. 2461 pub fn linear_assemble_point_block_diagonal( 2462 &self, 2463 assembled: &mut Vector, 2464 ) -> crate::Result<i32> { 2465 self.op_core.linear_assemble_add_diagonal(assembled) 2466 } 2467 2468 /// Assemble the diagonal of a square linear Operator 2469 /// 2470 /// This sums into a Vector with the diagonal of a linear Operator. 2471 /// 2472 /// Note: Currently only non-composite Operators with a single field and 2473 /// composite Operators with single field sub-operators are supported. 2474 /// 2475 /// * `op` - Operator to assemble QFunction 2476 /// * `assembled` - Vector to store assembled CeedOperator point block 2477 /// diagonal, provided in row-major form with an 2478 /// `ncomp * ncomp` block at each node. The dimensions of 2479 /// this vector are derived from the active vector for 2480 /// the CeedOperator. The array has shape 2481 /// `[nodes, component out, component in]`. 2482 pub fn linear_assemble_add_point_block_diagonal( 2483 &self, 2484 assembled: &mut Vector, 2485 ) -> crate::Result<i32> { 2486 self.op_core.linear_assemble_add_diagonal(assembled) 2487 } 2488 } 2489 2490 // ----------------------------------------------------------------------------- 2491