1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors. 2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. 3 // 4 // SPDX-License-Identifier: BSD-2-Clause 5 // 6 // This file is part of CEED: http://github.com/ceed 7 8 //! A Ceed Operator defines the finite/spectral element operator associated to a 9 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions, 10 //! Ceed Bases, and Ceed QFunctions. 11 12 use crate::prelude::*; 13 14 // ----------------------------------------------------------------------------- 15 // Operator Field context wrapper 16 // ----------------------------------------------------------------------------- 17 #[derive(Debug)] 18 pub struct OperatorField<'a> { 19 pub(crate) ptr: bind_ceed::CeedOperatorField, 20 _lifeline: PhantomData<&'a ()>, 21 } 22 23 // ----------------------------------------------------------------------------- 24 // Implementations 25 // ----------------------------------------------------------------------------- 26 impl<'a> OperatorField<'a> { 27 /// Get the name of an OperatorField 28 /// 29 /// ``` 30 /// # use libceed::prelude::*; 31 /// # fn main() -> libceed::Result<()> { 32 /// # let ceed = libceed::Ceed::default_init(); 33 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 34 /// 35 /// // Operator field arguments 36 /// let ne = 3; 37 /// let q = 4 as usize; 38 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 39 /// for i in 0..ne { 40 /// ind[2 * i + 0] = i as i32; 41 /// ind[2 * i + 1] = (i + 1) as i32; 42 /// } 43 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 44 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 45 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 46 /// 47 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 48 /// 49 /// // Operator fields 50 /// let op = ceed 51 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 
52 /// .field("dx", &r, &b, VectorOpt::Active)? 53 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 54 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 55 /// 56 /// let inputs = op.inputs()?; 57 /// 58 /// assert_eq!(inputs[0].name(), "dx", "Incorrect input name"); 59 /// assert_eq!(inputs[1].name(), "weights", "Incorrect input name"); 60 /// # Ok(()) 61 /// # } 62 /// ``` 63 pub fn name(&self) -> &str { 64 let mut name_ptr: *mut std::os::raw::c_char = std::ptr::null_mut(); 65 unsafe { 66 bind_ceed::CeedOperatorFieldGetName(self.ptr, &mut name_ptr); 67 } 68 unsafe { CStr::from_ptr(name_ptr) }.to_str().unwrap() 69 } 70 71 /// Get the ElemRestriction of an OperatorField 72 /// 73 /// ``` 74 /// # use libceed::prelude::*; 75 /// # fn main() -> libceed::Result<()> { 76 /// # let ceed = libceed::Ceed::default_init(); 77 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 78 /// 79 /// // Operator field arguments 80 /// let ne = 3; 81 /// let q = 4 as usize; 82 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 83 /// for i in 0..ne { 84 /// ind[2 * i + 0] = i as i32; 85 /// ind[2 * i + 1] = (i + 1) as i32; 86 /// } 87 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 88 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 89 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 90 /// 91 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 92 /// 93 /// // Operator fields 94 /// let op = ceed 95 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 96 /// .field("dx", &r, &b, VectorOpt::Active)? 97 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 
98 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 99 /// 100 /// let inputs = op.inputs()?; 101 /// 102 /// assert!( 103 /// inputs[0].elem_restriction().is_some(), 104 /// "Incorrect field ElemRestriction" 105 /// ); 106 /// if let ElemRestrictionOpt::Some(r) = inputs[0].elem_restriction() { 107 /// assert_eq!( 108 /// r.num_elements(), 109 /// ne, 110 /// "Incorrect field ElemRestriction number of elements" 111 /// ); 112 /// } 113 /// 114 /// assert!( 115 /// inputs[1].elem_restriction().is_none(), 116 /// "Incorrect field ElemRestriction" 117 /// ); 118 /// # Ok(()) 119 /// # } 120 /// ``` 121 pub fn elem_restriction(&self) -> ElemRestrictionOpt { 122 let mut ptr = std::ptr::null_mut(); 123 unsafe { 124 bind_ceed::CeedOperatorFieldGetElemRestriction(self.ptr, &mut ptr); 125 } 126 if ptr == unsafe { bind_ceed::CEED_ELEMRESTRICTION_NONE } { 127 ElemRestrictionOpt::None 128 } else { 129 let slice = unsafe { 130 std::slice::from_raw_parts( 131 &ptr as *const bind_ceed::CeedElemRestriction as *const crate::ElemRestriction, 132 1 as usize, 133 ) 134 }; 135 ElemRestrictionOpt::Some(&slice[0]) 136 } 137 } 138 139 /// Get the Basis of an OperatorField 140 /// 141 /// ``` 142 /// # use libceed::prelude::*; 143 /// # fn main() -> libceed::Result<()> { 144 /// # let ceed = libceed::Ceed::default_init(); 145 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 146 /// 147 /// // Operator field arguments 148 /// let ne = 3; 149 /// let q = 4 as usize; 150 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 151 /// for i in 0..ne { 152 /// ind[2 * i + 0] = i as i32; 153 /// ind[2 * i + 1] = (i + 1) as i32; 154 /// } 155 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 156 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 157 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 158 /// 159 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 160 /// 161 /// // Operator fields 162 /// 
let op = ceed 163 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 164 /// .field("dx", &r, &b, VectorOpt::Active)? 165 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 166 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 167 /// 168 /// let inputs = op.inputs()?; 169 /// 170 /// assert!(inputs[0].basis().is_some(), "Incorrect field Basis"); 171 /// if let BasisOpt::Some(b) = inputs[0].basis() { 172 /// assert_eq!( 173 /// b.num_quadrature_points(), 174 /// q, 175 /// "Incorrect field Basis number of quadrature points" 176 /// ); 177 /// } 178 /// assert!(inputs[1].basis().is_some(), "Incorrect field Basis"); 179 /// if let BasisOpt::Some(b) = inputs[0].basis() { 180 /// assert_eq!( 181 /// b.num_quadrature_points(), 182 /// q, 183 /// "Incorrect field Basis number of quadrature points" 184 /// ); 185 /// } 186 /// 187 /// let outputs = op.outputs()?; 188 /// 189 /// assert!(outputs[0].basis().is_collocated(), "Incorrect field Basis"); 190 /// # Ok(()) 191 /// # } 192 /// ``` 193 pub fn basis(&self) -> BasisOpt { 194 let mut ptr = std::ptr::null_mut(); 195 unsafe { 196 bind_ceed::CeedOperatorFieldGetBasis(self.ptr, &mut ptr); 197 } 198 if ptr == unsafe { bind_ceed::CEED_BASIS_COLLOCATED } { 199 BasisOpt::Collocated 200 } else { 201 let slice = unsafe { 202 std::slice::from_raw_parts( 203 &ptr as *const bind_ceed::CeedBasis as *const crate::Basis, 204 1 as usize, 205 ) 206 }; 207 BasisOpt::Some(&slice[0]) 208 } 209 } 210 211 /// Get the Vector of an OperatorField 212 /// 213 /// ``` 214 /// # use libceed::prelude::*; 215 /// # fn main() -> libceed::Result<()> { 216 /// # let ceed = libceed::Ceed::default_init(); 217 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 218 /// 219 /// // Operator field arguments 220 /// let ne = 3; 221 /// let q = 4 as usize; 222 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 223 /// for i in 0..ne { 224 /// ind[2 * i + 0] = i as i32; 225 /// ind[2 * i + 1] = (i + 1) as i32; 
226 /// } 227 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 228 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 229 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 230 /// 231 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 232 /// 233 /// // Operator fields 234 /// let op = ceed 235 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 236 /// .field("dx", &r, &b, VectorOpt::Active)? 237 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 238 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 239 /// 240 /// let inputs = op.inputs()?; 241 /// 242 /// assert!(inputs[0].vector().is_active(), "Incorrect field Vector"); 243 /// assert!(inputs[1].vector().is_none(), "Incorrect field Vector"); 244 /// # Ok(()) 245 /// # } 246 /// ``` 247 pub fn vector(&self) -> VectorOpt { 248 let mut ptr = std::ptr::null_mut(); 249 unsafe { 250 bind_ceed::CeedOperatorFieldGetVector(self.ptr, &mut ptr); 251 } 252 if ptr == unsafe { bind_ceed::CEED_VECTOR_ACTIVE } { 253 VectorOpt::Active 254 } else if ptr == unsafe { bind_ceed::CEED_VECTOR_NONE } { 255 VectorOpt::None 256 } else { 257 let slice = unsafe { 258 std::slice::from_raw_parts( 259 &ptr as *const bind_ceed::CeedVector as *const crate::Vector, 260 1 as usize, 261 ) 262 }; 263 VectorOpt::Some(&slice[0]) 264 } 265 } 266 } 267 268 // ----------------------------------------------------------------------------- 269 // Operator context wrapper 270 // ----------------------------------------------------------------------------- 271 #[derive(Debug)] 272 pub(crate) struct OperatorCore<'a> { 273 ptr: bind_ceed::CeedOperator, 274 _lifeline: PhantomData<&'a ()>, 275 } 276 277 #[derive(Debug)] 278 pub struct Operator<'a> { 279 op_core: OperatorCore<'a>, 280 } 281 282 #[derive(Debug)] 283 pub struct CompositeOperator<'a> { 284 op_core: OperatorCore<'a>, 285 } 286 287 // 
----------------------------------------------------------------------------- 288 // Destructor 289 // ----------------------------------------------------------------------------- 290 impl<'a> Drop for OperatorCore<'a> { 291 fn drop(&mut self) { 292 unsafe { 293 bind_ceed::CeedOperatorDestroy(&mut self.ptr); 294 } 295 } 296 } 297 298 // ----------------------------------------------------------------------------- 299 // Display 300 // ----------------------------------------------------------------------------- 301 impl<'a> fmt::Display for OperatorCore<'a> { 302 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 303 let mut ptr = std::ptr::null_mut(); 304 let mut sizeloc = crate::MAX_BUFFER_LENGTH; 305 let cstring = unsafe { 306 let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc); 307 bind_ceed::CeedOperatorView(self.ptr, file); 308 bind_ceed::fclose(file); 309 CString::from_raw(ptr) 310 }; 311 cstring.to_string_lossy().fmt(f) 312 } 313 } 314 315 /// View an Operator 316 /// 317 /// ``` 318 /// # use libceed::prelude::*; 319 /// # fn main() -> libceed::Result<()> { 320 /// # let ceed = libceed::Ceed::default_init(); 321 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 322 /// 323 /// // Operator field arguments 324 /// let ne = 3; 325 /// let q = 4 as usize; 326 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 327 /// for i in 0..ne { 328 /// ind[2 * i + 0] = i as i32; 329 /// ind[2 * i + 1] = (i + 1) as i32; 330 /// } 331 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 332 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 333 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 334 /// 335 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 336 /// 337 /// // Operator fields 338 /// let op = ceed 339 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 340 /// .field("dx", &r, &b, VectorOpt::Active)? 
341 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 342 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 343 /// 344 /// println!("{}", op); 345 /// # Ok(()) 346 /// # } 347 /// ``` 348 impl<'a> fmt::Display for Operator<'a> { 349 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 350 self.op_core.fmt(f) 351 } 352 } 353 354 /// View a composite Operator 355 /// 356 /// ``` 357 /// # use libceed::prelude::*; 358 /// # fn main() -> libceed::Result<()> { 359 /// # let ceed = libceed::Ceed::default_init(); 360 /// 361 /// // Sub operator field arguments 362 /// let ne = 3; 363 /// let q = 4 as usize; 364 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 365 /// for i in 0..ne { 366 /// ind[2 * i + 0] = i as i32; 367 /// ind[2 * i + 1] = (i + 1) as i32; 368 /// } 369 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 370 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 371 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 372 /// 373 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 374 /// 375 /// let qdata_mass = ceed.vector(q * ne)?; 376 /// let qdata_diff = ceed.vector(q * ne)?; 377 /// 378 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 379 /// let op_mass = ceed 380 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 381 /// .field("u", &r, &b, VectorOpt::Active)? 382 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)? 383 /// .field("v", &r, &b, VectorOpt::Active)?; 384 /// 385 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 386 /// let op_diff = ceed 387 /// .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)? 388 /// .field("du", &r, &b, VectorOpt::Active)? 389 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)? 390 /// .field("dv", &r, &b, VectorOpt::Active)?; 391 /// 392 /// let op = ceed 393 /// .composite_operator()? 394 /// .sub_operator(&op_mass)? 
395 /// .sub_operator(&op_diff)?; 396 /// 397 /// println!("{}", op); 398 /// # Ok(()) 399 /// # } 400 /// ``` 401 impl<'a> fmt::Display for CompositeOperator<'a> { 402 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 403 self.op_core.fmt(f) 404 } 405 } 406 407 // ----------------------------------------------------------------------------- 408 // Core functionality 409 // ----------------------------------------------------------------------------- 410 impl<'a> OperatorCore<'a> { 411 // Error handling 412 #[doc(hidden)] 413 fn check_error(&self, ierr: i32) -> crate::Result<i32> { 414 let mut ptr = std::ptr::null_mut(); 415 unsafe { 416 bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr); 417 } 418 crate::check_error(ptr, ierr) 419 } 420 421 // Common implementations 422 pub fn check(&self) -> crate::Result<i32> { 423 let ierr = unsafe { bind_ceed::CeedOperatorCheckReady(self.ptr) }; 424 self.check_error(ierr) 425 } 426 427 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 428 let ierr = unsafe { 429 bind_ceed::CeedOperatorApply( 430 self.ptr, 431 input.ptr, 432 output.ptr, 433 bind_ceed::CEED_REQUEST_IMMEDIATE, 434 ) 435 }; 436 self.check_error(ierr) 437 } 438 439 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 440 let ierr = unsafe { 441 bind_ceed::CeedOperatorApplyAdd( 442 self.ptr, 443 input.ptr, 444 output.ptr, 445 bind_ceed::CEED_REQUEST_IMMEDIATE, 446 ) 447 }; 448 self.check_error(ierr) 449 } 450 451 pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 452 let ierr = unsafe { 453 bind_ceed::CeedOperatorLinearAssembleDiagonal( 454 self.ptr, 455 assembled.ptr, 456 bind_ceed::CEED_REQUEST_IMMEDIATE, 457 ) 458 }; 459 self.check_error(ierr) 460 } 461 462 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 463 let ierr = unsafe { 464 bind_ceed::CeedOperatorLinearAssembleAddDiagonal( 465 self.ptr, 466 assembled.ptr, 467 
bind_ceed::CEED_REQUEST_IMMEDIATE, 468 ) 469 }; 470 self.check_error(ierr) 471 } 472 473 pub fn linear_assemble_point_block_diagonal( 474 &self, 475 assembled: &mut Vector, 476 ) -> crate::Result<i32> { 477 let ierr = unsafe { 478 bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal( 479 self.ptr, 480 assembled.ptr, 481 bind_ceed::CEED_REQUEST_IMMEDIATE, 482 ) 483 }; 484 self.check_error(ierr) 485 } 486 487 pub fn linear_assemble_add_point_block_diagonal( 488 &self, 489 assembled: &mut Vector, 490 ) -> crate::Result<i32> { 491 let ierr = unsafe { 492 bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal( 493 self.ptr, 494 assembled.ptr, 495 bind_ceed::CEED_REQUEST_IMMEDIATE, 496 ) 497 }; 498 self.check_error(ierr) 499 } 500 } 501 502 // ----------------------------------------------------------------------------- 503 // Operator 504 // ----------------------------------------------------------------------------- 505 impl<'a> Operator<'a> { 506 // Constructor 507 pub fn create<'b>( 508 ceed: &crate::Ceed, 509 qf: impl Into<QFunctionOpt<'b>>, 510 dqf: impl Into<QFunctionOpt<'b>>, 511 dqfT: impl Into<QFunctionOpt<'b>>, 512 ) -> crate::Result<Self> { 513 let mut ptr = std::ptr::null_mut(); 514 let ierr = unsafe { 515 bind_ceed::CeedOperatorCreate( 516 ceed.ptr, 517 qf.into().to_raw(), 518 dqf.into().to_raw(), 519 dqfT.into().to_raw(), 520 &mut ptr, 521 ) 522 }; 523 ceed.check_error(ierr)?; 524 Ok(Self { 525 op_core: OperatorCore { 526 ptr, 527 _lifeline: PhantomData, 528 }, 529 }) 530 } 531 532 fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> { 533 Ok(Self { 534 op_core: OperatorCore { 535 ptr, 536 _lifeline: PhantomData, 537 }, 538 }) 539 } 540 541 /// Apply Operator to a vector 542 /// 543 /// * `input` - Input Vector 544 /// * `output` - Output Vector 545 /// 546 /// ``` 547 /// # use libceed::prelude::*; 548 /// # fn main() -> libceed::Result<()> { 549 /// # let ceed = libceed::Ceed::default_init(); 550 /// let ne = 4; 551 /// let p = 3; 552 
/// let q = 4; 553 /// let ndofs = p * ne - ne + 1; 554 /// 555 /// // Vectors 556 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 557 /// let mut qdata = ceed.vector(ne * q)?; 558 /// qdata.set_value(0.0); 559 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 560 /// let mut v = ceed.vector(ndofs)?; 561 /// v.set_value(0.0); 562 /// 563 /// // Restrictions 564 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 565 /// for i in 0..ne { 566 /// indx[2 * i + 0] = i as i32; 567 /// indx[2 * i + 1] = (i + 1) as i32; 568 /// } 569 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 570 /// let mut indu: Vec<i32> = vec![0; p * ne]; 571 /// for i in 0..ne { 572 /// indu[p * i + 0] = i as i32; 573 /// indu[p * i + 1] = (i + 1) as i32; 574 /// indu[p * i + 2] = (i + 2) as i32; 575 /// } 576 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 577 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 578 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 579 /// 580 /// // Bases 581 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 582 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 583 /// 584 /// // Build quadrature data 585 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 586 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 587 /// .field("dx", &rx, &bx, VectorOpt::Active)? 588 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 589 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 590 /// .apply(&x, &mut qdata)?; 591 /// 592 /// // Mass operator 593 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 594 /// let op_mass = ceed 595 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 596 /// .field("u", &ru, &bu, VectorOpt::Active)? 597 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
598 /// .field("v", &ru, &bu, VectorOpt::Active)?; 599 /// 600 /// v.set_value(0.0); 601 /// op_mass.apply(&u, &mut v)?; 602 /// 603 /// // Check 604 /// let sum: Scalar = v.view()?.iter().sum(); 605 /// assert!( 606 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 607 /// "Incorrect interval length computed" 608 /// ); 609 /// # Ok(()) 610 /// # } 611 /// ``` 612 pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 613 self.op_core.apply(input, output) 614 } 615 616 /// Apply Operator to a vector and add result to output vector 617 /// 618 /// * `input` - Input Vector 619 /// * `output` - Output Vector 620 /// 621 /// ``` 622 /// # use libceed::prelude::*; 623 /// # fn main() -> libceed::Result<()> { 624 /// # let ceed = libceed::Ceed::default_init(); 625 /// let ne = 4; 626 /// let p = 3; 627 /// let q = 4; 628 /// let ndofs = p * ne - ne + 1; 629 /// 630 /// // Vectors 631 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 632 /// let mut qdata = ceed.vector(ne * q)?; 633 /// qdata.set_value(0.0); 634 /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?; 635 /// let mut v = ceed.vector(ndofs)?; 636 /// 637 /// // Restrictions 638 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 639 /// for i in 0..ne { 640 /// indx[2 * i + 0] = i as i32; 641 /// indx[2 * i + 1] = (i + 1) as i32; 642 /// } 643 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 644 /// let mut indu: Vec<i32> = vec![0; p * ne]; 645 /// for i in 0..ne { 646 /// indu[p * i + 0] = i as i32; 647 /// indu[p * i + 1] = (i + 1) as i32; 648 /// indu[p * i + 2] = (i + 2) as i32; 649 /// } 650 /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?; 651 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 652 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 653 /// 654 /// // Bases 655 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 656 /// let bu = 
ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 657 /// 658 /// // Build quadrature data 659 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 660 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 661 /// .field("dx", &rx, &bx, VectorOpt::Active)? 662 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 663 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 664 /// .apply(&x, &mut qdata)?; 665 /// 666 /// // Mass operator 667 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 668 /// let op_mass = ceed 669 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 670 /// .field("u", &ru, &bu, VectorOpt::Active)? 671 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 672 /// .field("v", &ru, &bu, VectorOpt::Active)?; 673 /// 674 /// v.set_value(1.0); 675 /// op_mass.apply_add(&u, &mut v)?; 676 /// 677 /// // Check 678 /// let sum: Scalar = v.view()?.iter().sum(); 679 /// assert!( 680 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 681 /// "Incorrect interval length computed and added" 682 /// ); 683 /// # Ok(()) 684 /// # } 685 /// ``` 686 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 687 self.op_core.apply_add(input, output) 688 } 689 690 /// Provide a field to a Operator for use by its QFunction 691 /// 692 /// * `fieldname` - Name of the field (to be matched with the name used by 693 /// the QFunction) 694 /// * `r` - ElemRestriction 695 /// * `b` - Basis in which the field resides or `BasisCollocated` if 696 /// collocated with quadrature points 697 /// * `v` - Vector to be used by Operator or `VectorActive` if field 698 /// is active or `VectorNone` if using `Weight` with the 699 /// QFunction 700 /// 701 /// 702 /// ``` 703 /// # use libceed::prelude::*; 704 /// # fn main() -> libceed::Result<()> { 705 /// # let ceed = libceed::Ceed::default_init(); 706 /// let qf = 
ceed.q_function_interior_by_name("Mass1DBuild")?; 707 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 708 /// 709 /// // Operator field arguments 710 /// let ne = 3; 711 /// let q = 4; 712 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 713 /// for i in 0..ne { 714 /// ind[2 * i + 0] = i as i32; 715 /// ind[2 * i + 1] = (i + 1) as i32; 716 /// } 717 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 718 /// 719 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 720 /// 721 /// // Operator field 722 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 723 /// # Ok(()) 724 /// # } 725 /// ``` 726 #[allow(unused_mut)] 727 pub fn field<'b>( 728 mut self, 729 fieldname: &str, 730 r: impl Into<ElemRestrictionOpt<'b>>, 731 b: impl Into<BasisOpt<'b>>, 732 v: impl Into<VectorOpt<'b>>, 733 ) -> crate::Result<Self> { 734 let fieldname = CString::new(fieldname).expect("CString::new failed"); 735 let fieldname = fieldname.as_ptr() as *const i8; 736 let ierr = unsafe { 737 bind_ceed::CeedOperatorSetField( 738 self.op_core.ptr, 739 fieldname, 740 r.into().to_raw(), 741 b.into().to_raw(), 742 v.into().to_raw(), 743 ) 744 }; 745 self.op_core.check_error(ierr)?; 746 Ok(self) 747 } 748 749 /// Get a slice of Operator inputs 750 /// 751 /// ``` 752 /// # use libceed::prelude::*; 753 /// # fn main() -> libceed::Result<()> { 754 /// # let ceed = libceed::Ceed::default_init(); 755 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 756 /// 757 /// // Operator field arguments 758 /// let ne = 3; 759 /// let q = 4 as usize; 760 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 761 /// for i in 0..ne { 762 /// ind[2 * i + 0] = i as i32; 763 /// ind[2 * i + 1] = (i + 1) as i32; 764 /// } 765 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 766 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 767 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 768 /// 769 
/// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 770 /// 771 /// // Operator fields 772 /// let op = ceed 773 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 774 /// .field("dx", &r, &b, VectorOpt::Active)? 775 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 776 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 777 /// 778 /// let inputs = op.inputs()?; 779 /// 780 /// assert_eq!(inputs.len(), 2, "Incorrect inputs array"); 781 /// # Ok(()) 782 /// # } 783 /// ``` 784 pub fn inputs(&self) -> crate::Result<&[crate::OperatorField]> { 785 // Get array of raw C pointers for inputs 786 let mut num_inputs = 0; 787 let mut inputs_ptr = std::ptr::null_mut(); 788 let ierr = unsafe { 789 bind_ceed::CeedOperatorGetFields( 790 self.op_core.ptr, 791 &mut num_inputs, 792 &mut inputs_ptr, 793 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 794 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 795 ) 796 }; 797 self.op_core.check_error(ierr)?; 798 // Convert raw C pointers to fixed length slice 799 let inputs_slice = unsafe { 800 std::slice::from_raw_parts( 801 inputs_ptr as *const crate::OperatorField, 802 num_inputs as usize, 803 ) 804 }; 805 Ok(inputs_slice) 806 } 807 808 /// Get a slice of Operator outputs 809 /// 810 /// ``` 811 /// # use libceed::prelude::*; 812 /// # fn main() -> libceed::Result<()> { 813 /// # let ceed = libceed::Ceed::default_init(); 814 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 815 /// 816 /// // Operator field arguments 817 /// let ne = 3; 818 /// let q = 4 as usize; 819 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 820 /// for i in 0..ne { 821 /// ind[2 * i + 0] = i as i32; 822 /// ind[2 * i + 1] = (i + 1) as i32; 823 /// } 824 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 825 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 826 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?; 827 /// 
828 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 829 /// 830 /// // Operator fields 831 /// let op = ceed 832 /// .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)? 833 /// .field("dx", &r, &b, VectorOpt::Active)? 834 /// .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)? 835 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 836 /// 837 /// let outputs = op.outputs()?; 838 /// 839 /// assert_eq!(outputs.len(), 1, "Incorrect outputs array"); 840 /// # Ok(()) 841 /// # } 842 /// ``` 843 pub fn outputs(&self) -> crate::Result<&[crate::OperatorField]> { 844 // Get array of raw C pointers for outputs 845 let mut num_outputs = 0; 846 let mut outputs_ptr = std::ptr::null_mut(); 847 let ierr = unsafe { 848 bind_ceed::CeedOperatorGetFields( 849 self.op_core.ptr, 850 std::ptr::null_mut() as *mut bind_ceed::CeedInt, 851 std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField, 852 &mut num_outputs, 853 &mut outputs_ptr, 854 ) 855 }; 856 self.op_core.check_error(ierr)?; 857 // Convert raw C pointers to fixed length slice 858 let outputs_slice = unsafe { 859 std::slice::from_raw_parts( 860 outputs_ptr as *const crate::OperatorField, 861 num_outputs as usize, 862 ) 863 }; 864 Ok(outputs_slice) 865 } 866 867 /// Check if Operator is setup correctly 868 /// 869 /// ``` 870 /// # use libceed::prelude::*; 871 /// # fn main() -> libceed::Result<()> { 872 /// # let ceed = libceed::Ceed::default_init(); 873 /// let ne = 4; 874 /// let p = 3; 875 /// let q = 4; 876 /// let ndofs = p * ne - ne + 1; 877 /// 878 /// // Restrictions 879 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 880 /// for i in 0..ne { 881 /// indx[2 * i + 0] = i as i32; 882 /// indx[2 * i + 1] = (i + 1) as i32; 883 /// } 884 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 885 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 886 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 887 /// 888 
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// let op_build = ceed
    ///     .operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// // Check
    /// assert!(op_build.check().is_ok(), "Operator setup incorrectly");
    /// # Ok(())
    /// # }
    /// ```
    pub fn check(self) -> crate::Result<Self> {
        // Delegate the check to op_core; take and return `self` by value so
        // the call can be chained in a builder-style expression.
        self.op_core.check()?;
        Ok(self)
    }

    /// Get the number of elements associated with an Operator
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator field
    /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
    ///
    /// // Check number of elements
    /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements");
    /// # Ok(())
    /// # }
    /// ```
    pub fn num_elements(&self) -> usize {
        let mut nelem = 0;
        // NOTE(review): the C error code returned by this call is discarded;
        // on failure `nelem` is left at 0 and that is what gets reported.
        unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) };
        usize::try_from(nelem).unwrap()
    }

    /// Get the number of
quadrature points associated with each element of 946 /// an Operator 947 /// 948 /// 949 /// ``` 950 /// # use libceed::prelude::*; 951 /// # fn main() -> libceed::Result<()> { 952 /// # let ceed = libceed::Ceed::default_init(); 953 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?; 954 /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?; 955 /// 956 /// // Operator field arguments 957 /// let ne = 3; 958 /// let q = 4; 959 /// let mut ind: Vec<i32> = vec![0; 2 * ne]; 960 /// for i in 0..ne { 961 /// ind[2 * i + 0] = i as i32; 962 /// ind[2 * i + 1] = (i + 1) as i32; 963 /// } 964 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?; 965 /// 966 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 967 /// 968 /// // Operator field 969 /// op = op.field("dx", &r, &b, VectorOpt::Active)?; 970 /// 971 /// // Check number of quadrature points 972 /// assert_eq!( 973 /// op.num_quadrature_points(), 974 /// q, 975 /// "Incorrect number of quadrature points" 976 /// ); 977 /// # Ok(()) 978 /// # } 979 /// ``` 980 pub fn num_quadrature_points(&self) -> usize { 981 let mut Q = 0; 982 unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) }; 983 usize::try_from(Q).unwrap() 984 } 985 986 /// Assemble the diagonal of a square linear Operator 987 /// 988 /// This overwrites a Vector with the diagonal of a linear Operator. 989 /// 990 /// Note: Currently only non-composite Operators with a single field and 991 /// composite Operators with single field sub-operators are supported. 
992 /// 993 /// * `op` - Operator to assemble QFunction 994 /// * `assembled` - Vector to store assembled Operator diagonal 995 /// 996 /// ``` 997 /// # use libceed::prelude::*; 998 /// # fn main() -> libceed::Result<()> { 999 /// # let ceed = libceed::Ceed::default_init(); 1000 /// let ne = 4; 1001 /// let p = 3; 1002 /// let q = 4; 1003 /// let ndofs = p * ne - ne + 1; 1004 /// 1005 /// // Vectors 1006 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1007 /// let mut qdata = ceed.vector(ne * q)?; 1008 /// qdata.set_value(0.0); 1009 /// let mut u = ceed.vector(ndofs)?; 1010 /// u.set_value(1.0); 1011 /// let mut v = ceed.vector(ndofs)?; 1012 /// v.set_value(0.0); 1013 /// 1014 /// // Restrictions 1015 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1016 /// for i in 0..ne { 1017 /// indx[2 * i + 0] = i as i32; 1018 /// indx[2 * i + 1] = (i + 1) as i32; 1019 /// } 1020 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1021 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1022 /// for i in 0..ne { 1023 /// indu[p * i + 0] = (2 * i) as i32; 1024 /// indu[p * i + 1] = (2 * i + 1) as i32; 1025 /// indu[p * i + 2] = (2 * i + 2) as i32; 1026 /// } 1027 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 1028 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1029 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1030 /// 1031 /// // Bases 1032 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1033 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 1034 /// 1035 /// // Build quadrature data 1036 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1037 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1038 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1039 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1040 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(0.0);
    /// op_mass.linear_assemble_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0);
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i];
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared op_core implementation.
        self.op_core.linear_assemble_diagonal(assembled)
    }

    /// Assemble the diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
1098 /// 1099 /// * `op` - Operator to assemble QFunction 1100 /// * `assembled` - Vector to store assembled Operator diagonal 1101 /// 1102 /// 1103 /// ``` 1104 /// # use libceed::prelude::*; 1105 /// # fn main() -> libceed::Result<()> { 1106 /// # let ceed = libceed::Ceed::default_init(); 1107 /// let ne = 4; 1108 /// let p = 3; 1109 /// let q = 4; 1110 /// let ndofs = p * ne - ne + 1; 1111 /// 1112 /// // Vectors 1113 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1114 /// let mut qdata = ceed.vector(ne * q)?; 1115 /// qdata.set_value(0.0); 1116 /// let mut u = ceed.vector(ndofs)?; 1117 /// u.set_value(1.0); 1118 /// let mut v = ceed.vector(ndofs)?; 1119 /// v.set_value(0.0); 1120 /// 1121 /// // Restrictions 1122 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1123 /// for i in 0..ne { 1124 /// indx[2 * i + 0] = i as i32; 1125 /// indx[2 * i + 1] = (i + 1) as i32; 1126 /// } 1127 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1128 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1129 /// for i in 0..ne { 1130 /// indu[p * i + 0] = (2 * i) as i32; 1131 /// indu[p * i + 1] = (2 * i + 1) as i32; 1132 /// indu[p * i + 2] = (2 * i + 2) as i32; 1133 /// } 1134 /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?; 1135 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1136 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1137 /// 1138 /// // Bases 1139 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1140 /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?; 1141 /// 1142 /// // Build quadrature data 1143 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1144 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1145 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1146 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(1.0);
    /// op_mass.linear_assemble_add_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0);
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i] + 1.0;
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared op_core implementation
        // (additive variant — sums into `assembled` rather than overwriting).
        self.op_core.linear_assemble_add_diagonal(assembled)
    }

    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
1206 /// 1207 /// * `op` - Operator to assemble QFunction 1208 /// * `assembled` - Vector to store assembled CeedOperator point block 1209 /// diagonal, provided in row-major form with an 1210 /// `ncomp * ncomp` block at each node. The dimensions of 1211 /// this vector are derived from the active vector for 1212 /// the CeedOperator. The array has shape 1213 /// `[nodes, component out, component in]`. 1214 /// 1215 /// ``` 1216 /// # use libceed::prelude::*; 1217 /// # fn main() -> libceed::Result<()> { 1218 /// # let ceed = libceed::Ceed::default_init(); 1219 /// let ne = 4; 1220 /// let p = 3; 1221 /// let q = 4; 1222 /// let ncomp = 2; 1223 /// let ndofs = p * ne - ne + 1; 1224 /// 1225 /// // Vectors 1226 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1227 /// let mut qdata = ceed.vector(ne * q)?; 1228 /// qdata.set_value(0.0); 1229 /// let mut u = ceed.vector(ncomp * ndofs)?; 1230 /// u.set_value(1.0); 1231 /// let mut v = ceed.vector(ncomp * ndofs)?; 1232 /// v.set_value(0.0); 1233 /// 1234 /// // Restrictions 1235 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1236 /// for i in 0..ne { 1237 /// indx[2 * i + 0] = i as i32; 1238 /// indx[2 * i + 1] = (i + 1) as i32; 1239 /// } 1240 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1241 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1242 /// for i in 0..ne { 1243 /// indu[p * i + 0] = (2 * i) as i32; 1244 /// indu[p * i + 1] = (2 * i + 1) as i32; 1245 /// indu[p * i + 2] = (2 * i + 2) as i32; 1246 /// } 1247 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1248 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1249 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1250 /// 1251 /// // Bases 1252 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1253 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1254 /// 1255 /// // Build quadrature data 
1256 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1257 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1258 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1259 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1260 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1261 /// .apply(&x, &mut qdata)?; 1262 /// 1263 /// // Mass operator 1264 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1265 /// // Number of quadrature points 1266 /// let q = qdata.len(); 1267 /// 1268 /// // Iterate over quadrature points 1269 /// for i in 0..q { 1270 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1271 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1272 /// } 1273 /// 1274 /// // Return clean error code 1275 /// 0 1276 /// }; 1277 /// 1278 /// let qf_mass = ceed 1279 /// .q_function_interior(1, Box::new(mass_2_comp))? 1280 /// .input("u", 2, EvalMode::Interp)? 1281 /// .input("qdata", 1, EvalMode::None)? 1282 /// .output("v", 2, EvalMode::Interp)?; 1283 /// 1284 /// let op_mass = ceed 1285 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1286 /// .field("u", &ru, &bu, VectorOpt::Active)? 1287 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(0.0);
    /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0);
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared op_core implementation.
        self.op_core.linear_assemble_point_block_diagonal(assembled)
    }

    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    /// composite Operators with single field sub-operators are supported.
    ///
    /// * `op` - Operator to assemble QFunction
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                 diagonal, provided in row-major form with an
    ///                 `ncomp * ncomp` block at each node.
The dimensions of 1350 /// this vector are derived from the active vector for 1351 /// the CeedOperator. The array has shape 1352 /// `[nodes, component out, component in]`. 1353 /// 1354 /// ``` 1355 /// # use libceed::prelude::*; 1356 /// # fn main() -> libceed::Result<()> { 1357 /// # let ceed = libceed::Ceed::default_init(); 1358 /// let ne = 4; 1359 /// let p = 3; 1360 /// let q = 4; 1361 /// let ncomp = 2; 1362 /// let ndofs = p * ne - ne + 1; 1363 /// 1364 /// // Vectors 1365 /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?; 1366 /// let mut qdata = ceed.vector(ne * q)?; 1367 /// qdata.set_value(0.0); 1368 /// let mut u = ceed.vector(ncomp * ndofs)?; 1369 /// u.set_value(1.0); 1370 /// let mut v = ceed.vector(ncomp * ndofs)?; 1371 /// v.set_value(0.0); 1372 /// 1373 /// // Restrictions 1374 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1375 /// for i in 0..ne { 1376 /// indx[2 * i + 0] = i as i32; 1377 /// indx[2 * i + 1] = (i + 1) as i32; 1378 /// } 1379 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1380 /// let mut indu: Vec<i32> = vec![0; p * ne]; 1381 /// for i in 0..ne { 1382 /// indu[p * i + 0] = (2 * i) as i32; 1383 /// indu[p * i + 1] = (2 * i + 1) as i32; 1384 /// indu[p * i + 2] = (2 * i + 2) as i32; 1385 /// } 1386 /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?; 1387 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1388 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1389 /// 1390 /// // Bases 1391 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1392 /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?; 1393 /// 1394 /// // Build quadrature data 1395 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1396 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1397 /// .field("dx", &rx, &bx, VectorOpt::Active)? 
1398 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1399 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1400 /// .apply(&x, &mut qdata)?; 1401 /// 1402 /// // Mass operator 1403 /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| { 1404 /// // Number of quadrature points 1405 /// let q = qdata.len(); 1406 /// 1407 /// // Iterate over quadrature points 1408 /// for i in 0..q { 1409 /// v[i + 0 * q] = u[i + 1 * q] * qdata[i]; 1410 /// v[i + 1 * q] = u[i + 0 * q] * qdata[i]; 1411 /// } 1412 /// 1413 /// // Return clean error code 1414 /// 0 1415 /// }; 1416 /// 1417 /// let qf_mass = ceed 1418 /// .q_function_interior(1, Box::new(mass_2_comp))? 1419 /// .input("u", 2, EvalMode::Interp)? 1420 /// .input("qdata", 1, EvalMode::None)? 1421 /// .output("v", 2, EvalMode::Interp)?; 1422 /// 1423 /// let op_mass = ceed 1424 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1425 /// .field("u", &ru, &bu, VectorOpt::Active)? 1426 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(1.0);
    /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0);
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared op_core implementation
        // (additive variant — sums into `assembled` rather than overwriting).
        self.op_core
            .linear_assemble_add_point_block_diagonal(assembled)
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator, creating the prolongation basis from the fine and
    /// coarse grid interpolation
    ///
    /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse` - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let
ne = 15; 1491 /// let p_coarse = 3; 1492 /// let p_fine = 5; 1493 /// let q = 6; 1494 /// let ndofs_coarse = p_coarse * ne - ne + 1; 1495 /// let ndofs_fine = p_fine * ne - ne + 1; 1496 /// 1497 /// // Vectors 1498 /// let x_array = (0..ne + 1) 1499 /// .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0) 1500 /// .collect::<Vec<Scalar>>(); 1501 /// let x = ceed.vector_from_slice(&x_array)?; 1502 /// let mut qdata = ceed.vector(ne * q)?; 1503 /// qdata.set_value(0.0); 1504 /// let mut u_coarse = ceed.vector(ndofs_coarse)?; 1505 /// u_coarse.set_value(1.0); 1506 /// let mut u_fine = ceed.vector(ndofs_fine)?; 1507 /// u_fine.set_value(1.0); 1508 /// let mut v_coarse = ceed.vector(ndofs_coarse)?; 1509 /// v_coarse.set_value(0.0); 1510 /// let mut v_fine = ceed.vector(ndofs_fine)?; 1511 /// v_fine.set_value(0.0); 1512 /// let mut multiplicity = ceed.vector(ndofs_fine)?; 1513 /// multiplicity.set_value(1.0); 1514 /// 1515 /// // Restrictions 1516 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 1517 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 1518 /// 1519 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 1520 /// for i in 0..ne { 1521 /// indx[2 * i + 0] = i as i32; 1522 /// indx[2 * i + 1] = (i + 1) as i32; 1523 /// } 1524 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1525 /// 1526 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1527 /// for i in 0..ne { 1528 /// for j in 0..p_coarse { 1529 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1530 /// } 1531 /// } 1532 /// let ru_coarse = ceed.elem_restriction( 1533 /// ne, 1534 /// p_coarse, 1535 /// 1, 1536 /// 1, 1537 /// ndofs_coarse, 1538 /// MemType::Host, 1539 /// &indu_coarse, 1540 /// )?; 1541 /// 1542 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1543 /// for i in 0..ne { 1544 /// for j in 0..p_fine { 1545 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1546 /// } 1547 /// } 1548 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 
1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1549 /// 1550 /// // Bases 1551 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1552 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1553 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1554 /// 1555 /// // Build quadrature data 1556 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1557 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1558 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1559 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1560 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1561 /// .apply(&x, &mut qdata)?; 1562 /// 1563 /// // Mass operator 1564 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1565 /// let op_mass_fine = ceed 1566 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1567 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1568 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let (op_mass_coarse, op_prolong, op_restrict) =
    ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level<'b>(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
    ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> {
        // Out-pointers filled by the C call, in this fixed order:
        // coarse operator, prolongation operator, restriction operator.
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreate(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        self.op_core.check_error(ierr)?;
        // Wrap the raw operators produced by the C call; `from_raw` presumably
        // takes ownership so they are released when the Operators drop —
        // NOTE(review): confirm against Operator::from_raw's contract.
        let op_coarse = Operator::from_raw(ptr_coarse)?;
        let op_prolong = Operator::from_raw(ptr_prolong)?;
        let op_restrict = Operator::from_raw(ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator with a tensor basis for the active basis
    ///
    /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse` - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    /// * `interp_c_to_f` - Matrix for coarse to fine
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1]
= (i + 1) as i32; 1683 /// } 1684 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 1685 /// 1686 /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne]; 1687 /// for i in 0..ne { 1688 /// for j in 0..p_coarse { 1689 /// indu_coarse[p_coarse * i + j] = (i + j) as i32; 1690 /// } 1691 /// } 1692 /// let ru_coarse = ceed.elem_restriction( 1693 /// ne, 1694 /// p_coarse, 1695 /// 1, 1696 /// 1, 1697 /// ndofs_coarse, 1698 /// MemType::Host, 1699 /// &indu_coarse, 1700 /// )?; 1701 /// 1702 /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne]; 1703 /// for i in 0..ne { 1704 /// for j in 0..p_fine { 1705 /// indu_fine[p_fine * i + j] = (i + j) as i32; 1706 /// } 1707 /// } 1708 /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?; 1709 /// 1710 /// // Bases 1711 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 1712 /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?; 1713 /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?; 1714 /// 1715 /// // Build quadrature data 1716 /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?; 1717 /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)? 1718 /// .field("dx", &rx, &bx, VectorOpt::Active)? 1719 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 1720 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)? 1721 /// .apply(&x, &mut qdata)?; 1722 /// 1723 /// // Mass operator 1724 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 1725 /// let op_mass_fine = ceed 1726 /// .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)? 1727 /// .field("u", &ru_fine, &bu_fine, VectorOpt::Active)? 1728 /// .field("qdata", &rq, BasisOpt::Collocated, &qdata)? 
1729 /// .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?; 1730 /// 1731 /// // Multigrid setup 1732 /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine]; 1733 /// { 1734 /// let mut coarse = ceed.vector(p_coarse)?; 1735 /// let mut fine = ceed.vector(p_fine)?; 1736 /// let basis_c_to_f = 1737 /// ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?; 1738 /// for i in 0..p_coarse { 1739 /// coarse.set_value(0.0); 1740 /// { 1741 /// let mut array = coarse.view_mut()?; 1742 /// array[i] = 1.; 1743 /// } 1744 /// basis_c_to_f.apply( 1745 /// 1, 1746 /// TransposeMode::NoTranspose, 1747 /// EvalMode::Interp, 1748 /// &coarse, 1749 /// &mut fine, 1750 /// )?; 1751 /// let array = fine.view()?; 1752 /// for j in 0..p_fine { 1753 /// interp_c_to_f[j * p_coarse + i] = array[j]; 1754 /// } 1755 /// } 1756 /// } 1757 /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1( 1758 /// &multiplicity, 1759 /// &ru_coarse, 1760 /// &bu_coarse, 1761 /// &interp_c_to_f, 1762 /// )?; 1763 /// 1764 /// // Coarse problem 1765 /// u_coarse.set_value(1.0); 1766 /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?; 1767 /// 1768 /// // Check 1769 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1770 /// assert!( 1771 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1772 /// "Incorrect interval length computed" 1773 /// ); 1774 /// 1775 /// // Prolong 1776 /// op_prolong.apply(&u_coarse, &mut u_fine)?; 1777 /// 1778 /// // Fine problem 1779 /// op_mass_fine.apply(&u_fine, &mut v_fine)?; 1780 /// 1781 /// // Check 1782 /// let sum: Scalar = v_fine.view()?.iter().sum(); 1783 /// assert!( 1784 /// (sum - 2.0).abs() < 10.0 * libceed::EPSILON, 1785 /// "Incorrect interval length computed" 1786 /// ); 1787 /// 1788 /// // Restrict 1789 /// op_restrict.apply(&v_fine, &mut v_coarse)?; 1790 /// 1791 /// // Check 1792 /// let sum: Scalar = v_coarse.view()?.iter().sum(); 1793 /// assert!( 1794 /// (sum - 
2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level_tensor_H1<'b>(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
        // Slice instead of `&Vec` so any contiguous storage can be passed;
        // callers passing `&Vec<Scalar>` still work via deref coercion.
        interp_c_to_f: &[Scalar],
    ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> {
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        // The C library creates all three operators in one call
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                interp_c_to_f.as_ptr(),
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        self.op_core.check_error(ierr)?;
        let op_coarse = Operator::from_raw(ptr_coarse)?;
        let op_prolong = Operator::from_raw(ptr_prolong)?;
        let op_restrict = Operator::from_raw(ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }

    /// Create a multigrid coarse Operator and level transfer Operators for a
    /// given Operator with a non-tensor basis for the active basis
    ///
    /// * `p_mult_fine` - Lvector multiplicity in parallel gather/scatter
    /// * `rstr_coarse` - Coarse grid restriction
    /// * `basis_coarse` - Coarse grid active vector basis
    /// * `interp_c_to_f` - Matrix for coarse to fine
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 15;
    /// let p_coarse = 3;
    /// let p_fine = 5;
    /// let q = 6;
    /// let ndofs_coarse = p_coarse * ne - ne + 1;
    /// let ndofs_fine = p_fine * ne - ne + 1;
    ///
    /// // Vectors
    /// let x_array = (0..ne + 1)
    ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
    ///     .collect::<Vec<Scalar>>();
    /// let x = ceed.vector_from_slice(&x_array)?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
    /// u_coarse.set_value(1.0);
    /// let mut u_fine = ceed.vector(ndofs_fine)?;
    /// u_fine.set_value(1.0);
    /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
    /// v_coarse.set_value(0.0);
    /// let mut v_fine = ceed.vector(ndofs_fine)?;
    /// v_fine.set_value(0.0);
    /// let mut multiplicity = ceed.vector(ndofs_fine)?;
    /// multiplicity.set_value(1.0);
    ///
    /// // Restrictions
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    ///
    /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_coarse {
    ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_coarse = ceed.elem_restriction(
    ///     ne,
    ///     p_coarse,
    ///     1,
    ///     1,
    ///     ndofs_coarse,
    ///     MemType::Host,
    ///     &indu_coarse,
    /// )?;
    ///
    /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
    /// for i in 0..ne {
    ///     for j in 0..p_fine {
    ///         indu_fine[p_fine * i + j] = (i + j) as i32;
    ///     }
    /// }
    /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
    /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass_fine = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
    ///
    /// // Multigrid setup
    /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
    /// {
    ///     let mut coarse = ceed.vector(p_coarse)?;
    ///     let mut fine = ceed.vector(p_fine)?;
    ///     let basis_c_to_f =
    ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
    ///     for i in 0..p_coarse {
    ///         coarse.set_value(0.0);
    ///         {
    ///             let mut array = coarse.view_mut()?;
    ///             array[i] = 1.;
    ///         }
    ///         basis_c_to_f.apply(
    ///             1,
    ///             TransposeMode::NoTranspose,
    ///             EvalMode::Interp,
    ///             &coarse,
    ///             &mut fine,
    ///         )?;
    ///         let array = fine.view()?;
    ///         for j in 0..p_fine {
    ///             interp_c_to_f[j * p_coarse + i] = array[j];
    ///         }
    ///     }
    /// }
    /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1(
    ///     &multiplicity,
    ///     &ru_coarse,
    ///     &bu_coarse,
    ///     &interp_c_to_f,
    /// )?;
    ///
    /// // Coarse problem
    /// u_coarse.set_value(1.0);
    /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Prolong
    /// op_prolong.apply(&u_coarse, &mut u_fine)?;
    ///
    /// // Fine problem
    /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
    ///
    /// // Check
    /// let sum: Scalar = v_fine.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    ///
    /// // Restrict
    /// op_restrict.apply(&v_fine, &mut v_coarse)?;
    ///
    /// // Check
    /// let sum: Scalar = v_coarse.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn create_multigrid_level_H1<'b>(
        &self,
        p_mult_fine: &Vector,
        rstr_coarse: &ElemRestriction,
        basis_coarse: &Basis,
        // Renamed from `interpCtoF` to match the doc bullet above and Rust
        // snake_case convention; parameter names are not part of the call ABI.
        interp_c_to_f: &[Scalar],
    ) -> crate::Result<(Operator<'b>, Operator<'b>, Operator<'b>)> {
        let mut ptr_coarse = std::ptr::null_mut();
        let mut ptr_prolong = std::ptr::null_mut();
        let mut ptr_restrict = std::ptr::null_mut();
        // The C library creates all three operators in one call
        let ierr = unsafe {
            bind_ceed::CeedOperatorMultigridLevelCreateH1(
                self.op_core.ptr,
                p_mult_fine.ptr,
                rstr_coarse.ptr,
                basis_coarse.ptr,
                interp_c_to_f.as_ptr(),
                &mut ptr_coarse,
                &mut ptr_prolong,
                &mut ptr_restrict,
            )
        };
        self.op_core.check_error(ierr)?;
        let op_coarse = Operator::from_raw(ptr_coarse)?;
        let op_prolong = Operator::from_raw(ptr_prolong)?;
        let op_restrict = Operator::from_raw(ptr_restrict)?;
        Ok((op_coarse, op_prolong, op_restrict))
    }
}

// -----------------------------------------------------------------------------
// Composite Operator
//
// -----------------------------------------------------------------------------
impl<'a> CompositeOperator<'a> {
    // Constructor
    /// Create a CompositeOperator, which applies the sum of its sub-Operators
    ///
    /// * `ceed` - Ceed context the operator is created on
    pub fn create(ceed: &crate::Ceed) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
        ceed.check_error(ierr)?;
        // Wrap the raw pointer; OperatorCore's Drop handles destruction
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }

    /// Apply Operator to a vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_mass)?;
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_diff)?;
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("du", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
    ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let op_composite = ceed
    ///     .composite_operator()?
    ///     .sub_operator(&op_mass)?
    ///     .sub_operator(&op_diff)?;
    ///
    /// v.set_value(0.0);
    /// op_composite.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegates to the shared core; applies every sub-Operator and sums
        self.op_core.apply(input, output)
    }

    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input` - Input Vector
    /// * `output` - Output Vector
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata_mass = ceed.vector(ne * q)?;
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q)?;
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_mass)?;
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata_diff)?;
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("du", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
    ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
    ///
    /// let op_composite = ceed
    ///     .composite_operator()?
    ///     .sub_operator(&op_mass)?
2214 /// .sub_operator(&op_diff)?; 2215 /// 2216 /// v.set_value(1.0); 2217 /// op_composite.apply_add(&u, &mut v)?; 2218 /// 2219 /// // Check 2220 /// let sum: Scalar = v.view()?.iter().sum(); 2221 /// assert!( 2222 /// (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON, 2223 /// "Incorrect interval length computed" 2224 /// ); 2225 /// # Ok(()) 2226 /// # } 2227 /// ``` 2228 pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> { 2229 self.op_core.apply_add(input, output) 2230 } 2231 2232 /// Add a sub-Operator to a Composite Operator 2233 /// 2234 /// * `subop` - Sub-Operator 2235 /// 2236 /// ``` 2237 /// # use libceed::prelude::*; 2238 /// # fn main() -> libceed::Result<()> { 2239 /// # let ceed = libceed::Ceed::default_init(); 2240 /// let mut op = ceed.composite_operator()?; 2241 /// 2242 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?; 2243 /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?; 2244 /// op = op.sub_operator(&op_mass)?; 2245 /// 2246 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?; 2247 /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?; 2248 /// op = op.sub_operator(&op_diff)?; 2249 /// # Ok(()) 2250 /// # } 2251 /// ``` 2252 #[allow(unused_mut)] 2253 pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> { 2254 let ierr = 2255 unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) }; 2256 self.op_core.check_error(ierr)?; 2257 Ok(self) 2258 } 2259 2260 /// Check if CompositeOperator is setup correctly 2261 /// 2262 /// ``` 2263 /// # use libceed::prelude::*; 2264 /// # fn main() -> libceed::Result<()> { 2265 /// # let ceed = libceed::Ceed::default_init(); 2266 /// let ne = 4; 2267 /// let p = 3; 2268 /// let q = 4; 2269 /// let ndofs = p * ne - ne + 1; 2270 /// 2271 /// // Restrictions 2272 /// let mut indx: Vec<i32> = vec![0; 2 * ne]; 2273 /// for i in 
0..ne { 2274 /// indx[2 * i + 0] = i as i32; 2275 /// indx[2 * i + 1] = (i + 1) as i32; 2276 /// } 2277 /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?; 2278 /// let strides: [i32; 3] = [1, q as i32, q as i32]; 2279 /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?; 2280 /// 2281 /// // Bases 2282 /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?; 2283 /// 2284 /// // Build quadrature data 2285 /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?; 2286 /// let op_build_mass = ceed 2287 /// .operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)? 2288 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2289 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2290 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2291 /// 2292 /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?; 2293 /// let op_build_diff = ceed 2294 /// .operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)? 2295 /// .field("dx", &rx, &bx, VectorOpt::Active)? 2296 /// .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)? 2297 /// .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?; 2298 /// 2299 /// let op_build = ceed 2300 /// .composite_operator()? 2301 /// .sub_operator(&op_build_mass)? 2302 /// .sub_operator(&op_build_diff)?; 2303 /// 2304 /// // Check 2305 /// assert!(op_build.check().is_ok(), "Operator setup incorrectly"); 2306 /// # Ok(()) 2307 /// # } 2308 /// ``` 2309 pub fn check(self) -> crate::Result<Self> { 2310 self.op_core.check()?; 2311 Ok(self) 2312 } 2313 2314 /// Assemble the diagonal of a square linear Operator 2315 /// 2316 /// This overwrites a Vector with the diagonal of a linear Operator. 2317 /// 2318 /// Note: Currently only non-composite Operators with a single field and 2319 /// composite Operators with single field sub-operators are supported. 
2320 /// 2321 /// * `op` - Operator to assemble QFunction 2322 /// * `assembled` - Vector to store assembled Operator diagonal 2323 pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2324 self.op_core.linear_assemble_add_diagonal(assembled) 2325 } 2326 2327 /// Assemble the point block diagonal of a square linear Operator 2328 /// 2329 /// This overwrites a Vector with the point block diagonal of a linear 2330 /// Operator. 2331 /// 2332 /// Note: Currently only non-composite Operators with a single field and 2333 /// composite Operators with single field sub-operators are supported. 2334 /// 2335 /// * `op` - Operator to assemble QFunction 2336 /// * `assembled` - Vector to store assembled Operator diagonal 2337 pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> { 2338 self.op_core.linear_assemble_add_diagonal(assembled) 2339 } 2340 2341 /// Assemble the diagonal of a square linear Operator 2342 /// 2343 /// This overwrites a Vector with the diagonal of a linear Operator. 2344 /// 2345 /// Note: Currently only non-composite Operators with a single field and 2346 /// composite Operators with single field sub-operators are supported. 2347 /// 2348 /// * `op` - Operator to assemble QFunction 2349 /// * `assembled` - Vector to store assembled CeedOperator point block 2350 /// diagonal, provided in row-major form with an 2351 /// `ncomp * ncomp` block at each node. The dimensions of 2352 /// this vector are derived from the active vector for 2353 /// the CeedOperator. The array has shape 2354 /// `[nodes, component out, component in]`. 2355 pub fn linear_assemble_point_block_diagonal( 2356 &self, 2357 assembled: &mut Vector, 2358 ) -> crate::Result<i32> { 2359 self.op_core.linear_assemble_add_diagonal(assembled) 2360 } 2361 2362 /// Assemble the diagonal of a square linear Operator 2363 /// 2364 /// This sums into a Vector with the diagonal of a linear Operator. 
2365 /// 2366 /// Note: Currently only non-composite Operators with a single field and 2367 /// composite Operators with single field sub-operators are supported. 2368 /// 2369 /// * `op` - Operator to assemble QFunction 2370 /// * `assembled` - Vector to store assembled CeedOperator point block 2371 /// diagonal, provided in row-major form with an 2372 /// `ncomp * ncomp` block at each node. The dimensions of 2373 /// this vector are derived from the active vector for 2374 /// the CeedOperator. The array has shape 2375 /// `[nodes, component out, component in]`. 2376 pub fn linear_assemble_add_point_block_diagonal( 2377 &self, 2378 assembled: &mut Vector, 2379 ) -> crate::Result<i32> { 2380 self.op_core.linear_assemble_add_diagonal(assembled) 2381 } 2382 } 2383 2384 // ----------------------------------------------------------------------------- 2385