xref: /libCEED/rust/libceed/src/operator.rs (revision d3677ae8c8463ed424a8c5269c03811212785cd9)
1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
3 // reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
15 // testbed platforms, in support of the nation's exascale computing imperative
16 
17 //! A Ceed Operator defines the finite/spectral element operator associated to a
18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions,
19 //! Ceed Bases, and Ceed QFunctions.
20 
21 use crate::prelude::*;
22 
23 // -----------------------------------------------------------------------------
24 // CeedOperator Field context wrapper
25 // -----------------------------------------------------------------------------
/// Read-only view of one named input/output field of a `CeedOperator`.
///
/// The wrapper does not own the underlying handle (no `Drop` impl in this
/// file frees it); the lifetime parameter ties the view to the Operator it
/// was obtained from.
#[derive(Debug)]
pub struct OperatorField<'a> {
    // Raw libCEED handle for the field; owned by the parent Operator.
    pub(crate) ptr: bind_ceed::CeedOperatorField,
    // Zero-sized marker carrying the borrow lifetime `'a`.
    _lifeline: PhantomData<&'a ()>,
}
31 
32 // -----------------------------------------------------------------------------
33 // Implementations
34 // -----------------------------------------------------------------------------
impl<'a> OperatorField<'a> {
    /// Get the name of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert_eq!(inputs[0].name(), "dx", "Incorrect input name");
    /// assert_eq!(inputs[1].name(), "weights", "Incorrect input name");
    /// # Ok(())
    /// # }
    /// ```
    pub fn name(&self) -> &str {
        let mut name_ptr: *mut std::os::raw::c_char = std::ptr::null_mut();
        // SAFETY: self.ptr is a valid CeedOperatorField handle; the C API
        // fills name_ptr with a pointer to the field's name string.
        unsafe {
            bind_ceed::CeedOperatorFieldGetName(self.ptr, &mut name_ptr);
        }
        // NOTE(review): assumes the returned string is NUL-terminated, valid
        // UTF-8, and lives at least as long as this field — per the libCEED
        // C API contract; `unwrap` panics on non-UTF-8 names.
        unsafe { CStr::from_ptr(name_ptr) }.to_str().unwrap()
    }

    /// Get the ElemRestriction of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(
    ///     inputs[0].elem_restriction().is_some(),
    ///     "Incorrect field ElemRestriction"
    /// );
    /// if let ElemRestrictionOpt::Some(r) = inputs[0].elem_restriction() {
    ///     assert_eq!(
    ///         r.num_elements(),
    ///         ne,
    ///         "Incorrect field ElemRestriction number of elements"
    ///     );
    /// }
    ///
    /// assert!(
    ///     inputs[1].elem_restriction().is_none(),
    ///     "Incorrect field ElemRestriction"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn elem_restriction(&self) -> ElemRestrictionOpt {
        let mut ptr = std::ptr::null_mut();
        // SAFETY: self.ptr is a valid handle; the C API writes the field's
        // restriction handle (possibly CEED_ELEMRESTRICTION_NONE) into ptr.
        unsafe {
            bind_ceed::CeedOperatorFieldGetElemRestriction(self.ptr, &mut ptr);
        }
        if ptr == unsafe { bind_ceed::CEED_ELEMRESTRICTION_NONE } {
            ElemRestrictionOpt::None
        } else {
            // NOTE(review): this reinterprets the address of the *local*
            // variable `ptr` as a `&crate::ElemRestriction`, so the returned
            // reference points into this call's stack frame and relies on the
            // wrapper having the same layout as the raw handle. This looks
            // unsound (dangling once the frame is reused) — confirm upstream.
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedElemRestriction as *const crate::ElemRestriction,
                    1 as usize,
                )
            };
            ElemRestrictionOpt::Some(&slice[0])
        }
    }

    /// Get the Basis of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(inputs[0].basis().is_some(), "Incorrect field Basis");
    /// if let BasisOpt::Some(b) = inputs[0].basis() {
    ///     assert_eq!(
    ///         b.num_quadrature_points(),
    ///         q,
    ///         "Incorrect field Basis number of quadrature points"
    ///     );
    /// }
    /// assert!(inputs[1].basis().is_some(), "Incorrect field Basis");
    /// if let BasisOpt::Some(b) = inputs[1].basis() {
    ///     assert_eq!(
    ///         b.num_quadrature_points(),
    ///         q,
    ///         "Incorrect field Basis number of quadrature points"
    ///     );
    /// }
    ///
    /// let outputs = op.outputs()?;
    ///
    /// assert!(outputs[0].basis().is_collocated(), "Incorrect field Basis");
    /// # Ok(())
    /// # }
    /// ```
    pub fn basis(&self) -> BasisOpt {
        let mut ptr = std::ptr::null_mut();
        // SAFETY: self.ptr is a valid handle; the C API writes the field's
        // basis handle (possibly CEED_BASIS_COLLOCATED) into ptr.
        unsafe {
            bind_ceed::CeedOperatorFieldGetBasis(self.ptr, &mut ptr);
        }
        if ptr == unsafe { bind_ceed::CEED_BASIS_COLLOCATED } {
            BasisOpt::Collocated
        } else {
            // NOTE(review): same stack-local reinterpretation as in
            // `elem_restriction` above — the returned reference aliases the
            // local `ptr`; confirm soundness upstream.
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedBasis as *const crate::Basis,
                    1 as usize,
                )
            };
            BasisOpt::Some(&slice[0])
        }
    }

    /// Get the Vector of an OperatorField
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert!(inputs[0].vector().is_active(), "Incorrect field Vector");
    /// assert!(inputs[1].vector().is_none(), "Incorrect field Vector");
    /// # Ok(())
    /// # }
    /// ```
    pub fn vector(&self) -> VectorOpt {
        let mut ptr = std::ptr::null_mut();
        // SAFETY: self.ptr is a valid handle; the C API writes the field's
        // vector handle (possibly CEED_VECTOR_ACTIVE/CEED_VECTOR_NONE).
        unsafe {
            bind_ceed::CeedOperatorFieldGetVector(self.ptr, &mut ptr);
        }
        if ptr == unsafe { bind_ceed::CEED_VECTOR_ACTIVE } {
            VectorOpt::Active
        } else if ptr == unsafe { bind_ceed::CEED_VECTOR_NONE } {
            VectorOpt::None
        } else {
            // NOTE(review): same stack-local reinterpretation as in
            // `elem_restriction` above — confirm soundness upstream.
            let slice = unsafe {
                std::slice::from_raw_parts(
                    &ptr as *const bind_ceed::CeedVector as *const crate::Vector,
                    1 as usize,
                )
            };
            VectorOpt::Some(&slice[0])
        }
    }
}
276 
277 // -----------------------------------------------------------------------------
278 // CeedOperator context wrapper
279 // -----------------------------------------------------------------------------
// Shared core for Operator and CompositeOperator: owns the raw CeedOperator
// handle and releases it when dropped (see the Drop impl in this file).
#[derive(Debug)]
pub(crate) struct OperatorCore<'a> {
    // Raw libCEED operator handle.
    ptr: bind_ceed::CeedOperator,
    // Zero-sized marker carrying the borrow lifetime `'a`.
    _lifeline: PhantomData<&'a ()>,
}
285 
/// A Ceed Operator: applies the action of a QFunction over the fields
/// (restrictions, bases, vectors) registered with it.
#[derive(Debug)]
pub struct Operator<'a> {
    // Shared handle wrapper; destruction is handled by OperatorCore's Drop.
    op_core: OperatorCore<'a>,
}
290 
/// A composite Ceed Operator built from the sum of several sub-Operators.
#[derive(Debug)]
pub struct CompositeOperator<'a> {
    // Shared handle wrapper; destruction is handled by OperatorCore's Drop.
    op_core: OperatorCore<'a>,
}
295 
296 // -----------------------------------------------------------------------------
297 // Destructor
298 // -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    fn drop(&mut self) {
        // SAFETY: `self.ptr` is the handle this wrapper was constructed with
        // and is not used after drop; CeedOperatorDestroy releases it and
        // takes `&mut ptr` so it can null the handle.
        unsafe {
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}
306 
307 // -----------------------------------------------------------------------------
308 // Display
309 // -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    /// Render the operator by letting `CeedOperatorView` write into an
    /// in-memory C stream, then formatting the captured text.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        // SAFETY: open_memstream hands back a FILE*; after fclose, `ptr`
        // points to a NUL-terminated buffer of the written text.
        // NOTE(review): CString::from_raw takes ownership of a buffer that
        // was malloc'd by the C library and will free it with Rust's
        // allocator — sound only where that allocator is the system
        // malloc/free; confirm upstream.
        let cstring = unsafe {
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            CString::from_raw(ptr)
        };
        cstring.to_string_lossy().fmt(f)
    }
}
323 
324 /// View an Operator
325 ///
326 /// ```
327 /// # use libceed::prelude::*;
328 /// # fn main() -> libceed::Result<()> {
329 /// # let ceed = libceed::Ceed::default_init();
330 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
331 ///
332 /// // Operator field arguments
333 /// let ne = 3;
334 /// let q = 4 as usize;
335 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
336 /// for i in 0..ne {
337 ///     ind[2 * i + 0] = i as i32;
338 ///     ind[2 * i + 1] = (i + 1) as i32;
339 /// }
340 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
341 /// let strides: [i32; 3] = [1, q as i32, q as i32];
342 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
343 ///
344 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
345 ///
346 /// // Operator fields
347 /// let op = ceed
348 ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
349 ///     .field("dx", &r, &b, VectorOpt::Active)?
350 ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
351 ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
352 ///
353 /// println!("{}", op);
354 /// # Ok(())
355 /// # }
356 /// ```
357 impl<'a> fmt::Display for Operator<'a> {
358     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
359         self.op_core.fmt(f)
360     }
361 }
362 
363 /// View a composite Operator
364 ///
365 /// ```
366 /// # use libceed::prelude::*;
367 /// # fn main() -> libceed::Result<()> {
368 /// # let ceed = libceed::Ceed::default_init();
369 ///
370 /// // Sub operator field arguments
371 /// let ne = 3;
372 /// let q = 4 as usize;
373 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
374 /// for i in 0..ne {
375 ///     ind[2 * i + 0] = i as i32;
376 ///     ind[2 * i + 1] = (i + 1) as i32;
377 /// }
378 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
379 /// let strides: [i32; 3] = [1, q as i32, q as i32];
380 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
381 ///
382 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
383 ///
384 /// let qdata_mass = ceed.vector(q * ne)?;
385 /// let qdata_diff = ceed.vector(q * ne)?;
386 ///
387 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
388 /// let op_mass = ceed
389 ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
390 ///     .field("u", &r, &b, VectorOpt::Active)?
391 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
392 ///     .field("v", &r, &b, VectorOpt::Active)?;
393 ///
394 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
395 /// let op_diff = ceed
396 ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
397 ///     .field("du", &r, &b, VectorOpt::Active)?
398 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
399 ///     .field("dv", &r, &b, VectorOpt::Active)?;
400 ///
401 /// let op = ceed
402 ///     .composite_operator()?
403 ///     .sub_operator(&op_mass)?
404 ///     .sub_operator(&op_diff)?;
405 ///
406 /// println!("{}", op);
407 /// # Ok(())
408 /// # }
409 /// ```
410 impl<'a> fmt::Display for CompositeOperator<'a> {
411     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
412         self.op_core.fmt(f)
413     }
414 }
415 
416 // -----------------------------------------------------------------------------
417 // Core functionality
418 // -----------------------------------------------------------------------------
impl<'a> OperatorCore<'a> {
    // Error handling
    /// Translate a libCEED return code into a crate `Result`, retrieving the
    /// error context from the Ceed that owns this operator.
    #[doc(hidden)]
    fn check_error(&self, ierr: i32) -> crate::Result<i32> {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr);
        }
        crate::check_error(ptr, ierr)
    }

    // Common implementations
    /// Check that the operator is ready to use, via `CeedOperatorCheckReady`.
    pub fn check(&self) -> crate::Result<i32> {
        let ierr = unsafe { bind_ceed::CeedOperatorCheckReady(self.ptr) };
        self.check_error(ierr)
    }

    /// Apply the operator to `input`, writing the result to `output`.
    /// Uses `CEED_REQUEST_IMMEDIATE`, so the call completes before returning.
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApply(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Apply the operator to `input`, adding the result to `output`.
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApplyAdd(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator into `assembled`.
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator, adding into `assembled`.
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point-block diagonal of the operator into `assembled`.
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point-block diagonal of the operator, adding into
    /// `assembled`.
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }
}
510 
511 // -----------------------------------------------------------------------------
512 // Operator
513 // -----------------------------------------------------------------------------
514 impl<'a> Operator<'a> {
    // Constructor
    /// Create an Operator wrapping a new `CeedOperator`.
    ///
    /// * `ceed` - Ceed context the operator is created on
    /// * `qf`   - QFunction defining the operator action
    /// * `dqf`  - optional QFunction (passed straight to `CeedOperatorCreate`;
    ///            presumably the derivative — see the libCEED C docs)
    /// * `dqfT` - optional QFunction (presumably the transpose derivative)
    pub fn create<'b>(
        ceed: &'a crate::Ceed,
        qf: impl Into<QFunctionOpt<'b>>,
        dqf: impl Into<QFunctionOpt<'b>>,
        dqfT: impl Into<QFunctionOpt<'b>>,
    ) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorCreate(
                ceed.ptr,
                qf.into().to_raw(),
                dqf.into().to_raw(),
                dqfT.into().to_raw(),
                &mut ptr,
            )
        };
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }
540 
541     fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
542         Ok(Self {
543             op_core: OperatorCore {
544                 ptr,
545                 _lifeline: PhantomData,
546             },
547         })
548     }
549 
    /// Apply Operator to a vector
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector
    ///
    /// The operator is applied with `CEED_REQUEST_IMMEDIATE` (see
    /// `OperatorCore::apply`), so the call completes before returning.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// v.set_value(0.0);
    /// op_mass.apply(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        self.op_core.apply(input, output)
    }
624 
    /// Apply Operator to a vector and add result to output vector
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector; the operator's result is accumulated into
    ///              the existing values rather than overwriting them
    ///
    /// The operator is applied with `CEED_REQUEST_IMMEDIATE` (see
    /// `OperatorCore::apply_add`), so the call completes before returning.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0);
    /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
    /// let mut v = ceed.vector(ndofs)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// v.set_value(1.0);
    /// op_mass.apply_add(&u, &mut v)?;
    ///
    /// // Check
    /// let sum: Scalar = v.view()?.iter().sum();
    /// assert!(
    ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
    ///     "Incorrect interval length computed and added"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        self.op_core.apply_add(input, output)
    }
698 
699     /// Provide a field to a Operator for use by its QFunction
700     ///
701     /// * `fieldname` - Name of the field (to be matched with the name used by
702     ///                   the QFunction)
703     /// * `r`         - ElemRestriction
704     /// * `b`         - Basis in which the field resides or `BasisCollocated` if
705     ///                   collocated with quadrature points
706     /// * `v`         - Vector to be used by Operator or `VectorActive` if field
707     ///                   is active or `VectorNone` if using `Weight` with the
708     ///                   QFunction
709     ///
710     ///
711     /// ```
712     /// # use libceed::prelude::*;
713     /// # fn main() -> libceed::Result<()> {
714     /// # let ceed = libceed::Ceed::default_init();
715     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
716     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
717     ///
718     /// // Operator field arguments
719     /// let ne = 3;
720     /// let q = 4;
721     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
722     /// for i in 0..ne {
723     ///     ind[2 * i + 0] = i as i32;
724     ///     ind[2 * i + 1] = (i + 1) as i32;
725     /// }
726     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
727     ///
728     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
729     ///
730     /// // Operator field
731     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
732     /// # Ok(())
733     /// # }
734     /// ```
735     #[allow(unused_mut)]
736     pub fn field<'b>(
737         mut self,
738         fieldname: &str,
739         r: impl Into<ElemRestrictionOpt<'b>>,
740         b: impl Into<BasisOpt<'b>>,
741         v: impl Into<VectorOpt<'b>>,
742     ) -> crate::Result<Self> {
743         let fieldname = CString::new(fieldname).expect("CString::new failed");
744         let fieldname = fieldname.as_ptr() as *const i8;
745         let ierr = unsafe {
746             bind_ceed::CeedOperatorSetField(
747                 self.op_core.ptr,
748                 fieldname,
749                 r.into().to_raw(),
750                 b.into().to_raw(),
751                 v.into().to_raw(),
752             )
753         };
754         self.op_core.check_error(ierr)?;
755         Ok(self)
756     }
757 
    /// Get a slice of Operator inputs
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let inputs = op.inputs()?;
    ///
    /// assert_eq!(inputs.len(), 2, "Incorrect inputs array");
    /// # Ok(())
    /// # }
    /// ```
    pub fn inputs(&self) -> crate::Result<&[crate::OperatorField]> {
        // Get array of raw C pointers for inputs
        let mut num_inputs = 0;
        let mut inputs_ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorGetFields(
                self.op_core.ptr,
                &mut num_inputs,
                &mut inputs_ptr,
                // Output count/array are not needed here; the C API accepts
                // NULL for unwanted out-parameters.
                std::ptr::null_mut() as *mut bind_ceed::CeedInt,
                std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField,
            )
        };
        self.op_core.check_error(ierr)?;
        // Convert raw C pointers to fixed length slice
        // NOTE(review): this reinterprets the C array of CeedOperatorField
        // handles as `&[crate::OperatorField]`; it relies on the wrapper
        // having the same layout as the raw handle (PhantomData is
        // zero-sized, but the struct is not #[repr(transparent)]) and on the
        // array outliving `&self` — confirm upstream.
        let inputs_slice = unsafe {
            std::slice::from_raw_parts(
                inputs_ptr as *const crate::OperatorField,
                num_inputs as usize,
            )
        };
        Ok(inputs_slice)
    }
816 
    /// Get a slice of Operator outputs
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
    ///
    /// // Operator field arguments
    /// let ne = 3;
    /// let q = 4 as usize;
    /// let mut ind: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     ind[2 * i + 0] = i as i32;
    ///     ind[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
    ///
    /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    ///
    /// // Operator fields
    /// let op = ceed
    ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &r, &b, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
    ///
    /// let outputs = op.outputs()?;
    ///
    /// assert_eq!(outputs.len(), 1, "Incorrect outputs array");
    /// # Ok(())
    /// # }
    /// ```
    pub fn outputs(&self) -> crate::Result<&[crate::OperatorField]> {
        // Get array of raw C pointers for outputs; the input-field count and
        // array arguments are passed as null because only outputs are requested.
        let mut num_outputs = 0;
        let mut outputs_ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorGetFields(
                self.op_core.ptr,
                std::ptr::null_mut() as *mut bind_ceed::CeedInt,
                std::ptr::null_mut() as *mut *mut bind_ceed::CeedOperatorField,
                &mut num_outputs,
                &mut outputs_ptr,
            )
        };
        // Propagate any nonzero libCEED error code as a Rust error
        self.op_core.check_error(ierr)?;
        // Convert raw C pointers to fixed length slice
        // SAFETY: libCEED owns the returned field array; the slice borrows it
        // for no longer than `&self` lives. NOTE(review): the cast assumes
        // `crate::OperatorField` has the same layout as a raw
        // `bind_ceed::CeedOperatorField` (its only non-zero-sized member is
        // that pointer, but no `repr` guarantee is visible here) — confirm.
        let outputs_slice = unsafe {
            std::slice::from_raw_parts(
                outputs_ptr as *const crate::OperatorField,
                num_outputs as usize,
            )
        };
        Ok(outputs_slice)
    }
875 
876     /// Check if Operator is setup correctly
877     ///
878     /// ```
879     /// # use libceed::prelude::*;
880     /// # fn main() -> libceed::Result<()> {
881     /// # let ceed = libceed::Ceed::default_init();
882     /// let ne = 4;
883     /// let p = 3;
884     /// let q = 4;
885     /// let ndofs = p * ne - ne + 1;
886     ///
887     /// // Restrictions
888     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
889     /// for i in 0..ne {
890     ///     indx[2 * i + 0] = i as i32;
891     ///     indx[2 * i + 1] = (i + 1) as i32;
892     /// }
893     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
894     /// let strides: [i32; 3] = [1, q as i32, q as i32];
895     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
896     ///
897     /// // Bases
898     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
899     ///
900     /// // Build quadrature data
901     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
902     /// let op_build = ceed
903     ///     .operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
904     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
905     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
906     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
907     ///
908     /// // Check
909     /// assert!(op_build.check().is_ok(), "Operator setup incorrectly");
910     /// # Ok(())
911     /// # }
912     /// ```
913     pub fn check(self) -> crate::Result<Self> {
914         self.op_core.check()?;
915         Ok(self)
916     }
917 
918     /// Get the number of elements associated with an Operator
919     ///
920     ///
921     /// ```
922     /// # use libceed::prelude::*;
923     /// # fn main() -> libceed::Result<()> {
924     /// # let ceed = libceed::Ceed::default_init();
925     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
926     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
927     ///
928     /// // Operator field arguments
929     /// let ne = 3;
930     /// let q = 4;
931     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
932     /// for i in 0..ne {
933     ///     ind[2 * i + 0] = i as i32;
934     ///     ind[2 * i + 1] = (i + 1) as i32;
935     /// }
936     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
937     ///
938     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
939     ///
940     /// // Operator field
941     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
942     ///
943     /// // Check number of elements
944     /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements");
945     /// # Ok(())
946     /// # }
947     /// ```
948     pub fn num_elements(&self) -> usize {
949         let mut nelem = 0;
950         unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) };
951         usize::try_from(nelem).unwrap()
952     }
953 
954     /// Get the number of quadrature points associated with each element of
955     ///   an Operator
956     ///
957     ///
958     /// ```
959     /// # use libceed::prelude::*;
960     /// # fn main() -> libceed::Result<()> {
961     /// # let ceed = libceed::Ceed::default_init();
962     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
963     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
964     ///
965     /// // Operator field arguments
966     /// let ne = 3;
967     /// let q = 4;
968     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
969     /// for i in 0..ne {
970     ///     ind[2 * i + 0] = i as i32;
971     ///     ind[2 * i + 1] = (i + 1) as i32;
972     /// }
973     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
974     ///
975     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
976     ///
977     /// // Operator field
978     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
979     ///
980     /// // Check number of quadrature points
981     /// assert_eq!(
982     ///     op.num_quadrature_points(),
983     ///     q,
984     ///     "Incorrect number of quadrature points"
985     /// );
986     /// # Ok(())
987     /// # }
988     /// ```
989     pub fn num_quadrature_points(&self) -> usize {
990         let mut Q = 0;
991         unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) };
992         usize::try_from(Q).unwrap()
993     }
994 
    /// Assemble the diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to store the assembled Operator diagonal
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0)?;
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0)?;
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(0.0)?;
    /// op_mass.linear_assemble_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0)?;
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i];
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin delegation to the shared operator core
        self.op_core.linear_assemble_diagonal(assembled)
    }
1100 
    /// Assemble the diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the diagonal of a linear Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to sum the assembled Operator diagonal into
    ///
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0)?;
    /// let mut u = ceed.vector(ndofs)?;
    /// u.set_value(1.0)?;
    /// let mut v = ceed.vector(ndofs)?;
    /// v.set_value(0.0)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ndofs)?;
    /// diag.set_value(1.0)?;
    /// op_mass.linear_assemble_add_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     u.set_value(0.0)?;
    ///     {
    ///         let mut u_array = u.view_mut()?;
    ///         u_array[i] = 1.;
    ///     }
    ///
    ///     op_mass.apply(&u, &mut v)?;
    ///
    ///     {
    ///         let v_array = v.view()?;
    ///         let mut true_array = true_diag.view_mut()?;
    ///         true_array[i] = v_array[i] + 1.0;
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        // Thin delegation to the shared operator core
        self.op_core.linear_assemble_add_diagonal(assembled)
    }
1207 
    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This overwrites a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                   diagonal, provided in row-major form with an
    ///                   `ncomp * ncomp` block at each node. The dimensions of
    ///                   this vector are derived from the active vector for
    ///                   the CeedOperator. The array has shape
    ///                   `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0)?;
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0)?;
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(0.0)?;
    /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0)?;
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin delegation to the shared operator core
        self.op_core.linear_assemble_point_block_diagonal(assembled)
    }
1346 
    /// Assemble the point block diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the point block diagonal of a linear
    /// Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to sum the assembled CeedOperator point block
    ///                   diagonal into, provided in row-major form with an
    ///                   `ncomp * ncomp` block at each node. The dimensions of
    ///                   this vector are derived from the active vector for
    ///                   the CeedOperator. The array has shape
    ///                   `[nodes, component out, component in]`.
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # fn main() -> libceed::Result<()> {
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ncomp = 2;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
    /// let mut qdata = ceed.vector(ne * q)?;
    /// qdata.set_value(0.0)?;
    /// let mut u = ceed.vector(ncomp * ndofs)?;
    /// u.set_value(1.0)?;
    /// let mut v = ceed.vector(ncomp * ndofs)?;
    /// v.set_value(0.0)?;
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = (2 * i) as i32;
    ///     indu[p * i + 1] = (2 * i + 1) as i32;
    ///     indu[p * i + 2] = (2 * i + 2) as i32;
    /// }
    /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
    ///
    /// // Bases
    /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
    /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
    ///
    /// // Build quadrature data
    /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
    /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("dx", &rx, &bx, VectorOpt::Active)?
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
    ///     .apply(&x, &mut qdata)?;
    ///
    /// // Mass operator
    /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
    ///     // Number of quadrature points
    ///     let q = qdata.len();
    ///
    ///     // Iterate over quadrature points
    ///     for i in 0..q {
    ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
    ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
    ///     }
    ///
    ///     // Return clean error code
    ///     0
    /// };
    ///
    /// let qf_mass = ceed
    ///     .q_function_interior(1, Box::new(mass_2_comp))?
    ///     .input("u", 2, EvalMode::Interp)?
    ///     .input("qdata", 1, EvalMode::None)?
    ///     .output("v", 2, EvalMode::Interp)?;
    ///
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
    ///     .field("u", &ru, &bu, VectorOpt::Active)?
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
    ///     .field("v", &ru, &bu, VectorOpt::Active)?;
    ///
    /// // Diagonal
    /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// diag.set_value(1.0)?;
    /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
    ///
    /// // Manual diagonal computation
    /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
    /// true_diag.set_value(0.0)?;
    /// for i in 0..ndofs {
    ///     for j in 0..ncomp {
    ///         u.set_value(0.0)?;
    ///         {
    ///             let mut u_array = u.view_mut()?;
    ///             u_array[i + j * ndofs] = 1.;
    ///         }
    ///
    ///         op_mass.apply(&u, &mut v)?;
    ///
    ///         {
    ///             let v_array = v.view()?;
    ///             let mut true_array = true_diag.view_mut()?;
    ///             for k in 0..ncomp {
    ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Check
    /// diag.view()?
    ///     .iter()
    ///     .zip(true_diag.view()?.iter())
    ///     .for_each(|(computed, actual)| {
    ///         assert!(
    ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
    ///             "Diagonal entry incorrect"
    ///         );
    ///     });
    /// # Ok(())
    /// # }
    /// ```
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin delegation to the shared operator core
        self.op_core
            .linear_assemble_add_point_block_diagonal(assembled)
    }
1486 
1487     /// Create a multigrid coarse Operator and level transfer Operators for a
1488     ///   given Operator, creating the prolongation basis from the fine and
1489     ///   coarse grid interpolation
1490     ///
1491     /// * `p_mult_fine`  - Lvector multiplicity in parallel gather/scatter
1492     /// * `rstr_coarse`  - Coarse grid restriction
1493     /// * `basis_coarse` - Coarse grid active vector basis
1494     ///
1495     /// ```
1496     /// # use libceed::prelude::*;
1497     /// # fn main() -> libceed::Result<()> {
1498     /// # let ceed = libceed::Ceed::default_init();
1499     /// let ne = 15;
1500     /// let p_coarse = 3;
1501     /// let p_fine = 5;
1502     /// let q = 6;
1503     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1504     /// let ndofs_fine = p_fine * ne - ne + 1;
1505     ///
1506     /// // Vectors
1507     /// let x_array = (0..ne + 1)
1508     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1509     ///     .collect::<Vec<Scalar>>();
1510     /// let x = ceed.vector_from_slice(&x_array)?;
1511     /// let mut qdata = ceed.vector(ne * q)?;
1512     /// qdata.set_value(0.0);
1513     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1514     /// u_coarse.set_value(1.0);
1515     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1516     /// u_fine.set_value(1.0);
1517     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1518     /// v_coarse.set_value(0.0);
1519     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1520     /// v_fine.set_value(0.0);
1521     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1522     /// multiplicity.set_value(1.0);
1523     ///
1524     /// // Restrictions
1525     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1526     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1527     ///
1528     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1529     /// for i in 0..ne {
1530     ///     indx[2 * i + 0] = i as i32;
1531     ///     indx[2 * i + 1] = (i + 1) as i32;
1532     /// }
1533     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1534     ///
1535     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1536     /// for i in 0..ne {
1537     ///     for j in 0..p_coarse {
1538     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1539     ///     }
1540     /// }
1541     /// let ru_coarse = ceed.elem_restriction(
1542     ///     ne,
1543     ///     p_coarse,
1544     ///     1,
1545     ///     1,
1546     ///     ndofs_coarse,
1547     ///     MemType::Host,
1548     ///     &indu_coarse,
1549     /// )?;
1550     ///
1551     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1552     /// for i in 0..ne {
1553     ///     for j in 0..p_fine {
1554     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1555     ///     }
1556     /// }
1557     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1558     ///
1559     /// // Bases
1560     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1561     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1562     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1563     ///
1564     /// // Build quadrature data
1565     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1566     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1567     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1568     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1569     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1570     ///     .apply(&x, &mut qdata)?;
1571     ///
1572     /// // Mass operator
1573     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1574     /// let op_mass_fine = ceed
1575     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1576     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1577     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1578     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1579     ///
1580     /// // Multigrid setup
1581     /// let (op_mass_coarse, op_prolong, op_restrict) =
1582     ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
1583     ///
1584     /// // Coarse problem
1585     /// u_coarse.set_value(1.0);
1586     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1587     ///
1588     /// // Check
1589     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1590     /// assert!(
1591     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1592     ///     "Incorrect interval length computed"
1593     /// );
1594     ///
1595     /// // Prolong
1596     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1597     ///
1598     /// // Fine problem
1599     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1600     ///
1601     /// // Check
1602     /// let sum: Scalar = v_fine.view()?.iter().sum();
1603     /// assert!(
1604     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1605     ///     "Incorrect interval length computed"
1606     /// );
1607     ///
1608     /// // Restrict
1609     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1610     ///
1611     /// // Check
1612     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1613     /// assert!(
1614     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1615     ///     "Incorrect interval length computed"
1616     /// );
1617     /// # Ok(())
1618     /// # }
1619     /// ```
1620     pub fn create_multigrid_level(
1621         &self,
1622         p_mult_fine: &Vector,
1623         rstr_coarse: &ElemRestriction,
1624         basis_coarse: &Basis,
1625     ) -> crate::Result<(Operator, Operator, Operator)> {
1626         let mut ptr_coarse = std::ptr::null_mut();
1627         let mut ptr_prolong = std::ptr::null_mut();
1628         let mut ptr_restrict = std::ptr::null_mut();
1629         let ierr = unsafe {
1630             bind_ceed::CeedOperatorMultigridLevelCreate(
1631                 self.op_core.ptr,
1632                 p_mult_fine.ptr,
1633                 rstr_coarse.ptr,
1634                 basis_coarse.ptr,
1635                 &mut ptr_coarse,
1636                 &mut ptr_prolong,
1637                 &mut ptr_restrict,
1638             )
1639         };
1640         self.op_core.check_error(ierr)?;
1641         let op_coarse = Operator::from_raw(ptr_coarse)?;
1642         let op_prolong = Operator::from_raw(ptr_prolong)?;
1643         let op_restrict = Operator::from_raw(ptr_restrict)?;
1644         Ok((op_coarse, op_prolong, op_restrict))
1645     }
1646 
1647     /// Create a multigrid coarse Operator and level transfer Operators for a
1648     ///   given Operator with a tensor basis for the active basis
1649     ///
1650     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1651     /// * `rstr_coarse`   - Coarse grid restriction
1652     /// * `basis_coarse`  - Coarse grid active vector basis
1653     /// * `interp_c_to_f` - Matrix for coarse to fine
1654     ///
1655     /// ```
1656     /// # use libceed::prelude::*;
1657     /// # fn main() -> libceed::Result<()> {
1658     /// # let ceed = libceed::Ceed::default_init();
1659     /// let ne = 15;
1660     /// let p_coarse = 3;
1661     /// let p_fine = 5;
1662     /// let q = 6;
1663     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1664     /// let ndofs_fine = p_fine * ne - ne + 1;
1665     ///
1666     /// // Vectors
1667     /// let x_array = (0..ne + 1)
1668     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1669     ///     .collect::<Vec<Scalar>>();
1670     /// let x = ceed.vector_from_slice(&x_array)?;
1671     /// let mut qdata = ceed.vector(ne * q)?;
1672     /// qdata.set_value(0.0);
1673     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1674     /// u_coarse.set_value(1.0);
1675     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1676     /// u_fine.set_value(1.0);
1677     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1678     /// v_coarse.set_value(0.0);
1679     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1680     /// v_fine.set_value(0.0);
1681     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1682     /// multiplicity.set_value(1.0);
1683     ///
1684     /// // Restrictions
1685     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1686     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1687     ///
1688     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1689     /// for i in 0..ne {
1690     ///     indx[2 * i + 0] = i as i32;
1691     ///     indx[2 * i + 1] = (i + 1) as i32;
1692     /// }
1693     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1694     ///
1695     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1696     /// for i in 0..ne {
1697     ///     for j in 0..p_coarse {
1698     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1699     ///     }
1700     /// }
1701     /// let ru_coarse = ceed.elem_restriction(
1702     ///     ne,
1703     ///     p_coarse,
1704     ///     1,
1705     ///     1,
1706     ///     ndofs_coarse,
1707     ///     MemType::Host,
1708     ///     &indu_coarse,
1709     /// )?;
1710     ///
1711     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1712     /// for i in 0..ne {
1713     ///     for j in 0..p_fine {
1714     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1715     ///     }
1716     /// }
1717     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1718     ///
1719     /// // Bases
1720     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1721     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1722     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1723     ///
1724     /// // Build quadrature data
1725     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1726     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1727     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1728     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1729     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1730     ///     .apply(&x, &mut qdata)?;
1731     ///
1732     /// // Mass operator
1733     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1734     /// let op_mass_fine = ceed
1735     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1736     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1737     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1738     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1739     ///
1740     /// // Multigrid setup
1741     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1742     /// {
1743     ///     let mut coarse = ceed.vector(p_coarse)?;
1744     ///     let mut fine = ceed.vector(p_fine)?;
1745     ///     let basis_c_to_f =
1746     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1747     ///     for i in 0..p_coarse {
1748     ///         coarse.set_value(0.0);
1749     ///         {
1750     ///             let mut array = coarse.view_mut()?;
1751     ///             array[i] = 1.;
1752     ///         }
1753     ///         basis_c_to_f.apply(
1754     ///             1,
1755     ///             TransposeMode::NoTranspose,
1756     ///             EvalMode::Interp,
1757     ///             &coarse,
1758     ///             &mut fine,
1759     ///         )?;
1760     ///         let array = fine.view()?;
1761     ///         for j in 0..p_fine {
1762     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1763     ///         }
1764     ///     }
1765     /// }
1766     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1(
1767     ///     &multiplicity,
1768     ///     &ru_coarse,
1769     ///     &bu_coarse,
1770     ///     &interp_c_to_f,
1771     /// )?;
1772     ///
1773     /// // Coarse problem
1774     /// u_coarse.set_value(1.0);
1775     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1776     ///
1777     /// // Check
1778     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1779     /// assert!(
1780     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1781     ///     "Incorrect interval length computed"
1782     /// );
1783     ///
1784     /// // Prolong
1785     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1786     ///
1787     /// // Fine problem
1788     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1789     ///
1790     /// // Check
1791     /// let sum: Scalar = v_fine.view()?.iter().sum();
1792     /// assert!(
1793     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1794     ///     "Incorrect interval length computed"
1795     /// );
1796     ///
1797     /// // Restrict
1798     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1799     ///
1800     /// // Check
1801     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1802     /// assert!(
1803     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1804     ///     "Incorrect interval length computed"
1805     /// );
1806     /// # Ok(())
1807     /// # }
1808     /// ```
1809     pub fn create_multigrid_level_tensor_H1(
1810         &self,
1811         p_mult_fine: &Vector,
1812         rstr_coarse: &ElemRestriction,
1813         basis_coarse: &Basis,
1814         interpCtoF: &Vec<Scalar>,
1815     ) -> crate::Result<(Operator, Operator, Operator)> {
1816         let mut ptr_coarse = std::ptr::null_mut();
1817         let mut ptr_prolong = std::ptr::null_mut();
1818         let mut ptr_restrict = std::ptr::null_mut();
1819         let ierr = unsafe {
1820             bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
1821                 self.op_core.ptr,
1822                 p_mult_fine.ptr,
1823                 rstr_coarse.ptr,
1824                 basis_coarse.ptr,
1825                 interpCtoF.as_ptr(),
1826                 &mut ptr_coarse,
1827                 &mut ptr_prolong,
1828                 &mut ptr_restrict,
1829             )
1830         };
1831         self.op_core.check_error(ierr)?;
1832         let op_coarse = Operator::from_raw(ptr_coarse)?;
1833         let op_prolong = Operator::from_raw(ptr_prolong)?;
1834         let op_restrict = Operator::from_raw(ptr_restrict)?;
1835         Ok((op_coarse, op_prolong, op_restrict))
1836     }
1837 
1838     /// Create a multigrid coarse Operator and level transfer Operators for a
1839     ///   given Operator with a non-tensor basis for the active basis
1840     ///
1841     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1842     /// * `rstr_coarse`   - Coarse grid restriction
1843     /// * `basis_coarse`  - Coarse grid active vector basis
1844     /// * `interp_c_to_f` - Matrix for coarse to fine
1845     ///
1846     /// ```
1847     /// # use libceed::prelude::*;
1848     /// # fn main() -> libceed::Result<()> {
1849     /// # let ceed = libceed::Ceed::default_init();
1850     /// let ne = 15;
1851     /// let p_coarse = 3;
1852     /// let p_fine = 5;
1853     /// let q = 6;
1854     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1855     /// let ndofs_fine = p_fine * ne - ne + 1;
1856     ///
1857     /// // Vectors
1858     /// let x_array = (0..ne + 1)
1859     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1860     ///     .collect::<Vec<Scalar>>();
1861     /// let x = ceed.vector_from_slice(&x_array)?;
1862     /// let mut qdata = ceed.vector(ne * q)?;
1863     /// qdata.set_value(0.0);
1864     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1865     /// u_coarse.set_value(1.0);
1866     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1867     /// u_fine.set_value(1.0);
1868     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1869     /// v_coarse.set_value(0.0);
1870     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1871     /// v_fine.set_value(0.0);
1872     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1873     /// multiplicity.set_value(1.0);
1874     ///
1875     /// // Restrictions
1876     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1877     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1878     ///
1879     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1880     /// for i in 0..ne {
1881     ///     indx[2 * i + 0] = i as i32;
1882     ///     indx[2 * i + 1] = (i + 1) as i32;
1883     /// }
1884     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1885     ///
1886     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1887     /// for i in 0..ne {
1888     ///     for j in 0..p_coarse {
1889     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1890     ///     }
1891     /// }
1892     /// let ru_coarse = ceed.elem_restriction(
1893     ///     ne,
1894     ///     p_coarse,
1895     ///     1,
1896     ///     1,
1897     ///     ndofs_coarse,
1898     ///     MemType::Host,
1899     ///     &indu_coarse,
1900     /// )?;
1901     ///
1902     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1903     /// for i in 0..ne {
1904     ///     for j in 0..p_fine {
1905     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1906     ///     }
1907     /// }
1908     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1909     ///
1910     /// // Bases
1911     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1912     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1913     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1914     ///
1915     /// // Build quadrature data
1916     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1917     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1918     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1919     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1920     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1921     ///     .apply(&x, &mut qdata)?;
1922     ///
1923     /// // Mass operator
1924     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1925     /// let op_mass_fine = ceed
1926     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1927     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1928     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1929     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1930     ///
1931     /// // Multigrid setup
1932     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1933     /// {
1934     ///     let mut coarse = ceed.vector(p_coarse)?;
1935     ///     let mut fine = ceed.vector(p_fine)?;
1936     ///     let basis_c_to_f =
1937     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1938     ///     for i in 0..p_coarse {
1939     ///         coarse.set_value(0.0);
1940     ///         {
1941     ///             let mut array = coarse.view_mut()?;
1942     ///             array[i] = 1.;
1943     ///         }
1944     ///         basis_c_to_f.apply(
1945     ///             1,
1946     ///             TransposeMode::NoTranspose,
1947     ///             EvalMode::Interp,
1948     ///             &coarse,
1949     ///             &mut fine,
1950     ///         )?;
1951     ///         let array = fine.view()?;
1952     ///         for j in 0..p_fine {
1953     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1954     ///         }
1955     ///     }
1956     /// }
1957     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1(
1958     ///     &multiplicity,
1959     ///     &ru_coarse,
1960     ///     &bu_coarse,
1961     ///     &interp_c_to_f,
1962     /// )?;
1963     ///
1964     /// // Coarse problem
1965     /// u_coarse.set_value(1.0);
1966     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1967     ///
1968     /// // Check
1969     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1970     /// assert!(
1971     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1972     ///     "Incorrect interval length computed"
1973     /// );
1974     ///
1975     /// // Prolong
1976     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1977     ///
1978     /// // Fine problem
1979     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1980     ///
1981     /// // Check
1982     /// let sum: Scalar = v_fine.view()?.iter().sum();
1983     /// assert!(
1984     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1985     ///     "Incorrect interval length computed"
1986     /// );
1987     ///
1988     /// // Restrict
1989     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1990     ///
1991     /// // Check
1992     /// let sum: Scalar = v_coarse.view()?.iter().sum();
1993     /// assert!(
1994     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1995     ///     "Incorrect interval length computed"
1996     /// );
1997     /// # Ok(())
1998     /// # }
1999     /// ```
2000     pub fn create_multigrid_level_H1(
2001         &self,
2002         p_mult_fine: &Vector,
2003         rstr_coarse: &ElemRestriction,
2004         basis_coarse: &Basis,
2005         interpCtoF: &[Scalar],
2006     ) -> crate::Result<(Operator, Operator, Operator)> {
2007         let mut ptr_coarse = std::ptr::null_mut();
2008         let mut ptr_prolong = std::ptr::null_mut();
2009         let mut ptr_restrict = std::ptr::null_mut();
2010         let ierr = unsafe {
2011             bind_ceed::CeedOperatorMultigridLevelCreateH1(
2012                 self.op_core.ptr,
2013                 p_mult_fine.ptr,
2014                 rstr_coarse.ptr,
2015                 basis_coarse.ptr,
2016                 interpCtoF.as_ptr(),
2017                 &mut ptr_coarse,
2018                 &mut ptr_prolong,
2019                 &mut ptr_restrict,
2020             )
2021         };
2022         self.op_core.check_error(ierr)?;
2023         let op_coarse = Operator::from_raw(ptr_coarse)?;
2024         let op_prolong = Operator::from_raw(ptr_prolong)?;
2025         let op_restrict = Operator::from_raw(ptr_restrict)?;
2026         Ok((op_coarse, op_prolong, op_restrict))
2027     }
2028 }
2029 
2030 // -----------------------------------------------------------------------------
2031 // Composite Operator
2032 // -----------------------------------------------------------------------------
2033 impl<'a> CompositeOperator<'a> {
2034     // Constructor
2035     pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> {
2036         let mut ptr = std::ptr::null_mut();
2037         let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
2038         ceed.check_error(ierr)?;
2039         Ok(Self {
2040             op_core: OperatorCore {
2041                 ptr,
2042                 _lifeline: PhantomData,
2043             },
2044         })
2045     }
2046 
2047     /// Apply Operator to a vector
2048     ///
2049     /// * `input`  - Input Vector
2050     /// * `output` - Output Vector
2051     ///
2052     /// ```
2053     /// # use libceed::prelude::*;
2054     /// # fn main() -> libceed::Result<()> {
2055     /// # let ceed = libceed::Ceed::default_init();
2056     /// let ne = 4;
2057     /// let p = 3;
2058     /// let q = 4;
2059     /// let ndofs = p * ne - ne + 1;
2060     ///
2061     /// // Vectors
2062     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
2063     /// let mut qdata_mass = ceed.vector(ne * q)?;
2064     /// qdata_mass.set_value(0.0);
2065     /// let mut qdata_diff = ceed.vector(ne * q)?;
2066     /// qdata_diff.set_value(0.0);
2067     /// let mut u = ceed.vector(ndofs)?;
2068     /// u.set_value(1.0);
2069     /// let mut v = ceed.vector(ndofs)?;
2070     /// v.set_value(0.0);
2071     ///
2072     /// // Restrictions
2073     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
2074     /// for i in 0..ne {
2075     ///     indx[2 * i + 0] = i as i32;
2076     ///     indx[2 * i + 1] = (i + 1) as i32;
2077     /// }
2078     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
2079     /// let mut indu: Vec<i32> = vec![0; p * ne];
2080     /// for i in 0..ne {
2081     ///     indu[p * i + 0] = i as i32;
2082     ///     indu[p * i + 1] = (i + 1) as i32;
2083     ///     indu[p * i + 2] = (i + 2) as i32;
2084     /// }
2085     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
2086     /// let strides: [i32; 3] = [1, q as i32, q as i32];
2087     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
2088     ///
2089     /// // Bases
2090     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
2091     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
2092     ///
2093     /// // Build quadrature data
2094     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
2095     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
2096     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2097     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2098     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
2099     ///     .apply(&x, &mut qdata_mass)?;
2100     ///
2101     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
2102     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
2103     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2104     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2105     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
2106     ///     .apply(&x, &mut qdata_diff)?;
2107     ///
2108     /// // Application operator
2109     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
2110     /// let op_mass = ceed
2111     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
2112     ///     .field("u", &ru, &bu, VectorOpt::Active)?
2113     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
2114     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
2115     ///
2116     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
2117     /// let op_diff = ceed
2118     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
2119     ///     .field("du", &ru, &bu, VectorOpt::Active)?
2120     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
2121     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
2122     ///
2123     /// let op_composite = ceed
2124     ///     .composite_operator()?
2125     ///     .sub_operator(&op_mass)?
2126     ///     .sub_operator(&op_diff)?;
2127     ///
2128     /// v.set_value(0.0);
2129     /// op_composite.apply(&u, &mut v)?;
2130     ///
2131     /// // Check
2132     /// let sum: Scalar = v.view()?.iter().sum();
2133     /// assert!(
2134     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
2135     ///     "Incorrect interval length computed"
2136     /// );
2137     /// # Ok(())
2138     /// # }
2139     /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegate to the shared operator core; `output` is overwritten with
        //   the sum of the sub-operator applications
        self.op_core.apply(input, output)
    }
2143 
2144     /// Apply Operator to a vector and add result to output vector
2145     ///
2146     /// * `input`  - Input Vector
2147     /// * `output` - Output Vector
2148     ///
2149     /// ```
2150     /// # use libceed::prelude::*;
2151     /// # fn main() -> libceed::Result<()> {
2152     /// # let ceed = libceed::Ceed::default_init();
2153     /// let ne = 4;
2154     /// let p = 3;
2155     /// let q = 4;
2156     /// let ndofs = p * ne - ne + 1;
2157     ///
2158     /// // Vectors
2159     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
2160     /// let mut qdata_mass = ceed.vector(ne * q)?;
2161     /// qdata_mass.set_value(0.0);
2162     /// let mut qdata_diff = ceed.vector(ne * q)?;
2163     /// qdata_diff.set_value(0.0);
2164     /// let mut u = ceed.vector(ndofs)?;
2165     /// u.set_value(1.0);
2166     /// let mut v = ceed.vector(ndofs)?;
2167     /// v.set_value(0.0);
2168     ///
2169     /// // Restrictions
2170     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
2171     /// for i in 0..ne {
2172     ///     indx[2 * i + 0] = i as i32;
2173     ///     indx[2 * i + 1] = (i + 1) as i32;
2174     /// }
2175     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
2176     /// let mut indu: Vec<i32> = vec![0; p * ne];
2177     /// for i in 0..ne {
2178     ///     indu[p * i + 0] = i as i32;
2179     ///     indu[p * i + 1] = (i + 1) as i32;
2180     ///     indu[p * i + 2] = (i + 2) as i32;
2181     /// }
2182     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
2183     /// let strides: [i32; 3] = [1, q as i32, q as i32];
2184     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
2185     ///
2186     /// // Bases
2187     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
2188     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
2189     ///
2190     /// // Build quadrature data
2191     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
2192     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
2193     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2194     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2195     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
2196     ///     .apply(&x, &mut qdata_mass)?;
2197     ///
2198     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
2199     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
2200     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2201     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2202     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
2203     ///     .apply(&x, &mut qdata_diff)?;
2204     ///
2205     /// // Application operator
2206     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
2207     /// let op_mass = ceed
2208     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
2209     ///     .field("u", &ru, &bu, VectorOpt::Active)?
2210     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
2211     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
2212     ///
2213     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
2214     /// let op_diff = ceed
2215     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
2216     ///     .field("du", &ru, &bu, VectorOpt::Active)?
2217     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
2218     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
2219     ///
2220     /// let op_composite = ceed
2221     ///     .composite_operator()?
2222     ///     .sub_operator(&op_mass)?
2223     ///     .sub_operator(&op_diff)?;
2224     ///
2225     /// v.set_value(1.0);
2226     /// op_composite.apply_add(&u, &mut v)?;
2227     ///
2228     /// // Check
2229     /// let sum: Scalar = v.view()?.iter().sum();
2230     /// assert!(
2231     ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
2232     ///     "Incorrect interval length computed"
2233     /// );
2234     /// # Ok(())
2235     /// # }
2236     /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Delegate to the shared operator core; the result is accumulated
        //   into `output` rather than overwriting it
        self.op_core.apply_add(input, output)
    }
2240 
2241     /// Add a sub-Operator to a Composite Operator
2242     ///
2243     /// * `subop` - Sub-Operator
2244     ///
2245     /// ```
2246     /// # use libceed::prelude::*;
2247     /// # fn main() -> libceed::Result<()> {
2248     /// # let ceed = libceed::Ceed::default_init();
2249     /// let mut op = ceed.composite_operator()?;
2250     ///
2251     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
2252     /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?;
2253     /// op = op.sub_operator(&op_mass)?;
2254     ///
2255     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
2256     /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?;
2257     /// op = op.sub_operator(&op_diff)?;
2258     /// # Ok(())
2259     /// # }
2260     /// ```
2261     #[allow(unused_mut)]
2262     pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
2263         let ierr =
2264             unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
2265         self.op_core.check_error(ierr)?;
2266         Ok(self)
2267     }
2268 
2269     /// Check if CompositeOperator is setup correctly
2270     ///
2271     /// ```
2272     /// # use libceed::prelude::*;
2273     /// # fn main() -> libceed::Result<()> {
2274     /// # let ceed = libceed::Ceed::default_init();
2275     /// let ne = 4;
2276     /// let p = 3;
2277     /// let q = 4;
2278     /// let ndofs = p * ne - ne + 1;
2279     ///
2280     /// // Restrictions
2281     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
2282     /// for i in 0..ne {
2283     ///     indx[2 * i + 0] = i as i32;
2284     ///     indx[2 * i + 1] = (i + 1) as i32;
2285     /// }
2286     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
2287     /// let strides: [i32; 3] = [1, q as i32, q as i32];
2288     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
2289     ///
2290     /// // Bases
2291     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
2292     ///
2293     /// // Build quadrature data
2294     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
2295     /// let op_build_mass = ceed
2296     ///     .operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
2297     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2298     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2299     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
2300     ///
2301     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
2302     /// let op_build_diff = ceed
2303     ///     .operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
2304     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
2305     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
2306     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
2307     ///
2308     /// let op_build = ceed
2309     ///     .composite_operator()?
2310     ///     .sub_operator(&op_build_mass)?
2311     ///     .sub_operator(&op_build_diff)?;
2312     ///
2313     /// // Check
2314     /// assert!(op_build.check().is_ok(), "Operator setup incorrectly");
2315     /// # Ok(())
2316     /// # }
2317     /// ```
2318     pub fn check(self) -> crate::Result<Self> {
2319         self.op_core.check()?;
2320         Ok(self)
2321     }
2322 
2323     /// Assemble the diagonal of a square linear Operator
2324     ///
2325     /// This overwrites a Vector with the diagonal of a linear Operator.
2326     ///
2327     /// Note: Currently only non-composite Operators with a single field and
2328     ///      composite Operators with single field sub-operators are supported.
2329     ///
2330     /// * `op`        - Operator to assemble QFunction
2331     /// * `assembled` - Vector to store assembled Operator diagonal
2332     pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
2333         self.op_core.linear_assemble_add_diagonal(assembled)
2334     }
2335 
    /// Sum into the diagonal of a square linear Operator
    ///
    /// This sums into a Vector with the diagonal of a linear
    ///   Operator.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to sum the assembled Operator diagonal into;
    ///                   existing entries are added to, not overwritten
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        self.op_core.linear_assemble_add_diagonal(assembled)
    }
2349 
2350     /// Assemble the diagonal of a square linear Operator
2351     ///
2352     /// This overwrites a Vector with the diagonal of a linear Operator.
2353     ///
2354     /// Note: Currently only non-composite Operators with a single field and
2355     ///      composite Operators with single field sub-operators are supported.
2356     ///
2357     /// * `op`        - Operator to assemble QFunction
2358     /// * `assembled` - Vector to store assembled CeedOperator point block
2359     ///                   diagonal, provided in row-major form with an
2360     ///                   `ncomp * ncomp` block at each node. The dimensions of
2361     ///                   this vector are derived from the active vector for
2362     ///                   the CeedOperator. The array has shape
2363     ///                   `[nodes, component out, component in]`.
2364     pub fn linear_assemble_point_block_diagonal(
2365         &self,
2366         assembled: &mut Vector,
2367     ) -> crate::Result<i32> {
2368         self.op_core.linear_assemble_add_diagonal(assembled)
2369     }
2370 
2371     /// Assemble the diagonal of a square linear Operator
2372     ///
2373     /// This sums into a Vector with the diagonal of a linear Operator.
2374     ///
2375     /// Note: Currently only non-composite Operators with a single field and
2376     ///      composite Operators with single field sub-operators are supported.
2377     ///
2378     /// * `op`        - Operator to assemble QFunction
2379     /// * `assembled` - Vector to store assembled CeedOperator point block
2380     ///                   diagonal, provided in row-major form with an
2381     ///                   `ncomp * ncomp` block at each node. The dimensions of
2382     ///                   this vector are derived from the active vector for
2383     ///                   the CeedOperator. The array has shape
2384     ///                   `[nodes, component out, component in]`.
2385     pub fn linear_assemble_add_point_block_diagonal(
2386         &self,
2387         assembled: &mut Vector,
2388     ) -> crate::Result<i32> {
2389         self.op_core.linear_assemble_add_diagonal(assembled)
2390     }
2391 }
2392 
2393 // -----------------------------------------------------------------------------
2394