xref: /libCEED/rust/libceed/src/operator.rs (revision 1142270cb02df4484c0ba89e11097db6aa2d5cef)
1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
3 // reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
15 // testbed platforms, in support of the nation's exascale computing imperative
16 
17 //! A Ceed Operator defines the finite/spectral element operator associated to a
18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions,
19 //! Ceed Bases, and Ceed QFunctions.
20 
21 use crate::prelude::*;
22 
23 // -----------------------------------------------------------------------------
24 // CeedOperator context wrapper
25 // -----------------------------------------------------------------------------
/// Internal wrapper owning the raw `CeedOperator` handle. Shared by both
/// `Operator` and `CompositeOperator`, which delegate their common behavior
/// here.
#[derive(Debug)]
pub(crate) struct OperatorCore<'a> {
    // Raw libCEED C handle; destroyed in this type's Drop impl.
    ptr: bind_ceed::CeedOperator,
    // Ties this wrapper to lifetime 'a without storing an actual reference
    // (presumably the lifetime of the parent Ceed context — the borrow itself
    // is not visible in this chunk).
    _lifeline: PhantomData<&'a ()>,
}
31 
/// A single (non-composite) Ceed Operator: connects ElemRestrictions, Bases,
/// and a QFunction. Construction and application are implemented on the
/// shared `OperatorCore`.
#[derive(Debug)]
pub struct Operator<'a> {
    op_core: OperatorCore<'a>,
}
36 
/// A composite Ceed Operator: the sum of one or more sub-operators, each a
/// regular `Operator`. Shares its common behavior via `OperatorCore`.
#[derive(Debug)]
pub struct CompositeOperator<'a> {
    op_core: OperatorCore<'a>,
}
41 
42 // -----------------------------------------------------------------------------
43 // Destructor
44 // -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    /// Release the underlying libCEED object when the wrapper goes away.
    fn drop(&mut self) {
        // SAFETY: `self.ptr` was produced by a libCEED creation routine and
        // is owned exclusively by this wrapper, so it is valid to destroy
        // exactly once here.
        unsafe {
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}
52 
53 // -----------------------------------------------------------------------------
54 // Display
55 // -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    /// Format the operator by asking libCEED to "view" it into an in-memory
    /// C stream, then lossily converting the resulting buffer to UTF-8.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        let cstring = unsafe {
            // open_memstream writes everything printed to `file` into a
            // malloc'd buffer whose address lands in `ptr` after fclose.
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            // Take ownership of the malloc'd, NUL-terminated buffer so it is
            // freed when `cstring` is dropped.
            CString::from_raw(ptr)
        };
        cstring.to_string_lossy().fmt(f)
    }
}
69 
70 /// View an Operator
71 ///
72 /// ```
73 /// # use libceed::prelude::*;
74 /// # fn main() -> Result<(), libceed::CeedError> {
75 /// # let ceed = libceed::Ceed::default_init();
76 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
77 ///
78 /// // Operator field arguments
79 /// let ne = 3;
80 /// let q = 4 as usize;
81 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
82 /// for i in 0..ne {
83 ///     ind[2 * i + 0] = i as i32;
84 ///     ind[2 * i + 1] = (i + 1) as i32;
85 /// }
86 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
87 /// let strides: [i32; 3] = [1, q as i32, q as i32];
88 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
89 ///
90 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
91 ///
92 /// // Operator fields
93 /// let op = ceed
94 ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
95 ///     .field("dx", &r, &b, VectorOpt::Active)?
96 ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
97 ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
98 ///
99 /// println!("{}", op);
100 /// # Ok(())
101 /// # }
102 /// ```
103 impl<'a> fmt::Display for Operator<'a> {
104     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
105         self.op_core.fmt(f)
106     }
107 }
108 
109 /// View a composite Operator
110 ///
111 /// ```
112 /// # use libceed::prelude::*;
113 /// # fn main() -> Result<(), libceed::CeedError> {
114 /// # let ceed = libceed::Ceed::default_init();
115 ///
116 /// // Sub operator field arguments
117 /// let ne = 3;
118 /// let q = 4 as usize;
119 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
120 /// for i in 0..ne {
121 ///     ind[2 * i + 0] = i as i32;
122 ///     ind[2 * i + 1] = (i + 1) as i32;
123 /// }
124 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
125 /// let strides: [i32; 3] = [1, q as i32, q as i32];
126 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
127 ///
128 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
129 ///
130 /// let qdata_mass = ceed.vector(q * ne)?;
131 /// let qdata_diff = ceed.vector(q * ne)?;
132 ///
133 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
134 /// let op_mass = ceed
135 ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
136 ///     .field("u", &r, &b, VectorOpt::Active)?
137 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
138 ///     .field("v", &r, &b, VectorOpt::Active)?;
139 ///
140 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
141 /// let op_diff = ceed
142 ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
143 ///     .field("du", &r, &b, VectorOpt::Active)?
144 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
145 ///     .field("dv", &r, &b, VectorOpt::Active)?;
146 ///
147 /// let op = ceed
148 ///     .composite_operator()?
149 ///     .sub_operator(&op_mass)?
150 ///     .sub_operator(&op_diff)?;
151 ///
152 /// println!("{}", op);
153 /// # Ok(())
154 /// # }
155 /// ```
156 impl<'a> fmt::Display for CompositeOperator<'a> {
157     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
158         self.op_core.fmt(f)
159     }
160 }
161 
162 // -----------------------------------------------------------------------------
163 // Core functionality
164 // -----------------------------------------------------------------------------
impl<'a> OperatorCore<'a> {
    // Error handling
    /// Convert a libCEED return code into a `crate::Result`, first fetching
    /// the parent Ceed handle so the error message can be resolved from it.
    #[doc(hidden)]
    fn check_error(&self, ierr: i32) -> crate::Result<i32> {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr);
        }
        crate::check_error(ptr, ierr)
    }

    // Common implementations
    /// Apply the operator to `input`, writing the result into `output`.
    /// Uses CEED_REQUEST_IMMEDIATE, so the call completes before returning.
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApply(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Apply the operator to `input`, summing the result into `output`
    /// (i.e. `output += op(input)`).
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApplyAdd(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator, overwriting `assembled`.
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the diagonal of the operator, summing into `assembled`.
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point block diagonal of the operator, overwriting
    /// `assembled`.
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }

    /// Assemble the point block diagonal of the operator, summing into
    /// `assembled`.
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.check_error(ierr)
    }
}
251 
252 // -----------------------------------------------------------------------------
253 // Operator
254 // -----------------------------------------------------------------------------
255 impl<'a> Operator<'a> {
    // Constructor
    /// Create an `Operator` for the given QFunction.
    ///
    /// * `ceed` - Ceed context the operator belongs to
    /// * `qf`   - QFunction defining the operator action
    /// * `dqf`  - QFunction for the Jacobian (or `QFunctionOpt::None`)
    /// * `dqfT` - QFunction for the Jacobian transpose (or
    ///            `QFunctionOpt::None`)
    pub fn create<'b>(
        ceed: &'a crate::Ceed,
        qf: impl Into<QFunctionOpt<'b>>,
        dqf: impl Into<QFunctionOpt<'b>>,
        dqfT: impl Into<QFunctionOpt<'b>>,
    ) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorCreate(
                ceed.ptr,
                qf.into().to_raw(),
                dqf.into().to_raw(),
                dqfT.into().to_raw(),
                &mut ptr,
            )
        };
        // Surface any creation error before wrapping the handle.
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore {
                ptr,
                _lifeline: PhantomData,
            },
        })
    }
281 
282     fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
283         Ok(Self {
284             op_core: OperatorCore {
285                 ptr,
286                 _lifeline: PhantomData,
287             },
288         })
289     }
290 
291     /// Apply Operator to a vector
292     ///
293     /// * `input`  - Input Vector
294     /// * `output` - Output Vector
295     ///
296     /// ```
297     /// # use libceed::prelude::*;
298     /// # fn main() -> Result<(), libceed::CeedError> {
299     /// # let ceed = libceed::Ceed::default_init();
300     /// let ne = 4;
301     /// let p = 3;
302     /// let q = 4;
303     /// let ndofs = p * ne - ne + 1;
304     ///
305     /// // Vectors
306     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
307     /// let mut qdata = ceed.vector(ne * q)?;
308     /// qdata.set_value(0.0);
309     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
310     /// let mut v = ceed.vector(ndofs)?;
311     /// v.set_value(0.0);
312     ///
313     /// // Restrictions
314     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
315     /// for i in 0..ne {
316     ///     indx[2 * i + 0] = i as i32;
317     ///     indx[2 * i + 1] = (i + 1) as i32;
318     /// }
319     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
320     /// let mut indu: Vec<i32> = vec![0; p * ne];
321     /// for i in 0..ne {
322     ///     indu[p * i + 0] = i as i32;
323     ///     indu[p * i + 1] = (i + 1) as i32;
324     ///     indu[p * i + 2] = (i + 2) as i32;
325     /// }
326     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
327     /// let strides: [i32; 3] = [1, q as i32, q as i32];
328     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
329     ///
330     /// // Bases
331     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
332     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
333     ///
334     /// // Build quadrature data
335     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
336     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
337     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
338     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
339     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
340     ///     .apply(&x, &mut qdata)?;
341     ///
342     /// // Mass operator
343     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
344     /// let op_mass = ceed
345     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
346     ///     .field("u", &ru, &bu, VectorOpt::Active)?
347     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
348     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
349     ///
350     /// v.set_value(0.0);
351     /// op_mass.apply(&u, &mut v)?;
352     ///
353     /// // Check
354     /// let sum: Scalar = v.view().iter().sum();
355     /// assert!(
356     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
357     ///     "Incorrect interval length computed"
358     /// );
359     /// # Ok(())
360     /// # }
361     /// ```
362     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
363         self.op_core.apply(input, output)
364     }
365 
366     /// Apply Operator to a vector and add result to output vector
367     ///
368     /// * `input`  - Input Vector
369     /// * `output` - Output Vector
370     ///
371     /// ```
372     /// # use libceed::prelude::*;
373     /// # fn main() -> Result<(), libceed::CeedError> {
374     /// # let ceed = libceed::Ceed::default_init();
375     /// let ne = 4;
376     /// let p = 3;
377     /// let q = 4;
378     /// let ndofs = p * ne - ne + 1;
379     ///
380     /// // Vectors
381     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
382     /// let mut qdata = ceed.vector(ne * q)?;
383     /// qdata.set_value(0.0);
384     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
385     /// let mut v = ceed.vector(ndofs)?;
386     ///
387     /// // Restrictions
388     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
389     /// for i in 0..ne {
390     ///     indx[2 * i + 0] = i as i32;
391     ///     indx[2 * i + 1] = (i + 1) as i32;
392     /// }
393     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
394     /// let mut indu: Vec<i32> = vec![0; p * ne];
395     /// for i in 0..ne {
396     ///     indu[p * i + 0] = i as i32;
397     ///     indu[p * i + 1] = (i + 1) as i32;
398     ///     indu[p * i + 2] = (i + 2) as i32;
399     /// }
400     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
401     /// let strides: [i32; 3] = [1, q as i32, q as i32];
402     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
403     ///
404     /// // Bases
405     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
406     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
407     ///
408     /// // Build quadrature data
409     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
410     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
411     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
412     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
413     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
414     ///     .apply(&x, &mut qdata)?;
415     ///
416     /// // Mass operator
417     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
418     /// let op_mass = ceed
419     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
420     ///     .field("u", &ru, &bu, VectorOpt::Active)?
421     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
422     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
423     ///
424     /// v.set_value(1.0);
425     /// op_mass.apply_add(&u, &mut v)?;
426     ///
427     /// // Check
428     /// let sum: Scalar = v.view().iter().sum();
429     /// assert!(
430     ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
431     ///     "Incorrect interval length computed and added"
432     /// );
433     /// # Ok(())
434     /// # }
435     /// ```
436     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
437         self.op_core.apply_add(input, output)
438     }
439 
440     /// Provide a field to a Operator for use by its QFunction
441     ///
442     /// * `fieldname` - Name of the field (to be matched with the name used by
443     ///                   the QFunction)
444     /// * `r`         - ElemRestriction
445     /// * `b`         - Basis in which the field resides or `BasisCollocated` if
446     ///                   collocated with quadrature points
447     /// * `v`         - Vector to be used by Operator or `VectorActive` if field
448     ///                   is active or `VectorNone` if using `Weight` with the
449     ///                   QFunction
450     ///
451     ///
452     /// ```
453     /// # use libceed::prelude::*;
454     /// # fn main() -> Result<(), libceed::CeedError> {
455     /// # let ceed = libceed::Ceed::default_init();
456     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
457     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
458     ///
459     /// // Operator field arguments
460     /// let ne = 3;
461     /// let q = 4;
462     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
463     /// for i in 0..ne {
464     ///     ind[2 * i + 0] = i as i32;
465     ///     ind[2 * i + 1] = (i + 1) as i32;
466     /// }
467     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
468     ///
469     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
470     ///
471     /// // Operator field
472     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
473     /// # Ok(())
474     /// # }
475     /// ```
476     #[allow(unused_mut)]
477     pub fn field<'b>(
478         mut self,
479         fieldname: &str,
480         r: impl Into<ElemRestrictionOpt<'b>>,
481         b: impl Into<BasisOpt<'b>>,
482         v: impl Into<VectorOpt<'b>>,
483     ) -> crate::Result<Self> {
484         let fieldname = CString::new(fieldname).expect("CString::new failed");
485         let fieldname = fieldname.as_ptr() as *const i8;
486         let ierr = unsafe {
487             bind_ceed::CeedOperatorSetField(
488                 self.op_core.ptr,
489                 fieldname,
490                 r.into().to_raw(),
491                 b.into().to_raw(),
492                 v.into().to_raw(),
493             )
494         };
495         self.op_core.check_error(ierr)?;
496         Ok(self)
497     }
498 
499     /// Assemble the diagonal of a square linear Operator
500     ///
501     /// This overwrites a Vector with the diagonal of a linear Operator.
502     ///
503     /// Note: Currently only non-composite Operators with a single field and
504     ///      composite Operators with single field sub-operators are supported.
505     ///
506     /// * `op`        - Operator to assemble QFunction
507     /// * `assembled` - Vector to store assembled Operator diagonal
508     ///
509     /// ```
510     /// # use libceed::prelude::*;
511     /// # fn main() -> Result<(), libceed::CeedError> {
512     /// # let ceed = libceed::Ceed::default_init();
513     /// let ne = 4;
514     /// let p = 3;
515     /// let q = 4;
516     /// let ndofs = p * ne - ne + 1;
517     ///
518     /// // Vectors
519     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
520     /// let mut qdata = ceed.vector(ne * q)?;
521     /// qdata.set_value(0.0);
522     /// let mut u = ceed.vector(ndofs)?;
523     /// u.set_value(1.0);
524     /// let mut v = ceed.vector(ndofs)?;
525     /// v.set_value(0.0);
526     ///
527     /// // Restrictions
528     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
529     /// for i in 0..ne {
530     ///     indx[2 * i + 0] = i as i32;
531     ///     indx[2 * i + 1] = (i + 1) as i32;
532     /// }
533     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
534     /// let mut indu: Vec<i32> = vec![0; p * ne];
535     /// for i in 0..ne {
536     ///     indu[p * i + 0] = (2 * i) as i32;
537     ///     indu[p * i + 1] = (2 * i + 1) as i32;
538     ///     indu[p * i + 2] = (2 * i + 2) as i32;
539     /// }
540     /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
541     /// let strides: [i32; 3] = [1, q as i32, q as i32];
542     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
543     ///
544     /// // Bases
545     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
546     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
547     ///
548     /// // Build quadrature data
549     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
550     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
551     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
552     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
553     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
554     ///     .apply(&x, &mut qdata)?;
555     ///
556     /// // Mass operator
557     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
558     /// let op_mass = ceed
559     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
560     ///     .field("u", &ru, &bu, VectorOpt::Active)?
561     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
562     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
563     ///
564     /// // Diagonal
565     /// let mut diag = ceed.vector(ndofs)?;
566     /// diag.set_value(0.0);
567     /// op_mass.linear_assemble_diagonal(&mut diag)?;
568     ///
569     /// // Manual diagonal computation
570     /// let mut true_diag = ceed.vector(ndofs)?;
571     /// for i in 0..ndofs {
572     ///     u.set_value(0.0);
573     ///     {
574     ///         let mut u_array = u.view_mut();
575     ///         u_array[i] = 1.;
576     ///     }
577     ///
578     ///     op_mass.apply(&u, &mut v)?;
579     ///
580     ///     {
581     ///         let v_array = v.view_mut();
582     ///         let mut true_array = true_diag.view_mut();
583     ///         true_array[i] = v_array[i];
584     ///     }
585     /// }
586     ///
587     /// // Check
588     /// diag.view()
589     ///     .iter()
590     ///     .zip(true_diag.view().iter())
591     ///     .for_each(|(computed, actual)| {
592     ///         assert!(
593     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
594     ///             "Diagonal entry incorrect"
595     ///         );
596     ///     });
597     /// # Ok(())
598     /// # }
599     /// ```
600     pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
601         self.op_core.linear_assemble_diagonal(assembled)
602     }
603 
604     /// Assemble the diagonal of a square linear Operator
605     ///
606     /// This sums into a Vector with the diagonal of a linear Operator.
607     ///
608     /// Note: Currently only non-composite Operators with a single field and
609     ///      composite Operators with single field sub-operators are supported.
610     ///
611     /// * `op`        - Operator to assemble QFunction
612     /// * `assembled` - Vector to store assembled Operator diagonal
613     ///
614     ///
615     /// ```
616     /// # use libceed::prelude::*;
617     /// # fn main() -> Result<(), libceed::CeedError> {
618     /// # let ceed = libceed::Ceed::default_init();
619     /// let ne = 4;
620     /// let p = 3;
621     /// let q = 4;
622     /// let ndofs = p * ne - ne + 1;
623     ///
624     /// // Vectors
625     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
626     /// let mut qdata = ceed.vector(ne * q)?;
627     /// qdata.set_value(0.0);
628     /// let mut u = ceed.vector(ndofs)?;
629     /// u.set_value(1.0);
630     /// let mut v = ceed.vector(ndofs)?;
631     /// v.set_value(0.0);
632     ///
633     /// // Restrictions
634     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
635     /// for i in 0..ne {
636     ///     indx[2 * i + 0] = i as i32;
637     ///     indx[2 * i + 1] = (i + 1) as i32;
638     /// }
639     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
640     /// let mut indu: Vec<i32> = vec![0; p * ne];
641     /// for i in 0..ne {
642     ///     indu[p * i + 0] = (2 * i) as i32;
643     ///     indu[p * i + 1] = (2 * i + 1) as i32;
644     ///     indu[p * i + 2] = (2 * i + 2) as i32;
645     /// }
646     /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
647     /// let strides: [i32; 3] = [1, q as i32, q as i32];
648     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
649     ///
650     /// // Bases
651     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
652     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
653     ///
654     /// // Build quadrature data
655     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
656     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
657     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
658     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
659     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
660     ///     .apply(&x, &mut qdata)?;
661     ///
662     /// // Mass operator
663     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
664     /// let op_mass = ceed
665     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
666     ///     .field("u", &ru, &bu, VectorOpt::Active)?
667     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
668     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
669     ///
670     /// // Diagonal
671     /// let mut diag = ceed.vector(ndofs)?;
672     /// diag.set_value(1.0);
673     /// op_mass.linear_assemble_add_diagonal(&mut diag)?;
674     ///
675     /// // Manual diagonal computation
676     /// let mut true_diag = ceed.vector(ndofs)?;
677     /// for i in 0..ndofs {
678     ///     u.set_value(0.0);
679     ///     {
680     ///         let mut u_array = u.view_mut();
681     ///         u_array[i] = 1.;
682     ///     }
683     ///
684     ///     op_mass.apply(&u, &mut v)?;
685     ///
686     ///     {
687     ///         let v_array = v.view_mut();
688     ///         let mut true_array = true_diag.view_mut();
689     ///         true_array[i] = v_array[i] + 1.0;
690     ///     }
691     /// }
692     ///
693     /// // Check
694     /// diag.view()
695     ///     .iter()
696     ///     .zip(true_diag.view().iter())
697     ///     .for_each(|(computed, actual)| {
698     ///         assert!(
699     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
700     ///             "Diagonal entry incorrect"
701     ///         );
702     ///     });
703     /// # Ok(())
704     /// # }
705     /// ```
706     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
707         self.op_core.linear_assemble_add_diagonal(assembled)
708     }
709 
710     /// Assemble the point block diagonal of a square linear Operator
711     ///
712     /// This overwrites a Vector with the point block diagonal of a linear
713     /// Operator.
714     ///
715     /// Note: Currently only non-composite Operators with a single field and
716     ///      composite Operators with single field sub-operators are supported.
717     ///
718     /// * `op`        - Operator to assemble QFunction
719     /// * `assembled` - Vector to store assembled CeedOperator point block
720     ///                   diagonal, provided in row-major form with an
721     ///                   `ncomp * ncomp` block at each node. The dimensions of
722     ///                   this vector are derived from the active vector for
723     ///                   the CeedOperator. The array has shape
724     ///                   `[nodes, component out, component in]`.
725     ///
726     /// ```
727     /// # use libceed::prelude::*;
728     /// # fn main() -> Result<(), libceed::CeedError> {
729     /// # let ceed = libceed::Ceed::default_init();
730     /// let ne = 4;
731     /// let p = 3;
732     /// let q = 4;
733     /// let ncomp = 2;
734     /// let ndofs = p * ne - ne + 1;
735     ///
736     /// // Vectors
737     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
738     /// let mut qdata = ceed.vector(ne * q)?;
739     /// qdata.set_value(0.0);
740     /// let mut u = ceed.vector(ncomp * ndofs)?;
741     /// u.set_value(1.0);
742     /// let mut v = ceed.vector(ncomp * ndofs)?;
743     /// v.set_value(0.0);
744     ///
745     /// // Restrictions
746     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
747     /// for i in 0..ne {
748     ///     indx[2 * i + 0] = i as i32;
749     ///     indx[2 * i + 1] = (i + 1) as i32;
750     /// }
751     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
752     /// let mut indu: Vec<i32> = vec![0; p * ne];
753     /// for i in 0..ne {
754     ///     indu[p * i + 0] = (2 * i) as i32;
755     ///     indu[p * i + 1] = (2 * i + 1) as i32;
756     ///     indu[p * i + 2] = (2 * i + 2) as i32;
757     /// }
758     /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
759     /// let strides: [i32; 3] = [1, q as i32, q as i32];
760     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
761     ///
762     /// // Bases
763     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
764     /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
765     ///
766     /// // Build quadrature data
767     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
768     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
769     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
770     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
771     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
772     ///     .apply(&x, &mut qdata)?;
773     ///
774     /// // Mass operator
775     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
776     ///     // Number of quadrature points
777     ///     let q = qdata.len();
778     ///
779     ///     // Iterate over quadrature points
780     ///     for i in 0..q {
781     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
782     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
783     ///     }
784     ///
785     ///     // Return clean error code
786     ///     0
787     /// };
788     ///
789     /// let qf_mass = ceed
790     ///     .q_function_interior(1, Box::new(mass_2_comp))?
791     ///     .input("u", 2, EvalMode::Interp)?
792     ///     .input("qdata", 1, EvalMode::None)?
793     ///     .output("v", 2, EvalMode::Interp)?;
794     ///
795     /// let op_mass = ceed
796     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
797     ///     .field("u", &ru, &bu, VectorOpt::Active)?
798     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
799     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
800     ///
801     /// // Diagonal
802     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
803     /// diag.set_value(0.0);
804     /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?;
805     ///
806     /// // Manual diagonal computation
807     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
808     /// for i in 0..ndofs {
809     ///     for j in 0..ncomp {
810     ///         u.set_value(0.0);
811     ///         {
812     ///             let mut u_array = u.view_mut();
813     ///             u_array[i + j * ndofs] = 1.;
814     ///         }
815     ///
816     ///         op_mass.apply(&u, &mut v)?;
817     ///
818     ///         {
819     ///             let v_array = v.view_mut();
820     ///             let mut true_array = true_diag.view_mut();
821     ///             for k in 0..ncomp {
822     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
823     ///             }
824     ///         }
825     ///     }
826     /// }
827     ///
828     /// // Check
829     /// diag.view()
830     ///     .iter()
831     ///     .zip(true_diag.view().iter())
832     ///     .for_each(|(computed, actual)| {
833     ///         assert!(
834     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
835     ///             "Diagonal entry incorrect"
836     ///         );
837     ///     });
838     /// # Ok(())
839     /// # }
840     /// ```
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared OperatorCore implementation.
        self.op_core.linear_assemble_point_block_diagonal(assembled)
    }
847 
848     /// Assemble the point block diagonal of a square linear Operator
849     ///
850     /// This sums into a Vector with the point block diagonal of a linear
851     /// Operator.
852     ///
    /// Note: Currently only non-composite Operators with a single field and
    ///       composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to sum the assembled CeedOperator point block
    ///                   diagonal into, provided in row-major form with an
    ///                   `ncomp * ncomp` block at each node. The dimensions of
    ///                   this vector are derived from the active vector for
    ///                   the CeedOperator. The array has shape
    ///                   `[nodes, component out, component in]`.
863     ///
864     /// ```
865     /// # use libceed::prelude::*;
866     /// # fn main() -> Result<(), libceed::CeedError> {
867     /// # let ceed = libceed::Ceed::default_init();
868     /// let ne = 4;
869     /// let p = 3;
870     /// let q = 4;
871     /// let ncomp = 2;
872     /// let ndofs = p * ne - ne + 1;
873     ///
874     /// // Vectors
875     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
876     /// let mut qdata = ceed.vector(ne * q)?;
877     /// qdata.set_value(0.0);
878     /// let mut u = ceed.vector(ncomp * ndofs)?;
879     /// u.set_value(1.0);
880     /// let mut v = ceed.vector(ncomp * ndofs)?;
881     /// v.set_value(0.0);
882     ///
883     /// // Restrictions
884     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
885     /// for i in 0..ne {
886     ///     indx[2 * i + 0] = i as i32;
887     ///     indx[2 * i + 1] = (i + 1) as i32;
888     /// }
889     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
890     /// let mut indu: Vec<i32> = vec![0; p * ne];
891     /// for i in 0..ne {
892     ///     indu[p * i + 0] = (2 * i) as i32;
893     ///     indu[p * i + 1] = (2 * i + 1) as i32;
894     ///     indu[p * i + 2] = (2 * i + 2) as i32;
895     /// }
896     /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
897     /// let strides: [i32; 3] = [1, q as i32, q as i32];
898     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
899     ///
900     /// // Bases
901     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
902     /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
903     ///
904     /// // Build quadrature data
905     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
906     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
907     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
908     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
909     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
910     ///     .apply(&x, &mut qdata)?;
911     ///
912     /// // Mass operator
913     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
914     ///     // Number of quadrature points
915     ///     let q = qdata.len();
916     ///
917     ///     // Iterate over quadrature points
918     ///     for i in 0..q {
919     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
920     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
921     ///     }
922     ///
923     ///     // Return clean error code
924     ///     0
925     /// };
926     ///
927     /// let qf_mass = ceed
928     ///     .q_function_interior(1, Box::new(mass_2_comp))?
929     ///     .input("u", 2, EvalMode::Interp)?
930     ///     .input("qdata", 1, EvalMode::None)?
931     ///     .output("v", 2, EvalMode::Interp)?;
932     ///
933     /// let op_mass = ceed
934     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
935     ///     .field("u", &ru, &bu, VectorOpt::Active)?
936     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
937     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
938     ///
939     /// // Diagonal
940     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
941     /// diag.set_value(1.0);
942     /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
943     ///
944     /// // Manual diagonal computation
945     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
946     /// for i in 0..ndofs {
947     ///     for j in 0..ncomp {
948     ///         u.set_value(0.0);
949     ///         {
950     ///             let mut u_array = u.view_mut();
951     ///             u_array[i + j * ndofs] = 1.;
952     ///         }
953     ///
954     ///         op_mass.apply(&u, &mut v)?;
955     ///
956     ///         {
957     ///             let v_array = v.view_mut();
958     ///             let mut true_array = true_diag.view_mut();
959     ///             for k in 0..ncomp {
960     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
961     ///             }
962     ///         }
963     ///     }
964     /// }
965     ///
966     /// // Check
967     /// diag.view()
968     ///     .iter()
969     ///     .zip(true_diag.view().iter())
970     ///     .for_each(|(computed, actual)| {
971     ///         assert!(
972     ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
973     ///             "Diagonal entry incorrect"
974     ///         );
975     ///     });
976     /// # Ok(())
977     /// # }
978     /// ```
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        // Thin wrapper: delegate to the shared OperatorCore implementation,
        // which sums into `assembled` rather than overwriting it.
        self.op_core
            .linear_assemble_add_point_block_diagonal(assembled)
    }
986 
987     /// Create a multigrid coarse Operator and level transfer Operators for a
988     ///   given Operator, creating the prolongation basis from the fine and
989     ///   coarse grid interpolation
990     ///
991     /// * `p_mult_fine`  - Lvector multiplicity in parallel gather/scatter
992     /// * `rstr_coarse`  - Coarse grid restriction
993     /// * `basis_coarse` - Coarse grid active vector basis
994     ///
995     /// ```
996     /// # use libceed::prelude::*;
997     /// # fn main() -> Result<(), libceed::CeedError> {
998     /// # let ceed = libceed::Ceed::default_init();
999     /// let ne = 15;
1000     /// let p_coarse = 3;
1001     /// let p_fine = 5;
1002     /// let q = 6;
1003     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1004     /// let ndofs_fine = p_fine * ne - ne + 1;
1005     ///
1006     /// // Vectors
1007     /// let x_array = (0..ne + 1)
1008     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1009     ///     .collect::<Vec<Scalar>>();
1010     /// let x = ceed.vector_from_slice(&x_array)?;
1011     /// let mut qdata = ceed.vector(ne * q)?;
1012     /// qdata.set_value(0.0);
1013     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1014     /// u_coarse.set_value(1.0);
1015     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1016     /// u_fine.set_value(1.0);
1017     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1018     /// v_coarse.set_value(0.0);
1019     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1020     /// v_fine.set_value(0.0);
1021     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1022     /// multiplicity.set_value(1.0);
1023     ///
1024     /// // Restrictions
1025     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1026     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1027     ///
1028     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1029     /// for i in 0..ne {
1030     ///     indx[2 * i + 0] = i as i32;
1031     ///     indx[2 * i + 1] = (i + 1) as i32;
1032     /// }
1033     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1034     ///
1035     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1036     /// for i in 0..ne {
1037     ///     for j in 0..p_coarse {
1038     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1039     ///     }
1040     /// }
1041     /// let ru_coarse = ceed.elem_restriction(
1042     ///     ne,
1043     ///     p_coarse,
1044     ///     1,
1045     ///     1,
1046     ///     ndofs_coarse,
1047     ///     MemType::Host,
1048     ///     &indu_coarse,
1049     /// )?;
1050     ///
1051     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1052     /// for i in 0..ne {
1053     ///     for j in 0..p_fine {
1054     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1055     ///     }
1056     /// }
1057     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1058     ///
1059     /// // Bases
1060     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1061     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1062     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1063     ///
1064     /// // Build quadrature data
1065     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1066     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1067     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1068     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1069     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1070     ///     .apply(&x, &mut qdata)?;
1071     ///
1072     /// // Mass operator
1073     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1074     /// let op_mass_fine = ceed
1075     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1076     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1077     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1078     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1079     ///
1080     /// // Multigrid setup
1081     /// let (op_mass_coarse, op_prolong, op_restrict) =
1082     ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
1083     ///
1084     /// // Coarse problem
1085     /// u_coarse.set_value(1.0);
1086     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1087     ///
1088     /// // Check
1089     /// let sum: Scalar = v_coarse.view().iter().sum();
1090     /// assert!(
1091     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1092     ///     "Incorrect interval length computed"
1093     /// );
1094     ///
1095     /// // Prolong
1096     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1097     ///
1098     /// // Fine problem
1099     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1100     ///
1101     /// // Check
1102     /// let sum: Scalar = v_fine.view().iter().sum();
1103     /// assert!(
1104     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1105     ///     "Incorrect interval length computed"
1106     /// );
1107     ///
1108     /// // Restrict
1109     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1110     ///
1111     /// // Check
1112     /// let sum: Scalar = v_coarse.view().iter().sum();
1113     /// assert!(
1114     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1115     ///     "Incorrect interval length computed"
1116     /// );
1117     /// # Ok(())
1118     /// # }
1119     /// ```
1120     pub fn create_multigrid_level(
1121         &self,
1122         p_mult_fine: &Vector,
1123         rstr_coarse: &ElemRestriction,
1124         basis_coarse: &Basis,
1125     ) -> crate::Result<(Operator, Operator, Operator)> {
1126         let mut ptr_coarse = std::ptr::null_mut();
1127         let mut ptr_prolong = std::ptr::null_mut();
1128         let mut ptr_restrict = std::ptr::null_mut();
1129         let ierr = unsafe {
1130             bind_ceed::CeedOperatorMultigridLevelCreate(
1131                 self.op_core.ptr,
1132                 p_mult_fine.ptr,
1133                 rstr_coarse.ptr,
1134                 basis_coarse.ptr,
1135                 &mut ptr_coarse,
1136                 &mut ptr_prolong,
1137                 &mut ptr_restrict,
1138             )
1139         };
1140         self.op_core.check_error(ierr)?;
1141         let op_coarse = Operator::from_raw(ptr_coarse)?;
1142         let op_prolong = Operator::from_raw(ptr_prolong)?;
1143         let op_restrict = Operator::from_raw(ptr_restrict)?;
1144         Ok((op_coarse, op_prolong, op_restrict))
1145     }
1146 
1147     /// Create a multigrid coarse Operator and level transfer Operators for a
1148     ///   given Operator with a tensor basis for the active basis
1149     ///
1150     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1151     /// * `rstr_coarse`   - Coarse grid restriction
1152     /// * `basis_coarse`  - Coarse grid active vector basis
1153     /// * `interp_c_to_f` - Matrix for coarse to fine
1154     ///
1155     /// ```
1156     /// # use libceed::prelude::*;
1157     /// # fn main() -> Result<(), libceed::CeedError> {
1158     /// # let ceed = libceed::Ceed::default_init();
1159     /// let ne = 15;
1160     /// let p_coarse = 3;
1161     /// let p_fine = 5;
1162     /// let q = 6;
1163     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1164     /// let ndofs_fine = p_fine * ne - ne + 1;
1165     ///
1166     /// // Vectors
1167     /// let x_array = (0..ne + 1)
1168     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1169     ///     .collect::<Vec<Scalar>>();
1170     /// let x = ceed.vector_from_slice(&x_array)?;
1171     /// let mut qdata = ceed.vector(ne * q)?;
1172     /// qdata.set_value(0.0);
1173     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1174     /// u_coarse.set_value(1.0);
1175     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1176     /// u_fine.set_value(1.0);
1177     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1178     /// v_coarse.set_value(0.0);
1179     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1180     /// v_fine.set_value(0.0);
1181     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1182     /// multiplicity.set_value(1.0);
1183     ///
1184     /// // Restrictions
1185     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1186     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1187     ///
1188     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1189     /// for i in 0..ne {
1190     ///     indx[2 * i + 0] = i as i32;
1191     ///     indx[2 * i + 1] = (i + 1) as i32;
1192     /// }
1193     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1194     ///
1195     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1196     /// for i in 0..ne {
1197     ///     for j in 0..p_coarse {
1198     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1199     ///     }
1200     /// }
1201     /// let ru_coarse = ceed.elem_restriction(
1202     ///     ne,
1203     ///     p_coarse,
1204     ///     1,
1205     ///     1,
1206     ///     ndofs_coarse,
1207     ///     MemType::Host,
1208     ///     &indu_coarse,
1209     /// )?;
1210     ///
1211     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1212     /// for i in 0..ne {
1213     ///     for j in 0..p_fine {
1214     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1215     ///     }
1216     /// }
1217     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1218     ///
1219     /// // Bases
1220     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1221     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1222     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1223     ///
1224     /// // Build quadrature data
1225     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1226     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1227     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1228     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1229     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1230     ///     .apply(&x, &mut qdata)?;
1231     ///
1232     /// // Mass operator
1233     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1234     /// let op_mass_fine = ceed
1235     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1236     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1237     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1238     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1239     ///
1240     /// // Multigrid setup
1241     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1242     /// {
1243     ///     let mut coarse = ceed.vector(p_coarse)?;
1244     ///     let mut fine = ceed.vector(p_fine)?;
1245     ///     let basis_c_to_f =
1246     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1247     ///     for i in 0..p_coarse {
1248     ///         coarse.set_value(0.0);
1249     ///         {
1250     ///             let mut array = coarse.view_mut();
1251     ///             array[i] = 1.;
1252     ///         }
1253     ///         basis_c_to_f.apply(
1254     ///             1,
1255     ///             TransposeMode::NoTranspose,
1256     ///             EvalMode::Interp,
1257     ///             &coarse,
1258     ///             &mut fine,
1259     ///         )?;
1260     ///         let array = fine.view();
1261     ///         for j in 0..p_fine {
1262     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1263     ///         }
1264     ///     }
1265     /// }
1266     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1(
1267     ///     &multiplicity,
1268     ///     &ru_coarse,
1269     ///     &bu_coarse,
1270     ///     &interp_c_to_f,
1271     /// )?;
1272     ///
1273     /// // Coarse problem
1274     /// u_coarse.set_value(1.0);
1275     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1276     ///
1277     /// // Check
1278     /// let sum: Scalar = v_coarse.view().iter().sum();
1279     /// assert!(
1280     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1281     ///     "Incorrect interval length computed"
1282     /// );
1283     ///
1284     /// // Prolong
1285     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1286     ///
1287     /// // Fine problem
1288     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1289     ///
1290     /// // Check
1291     /// let sum: Scalar = v_fine.view().iter().sum();
1292     /// assert!(
1293     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1294     ///     "Incorrect interval length computed"
1295     /// );
1296     ///
1297     /// // Restrict
1298     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1299     ///
1300     /// // Check
1301     /// let sum: Scalar = v_coarse.view().iter().sum();
1302     /// assert!(
1303     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1304     ///     "Incorrect interval length computed"
1305     /// );
1306     /// # Ok(())
1307     /// # }
1308     /// ```
1309     pub fn create_multigrid_level_tensor_H1(
1310         &self,
1311         p_mult_fine: &Vector,
1312         rstr_coarse: &ElemRestriction,
1313         basis_coarse: &Basis,
1314         interpCtoF: &Vec<Scalar>,
1315     ) -> crate::Result<(Operator, Operator, Operator)> {
1316         let mut ptr_coarse = std::ptr::null_mut();
1317         let mut ptr_prolong = std::ptr::null_mut();
1318         let mut ptr_restrict = std::ptr::null_mut();
1319         let ierr = unsafe {
1320             bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
1321                 self.op_core.ptr,
1322                 p_mult_fine.ptr,
1323                 rstr_coarse.ptr,
1324                 basis_coarse.ptr,
1325                 interpCtoF.as_ptr(),
1326                 &mut ptr_coarse,
1327                 &mut ptr_prolong,
1328                 &mut ptr_restrict,
1329             )
1330         };
1331         self.op_core.check_error(ierr)?;
1332         let op_coarse = Operator::from_raw(ptr_coarse)?;
1333         let op_prolong = Operator::from_raw(ptr_prolong)?;
1334         let op_restrict = Operator::from_raw(ptr_restrict)?;
1335         Ok((op_coarse, op_prolong, op_restrict))
1336     }
1337 
1338     /// Create a multigrid coarse Operator and level transfer Operators for a
1339     ///   given Operator with a non-tensor basis for the active basis
1340     ///
1341     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1342     /// * `rstr_coarse`   - Coarse grid restriction
1343     /// * `basis_coarse`  - Coarse grid active vector basis
1344     /// * `interp_c_to_f` - Matrix for coarse to fine
1345     ///
1346     /// ```
1347     /// # use libceed::prelude::*;
1348     /// # fn main() -> Result<(), libceed::CeedError> {
1349     /// # let ceed = libceed::Ceed::default_init();
1350     /// let ne = 15;
1351     /// let p_coarse = 3;
1352     /// let p_fine = 5;
1353     /// let q = 6;
1354     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1355     /// let ndofs_fine = p_fine * ne - ne + 1;
1356     ///
1357     /// // Vectors
1358     /// let x_array = (0..ne + 1)
1359     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1360     ///     .collect::<Vec<Scalar>>();
1361     /// let x = ceed.vector_from_slice(&x_array)?;
1362     /// let mut qdata = ceed.vector(ne * q)?;
1363     /// qdata.set_value(0.0);
1364     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1365     /// u_coarse.set_value(1.0);
1366     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1367     /// u_fine.set_value(1.0);
1368     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1369     /// v_coarse.set_value(0.0);
1370     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1371     /// v_fine.set_value(0.0);
1372     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1373     /// multiplicity.set_value(1.0);
1374     ///
1375     /// // Restrictions
1376     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1377     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1378     ///
1379     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1380     /// for i in 0..ne {
1381     ///     indx[2 * i + 0] = i as i32;
1382     ///     indx[2 * i + 1] = (i + 1) as i32;
1383     /// }
1384     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1385     ///
1386     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1387     /// for i in 0..ne {
1388     ///     for j in 0..p_coarse {
1389     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1390     ///     }
1391     /// }
1392     /// let ru_coarse = ceed.elem_restriction(
1393     ///     ne,
1394     ///     p_coarse,
1395     ///     1,
1396     ///     1,
1397     ///     ndofs_coarse,
1398     ///     MemType::Host,
1399     ///     &indu_coarse,
1400     /// )?;
1401     ///
1402     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1403     /// for i in 0..ne {
1404     ///     for j in 0..p_fine {
1405     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1406     ///     }
1407     /// }
1408     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1409     ///
1410     /// // Bases
1411     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1412     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1413     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1414     ///
1415     /// // Build quadrature data
1416     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1417     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1418     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1419     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1420     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1421     ///     .apply(&x, &mut qdata)?;
1422     ///
1423     /// // Mass operator
1424     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1425     /// let op_mass_fine = ceed
1426     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1427     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1428     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1429     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1430     ///
1431     /// // Multigrid setup
1432     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1433     /// {
1434     ///     let mut coarse = ceed.vector(p_coarse)?;
1435     ///     let mut fine = ceed.vector(p_fine)?;
1436     ///     let basis_c_to_f =
1437     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1438     ///     for i in 0..p_coarse {
1439     ///         coarse.set_value(0.0);
1440     ///         {
1441     ///             let mut array = coarse.view_mut();
1442     ///             array[i] = 1.;
1443     ///         }
1444     ///         basis_c_to_f.apply(
1445     ///             1,
1446     ///             TransposeMode::NoTranspose,
1447     ///             EvalMode::Interp,
1448     ///             &coarse,
1449     ///             &mut fine,
1450     ///         )?;
1451     ///         let array = fine.view();
1452     ///         for j in 0..p_fine {
1453     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1454     ///         }
1455     ///     }
1456     /// }
1457     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1(
1458     ///     &multiplicity,
1459     ///     &ru_coarse,
1460     ///     &bu_coarse,
1461     ///     &interp_c_to_f,
1462     /// )?;
1463     ///
1464     /// // Coarse problem
1465     /// u_coarse.set_value(1.0);
1466     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1467     ///
1468     /// // Check
1469     /// let sum: Scalar = v_coarse.view().iter().sum();
1470     /// assert!(
1471     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1472     ///     "Incorrect interval length computed"
1473     /// );
1474     ///
1475     /// // Prolong
1476     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1477     ///
1478     /// // Fine problem
1479     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1480     ///
1481     /// // Check
1482     /// let sum: Scalar = v_fine.view().iter().sum();
1483     /// assert!(
1484     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1485     ///     "Incorrect interval length computed"
1486     /// );
1487     ///
1488     /// // Restrict
1489     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1490     ///
1491     /// // Check
1492     /// let sum: Scalar = v_coarse.view().iter().sum();
1493     /// assert!(
1494     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1495     ///     "Incorrect interval length computed"
1496     /// );
1497     /// # Ok(())
1498     /// # }
1499     /// ```
1500     pub fn create_multigrid_level_H1(
1501         &self,
1502         p_mult_fine: &Vector,
1503         rstr_coarse: &ElemRestriction,
1504         basis_coarse: &Basis,
1505         interpCtoF: &[Scalar],
1506     ) -> crate::Result<(Operator, Operator, Operator)> {
1507         let mut ptr_coarse = std::ptr::null_mut();
1508         let mut ptr_prolong = std::ptr::null_mut();
1509         let mut ptr_restrict = std::ptr::null_mut();
1510         let ierr = unsafe {
1511             bind_ceed::CeedOperatorMultigridLevelCreateH1(
1512                 self.op_core.ptr,
1513                 p_mult_fine.ptr,
1514                 rstr_coarse.ptr,
1515                 basis_coarse.ptr,
1516                 interpCtoF.as_ptr(),
1517                 &mut ptr_coarse,
1518                 &mut ptr_prolong,
1519                 &mut ptr_restrict,
1520             )
1521         };
1522         self.op_core.check_error(ierr)?;
1523         let op_coarse = Operator::from_raw(ptr_coarse)?;
1524         let op_prolong = Operator::from_raw(ptr_prolong)?;
1525         let op_restrict = Operator::from_raw(ptr_restrict)?;
1526         Ok((op_coarse, op_prolong, op_restrict))
1527     }
1528 }
1529 
1530 // -----------------------------------------------------------------------------
1531 // Composite Operator
1532 // -----------------------------------------------------------------------------
1533 impl<'a> CompositeOperator<'a> {
1534     // Constructor
1535     pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> {
1536         let mut ptr = std::ptr::null_mut();
1537         let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
1538         ceed.check_error(ierr)?;
1539         Ok(Self {
1540             op_core: OperatorCore {
1541                 ptr,
1542                 _lifeline: PhantomData,
1543             },
1544         })
1545     }
1546 
1547     /// Apply Operator to a vector
1548     ///
1549     /// * `input`  - Input Vector
1550     /// * `output` - Output Vector
1551     ///
1552     /// ```
1553     /// # use libceed::prelude::*;
1554     /// # fn main() -> Result<(), libceed::CeedError> {
1555     /// # let ceed = libceed::Ceed::default_init();
1556     /// let ne = 4;
1557     /// let p = 3;
1558     /// let q = 4;
1559     /// let ndofs = p * ne - ne + 1;
1560     ///
1561     /// // Vectors
1562     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
1563     /// let mut qdata_mass = ceed.vector(ne * q)?;
1564     /// qdata_mass.set_value(0.0);
1565     /// let mut qdata_diff = ceed.vector(ne * q)?;
1566     /// qdata_diff.set_value(0.0);
1567     /// let mut u = ceed.vector(ndofs)?;
1568     /// u.set_value(1.0);
1569     /// let mut v = ceed.vector(ndofs)?;
1570     /// v.set_value(0.0);
1571     ///
1572     /// // Restrictions
1573     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1574     /// for i in 0..ne {
1575     ///     indx[2 * i + 0] = i as i32;
1576     ///     indx[2 * i + 1] = (i + 1) as i32;
1577     /// }
1578     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1579     /// let mut indu: Vec<i32> = vec![0; p * ne];
1580     /// for i in 0..ne {
1581     ///     indu[p * i + 0] = i as i32;
1582     ///     indu[p * i + 1] = (i + 1) as i32;
1583     ///     indu[p * i + 2] = (i + 2) as i32;
1584     /// }
1585     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
1586     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1587     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1588     ///
1589     /// // Bases
1590     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1591     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
1592     ///
1593     /// // Build quadrature data
1594     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
1595     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
1596     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1597     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1598     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1599     ///     .apply(&x, &mut qdata_mass)?;
1600     ///
1601     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
1602     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
1603     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1604     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1605     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1606     ///     .apply(&x, &mut qdata_diff)?;
1607     ///
1608     /// // Application operator
1609     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1610     /// let op_mass = ceed
1611     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1612     ///     .field("u", &ru, &bu, VectorOpt::Active)?
1613     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
1614     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
1615     ///
1616     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1617     /// let op_diff = ceed
1618     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
1619     ///     .field("du", &ru, &bu, VectorOpt::Active)?
1620     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
1621     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
1622     ///
1623     /// let op_composite = ceed
1624     ///     .composite_operator()?
1625     ///     .sub_operator(&op_mass)?
1626     ///     .sub_operator(&op_diff)?;
1627     ///
1628     /// v.set_value(0.0);
1629     /// op_composite.apply(&u, &mut v)?;
1630     ///
1631     /// // Check
1632     /// let sum: Scalar = v.view().iter().sum();
1633     /// assert!(
1634     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1635     ///     "Incorrect interval length computed"
1636     /// );
1637     /// # Ok(())
1638     /// # }
1639     /// ```
1640     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
1641         self.op_core.apply(input, output)
1642     }
1643 
1644     /// Apply Operator to a vector and add result to output vector
1645     ///
1646     /// * `input`  - Input Vector
1647     /// * `output` - Output Vector
1648     ///
1649     /// ```
1650     /// # use libceed::prelude::*;
1651     /// # fn main() -> Result<(), libceed::CeedError> {
1652     /// # let ceed = libceed::Ceed::default_init();
1653     /// let ne = 4;
1654     /// let p = 3;
1655     /// let q = 4;
1656     /// let ndofs = p * ne - ne + 1;
1657     ///
1658     /// // Vectors
1659     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
1660     /// let mut qdata_mass = ceed.vector(ne * q)?;
1661     /// qdata_mass.set_value(0.0);
1662     /// let mut qdata_diff = ceed.vector(ne * q)?;
1663     /// qdata_diff.set_value(0.0);
1664     /// let mut u = ceed.vector(ndofs)?;
1665     /// u.set_value(1.0);
1666     /// let mut v = ceed.vector(ndofs)?;
1667     /// v.set_value(0.0);
1668     ///
1669     /// // Restrictions
1670     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1671     /// for i in 0..ne {
1672     ///     indx[2 * i + 0] = i as i32;
1673     ///     indx[2 * i + 1] = (i + 1) as i32;
1674     /// }
1675     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1676     /// let mut indu: Vec<i32> = vec![0; p * ne];
1677     /// for i in 0..ne {
1678     ///     indu[p * i + 0] = i as i32;
1679     ///     indu[p * i + 1] = (i + 1) as i32;
1680     ///     indu[p * i + 2] = (i + 2) as i32;
1681     /// }
1682     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
1683     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1684     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1685     ///
1686     /// // Bases
1687     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1688     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
1689     ///
1690     /// // Build quadrature data
1691     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
1692     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
1693     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1694     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1695     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1696     ///     .apply(&x, &mut qdata_mass)?;
1697     ///
1698     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
1699     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
1700     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1701     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1702     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1703     ///     .apply(&x, &mut qdata_diff)?;
1704     ///
1705     /// // Application operator
1706     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1707     /// let op_mass = ceed
1708     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1709     ///     .field("u", &ru, &bu, VectorOpt::Active)?
1710     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
1711     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
1712     ///
1713     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1714     /// let op_diff = ceed
1715     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
1716     ///     .field("du", &ru, &bu, VectorOpt::Active)?
1717     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
1718     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
1719     ///
1720     /// let op_composite = ceed
1721     ///     .composite_operator()?
1722     ///     .sub_operator(&op_mass)?
1723     ///     .sub_operator(&op_diff)?;
1724     ///
1725     /// v.set_value(1.0);
1726     /// op_composite.apply_add(&u, &mut v)?;
1727     ///
1728     /// // Check
1729     /// let sum: Scalar = v.view().iter().sum();
1730     /// assert!(
1731     ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
1732     ///     "Incorrect interval length computed"
1733     /// );
1734     /// # Ok(())
1735     /// # }
1736     /// ```
1737     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
1738         self.op_core.apply_add(input, output)
1739     }
1740 
1741     /// Add a sub-Operator to a Composite Operator
1742     ///
1743     /// * `subop` - Sub-Operator
1744     ///
1745     /// ```
1746     /// # use libceed::prelude::*;
1747     /// # fn main() -> Result<(), libceed::CeedError> {
1748     /// # let ceed = libceed::Ceed::default_init();
1749     /// let mut op = ceed.composite_operator()?;
1750     ///
1751     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1752     /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?;
1753     /// op = op.sub_operator(&op_mass)?;
1754     ///
1755     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1756     /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?;
1757     /// op = op.sub_operator(&op_diff)?;
1758     /// # Ok(())
1759     /// # }
1760     /// ```
1761     #[allow(unused_mut)]
1762     pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
1763         let ierr =
1764             unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
1765         self.op_core.check_error(ierr)?;
1766         Ok(self)
1767     }
1768 
1769     /// Assemble the diagonal of a square linear Operator
1770     ///
1771     /// This overwrites a Vector with the diagonal of a linear Operator.
1772     ///
1773     /// Note: Currently only non-composite Operators with a single field and
1774     ///      composite Operators with single field sub-operators are supported.
1775     ///
1776     /// * `op`        - Operator to assemble QFunction
1777     /// * `assembled` - Vector to store assembled Operator diagonal
1778     pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
1779         self.op_core.linear_assemble_add_diagonal(assembled)
1780     }
1781 
1782     /// Assemble the point block diagonal of a square linear Operator
1783     ///
1784     /// This overwrites a Vector with the point block diagonal of a linear
1785     ///   Operator.
1786     ///
1787     /// Note: Currently only non-composite Operators with a single field and
1788     ///      composite Operators with single field sub-operators are supported.
1789     ///
1790     /// * `op`        - Operator to assemble QFunction
1791     /// * `assembled` - Vector to store assembled Operator diagonal
1792     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
1793         self.op_core.linear_assemble_add_diagonal(assembled)
1794     }
1795 
1796     /// Assemble the diagonal of a square linear Operator
1797     ///
1798     /// This overwrites a Vector with the diagonal of a linear Operator.
1799     ///
1800     /// Note: Currently only non-composite Operators with a single field and
1801     ///      composite Operators with single field sub-operators are supported.
1802     ///
1803     /// * `op`        - Operator to assemble QFunction
1804     /// * `assembled` - Vector to store assembled CeedOperator point block
1805     ///                   diagonal, provided in row-major form with an
1806     ///                   `ncomp * ncomp` block at each node. The dimensions of
1807     ///                   this vector are derived from the active vector for
1808     ///                   the CeedOperator. The array has shape
1809     ///                   `[nodes, component out, component in]`.
1810     pub fn linear_assemble_point_block_diagonal(
1811         &self,
1812         assembled: &mut Vector,
1813     ) -> crate::Result<i32> {
1814         self.op_core.linear_assemble_add_diagonal(assembled)
1815     }
1816 
1817     /// Assemble the diagonal of a square linear Operator
1818     ///
1819     /// This sums into a Vector with the diagonal of a linear Operator.
1820     ///
1821     /// Note: Currently only non-composite Operators with a single field and
1822     ///      composite Operators with single field sub-operators are supported.
1823     ///
1824     /// * `op`        - Operator to assemble QFunction
1825     /// * `assembled` - Vector to store assembled CeedOperator point block
1826     ///                   diagonal, provided in row-major form with an
1827     ///                   `ncomp * ncomp` block at each node. The dimensions of
1828     ///                   this vector are derived from the active vector for
1829     ///                   the CeedOperator. The array has shape
1830     ///                   `[nodes, component out, component in]`.
1831     pub fn linear_assemble_add_point_block_diagonal(
1832         &self,
1833         assembled: &mut Vector,
1834     ) -> crate::Result<i32> {
1835         self.op_core.linear_assemble_add_diagonal(assembled)
1836     }
1837 }
1838 
1839 // -----------------------------------------------------------------------------
1840