xref: /libCEED/rust/libceed/src/operator.rs (revision 3f2759cf9eda7a8c0564885fc9cc858f567f0f9b)
1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
3 // reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
15 // testbed platforms, in support of the nation's exascale computing imperative
16 
17 //! A Ceed Operator defines the finite/spectral element operator associated to a
18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions,
19 //! Ceed Bases, and Ceed QFunctions.
20 
21 use crate::prelude::*;
22 
23 // -----------------------------------------------------------------------------
24 // CeedOperator context wrapper
25 // -----------------------------------------------------------------------------
#[derive(Debug)]
pub(crate) struct OperatorCore<'a> {
    // Raw handle to the underlying C CeedOperator object.
    ptr: bind_ceed::CeedOperator,
    // Zero-sized marker tying this wrapper's lifetime 'a to the objects it was
    // created from, without storing an actual reference.
    _lifeline: PhantomData<&'a ()>,
}
31 
#[derive(Debug)]
pub struct Operator<'a> {
    // Shared core wrapper providing the apply/assemble functionality common to
    // Operator and CompositeOperator.
    op_core: OperatorCore<'a>,
}
36 
#[derive(Debug)]
pub struct CompositeOperator<'a> {
    // Shared core wrapper; composite operators reuse the same underlying
    // CeedOperator handle type as regular operators.
    op_core: OperatorCore<'a>,
}
41 
42 // -----------------------------------------------------------------------------
43 // Destructor
44 // -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    // Release the underlying C object when the Rust wrapper goes out of scope.
    fn drop(&mut self) {
        unsafe {
            // SAFETY: `self.ptr` is the handle owned by this wrapper and is
            // destroyed exactly once, here.
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}
52 
53 // -----------------------------------------------------------------------------
54 // Display
55 // -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    // Route libCEED's CeedOperatorView output through an in-memory C stream,
    // then convert the resulting C buffer into a Rust string for display.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        let cstring = unsafe {
            // open_memstream yields a FILE* backed by a heap buffer that is
            // exposed through `ptr` after the stream is closed.
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            // Take ownership of the C-allocated buffer; CString will free it.
            CString::from_raw(ptr)
        };
        cstring.to_string_lossy().fmt(f)
    }
}
69 
70 /// View an Operator
71 ///
72 /// ```
73 /// # use libceed::prelude::*;
74 /// # fn main() -> Result<(), libceed::CeedError> {
75 /// # let ceed = libceed::Ceed::default_init();
76 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
77 ///
78 /// // Operator field arguments
79 /// let ne = 3;
80 /// let q = 4 as usize;
81 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
82 /// for i in 0..ne {
83 ///     ind[2 * i + 0] = i as i32;
84 ///     ind[2 * i + 1] = (i + 1) as i32;
85 /// }
86 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
87 /// let strides: [i32; 3] = [1, q as i32, q as i32];
88 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
89 ///
90 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
91 ///
92 /// // Operator fields
93 /// let op = ceed
94 ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?
95 ///     .field("dx", &r, &b, VectorOpt::Active)?
96 ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)?
97 ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?;
98 ///
99 /// println!("{}", op);
100 /// # Ok(())
101 /// # }
102 /// ```
103 impl<'a> fmt::Display for Operator<'a> {
104     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
105         self.op_core.fmt(f)
106     }
107 }
108 
109 /// View a composite Operator
110 ///
111 /// ```
112 /// # use libceed::prelude::*;
113 /// # fn main() -> Result<(), libceed::CeedError> {
114 /// # let ceed = libceed::Ceed::default_init();
115 ///
116 /// // Sub operator field arguments
117 /// let ne = 3;
118 /// let q = 4 as usize;
119 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
120 /// for i in 0..ne {
121 ///     ind[2 * i + 0] = i as i32;
122 ///     ind[2 * i + 1] = (i + 1) as i32;
123 /// }
124 /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
125 /// let strides: [i32; 3] = [1, q as i32, q as i32];
126 /// let rq = ceed.strided_elem_restriction(ne, 2, 1, q * ne, strides)?;
127 ///
128 /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
129 ///
130 /// let qdata_mass = ceed.vector(q * ne)?;
131 /// let qdata_diff = ceed.vector(q * ne)?;
132 ///
133 /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
134 /// let op_mass = ceed
135 ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
136 ///     .field("u", &r, &b, VectorOpt::Active)?
137 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
138 ///     .field("v", &r, &b, VectorOpt::Active)?;
139 ///
140 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
141 /// let op_diff = ceed
142 ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
143 ///     .field("du", &r, &b, VectorOpt::Active)?
144 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
145 ///     .field("dv", &r, &b, VectorOpt::Active)?;
146 ///
147 /// let op = ceed
148 ///     .composite_operator()?
149 ///     .sub_operator(&op_mass)?
150 ///     .sub_operator(&op_diff)?;
151 ///
152 /// println!("{}", op);
153 /// # Ok(())
154 /// # }
155 /// ```
156 impl<'a> fmt::Display for CompositeOperator<'a> {
157     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
158         self.op_core.fmt(f)
159     }
160 }
161 
162 // -----------------------------------------------------------------------------
163 // Core functionality
164 // -----------------------------------------------------------------------------
165 impl<'a> OperatorCore<'a> {
166     // Error handling
167     #[doc(hidden)]
168     fn check_error(&self, ierr: i32) -> crate::Result<i32> {
169         let mut ptr = std::ptr::null_mut();
170         unsafe {
171             bind_ceed::CeedOperatorGetCeed(self.ptr, &mut ptr);
172         }
173         crate::check_error(ptr, ierr)
174     }
175 
176     // Common implementations
177     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
178         let ierr = unsafe {
179             bind_ceed::CeedOperatorApply(
180                 self.ptr,
181                 input.ptr,
182                 output.ptr,
183                 bind_ceed::CEED_REQUEST_IMMEDIATE,
184             )
185         };
186         self.check_error(ierr)
187     }
188 
189     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
190         let ierr = unsafe {
191             bind_ceed::CeedOperatorApplyAdd(
192                 self.ptr,
193                 input.ptr,
194                 output.ptr,
195                 bind_ceed::CEED_REQUEST_IMMEDIATE,
196             )
197         };
198         self.check_error(ierr)
199     }
200 
201     pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
202         let ierr = unsafe {
203             bind_ceed::CeedOperatorLinearAssembleDiagonal(
204                 self.ptr,
205                 assembled.ptr,
206                 bind_ceed::CEED_REQUEST_IMMEDIATE,
207             )
208         };
209         self.check_error(ierr)
210     }
211 
212     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
213         let ierr = unsafe {
214             bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
215                 self.ptr,
216                 assembled.ptr,
217                 bind_ceed::CEED_REQUEST_IMMEDIATE,
218             )
219         };
220         self.check_error(ierr)
221     }
222 
223     pub fn linear_assemble_point_block_diagonal(
224         &self,
225         assembled: &mut Vector,
226     ) -> crate::Result<i32> {
227         let ierr = unsafe {
228             bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
229                 self.ptr,
230                 assembled.ptr,
231                 bind_ceed::CEED_REQUEST_IMMEDIATE,
232             )
233         };
234         self.check_error(ierr)
235     }
236 
237     pub fn linear_assemble_add_point_block_diagonal(
238         &self,
239         assembled: &mut Vector,
240     ) -> crate::Result<i32> {
241         let ierr = unsafe {
242             bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
243                 self.ptr,
244                 assembled.ptr,
245                 bind_ceed::CEED_REQUEST_IMMEDIATE,
246             )
247         };
248         self.check_error(ierr)
249     }
250 }
251 
252 // -----------------------------------------------------------------------------
253 // Operator
254 // -----------------------------------------------------------------------------
255 impl<'a> Operator<'a> {
256     // Constructor
257     pub fn create<'b>(
258         ceed: &'a crate::Ceed,
259         qf: impl Into<QFunctionOpt<'b>>,
260         dqf: impl Into<QFunctionOpt<'b>>,
261         dqfT: impl Into<QFunctionOpt<'b>>,
262     ) -> crate::Result<Self> {
263         let mut ptr = std::ptr::null_mut();
264         let ierr = unsafe {
265             bind_ceed::CeedOperatorCreate(
266                 ceed.ptr,
267                 qf.into().to_raw(),
268                 dqf.into().to_raw(),
269                 dqfT.into().to_raw(),
270                 &mut ptr,
271             )
272         };
273         ceed.check_error(ierr)?;
274         Ok(Self {
275             op_core: OperatorCore {
276                 ptr,
277                 _lifeline: PhantomData,
278             },
279         })
280     }
281 
282     fn from_raw(ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
283         Ok(Self {
284             op_core: OperatorCore {
285                 ptr,
286                 _lifeline: PhantomData,
287             },
288         })
289     }
290 
291     /// Apply Operator to a vector
292     ///
293     /// * `input`  - Input Vector
294     /// * `output` - Output Vector
295     ///
296     /// ```
297     /// # use libceed::prelude::*;
298     /// # fn main() -> Result<(), libceed::CeedError> {
299     /// # let ceed = libceed::Ceed::default_init();
300     /// let ne = 4;
301     /// let p = 3;
302     /// let q = 4;
303     /// let ndofs = p * ne - ne + 1;
304     ///
305     /// // Vectors
306     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
307     /// let mut qdata = ceed.vector(ne * q)?;
308     /// qdata.set_value(0.0);
309     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
310     /// let mut v = ceed.vector(ndofs)?;
311     /// v.set_value(0.0);
312     ///
313     /// // Restrictions
314     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
315     /// for i in 0..ne {
316     ///     indx[2 * i + 0] = i as i32;
317     ///     indx[2 * i + 1] = (i + 1) as i32;
318     /// }
319     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
320     /// let mut indu: Vec<i32> = vec![0; p * ne];
321     /// for i in 0..ne {
322     ///     indu[p * i + 0] = i as i32;
323     ///     indu[p * i + 1] = (i + 1) as i32;
324     ///     indu[p * i + 2] = (i + 2) as i32;
325     /// }
326     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
327     /// let strides: [i32; 3] = [1, q as i32, q as i32];
328     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
329     ///
330     /// // Bases
331     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
332     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
333     ///
334     /// // Build quadrature data
335     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
336     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
337     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
338     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
339     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
340     ///     .apply(&x, &mut qdata)?;
341     ///
342     /// // Mass operator
343     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
344     /// let op_mass = ceed
345     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
346     ///     .field("u", &ru, &bu, VectorOpt::Active)?
347     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
348     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
349     ///
350     /// v.set_value(0.0);
351     /// op_mass.apply(&u, &mut v)?;
352     ///
353     /// // Check
354     /// let sum: Scalar = v.view().iter().sum();
355     /// assert!(
356     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
357     ///     "Incorrect interval length computed"
358     /// );
359     /// # Ok(())
360     /// # }
361     /// ```
362     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
363         self.op_core.apply(input, output)
364     }
365 
366     /// Apply Operator to a vector and add result to output vector
367     ///
368     /// * `input`  - Input Vector
369     /// * `output` - Output Vector
370     ///
371     /// ```
372     /// # use libceed::prelude::*;
373     /// # fn main() -> Result<(), libceed::CeedError> {
374     /// # let ceed = libceed::Ceed::default_init();
375     /// let ne = 4;
376     /// let p = 3;
377     /// let q = 4;
378     /// let ndofs = p * ne - ne + 1;
379     ///
380     /// // Vectors
381     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
382     /// let mut qdata = ceed.vector(ne * q)?;
383     /// qdata.set_value(0.0);
384     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs])?;
385     /// let mut v = ceed.vector(ndofs)?;
386     ///
387     /// // Restrictions
388     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
389     /// for i in 0..ne {
390     ///     indx[2 * i + 0] = i as i32;
391     ///     indx[2 * i + 1] = (i + 1) as i32;
392     /// }
393     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
394     /// let mut indu: Vec<i32> = vec![0; p * ne];
395     /// for i in 0..ne {
396     ///     indu[p * i + 0] = i as i32;
397     ///     indu[p * i + 1] = (i + 1) as i32;
398     ///     indu[p * i + 2] = (i + 2) as i32;
399     /// }
400     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
401     /// let strides: [i32; 3] = [1, q as i32, q as i32];
402     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
403     ///
404     /// // Bases
405     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
406     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
407     ///
408     /// // Build quadrature data
409     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
410     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
411     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
412     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
413     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
414     ///     .apply(&x, &mut qdata)?;
415     ///
416     /// // Mass operator
417     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
418     /// let op_mass = ceed
419     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
420     ///     .field("u", &ru, &bu, VectorOpt::Active)?
421     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
422     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
423     ///
424     /// v.set_value(1.0);
425     /// op_mass.apply_add(&u, &mut v)?;
426     ///
427     /// // Check
428     /// let sum: Scalar = v.view().iter().sum();
429     /// assert!(
430     ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
431     ///     "Incorrect interval length computed and added"
432     /// );
433     /// # Ok(())
434     /// # }
435     /// ```
436     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
437         self.op_core.apply_add(input, output)
438     }
439 
    /// Provide a field to an Operator for use by its QFunction
441     ///
442     /// * `fieldname` - Name of the field (to be matched with the name used by
443     ///                   the QFunction)
444     /// * `r`         - ElemRestriction
445     /// * `b`         - Basis in which the field resides or `BasisCollocated` if
446     ///                   collocated with quadrature points
447     /// * `v`         - Vector to be used by Operator or `VectorActive` if field
448     ///                   is active or `VectorNone` if using `Weight` with the
449     ///                   QFunction
450     ///
451     ///
452     /// ```
453     /// # use libceed::prelude::*;
454     /// # fn main() -> Result<(), libceed::CeedError> {
455     /// # let ceed = libceed::Ceed::default_init();
456     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
457     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
458     ///
459     /// // Operator field arguments
460     /// let ne = 3;
461     /// let q = 4;
462     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
463     /// for i in 0..ne {
464     ///     ind[2 * i + 0] = i as i32;
465     ///     ind[2 * i + 1] = (i + 1) as i32;
466     /// }
467     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
468     ///
469     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
470     ///
471     /// // Operator field
472     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
473     /// # Ok(())
474     /// # }
475     /// ```
476     #[allow(unused_mut)]
477     pub fn field<'b>(
478         mut self,
479         fieldname: &str,
480         r: impl Into<ElemRestrictionOpt<'b>>,
481         b: impl Into<BasisOpt<'b>>,
482         v: impl Into<VectorOpt<'b>>,
483     ) -> crate::Result<Self> {
484         let fieldname = CString::new(fieldname).expect("CString::new failed");
485         let fieldname = fieldname.as_ptr() as *const i8;
486         let ierr = unsafe {
487             bind_ceed::CeedOperatorSetField(
488                 self.op_core.ptr,
489                 fieldname,
490                 r.into().to_raw(),
491                 b.into().to_raw(),
492                 v.into().to_raw(),
493             )
494         };
495         self.op_core.check_error(ierr)?;
496         Ok(self)
497     }
498 
499     /// Get the number of elements associated with an Operator
500     ///
501     ///
502     /// ```
503     /// # use libceed::prelude::*;
504     /// # fn main() -> Result<(), libceed::CeedError> {
505     /// # let ceed = libceed::Ceed::default_init();
506     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
507     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
508     ///
509     /// // Operator field arguments
510     /// let ne = 3;
511     /// let q = 4;
512     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
513     /// for i in 0..ne {
514     ///     ind[2 * i + 0] = i as i32;
515     ///     ind[2 * i + 1] = (i + 1) as i32;
516     /// }
517     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
518     ///
519     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
520     ///
521     /// // Operator field
522     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
523     ///
524     /// // Check number of elements
525     /// assert_eq!(op.num_elements(), ne, "Incorrect number of elements");
526     /// # Ok(())
527     /// # }
528     /// ```
529     pub fn num_elements(&self) -> usize {
530         let mut nelem = 0;
531         unsafe { bind_ceed::CeedOperatorGetNumElements(self.op_core.ptr, &mut nelem) };
532         usize::try_from(nelem).unwrap()
533     }
534 
535     /// Get the number of quadrature points associated with each element of
536     ///   an Operator
537     ///
538     ///
539     /// ```
540     /// # use libceed::prelude::*;
541     /// # fn main() -> Result<(), libceed::CeedError> {
542     /// # let ceed = libceed::Ceed::default_init();
543     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild")?;
544     /// let mut op = ceed.operator(&qf, QFunctionOpt::None, QFunctionOpt::None)?;
545     ///
546     /// // Operator field arguments
547     /// let ne = 3;
548     /// let q = 4;
549     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
550     /// for i in 0..ne {
551     ///     ind[2 * i + 0] = i as i32;
552     ///     ind[2 * i + 1] = (i + 1) as i32;
553     /// }
554     /// let r = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)?;
555     ///
556     /// let b = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
557     ///
558     /// // Operator field
559     /// op = op.field("dx", &r, &b, VectorOpt::Active)?;
560     ///
561     /// // Check number of quadrature points
562     /// assert_eq!(
563     ///     op.num_quadrature_points(),
564     ///     q,
565     ///     "Incorrect number of quadrature points"
566     /// );
567     /// # Ok(())
568     /// # }
569     /// ```
570     pub fn num_quadrature_points(&self) -> usize {
571         let mut Q = 0;
572         unsafe { bind_ceed::CeedOperatorGetNumQuadraturePoints(self.op_core.ptr, &mut Q) };
573         usize::try_from(Q).unwrap()
574     }
575 
576     /// Assemble the diagonal of a square linear Operator
577     ///
578     /// This overwrites a Vector with the diagonal of a linear Operator.
579     ///
580     /// Note: Currently only non-composite Operators with a single field and
581     ///      composite Operators with single field sub-operators are supported.
582     ///
583     /// * `op`        - Operator to assemble QFunction
584     /// * `assembled` - Vector to store assembled Operator diagonal
585     ///
586     /// ```
587     /// # use libceed::prelude::*;
588     /// # fn main() -> Result<(), libceed::CeedError> {
589     /// # let ceed = libceed::Ceed::default_init();
590     /// let ne = 4;
591     /// let p = 3;
592     /// let q = 4;
593     /// let ndofs = p * ne - ne + 1;
594     ///
595     /// // Vectors
596     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
597     /// let mut qdata = ceed.vector(ne * q)?;
598     /// qdata.set_value(0.0);
599     /// let mut u = ceed.vector(ndofs)?;
600     /// u.set_value(1.0);
601     /// let mut v = ceed.vector(ndofs)?;
602     /// v.set_value(0.0);
603     ///
604     /// // Restrictions
605     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
606     /// for i in 0..ne {
607     ///     indx[2 * i + 0] = i as i32;
608     ///     indx[2 * i + 1] = (i + 1) as i32;
609     /// }
610     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
611     /// let mut indu: Vec<i32> = vec![0; p * ne];
612     /// for i in 0..ne {
613     ///     indu[p * i + 0] = (2 * i) as i32;
614     ///     indu[p * i + 1] = (2 * i + 1) as i32;
615     ///     indu[p * i + 2] = (2 * i + 2) as i32;
616     /// }
617     /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
618     /// let strides: [i32; 3] = [1, q as i32, q as i32];
619     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
620     ///
621     /// // Bases
622     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
623     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
624     ///
625     /// // Build quadrature data
626     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
627     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
628     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
629     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
630     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
631     ///     .apply(&x, &mut qdata)?;
632     ///
633     /// // Mass operator
634     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
635     /// let op_mass = ceed
636     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
637     ///     .field("u", &ru, &bu, VectorOpt::Active)?
638     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
639     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
640     ///
641     /// // Diagonal
642     /// let mut diag = ceed.vector(ndofs)?;
643     /// diag.set_value(0.0);
644     /// op_mass.linear_assemble_diagonal(&mut diag)?;
645     ///
646     /// // Manual diagonal computation
647     /// let mut true_diag = ceed.vector(ndofs)?;
648     /// for i in 0..ndofs {
649     ///     u.set_value(0.0);
650     ///     {
651     ///         let mut u_array = u.view_mut();
652     ///         u_array[i] = 1.;
653     ///     }
654     ///
655     ///     op_mass.apply(&u, &mut v)?;
656     ///
657     ///     {
658     ///         let v_array = v.view_mut();
659     ///         let mut true_array = true_diag.view_mut();
660     ///         true_array[i] = v_array[i];
661     ///     }
662     /// }
663     ///
664     /// // Check
665     /// diag.view()
666     ///     .iter()
667     ///     .zip(true_diag.view().iter())
668     ///     .for_each(|(computed, actual)| {
669     ///         assert!(
670     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
671     ///             "Diagonal entry incorrect"
672     ///         );
673     ///     });
674     /// # Ok(())
675     /// # }
676     /// ```
677     pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
678         self.op_core.linear_assemble_diagonal(assembled)
679     }
680 
681     /// Assemble the diagonal of a square linear Operator
682     ///
683     /// This sums into a Vector with the diagonal of a linear Operator.
684     ///
685     /// Note: Currently only non-composite Operators with a single field and
686     ///      composite Operators with single field sub-operators are supported.
687     ///
688     /// * `op`        - Operator to assemble QFunction
689     /// * `assembled` - Vector to store assembled Operator diagonal
690     ///
691     ///
692     /// ```
693     /// # use libceed::prelude::*;
694     /// # fn main() -> Result<(), libceed::CeedError> {
695     /// # let ceed = libceed::Ceed::default_init();
696     /// let ne = 4;
697     /// let p = 3;
698     /// let q = 4;
699     /// let ndofs = p * ne - ne + 1;
700     ///
701     /// // Vectors
702     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
703     /// let mut qdata = ceed.vector(ne * q)?;
704     /// qdata.set_value(0.0);
705     /// let mut u = ceed.vector(ndofs)?;
706     /// u.set_value(1.0);
707     /// let mut v = ceed.vector(ndofs)?;
708     /// v.set_value(0.0);
709     ///
710     /// // Restrictions
711     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
712     /// for i in 0..ne {
713     ///     indx[2 * i + 0] = i as i32;
714     ///     indx[2 * i + 1] = (i + 1) as i32;
715     /// }
716     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
717     /// let mut indu: Vec<i32> = vec![0; p * ne];
718     /// for i in 0..ne {
719     ///     indu[p * i + 0] = (2 * i) as i32;
720     ///     indu[p * i + 1] = (2 * i + 1) as i32;
721     ///     indu[p * i + 2] = (2 * i + 2) as i32;
722     /// }
723     /// let ru = ceed.elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)?;
724     /// let strides: [i32; 3] = [1, q as i32, q as i32];
725     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
726     ///
727     /// // Bases
728     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
729     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
730     ///
731     /// // Build quadrature data
732     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
733     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
734     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
735     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
736     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
737     ///     .apply(&x, &mut qdata)?;
738     ///
739     /// // Mass operator
740     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
741     /// let op_mass = ceed
742     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
743     ///     .field("u", &ru, &bu, VectorOpt::Active)?
744     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
745     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
746     ///
747     /// // Diagonal
748     /// let mut diag = ceed.vector(ndofs)?;
749     /// diag.set_value(1.0);
750     /// op_mass.linear_assemble_add_diagonal(&mut diag)?;
751     ///
752     /// // Manual diagonal computation
753     /// let mut true_diag = ceed.vector(ndofs)?;
754     /// for i in 0..ndofs {
755     ///     u.set_value(0.0);
756     ///     {
757     ///         let mut u_array = u.view_mut();
758     ///         u_array[i] = 1.;
759     ///     }
760     ///
761     ///     op_mass.apply(&u, &mut v)?;
762     ///
763     ///     {
764     ///         let v_array = v.view_mut();
765     ///         let mut true_array = true_diag.view_mut();
766     ///         true_array[i] = v_array[i] + 1.0;
767     ///     }
768     /// }
769     ///
770     /// // Check
771     /// diag.view()
772     ///     .iter()
773     ///     .zip(true_diag.view().iter())
774     ///     .for_each(|(computed, actual)| {
775     ///         assert!(
776     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
777     ///             "Diagonal entry incorrect"
778     ///         );
779     ///     });
780     /// # Ok(())
781     /// # }
782     /// ```
783     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
784         self.op_core.linear_assemble_add_diagonal(assembled)
785     }
786 
787     /// Assemble the point block diagonal of a square linear Operator
788     ///
789     /// This overwrites a Vector with the point block diagonal of a linear
790     /// Operator.
791     ///
792     /// Note: Currently only non-composite Operators with a single field and
793     ///      composite Operators with single field sub-operators are supported.
794     ///
795     /// * `op`        - Operator to assemble QFunction
796     /// * `assembled` - Vector to store assembled CeedOperator point block
797     ///                   diagonal, provided in row-major form with an
798     ///                   `ncomp * ncomp` block at each node. The dimensions of
799     ///                   this vector are derived from the active vector for
800     ///                   the CeedOperator. The array has shape
801     ///                   `[nodes, component out, component in]`.
802     ///
803     /// ```
804     /// # use libceed::prelude::*;
805     /// # fn main() -> Result<(), libceed::CeedError> {
806     /// # let ceed = libceed::Ceed::default_init();
807     /// let ne = 4;
808     /// let p = 3;
809     /// let q = 4;
810     /// let ncomp = 2;
811     /// let ndofs = p * ne - ne + 1;
812     ///
813     /// // Vectors
814     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
815     /// let mut qdata = ceed.vector(ne * q)?;
816     /// qdata.set_value(0.0);
817     /// let mut u = ceed.vector(ncomp * ndofs)?;
818     /// u.set_value(1.0);
819     /// let mut v = ceed.vector(ncomp * ndofs)?;
820     /// v.set_value(0.0);
821     ///
822     /// // Restrictions
823     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
824     /// for i in 0..ne {
825     ///     indx[2 * i + 0] = i as i32;
826     ///     indx[2 * i + 1] = (i + 1) as i32;
827     /// }
828     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
829     /// let mut indu: Vec<i32> = vec![0; p * ne];
830     /// for i in 0..ne {
831     ///     indu[p * i + 0] = (2 * i) as i32;
832     ///     indu[p * i + 1] = (2 * i + 1) as i32;
833     ///     indu[p * i + 2] = (2 * i + 2) as i32;
834     /// }
835     /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
836     /// let strides: [i32; 3] = [1, q as i32, q as i32];
837     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
838     ///
839     /// // Bases
840     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
841     /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
842     ///
843     /// // Build quadrature data
844     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
845     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
846     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
847     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
848     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
849     ///     .apply(&x, &mut qdata)?;
850     ///
851     /// // Mass operator
852     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
853     ///     // Number of quadrature points
854     ///     let q = qdata.len();
855     ///
856     ///     // Iterate over quadrature points
857     ///     for i in 0..q {
858     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
859     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
860     ///     }
861     ///
862     ///     // Return clean error code
863     ///     0
864     /// };
865     ///
866     /// let qf_mass = ceed
867     ///     .q_function_interior(1, Box::new(mass_2_comp))?
868     ///     .input("u", 2, EvalMode::Interp)?
869     ///     .input("qdata", 1, EvalMode::None)?
870     ///     .output("v", 2, EvalMode::Interp)?;
871     ///
872     /// let op_mass = ceed
873     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
874     ///     .field("u", &ru, &bu, VectorOpt::Active)?
875     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
876     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
877     ///
878     /// // Diagonal
879     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
880     /// diag.set_value(0.0);
881     /// op_mass.linear_assemble_point_block_diagonal(&mut diag)?;
882     ///
883     /// // Manual diagonal computation
884     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
885     /// for i in 0..ndofs {
886     ///     for j in 0..ncomp {
887     ///         u.set_value(0.0);
888     ///         {
889     ///             let mut u_array = u.view_mut();
890     ///             u_array[i + j * ndofs] = 1.;
891     ///         }
892     ///
893     ///         op_mass.apply(&u, &mut v)?;
894     ///
895     ///         {
896     ///             let v_array = v.view_mut();
897     ///             let mut true_array = true_diag.view_mut();
898     ///             for k in 0..ncomp {
899     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
900     ///             }
901     ///         }
902     ///     }
903     /// }
904     ///
905     /// // Check
906     /// diag.view()
907     ///     .iter()
908     ///     .zip(true_diag.view().iter())
909     ///     .for_each(|(computed, actual)| {
910     ///         assert!(
911     ///             (*computed - *actual).abs() < 10.0 * libceed::EPSILON,
912     ///             "Diagonal entry incorrect"
913     ///         );
914     ///     });
915     /// # Ok(())
916     /// # }
917     /// ```
918     pub fn linear_assemble_point_block_diagonal(
919         &self,
920         assembled: &mut Vector,
921     ) -> crate::Result<i32> {
922         self.op_core.linear_assemble_point_block_diagonal(assembled)
923     }
924 
925     /// Assemble the point block diagonal of a square linear Operator
926     ///
927     /// This sums into a Vector with the point block diagonal of a linear
928     /// Operator.
929     ///
    /// Note: Currently only non-composite Operators with a single field and
    ///       composite Operators with single field sub-operators are supported.
    ///
    /// * `op`        - Operator to assemble QFunction
934     /// * `assembled` - Vector to store assembled CeedOperator point block
935     ///                   diagonal, provided in row-major form with an
936     ///                   `ncomp * ncomp` block at each node. The dimensions of
937     ///                   this vector are derived from the active vector for
938     ///                   the CeedOperator. The array has shape
939     ///                   `[nodes, component out, component in]`.
940     ///
941     /// ```
942     /// # use libceed::prelude::*;
943     /// # fn main() -> Result<(), libceed::CeedError> {
944     /// # let ceed = libceed::Ceed::default_init();
945     /// let ne = 4;
946     /// let p = 3;
947     /// let q = 4;
948     /// let ncomp = 2;
949     /// let ndofs = p * ne - ne + 1;
950     ///
951     /// // Vectors
952     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
953     /// let mut qdata = ceed.vector(ne * q)?;
954     /// qdata.set_value(0.0);
955     /// let mut u = ceed.vector(ncomp * ndofs)?;
956     /// u.set_value(1.0);
957     /// let mut v = ceed.vector(ncomp * ndofs)?;
958     /// v.set_value(0.0);
959     ///
960     /// // Restrictions
961     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
962     /// for i in 0..ne {
963     ///     indx[2 * i + 0] = i as i32;
964     ///     indx[2 * i + 1] = (i + 1) as i32;
965     /// }
966     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
967     /// let mut indu: Vec<i32> = vec![0; p * ne];
968     /// for i in 0..ne {
969     ///     indu[p * i + 0] = (2 * i) as i32;
970     ///     indu[p * i + 1] = (2 * i + 1) as i32;
971     ///     indu[p * i + 2] = (2 * i + 2) as i32;
972     /// }
973     /// let ru = ceed.elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)?;
974     /// let strides: [i32; 3] = [1, q as i32, q as i32];
975     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
976     ///
977     /// // Bases
978     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
979     /// let bu = ceed.basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)?;
980     ///
981     /// // Build quadrature data
982     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
983     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
984     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
985     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
986     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
987     ///     .apply(&x, &mut qdata)?;
988     ///
989     /// // Mass operator
990     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
991     ///     // Number of quadrature points
992     ///     let q = qdata.len();
993     ///
994     ///     // Iterate over quadrature points
995     ///     for i in 0..q {
996     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
997     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
998     ///     }
999     ///
1000     ///     // Return clean error code
1001     ///     0
1002     /// };
1003     ///
1004     /// let qf_mass = ceed
1005     ///     .q_function_interior(1, Box::new(mass_2_comp))?
1006     ///     .input("u", 2, EvalMode::Interp)?
1007     ///     .input("qdata", 1, EvalMode::None)?
1008     ///     .output("v", 2, EvalMode::Interp)?;
1009     ///
1010     /// let op_mass = ceed
1011     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1012     ///     .field("u", &ru, &bu, VectorOpt::Active)?
1013     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1014     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
1015     ///
1016     /// // Diagonal
1017     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs)?;
1018     /// diag.set_value(1.0);
1019     /// op_mass.linear_assemble_add_point_block_diagonal(&mut diag)?;
1020     ///
1021     /// // Manual diagonal computation
1022     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs)?;
1023     /// for i in 0..ndofs {
1024     ///     for j in 0..ncomp {
1025     ///         u.set_value(0.0);
1026     ///         {
1027     ///             let mut u_array = u.view_mut();
1028     ///             u_array[i + j * ndofs] = 1.;
1029     ///         }
1030     ///
1031     ///         op_mass.apply(&u, &mut v)?;
1032     ///
1033     ///         {
1034     ///             let v_array = v.view_mut();
1035     ///             let mut true_array = true_diag.view_mut();
1036     ///             for k in 0..ncomp {
1037     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
1038     ///             }
1039     ///         }
1040     ///     }
1041     /// }
1042     ///
1043     /// // Check
1044     /// diag.view()
1045     ///     .iter()
1046     ///     .zip(true_diag.view().iter())
1047     ///     .for_each(|(computed, actual)| {
1048     ///         assert!(
1049     ///             (*computed - 1.0 - *actual).abs() < 10.0 * libceed::EPSILON,
1050     ///             "Diagonal entry incorrect"
1051     ///         );
1052     ///     });
1053     /// # Ok(())
1054     /// # }
1055     /// ```
1056     pub fn linear_assemble_add_point_block_diagonal(
1057         &self,
1058         assembled: &mut Vector,
1059     ) -> crate::Result<i32> {
1060         self.op_core
1061             .linear_assemble_add_point_block_diagonal(assembled)
1062     }
1063 
1064     /// Create a multigrid coarse Operator and level transfer Operators for a
1065     ///   given Operator, creating the prolongation basis from the fine and
1066     ///   coarse grid interpolation
1067     ///
1068     /// * `p_mult_fine`  - Lvector multiplicity in parallel gather/scatter
1069     /// * `rstr_coarse`  - Coarse grid restriction
1070     /// * `basis_coarse` - Coarse grid active vector basis
1071     ///
1072     /// ```
1073     /// # use libceed::prelude::*;
1074     /// # fn main() -> Result<(), libceed::CeedError> {
1075     /// # let ceed = libceed::Ceed::default_init();
1076     /// let ne = 15;
1077     /// let p_coarse = 3;
1078     /// let p_fine = 5;
1079     /// let q = 6;
1080     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1081     /// let ndofs_fine = p_fine * ne - ne + 1;
1082     ///
1083     /// // Vectors
1084     /// let x_array = (0..ne + 1)
1085     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1086     ///     .collect::<Vec<Scalar>>();
1087     /// let x = ceed.vector_from_slice(&x_array)?;
1088     /// let mut qdata = ceed.vector(ne * q)?;
1089     /// qdata.set_value(0.0);
1090     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1091     /// u_coarse.set_value(1.0);
1092     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1093     /// u_fine.set_value(1.0);
1094     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1095     /// v_coarse.set_value(0.0);
1096     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1097     /// v_fine.set_value(0.0);
1098     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1099     /// multiplicity.set_value(1.0);
1100     ///
1101     /// // Restrictions
1102     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1103     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1104     ///
1105     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1106     /// for i in 0..ne {
1107     ///     indx[2 * i + 0] = i as i32;
1108     ///     indx[2 * i + 1] = (i + 1) as i32;
1109     /// }
1110     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1111     ///
1112     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1113     /// for i in 0..ne {
1114     ///     for j in 0..p_coarse {
1115     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1116     ///     }
1117     /// }
1118     /// let ru_coarse = ceed.elem_restriction(
1119     ///     ne,
1120     ///     p_coarse,
1121     ///     1,
1122     ///     1,
1123     ///     ndofs_coarse,
1124     ///     MemType::Host,
1125     ///     &indu_coarse,
1126     /// )?;
1127     ///
1128     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1129     /// for i in 0..ne {
1130     ///     for j in 0..p_fine {
1131     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1132     ///     }
1133     /// }
1134     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1135     ///
1136     /// // Bases
1137     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1138     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1139     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1140     ///
1141     /// // Build quadrature data
1142     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1143     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1144     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1145     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1146     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1147     ///     .apply(&x, &mut qdata)?;
1148     ///
1149     /// // Mass operator
1150     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1151     /// let op_mass_fine = ceed
1152     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1153     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1154     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1155     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1156     ///
1157     /// // Multigrid setup
1158     /// let (op_mass_coarse, op_prolong, op_restrict) =
1159     ///     op_mass_fine.create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)?;
1160     ///
1161     /// // Coarse problem
1162     /// u_coarse.set_value(1.0);
1163     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1164     ///
1165     /// // Check
1166     /// let sum: Scalar = v_coarse.view().iter().sum();
1167     /// assert!(
1168     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1169     ///     "Incorrect interval length computed"
1170     /// );
1171     ///
1172     /// // Prolong
1173     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1174     ///
1175     /// // Fine problem
1176     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1177     ///
1178     /// // Check
1179     /// let sum: Scalar = v_fine.view().iter().sum();
1180     /// assert!(
1181     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1182     ///     "Incorrect interval length computed"
1183     /// );
1184     ///
1185     /// // Restrict
1186     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1187     ///
1188     /// // Check
1189     /// let sum: Scalar = v_coarse.view().iter().sum();
1190     /// assert!(
1191     ///     (sum - 2.0).abs() < 50.0 * libceed::EPSILON,
1192     ///     "Incorrect interval length computed"
1193     /// );
1194     /// # Ok(())
1195     /// # }
1196     /// ```
1197     pub fn create_multigrid_level(
1198         &self,
1199         p_mult_fine: &Vector,
1200         rstr_coarse: &ElemRestriction,
1201         basis_coarse: &Basis,
1202     ) -> crate::Result<(Operator, Operator, Operator)> {
1203         let mut ptr_coarse = std::ptr::null_mut();
1204         let mut ptr_prolong = std::ptr::null_mut();
1205         let mut ptr_restrict = std::ptr::null_mut();
1206         let ierr = unsafe {
1207             bind_ceed::CeedOperatorMultigridLevelCreate(
1208                 self.op_core.ptr,
1209                 p_mult_fine.ptr,
1210                 rstr_coarse.ptr,
1211                 basis_coarse.ptr,
1212                 &mut ptr_coarse,
1213                 &mut ptr_prolong,
1214                 &mut ptr_restrict,
1215             )
1216         };
1217         self.op_core.check_error(ierr)?;
1218         let op_coarse = Operator::from_raw(ptr_coarse)?;
1219         let op_prolong = Operator::from_raw(ptr_prolong)?;
1220         let op_restrict = Operator::from_raw(ptr_restrict)?;
1221         Ok((op_coarse, op_prolong, op_restrict))
1222     }
1223 
1224     /// Create a multigrid coarse Operator and level transfer Operators for a
1225     ///   given Operator with a tensor basis for the active basis
1226     ///
1227     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1228     /// * `rstr_coarse`   - Coarse grid restriction
1229     /// * `basis_coarse`  - Coarse grid active vector basis
1230     /// * `interp_c_to_f` - Matrix for coarse to fine
1231     ///
1232     /// ```
1233     /// # use libceed::prelude::*;
1234     /// # fn main() -> Result<(), libceed::CeedError> {
1235     /// # let ceed = libceed::Ceed::default_init();
1236     /// let ne = 15;
1237     /// let p_coarse = 3;
1238     /// let p_fine = 5;
1239     /// let q = 6;
1240     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1241     /// let ndofs_fine = p_fine * ne - ne + 1;
1242     ///
1243     /// // Vectors
1244     /// let x_array = (0..ne + 1)
1245     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1246     ///     .collect::<Vec<Scalar>>();
1247     /// let x = ceed.vector_from_slice(&x_array)?;
1248     /// let mut qdata = ceed.vector(ne * q)?;
1249     /// qdata.set_value(0.0);
1250     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1251     /// u_coarse.set_value(1.0);
1252     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1253     /// u_fine.set_value(1.0);
1254     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1255     /// v_coarse.set_value(0.0);
1256     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1257     /// v_fine.set_value(0.0);
1258     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1259     /// multiplicity.set_value(1.0);
1260     ///
1261     /// // Restrictions
1262     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1263     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1264     ///
1265     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1266     /// for i in 0..ne {
1267     ///     indx[2 * i + 0] = i as i32;
1268     ///     indx[2 * i + 1] = (i + 1) as i32;
1269     /// }
1270     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1271     ///
1272     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1273     /// for i in 0..ne {
1274     ///     for j in 0..p_coarse {
1275     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1276     ///     }
1277     /// }
1278     /// let ru_coarse = ceed.elem_restriction(
1279     ///     ne,
1280     ///     p_coarse,
1281     ///     1,
1282     ///     1,
1283     ///     ndofs_coarse,
1284     ///     MemType::Host,
1285     ///     &indu_coarse,
1286     /// )?;
1287     ///
1288     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1289     /// for i in 0..ne {
1290     ///     for j in 0..p_fine {
1291     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1292     ///     }
1293     /// }
1294     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1295     ///
1296     /// // Bases
1297     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1298     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1299     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1300     ///
1301     /// // Build quadrature data
1302     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1303     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1304     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1305     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1306     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1307     ///     .apply(&x, &mut qdata)?;
1308     ///
1309     /// // Mass operator
1310     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1311     /// let op_mass_fine = ceed
1312     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1313     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1314     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1315     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1316     ///
1317     /// // Multigrid setup
1318     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1319     /// {
1320     ///     let mut coarse = ceed.vector(p_coarse)?;
1321     ///     let mut fine = ceed.vector(p_fine)?;
1322     ///     let basis_c_to_f =
1323     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1324     ///     for i in 0..p_coarse {
1325     ///         coarse.set_value(0.0);
1326     ///         {
1327     ///             let mut array = coarse.view_mut();
1328     ///             array[i] = 1.;
1329     ///         }
1330     ///         basis_c_to_f.apply(
1331     ///             1,
1332     ///             TransposeMode::NoTranspose,
1333     ///             EvalMode::Interp,
1334     ///             &coarse,
1335     ///             &mut fine,
1336     ///         )?;
1337     ///         let array = fine.view();
1338     ///         for j in 0..p_fine {
1339     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1340     ///         }
1341     ///     }
1342     /// }
1343     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_tensor_H1(
1344     ///     &multiplicity,
1345     ///     &ru_coarse,
1346     ///     &bu_coarse,
1347     ///     &interp_c_to_f,
1348     /// )?;
1349     ///
1350     /// // Coarse problem
1351     /// u_coarse.set_value(1.0);
1352     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1353     ///
1354     /// // Check
1355     /// let sum: Scalar = v_coarse.view().iter().sum();
1356     /// assert!(
1357     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1358     ///     "Incorrect interval length computed"
1359     /// );
1360     ///
1361     /// // Prolong
1362     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1363     ///
1364     /// // Fine problem
1365     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1366     ///
1367     /// // Check
1368     /// let sum: Scalar = v_fine.view().iter().sum();
1369     /// assert!(
1370     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1371     ///     "Incorrect interval length computed"
1372     /// );
1373     ///
1374     /// // Restrict
1375     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1376     ///
1377     /// // Check
1378     /// let sum: Scalar = v_coarse.view().iter().sum();
1379     /// assert!(
1380     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1381     ///     "Incorrect interval length computed"
1382     /// );
1383     /// # Ok(())
1384     /// # }
1385     /// ```
1386     pub fn create_multigrid_level_tensor_H1(
1387         &self,
1388         p_mult_fine: &Vector,
1389         rstr_coarse: &ElemRestriction,
1390         basis_coarse: &Basis,
1391         interpCtoF: &Vec<Scalar>,
1392     ) -> crate::Result<(Operator, Operator, Operator)> {
1393         let mut ptr_coarse = std::ptr::null_mut();
1394         let mut ptr_prolong = std::ptr::null_mut();
1395         let mut ptr_restrict = std::ptr::null_mut();
1396         let ierr = unsafe {
1397             bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
1398                 self.op_core.ptr,
1399                 p_mult_fine.ptr,
1400                 rstr_coarse.ptr,
1401                 basis_coarse.ptr,
1402                 interpCtoF.as_ptr(),
1403                 &mut ptr_coarse,
1404                 &mut ptr_prolong,
1405                 &mut ptr_restrict,
1406             )
1407         };
1408         self.op_core.check_error(ierr)?;
1409         let op_coarse = Operator::from_raw(ptr_coarse)?;
1410         let op_prolong = Operator::from_raw(ptr_prolong)?;
1411         let op_restrict = Operator::from_raw(ptr_restrict)?;
1412         Ok((op_coarse, op_prolong, op_restrict))
1413     }
1414 
1415     /// Create a multigrid coarse Operator and level transfer Operators for a
1416     ///   given Operator with a non-tensor basis for the active basis
1417     ///
1418     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1419     /// * `rstr_coarse`   - Coarse grid restriction
1420     /// * `basis_coarse`  - Coarse grid active vector basis
1421     /// * `interp_c_to_f` - Matrix for coarse to fine
1422     ///
1423     /// ```
1424     /// # use libceed::prelude::*;
1425     /// # fn main() -> Result<(), libceed::CeedError> {
1426     /// # let ceed = libceed::Ceed::default_init();
1427     /// let ne = 15;
1428     /// let p_coarse = 3;
1429     /// let p_fine = 5;
1430     /// let q = 6;
1431     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1432     /// let ndofs_fine = p_fine * ne - ne + 1;
1433     ///
1434     /// // Vectors
1435     /// let x_array = (0..ne + 1)
1436     ///     .map(|i| 2.0 * i as Scalar / ne as Scalar - 1.0)
1437     ///     .collect::<Vec<Scalar>>();
1438     /// let x = ceed.vector_from_slice(&x_array)?;
1439     /// let mut qdata = ceed.vector(ne * q)?;
1440     /// qdata.set_value(0.0);
1441     /// let mut u_coarse = ceed.vector(ndofs_coarse)?;
1442     /// u_coarse.set_value(1.0);
1443     /// let mut u_fine = ceed.vector(ndofs_fine)?;
1444     /// u_fine.set_value(1.0);
1445     /// let mut v_coarse = ceed.vector(ndofs_coarse)?;
1446     /// v_coarse.set_value(0.0);
1447     /// let mut v_fine = ceed.vector(ndofs_fine)?;
1448     /// v_fine.set_value(0.0);
1449     /// let mut multiplicity = ceed.vector(ndofs_fine)?;
1450     /// multiplicity.set_value(1.0);
1451     ///
1452     /// // Restrictions
1453     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1454     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1455     ///
1456     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1457     /// for i in 0..ne {
1458     ///     indx[2 * i + 0] = i as i32;
1459     ///     indx[2 * i + 1] = (i + 1) as i32;
1460     /// }
1461     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1462     ///
1463     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1464     /// for i in 0..ne {
1465     ///     for j in 0..p_coarse {
1466     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1467     ///     }
1468     /// }
1469     /// let ru_coarse = ceed.elem_restriction(
1470     ///     ne,
1471     ///     p_coarse,
1472     ///     1,
1473     ///     1,
1474     ///     ndofs_coarse,
1475     ///     MemType::Host,
1476     ///     &indu_coarse,
1477     /// )?;
1478     ///
1479     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1480     /// for i in 0..ne {
1481     ///     for j in 0..p_fine {
1482     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1483     ///     }
1484     /// }
1485     /// let ru_fine = ceed.elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)?;
1486     ///
1487     /// // Bases
1488     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1489     /// let bu_coarse = ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)?;
1490     /// let bu_fine = ceed.basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)?;
1491     ///
1492     /// // Build quadrature data
1493     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild")?;
1494     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)?
1495     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1496     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1497     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1498     ///     .apply(&x, &mut qdata)?;
1499     ///
1500     /// // Mass operator
1501     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1502     /// let op_mass_fine = ceed
1503     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1504     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)?
1505     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)?
1506     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)?;
1507     ///
1508     /// // Multigrid setup
1509     /// let mut interp_c_to_f: Vec<Scalar> = vec![0.; p_coarse * p_fine];
1510     /// {
1511     ///     let mut coarse = ceed.vector(p_coarse)?;
1512     ///     let mut fine = ceed.vector(p_fine)?;
1513     ///     let basis_c_to_f =
1514     ///         ceed.basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)?;
1515     ///     for i in 0..p_coarse {
1516     ///         coarse.set_value(0.0);
1517     ///         {
1518     ///             let mut array = coarse.view_mut();
1519     ///             array[i] = 1.;
1520     ///         }
1521     ///         basis_c_to_f.apply(
1522     ///             1,
1523     ///             TransposeMode::NoTranspose,
1524     ///             EvalMode::Interp,
1525     ///             &coarse,
1526     ///             &mut fine,
1527     ///         )?;
1528     ///         let array = fine.view();
1529     ///         for j in 0..p_fine {
1530     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1531     ///         }
1532     ///     }
1533     /// }
1534     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine.create_multigrid_level_H1(
1535     ///     &multiplicity,
1536     ///     &ru_coarse,
1537     ///     &bu_coarse,
1538     ///     &interp_c_to_f,
1539     /// )?;
1540     ///
1541     /// // Coarse problem
1542     /// u_coarse.set_value(1.0);
1543     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse)?;
1544     ///
1545     /// // Check
1546     /// let sum: Scalar = v_coarse.view().iter().sum();
1547     /// assert!(
1548     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1549     ///     "Incorrect interval length computed"
1550     /// );
1551     ///
1552     /// // Prolong
1553     /// op_prolong.apply(&u_coarse, &mut u_fine)?;
1554     ///
1555     /// // Fine problem
1556     /// op_mass_fine.apply(&u_fine, &mut v_fine)?;
1557     ///
1558     /// // Check
1559     /// let sum: Scalar = v_fine.view().iter().sum();
1560     /// assert!(
1561     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1562     ///     "Incorrect interval length computed"
1563     /// );
1564     ///
1565     /// // Restrict
1566     /// op_restrict.apply(&v_fine, &mut v_coarse)?;
1567     ///
1568     /// // Check
1569     /// let sum: Scalar = v_coarse.view().iter().sum();
1570     /// assert!(
1571     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1572     ///     "Incorrect interval length computed"
1573     /// );
1574     /// # Ok(())
1575     /// # }
1576     /// ```
1577     pub fn create_multigrid_level_H1(
1578         &self,
1579         p_mult_fine: &Vector,
1580         rstr_coarse: &ElemRestriction,
1581         basis_coarse: &Basis,
1582         interpCtoF: &[Scalar],
1583     ) -> crate::Result<(Operator, Operator, Operator)> {
1584         let mut ptr_coarse = std::ptr::null_mut();
1585         let mut ptr_prolong = std::ptr::null_mut();
1586         let mut ptr_restrict = std::ptr::null_mut();
1587         let ierr = unsafe {
1588             bind_ceed::CeedOperatorMultigridLevelCreateH1(
1589                 self.op_core.ptr,
1590                 p_mult_fine.ptr,
1591                 rstr_coarse.ptr,
1592                 basis_coarse.ptr,
1593                 interpCtoF.as_ptr(),
1594                 &mut ptr_coarse,
1595                 &mut ptr_prolong,
1596                 &mut ptr_restrict,
1597             )
1598         };
1599         self.op_core.check_error(ierr)?;
1600         let op_coarse = Operator::from_raw(ptr_coarse)?;
1601         let op_prolong = Operator::from_raw(ptr_prolong)?;
1602         let op_restrict = Operator::from_raw(ptr_restrict)?;
1603         Ok((op_coarse, op_prolong, op_restrict))
1604     }
1605 }
1606 
1607 // -----------------------------------------------------------------------------
1608 // Composite Operator
1609 // -----------------------------------------------------------------------------
1610 impl<'a> CompositeOperator<'a> {
1611     // Constructor
1612     pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> {
1613         let mut ptr = std::ptr::null_mut();
1614         let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
1615         ceed.check_error(ierr)?;
1616         Ok(Self {
1617             op_core: OperatorCore {
1618                 ptr,
1619                 _lifeline: PhantomData,
1620             },
1621         })
1622     }
1623 
1624     /// Apply Operator to a vector
1625     ///
1626     /// * `input`  - Input Vector
1627     /// * `output` - Output Vector
1628     ///
1629     /// ```
1630     /// # use libceed::prelude::*;
1631     /// # fn main() -> Result<(), libceed::CeedError> {
1632     /// # let ceed = libceed::Ceed::default_init();
1633     /// let ne = 4;
1634     /// let p = 3;
1635     /// let q = 4;
1636     /// let ndofs = p * ne - ne + 1;
1637     ///
1638     /// // Vectors
1639     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
1640     /// let mut qdata_mass = ceed.vector(ne * q)?;
1641     /// qdata_mass.set_value(0.0);
1642     /// let mut qdata_diff = ceed.vector(ne * q)?;
1643     /// qdata_diff.set_value(0.0);
1644     /// let mut u = ceed.vector(ndofs)?;
1645     /// u.set_value(1.0);
1646     /// let mut v = ceed.vector(ndofs)?;
1647     /// v.set_value(0.0);
1648     ///
1649     /// // Restrictions
1650     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1651     /// for i in 0..ne {
1652     ///     indx[2 * i + 0] = i as i32;
1653     ///     indx[2 * i + 1] = (i + 1) as i32;
1654     /// }
1655     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1656     /// let mut indu: Vec<i32> = vec![0; p * ne];
1657     /// for i in 0..ne {
1658     ///     indu[p * i + 0] = i as i32;
1659     ///     indu[p * i + 1] = (i + 1) as i32;
1660     ///     indu[p * i + 2] = (i + 2) as i32;
1661     /// }
1662     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
1663     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1664     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1665     ///
1666     /// // Bases
1667     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1668     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
1669     ///
1670     /// // Build quadrature data
1671     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
1672     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
1673     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1674     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1675     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1676     ///     .apply(&x, &mut qdata_mass)?;
1677     ///
1678     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
1679     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
1680     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1681     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1682     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1683     ///     .apply(&x, &mut qdata_diff)?;
1684     ///
1685     /// // Application operator
1686     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1687     /// let op_mass = ceed
1688     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1689     ///     .field("u", &ru, &bu, VectorOpt::Active)?
1690     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
1691     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
1692     ///
1693     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1694     /// let op_diff = ceed
1695     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
1696     ///     .field("du", &ru, &bu, VectorOpt::Active)?
1697     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
1698     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
1699     ///
1700     /// let op_composite = ceed
1701     ///     .composite_operator()?
1702     ///     .sub_operator(&op_mass)?
1703     ///     .sub_operator(&op_diff)?;
1704     ///
1705     /// v.set_value(0.0);
1706     /// op_composite.apply(&u, &mut v)?;
1707     ///
1708     /// // Check
1709     /// let sum: Scalar = v.view().iter().sum();
1710     /// assert!(
1711     ///     (sum - 2.0).abs() < 10.0 * libceed::EPSILON,
1712     ///     "Incorrect interval length computed"
1713     /// );
1714     /// # Ok(())
1715     /// # }
1716     /// ```
1717     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
1718         self.op_core.apply(input, output)
1719     }
1720 
1721     /// Apply Operator to a vector and add result to output vector
1722     ///
1723     /// * `input`  - Input Vector
1724     /// * `output` - Output Vector
1725     ///
1726     /// ```
1727     /// # use libceed::prelude::*;
1728     /// # fn main() -> Result<(), libceed::CeedError> {
1729     /// # let ceed = libceed::Ceed::default_init();
1730     /// let ne = 4;
1731     /// let p = 3;
1732     /// let q = 4;
1733     /// let ndofs = p * ne - ne + 1;
1734     ///
1735     /// // Vectors
1736     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0])?;
1737     /// let mut qdata_mass = ceed.vector(ne * q)?;
1738     /// qdata_mass.set_value(0.0);
1739     /// let mut qdata_diff = ceed.vector(ne * q)?;
1740     /// qdata_diff.set_value(0.0);
1741     /// let mut u = ceed.vector(ndofs)?;
1742     /// u.set_value(1.0);
1743     /// let mut v = ceed.vector(ndofs)?;
1744     /// v.set_value(0.0);
1745     ///
1746     /// // Restrictions
1747     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1748     /// for i in 0..ne {
1749     ///     indx[2 * i + 0] = i as i32;
1750     ///     indx[2 * i + 1] = (i + 1) as i32;
1751     /// }
1752     /// let rx = ceed.elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)?;
1753     /// let mut indu: Vec<i32> = vec![0; p * ne];
1754     /// for i in 0..ne {
1755     ///     indu[p * i + 0] = i as i32;
1756     ///     indu[p * i + 1] = (i + 1) as i32;
1757     ///     indu[p * i + 2] = (i + 2) as i32;
1758     /// }
1759     /// let ru = ceed.elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)?;
1760     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1761     /// let rq = ceed.strided_elem_restriction(ne, q, 1, q * ne, strides)?;
1762     ///
1763     /// // Bases
1764     /// let bx = ceed.basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)?;
1765     /// let bu = ceed.basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)?;
1766     ///
1767     /// // Build quadrature data
1768     /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild")?;
1769     /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)?
1770     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1771     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1772     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1773     ///     .apply(&x, &mut qdata_mass)?;
1774     ///
1775     /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild")?;
1776     /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)?
1777     ///     .field("dx", &rx, &bx, VectorOpt::Active)?
1778     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)?
1779     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)?
1780     ///     .apply(&x, &mut qdata_diff)?;
1781     ///
1782     /// // Application operator
1783     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1784     /// let op_mass = ceed
1785     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?
1786     ///     .field("u", &ru, &bu, VectorOpt::Active)?
1787     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)?
1788     ///     .field("v", &ru, &bu, VectorOpt::Active)?;
1789     ///
1790     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1791     /// let op_diff = ceed
1792     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?
1793     ///     .field("du", &ru, &bu, VectorOpt::Active)?
1794     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)?
1795     ///     .field("dv", &ru, &bu, VectorOpt::Active)?;
1796     ///
1797     /// let op_composite = ceed
1798     ///     .composite_operator()?
1799     ///     .sub_operator(&op_mass)?
1800     ///     .sub_operator(&op_diff)?;
1801     ///
1802     /// v.set_value(1.0);
1803     /// op_composite.apply_add(&u, &mut v)?;
1804     ///
1805     /// // Check
1806     /// let sum: Scalar = v.view().iter().sum();
1807     /// assert!(
1808     ///     (sum - (2.0 + ndofs as Scalar)).abs() < 10.0 * libceed::EPSILON,
1809     ///     "Incorrect interval length computed"
1810     /// );
1811     /// # Ok(())
1812     /// # }
1813     /// ```
1814     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
1815         self.op_core.apply_add(input, output)
1816     }
1817 
1818     /// Add a sub-Operator to a Composite Operator
1819     ///
1820     /// * `subop` - Sub-Operator
1821     ///
1822     /// ```
1823     /// # use libceed::prelude::*;
1824     /// # fn main() -> Result<(), libceed::CeedError> {
1825     /// # let ceed = libceed::Ceed::default_init();
1826     /// let mut op = ceed.composite_operator()?;
1827     ///
1828     /// let qf_mass = ceed.q_function_interior_by_name("MassApply")?;
1829     /// let op_mass = ceed.operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)?;
1830     /// op = op.sub_operator(&op_mass)?;
1831     ///
1832     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply")?;
1833     /// let op_diff = ceed.operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)?;
1834     /// op = op.sub_operator(&op_diff)?;
1835     /// # Ok(())
1836     /// # }
1837     /// ```
1838     #[allow(unused_mut)]
1839     pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
1840         let ierr =
1841             unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
1842         self.op_core.check_error(ierr)?;
1843         Ok(self)
1844     }
1845 
1846     /// Assemble the diagonal of a square linear Operator
1847     ///
1848     /// This overwrites a Vector with the diagonal of a linear Operator.
1849     ///
1850     /// Note: Currently only non-composite Operators with a single field and
1851     ///      composite Operators with single field sub-operators are supported.
1852     ///
1853     /// * `op`        - Operator to assemble QFunction
1854     /// * `assembled` - Vector to store assembled Operator diagonal
1855     pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
1856         self.op_core.linear_assemble_add_diagonal(assembled)
1857     }
1858 
1859     /// Assemble the point block diagonal of a square linear Operator
1860     ///
1861     /// This overwrites a Vector with the point block diagonal of a linear
1862     ///   Operator.
1863     ///
1864     /// Note: Currently only non-composite Operators with a single field and
1865     ///      composite Operators with single field sub-operators are supported.
1866     ///
1867     /// * `op`        - Operator to assemble QFunction
1868     /// * `assembled` - Vector to store assembled Operator diagonal
1869     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
1870         self.op_core.linear_assemble_add_diagonal(assembled)
1871     }
1872 
1873     /// Assemble the diagonal of a square linear Operator
1874     ///
1875     /// This overwrites a Vector with the diagonal of a linear Operator.
1876     ///
1877     /// Note: Currently only non-composite Operators with a single field and
1878     ///      composite Operators with single field sub-operators are supported.
1879     ///
1880     /// * `op`        - Operator to assemble QFunction
1881     /// * `assembled` - Vector to store assembled CeedOperator point block
1882     ///                   diagonal, provided in row-major form with an
1883     ///                   `ncomp * ncomp` block at each node. The dimensions of
1884     ///                   this vector are derived from the active vector for
1885     ///                   the CeedOperator. The array has shape
1886     ///                   `[nodes, component out, component in]`.
1887     pub fn linear_assemble_point_block_diagonal(
1888         &self,
1889         assembled: &mut Vector,
1890     ) -> crate::Result<i32> {
1891         self.op_core.linear_assemble_add_diagonal(assembled)
1892     }
1893 
1894     /// Assemble the diagonal of a square linear Operator
1895     ///
1896     /// This sums into a Vector with the diagonal of a linear Operator.
1897     ///
1898     /// Note: Currently only non-composite Operators with a single field and
1899     ///      composite Operators with single field sub-operators are supported.
1900     ///
1901     /// * `op`        - Operator to assemble QFunction
1902     /// * `assembled` - Vector to store assembled CeedOperator point block
1903     ///                   diagonal, provided in row-major form with an
1904     ///                   `ncomp * ncomp` block at each node. The dimensions of
1905     ///                   this vector are derived from the active vector for
1906     ///                   the CeedOperator. The array has shape
1907     ///                   `[nodes, component out, component in]`.
1908     pub fn linear_assemble_add_point_block_diagonal(
1909         &self,
1910         assembled: &mut Vector,
1911     ) -> crate::Result<i32> {
1912         self.op_core.linear_assemble_add_diagonal(assembled)
1913     }
1914 }
1915 
1916 // -----------------------------------------------------------------------------
1917