xref: /libCEED/rust/libceed/src/operator.rs (revision acf255f533f9b196990ab212a71b6493b7e4ca3c)
1 // Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
3 // reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
16 
17 //! A Ceed Operator defines the finite/spectral element operator associated to a
18 //! Ceed QFunction. A Ceed Operator connects Ceed ElemRestrictions,
19 //! Ceed Bases, and Ceed QFunctions.
20 
21 use crate::prelude::*;
22 
23 // -----------------------------------------------------------------------------
24 // CeedOperator context wrapper
25 // -----------------------------------------------------------------------------
// Shared wrapper around a raw `CeedOperator` handle, tied to the lifetime of
// the `Ceed` context that created it. Used by both `Operator` and
// `CompositeOperator`.
pub(crate) struct OperatorCore<'a> {
    // Ceed context; used for error-code checking and keeps the context alive
    // at least as long as this operator
    ceed: &'a crate::Ceed,
    // Raw libCEED operator handle; released in `Drop`
    ptr: bind_ceed::CeedOperator,
}
30 
/// A (non-composite) Ceed Operator: applies the action of a QFunction together
/// with its associated ElemRestrictions and Bases.
pub struct Operator<'a> {
    op_core: OperatorCore<'a>,
}
34 
/// A composite Ceed Operator: the sum of the actions of its sub-operators.
pub struct CompositeOperator<'a> {
    op_core: OperatorCore<'a>,
}
38 
39 // -----------------------------------------------------------------------------
40 // Destructor
41 // -----------------------------------------------------------------------------
impl<'a> Drop for OperatorCore<'a> {
    fn drop(&mut self) {
        // SAFETY: `self.ptr` was produced by a successful libCEED creation
        // call and is not used after drop. CeedOperatorDestroy takes the
        // pointer by reference — presumably so it can null it out after
        // releasing the object (matching the C API convention).
        unsafe {
            bind_ceed::CeedOperatorDestroy(&mut self.ptr);
        }
    }
}
49 
50 // -----------------------------------------------------------------------------
51 // Display
52 // -----------------------------------------------------------------------------
impl<'a> fmt::Display for OperatorCore<'a> {
    // Render the operator via libCEED's `CeedOperatorView` into an in-memory
    // C stream, then copy the resulting text into the Rust formatter.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ptr = std::ptr::null_mut();
        let mut sizeloc = crate::MAX_BUFFER_LENGTH;
        let cstring = unsafe {
            // open_memstream allocates a buffer (stored into `ptr`) that is
            // finalized when the stream is closed
            let file = bind_ceed::open_memstream(&mut ptr, &mut sizeloc);
            bind_ceed::CeedOperatorView(self.ptr, file);
            bind_ceed::fclose(file);
            // NOTE(review): CString::from_raw assumes ownership of a buffer
            // that was malloc'ed by the C runtime and will free it with
            // Rust's allocator — this is only sound if the crate uses the
            // system allocator; TODO confirm crate-wide allocator choice.
            CString::from_raw(ptr)
        };
        // to_string_lossy: replace any invalid UTF-8 rather than erroring
        cstring.to_string_lossy().fmt(f)
    }
}
66 
67 /// View an Operator
68 ///
69 /// ```
70 /// # use libceed::prelude::*;
71 /// # let ceed = libceed::Ceed::default_init();
72 /// let qf = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
73 ///
74 /// // Operator field arguments
75 /// let ne = 3;
76 /// let q = 4 as usize;
77 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
78 /// for i in 0..ne {
79 ///     ind[2 * i + 0] = i as i32;
80 ///     ind[2 * i + 1] = (i + 1) as i32;
81 /// }
82 /// let r = ceed
83 ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)
84 ///     .unwrap();
85 /// let strides: [i32; 3] = [1, q as i32, q as i32];
86 /// let rq = ceed
87 ///     .strided_elem_restriction(ne, 2, 1, q * ne, strides)
88 ///     .unwrap();
89 ///
90 /// let b = ceed
91 ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
92 ///     .unwrap();
93 ///
94 /// // Operator fields
95 /// let op = ceed
96 ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)
97 ///     .unwrap()
98 ///     .field("dx", &r, &b, VectorOpt::Active)
99 ///     .unwrap()
100 ///     .field("weights", ElemRestrictionOpt::None, &b, VectorOpt::None)
101 ///     .unwrap()
102 ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
103 ///     .unwrap();
104 ///
105 /// println!("{}", op);
106 /// ```
107 impl<'a> fmt::Display for Operator<'a> {
108     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
109         self.op_core.fmt(f)
110     }
111 }
112 
113 /// View a composite Operator
114 ///
115 /// ```
116 /// # use libceed::prelude::*;
117 /// # let ceed = libceed::Ceed::default_init();
118 ///
119 /// // Sub operator field arguments
120 /// let ne = 3;
121 /// let q = 4 as usize;
122 /// let mut ind: Vec<i32> = vec![0; 2 * ne];
123 /// for i in 0..ne {
124 ///     ind[2 * i + 0] = i as i32;
125 ///     ind[2 * i + 1] = (i + 1) as i32;
126 /// }
127 /// let r = ceed
128 ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)
129 ///     .unwrap();
130 /// let strides: [i32; 3] = [1, q as i32, q as i32];
131 /// let rq = ceed
132 ///     .strided_elem_restriction(ne, 2, 1, q * ne, strides)
133 ///     .unwrap();
134 ///
135 /// let b = ceed
136 ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
137 ///     .unwrap();
138 ///
139 /// let qdata_mass = ceed.vector(q * ne).unwrap();
140 /// let qdata_diff = ceed.vector(q * ne).unwrap();
141 ///
142 /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
143 /// let op_mass = ceed
144 ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
145 ///     .unwrap()
146 ///     .field("u", &r, &b, VectorOpt::Active)
147 ///     .unwrap()
148 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)
149 ///     .unwrap()
150 ///     .field("v", &r, &b, VectorOpt::Active)
151 ///     .unwrap();
152 ///
153 /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply").unwrap();
154 /// let op_diff = ceed
155 ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)
156 ///     .unwrap()
157 ///     .field("du", &r, &b, VectorOpt::Active)
158 ///     .unwrap()
159 ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)
160 ///     .unwrap()
161 ///     .field("dv", &r, &b, VectorOpt::Active)
162 ///     .unwrap();
163 ///
164 /// let op = ceed
165 ///     .composite_operator()
166 ///     .unwrap()
167 ///     .sub_operator(&op_mass)
168 ///     .unwrap()
169 ///     .sub_operator(&op_diff)
170 ///     .unwrap();
171 ///
172 /// println!("{}", op);
173 /// ```
174 impl<'a> fmt::Display for CompositeOperator<'a> {
175     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
176         self.op_core.fmt(f)
177     }
178 }
179 
180 // -----------------------------------------------------------------------------
181 // Core functionality
182 // -----------------------------------------------------------------------------
impl<'a> OperatorCore<'a> {
    // Common implementations shared by `Operator` and `CompositeOperator`.
    // Each method forwards to the matching libCEED C routine with the
    // blocking `CEED_REQUEST_IMMEDIATE` request, then routes the returned
    // integer error code through `Ceed::check_error`.

    // Apply the operator to `input`, overwriting `output` with the result.
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApply(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }

    // Apply the operator to `input`, summing the result into `output`
    // (sum-into semantics, per `CeedOperatorApplyAdd`).
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorApplyAdd(
                self.ptr,
                input.ptr,
                output.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }

    // Overwrite `assembled` with the diagonal of the (square, linear)
    // operator.
    pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }

    // Sum the diagonal of the operator into `assembled`.
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }

    // Overwrite `assembled` with the point-block diagonal of the operator
    // (an ncomp x ncomp block per node, row-major).
    pub fn linear_assemble_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssemblePointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }

    // Sum the point-block diagonal of the operator into `assembled`.
    pub fn linear_assemble_add_point_block_diagonal(
        &self,
        assembled: &mut Vector,
    ) -> crate::Result<i32> {
        let ierr = unsafe {
            bind_ceed::CeedOperatorLinearAssembleAddPointBlockDiagonal(
                self.ptr,
                assembled.ptr,
                bind_ceed::CEED_REQUEST_IMMEDIATE,
            )
        };
        self.ceed.check_error(ierr)
    }
}
259 
260 // -----------------------------------------------------------------------------
261 // Operator
262 // -----------------------------------------------------------------------------
263 impl<'a> Operator<'a> {
    /// Constructor — create an Operator for the given QFunction.
    ///
    /// * `ceed` - Ceed context the operator belongs to
    /// * `qf`   - QFunction defining the action at quadrature points
    /// * `dqf`  - optional derivative QFunction (`QFunctionOpt::None` in all
    ///            examples in this file) — forwarded to `CeedOperatorCreate`
    /// * `dqfT` - optional transpose-derivative QFunction, likewise forwarded
    //
    // `dqfT` keeps the C API's capitalization; renaming would diverge from
    // the libCEED naming used throughout the bindings.
    pub fn create<'b>(
        ceed: &'a crate::Ceed,
        qf: impl Into<QFunctionOpt<'b>>,
        dqf: impl Into<QFunctionOpt<'b>>,
        dqfT: impl Into<QFunctionOpt<'b>>,
    ) -> crate::Result<Self> {
        let mut ptr = std::ptr::null_mut();
        let ierr = unsafe {
            bind_ceed::CeedOperatorCreate(
                ceed.ptr,
                qf.into().to_raw(),
                dqf.into().to_raw(),
                dqfT.into().to_raw(),
                &mut ptr,
            )
        };
        // Surface any libCEED error before wrapping the handle
        ceed.check_error(ierr)?;
        Ok(Self {
            op_core: OperatorCore { ceed, ptr },
        })
    }
286 
    /// Wrap an existing raw `CeedOperator` handle.
    ///
    /// Takes ownership: the handle is destroyed when the returned `Operator`
    /// is dropped, so the caller must not destroy it separately.
    fn from_raw(ceed: &'a crate::Ceed, ptr: bind_ceed::CeedOperator) -> crate::Result<Self> {
        Ok(Self {
            op_core: OperatorCore { ceed, ptr },
        })
    }
292 
293     /// Apply Operator to a vector
294     ///
295     /// * `input`  - Input Vector
296     /// * `output` - Output Vector
297     ///
298     /// ```
299     /// # use libceed::prelude::*;
300     /// # let ceed = libceed::Ceed::default_init();
301     /// let ne = 4;
302     /// let p = 3;
303     /// let q = 4;
304     /// let ndofs = p * ne - ne + 1;
305     ///
306     /// // Vectors
307     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
308     /// let mut qdata = ceed.vector(ne * q).unwrap();
309     /// qdata.set_value(0.0);
310     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs]).unwrap();
311     /// let mut v = ceed.vector(ndofs).unwrap();
312     /// v.set_value(0.0);
313     ///
314     /// // Restrictions
315     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
316     /// for i in 0..ne {
317     ///     indx[2 * i + 0] = i as i32;
318     ///     indx[2 * i + 1] = (i + 1) as i32;
319     /// }
320     /// let rx = ceed
321     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
322     ///     .unwrap();
323     /// let mut indu: Vec<i32> = vec![0; p * ne];
324     /// for i in 0..ne {
325     ///     indu[p * i + 0] = i as i32;
326     ///     indu[p * i + 1] = (i + 1) as i32;
327     ///     indu[p * i + 2] = (i + 2) as i32;
328     /// }
329     /// let ru = ceed
330     ///     .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)
331     ///     .unwrap();
332     /// let strides: [i32; 3] = [1, q as i32, q as i32];
333     /// let rq = ceed
334     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
335     ///     .unwrap();
336     ///
337     /// // Bases
338     /// let bx = ceed
339     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
340     ///     .unwrap();
341     /// let bu = ceed
342     ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
343     ///     .unwrap();
344     ///
345     /// // Build quadrature data
346     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
347     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
348     ///     .unwrap()
349     ///     .field("dx", &rx, &bx, VectorOpt::Active)
350     ///     .unwrap()
351     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
352     ///     .unwrap()
353     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
354     ///     .unwrap()
355     ///     .apply(&x, &mut qdata)
356     ///     .unwrap();
357     ///
358     /// // Mass operator
359     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
360     /// let op_mass = ceed
361     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
362     ///     .unwrap()
363     ///     .field("u", &ru, &bu, VectorOpt::Active)
364     ///     .unwrap()
365     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
366     ///     .unwrap()
367     ///     .field("v", &ru, &bu, VectorOpt::Active)
368     ///     .unwrap();
369     ///
370     /// v.set_value(0.0);
371     /// op_mass.apply(&u, &mut v).unwrap();
372     ///
373     /// // Check
374     /// let sum: f64 = v.view().iter().sum();
375     /// assert!(
376     ///     (sum - 2.0).abs() < 1e-15,
377     ///     "Incorrect interval length computed"
378     /// );
379     /// ```
380     pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
381         self.op_core.apply(input, output)
382     }
383 
384     /// Apply Operator to a vector and add result to output vector
385     ///
386     /// * `input`  - Input Vector
387     /// * `output` - Output Vector
388     ///
389     /// ```
390     /// # use libceed::prelude::*;
391     /// # let ceed = libceed::Ceed::default_init();
392     /// let ne = 4;
393     /// let p = 3;
394     /// let q = 4;
395     /// let ndofs = p * ne - ne + 1;
396     ///
397     /// // Vectors
398     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
399     /// let mut qdata = ceed.vector(ne * q).unwrap();
400     /// qdata.set_value(0.0);
401     /// let u = ceed.vector_from_slice(&vec![1.0; ndofs]).unwrap();
402     /// let mut v = ceed.vector(ndofs).unwrap();
403     ///
404     /// // Restrictions
405     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
406     /// for i in 0..ne {
407     ///     indx[2 * i + 0] = i as i32;
408     ///     indx[2 * i + 1] = (i + 1) as i32;
409     /// }
410     /// let rx = ceed
411     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
412     ///     .unwrap();
413     /// let mut indu: Vec<i32> = vec![0; p * ne];
414     /// for i in 0..ne {
415     ///     indu[p * i + 0] = i as i32;
416     ///     indu[p * i + 1] = (i + 1) as i32;
417     ///     indu[p * i + 2] = (i + 2) as i32;
418     /// }
419     /// let ru = ceed
420     ///     .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)
421     ///     .unwrap();
422     /// let strides: [i32; 3] = [1, q as i32, q as i32];
423     /// let rq = ceed
424     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
425     ///     .unwrap();
426     ///
427     /// // Bases
428     /// let bx = ceed
429     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
430     ///     .unwrap();
431     /// let bu = ceed
432     ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
433     ///     .unwrap();
434     ///
435     /// // Build quadrature data
436     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
437     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
438     ///     .unwrap()
439     ///     .field("dx", &rx, &bx, VectorOpt::Active)
440     ///     .unwrap()
441     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
442     ///     .unwrap()
443     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
444     ///     .unwrap()
445     ///     .apply(&x, &mut qdata)
446     ///     .unwrap();
447     ///
448     /// // Mass operator
449     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
450     /// let op_mass = ceed
451     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
452     ///     .unwrap()
453     ///     .field("u", &ru, &bu, VectorOpt::Active)
454     ///     .unwrap()
455     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
456     ///     .unwrap()
457     ///     .field("v", &ru, &bu, VectorOpt::Active)
458     ///     .unwrap();
459     ///
460     /// v.set_value(1.0);
461     /// op_mass.apply_add(&u, &mut v).unwrap();
462     ///
463     /// // Check
464     /// let sum: f64 = v.view().iter().sum();
465     /// assert!(
466     ///     (sum - (2.0 + ndofs as f64)).abs() < 1e-15,
467     ///     "Incorrect interval length computed and added"
468     /// );
469     /// ```
470     pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
471         self.op_core.apply_add(input, output)
472     }
473 
    /// Provide a field to an Operator for use by its QFunction
475     ///
476     /// * `fieldname` - Name of the field (to be matched with the name used by
477     ///                   the QFunction)
478     /// * `r`         - ElemRestriction
479     /// * `b`         - Basis in which the field resides or `BasisCollocated` if
480     ///                   collocated with quadrature points
481     /// * `v`         - Vector to be used by Operator or `VectorActive` if field
482     ///                   is active or `VectorNone` if using `Weight` with the
483     ///                   QFunction
484     ///
485     ///
486     /// ```
487     /// # use libceed::prelude::*;
488     /// # let ceed = libceed::Ceed::default_init();
489     /// let qf = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
490     /// let mut op = ceed
491     ///     .operator(&qf, QFunctionOpt::None, QFunctionOpt::None)
492     ///     .unwrap();
493     ///
494     /// // Operator field arguments
495     /// let ne = 3;
496     /// let q = 4;
497     /// let mut ind: Vec<i32> = vec![0; 2 * ne];
498     /// for i in 0..ne {
499     ///     ind[2 * i + 0] = i as i32;
500     ///     ind[2 * i + 1] = (i + 1) as i32;
501     /// }
502     /// let r = ceed
503     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &ind)
504     ///     .unwrap();
505     ///
506     /// let b = ceed
507     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
508     ///     .unwrap();
509     ///
510     /// // Operator field
511     /// op = op.field("dx", &r, &b, VectorOpt::Active).unwrap();
512     /// ```
513     #[allow(unused_mut)]
514     pub fn field<'b>(
515         mut self,
516         fieldname: &str,
517         r: impl Into<ElemRestrictionOpt<'b>>,
518         b: impl Into<BasisOpt<'b>>,
519         v: impl Into<VectorOpt<'b>>,
520     ) -> crate::Result<Self> {
521         let fieldname = CString::new(fieldname).expect("CString::new failed");
522         let fieldname = fieldname.as_ptr() as *const i8;
523         let ierr = unsafe {
524             bind_ceed::CeedOperatorSetField(
525                 self.op_core.ptr,
526                 fieldname,
527                 r.into().to_raw(),
528                 b.into().to_raw(),
529                 v.into().to_raw(),
530             )
531         };
532         self.op_core.ceed.check_error(ierr)?;
533         Ok(self)
534     }
535 
536     /// Assemble the diagonal of a square linear Operator
537     ///
538     /// This overwrites a Vector with the diagonal of a linear Operator.
539     ///
540     /// Note: Currently only non-composite Operators with a single field and
541     ///      composite Operators with single field sub-operators are supported.
542     ///
543     /// * `op`        - Operator to assemble QFunction
544     /// * `assembled` - Vector to store assembled Operator diagonal
545     ///
546     /// ```
547     /// # use libceed::prelude::*;
548     /// # let ceed = libceed::Ceed::default_init();
549     /// let ne = 4;
550     /// let p = 3;
551     /// let q = 4;
552     /// let ndofs = p * ne - ne + 1;
553     ///
554     /// // Vectors
555     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
556     /// let mut qdata = ceed.vector(ne * q).unwrap();
557     /// qdata.set_value(0.0);
558     /// let mut u = ceed.vector(ndofs).unwrap();
559     /// u.set_value(1.0);
560     /// let mut v = ceed.vector(ndofs).unwrap();
561     /// v.set_value(0.0);
562     ///
563     /// // Restrictions
564     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
565     /// for i in 0..ne {
566     ///     indx[2 * i + 0] = i as i32;
567     ///     indx[2 * i + 1] = (i + 1) as i32;
568     /// }
569     /// let rx = ceed
570     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
571     ///     .unwrap();
572     /// let mut indu: Vec<i32> = vec![0; p * ne];
573     /// for i in 0..ne {
574     ///     indu[p * i + 0] = (2 * i) as i32;
575     ///     indu[p * i + 1] = (2 * i + 1) as i32;
576     ///     indu[p * i + 2] = (2 * i + 2) as i32;
577     /// }
578     /// let ru = ceed
579     ///     .elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)
580     ///     .unwrap();
581     /// let strides: [i32; 3] = [1, q as i32, q as i32];
582     /// let rq = ceed
583     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
584     ///     .unwrap();
585     ///
586     /// // Bases
587     /// let bx = ceed
588     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
589     ///     .unwrap();
590     /// let bu = ceed
591     ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
592     ///     .unwrap();
593     ///
594     /// // Build quadrature data
595     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
596     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
597     ///     .unwrap()
598     ///     .field("dx", &rx, &bx, VectorOpt::Active)
599     ///     .unwrap()
600     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
601     ///     .unwrap()
602     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
603     ///     .unwrap()
604     ///     .apply(&x, &mut qdata)
605     ///     .unwrap();
606     ///
607     /// // Mass operator
608     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
609     /// let op_mass = ceed
610     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
611     ///     .unwrap()
612     ///     .field("u", &ru, &bu, VectorOpt::Active)
613     ///     .unwrap()
614     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
615     ///     .unwrap()
616     ///     .field("v", &ru, &bu, VectorOpt::Active)
617     ///     .unwrap();
618     ///
619     /// // Diagonal
620     /// let mut diag = ceed.vector(ndofs).unwrap();
621     /// diag.set_value(0.0);
622     /// op_mass.linear_assemble_diagonal(&mut diag).unwrap();
623     ///
624     /// // Manual diagonal computation
625     /// let mut true_diag = ceed.vector(ndofs).unwrap();
626     /// for i in 0..ndofs {
627     ///     u.set_value(0.0);
628     ///     {
629     ///         let mut u_array = u.view_mut();
630     ///         u_array[i] = 1.;
631     ///     }
632     ///
633     ///     op_mass.apply(&u, &mut v).unwrap();
634     ///
635     ///     {
636     ///         let v_array = v.view_mut();
637     ///         let mut true_array = true_diag.view_mut();
638     ///         true_array[i] = v_array[i];
639     ///     }
640     /// }
641     ///
642     /// // Check
643     /// diag.view()
644     ///     .iter()
645     ///     .zip(true_diag.view().iter())
646     ///     .for_each(|(computed, actual)| {
647     ///         assert!(
648     ///             (*computed - *actual).abs() < 1e-15,
649     ///             "Diagonal entry incorrect"
650     ///         );
651     ///     });
652     /// ```
653     pub fn linear_assemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
654         self.op_core.linear_assemble_diagonal(assembled)
655     }
656 
657     /// Assemble the diagonal of a square linear Operator
658     ///
659     /// This sums into a Vector with the diagonal of a linear Operator.
660     ///
661     /// Note: Currently only non-composite Operators with a single field and
662     ///      composite Operators with single field sub-operators are supported.
663     ///
664     /// * `op`        - Operator to assemble QFunction
665     /// * `assembled` - Vector to store assembled Operator diagonal
666     ///
667     ///
668     /// ```
669     /// # use libceed::prelude::*;
670     /// # let ceed = libceed::Ceed::default_init();
671     /// let ne = 4;
672     /// let p = 3;
673     /// let q = 4;
674     /// let ndofs = p * ne - ne + 1;
675     ///
676     /// // Vectors
677     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
678     /// let mut qdata = ceed.vector(ne * q).unwrap();
679     /// qdata.set_value(0.0);
680     /// let mut u = ceed.vector(ndofs).unwrap();
681     /// u.set_value(1.0);
682     /// let mut v = ceed.vector(ndofs).unwrap();
683     /// v.set_value(0.0);
684     ///
685     /// // Restrictions
686     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
687     /// for i in 0..ne {
688     ///     indx[2 * i + 0] = i as i32;
689     ///     indx[2 * i + 1] = (i + 1) as i32;
690     /// }
691     /// let rx = ceed
692     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
693     ///     .unwrap();
694     /// let mut indu: Vec<i32> = vec![0; p * ne];
695     /// for i in 0..ne {
696     ///     indu[p * i + 0] = (2 * i) as i32;
697     ///     indu[p * i + 1] = (2 * i + 1) as i32;
698     ///     indu[p * i + 2] = (2 * i + 2) as i32;
699     /// }
700     /// let ru = ceed
701     ///     .elem_restriction(ne, p, 1, 1, ndofs, MemType::Host, &indu)
702     ///     .unwrap();
703     /// let strides: [i32; 3] = [1, q as i32, q as i32];
704     /// let rq = ceed
705     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
706     ///     .unwrap();
707     ///
708     /// // Bases
709     /// let bx = ceed
710     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
711     ///     .unwrap();
712     /// let bu = ceed
713     ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
714     ///     .unwrap();
715     ///
716     /// // Build quadrature data
717     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
718     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
719     ///     .unwrap()
720     ///     .field("dx", &rx, &bx, VectorOpt::Active)
721     ///     .unwrap()
722     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
723     ///     .unwrap()
724     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
725     ///     .unwrap()
726     ///     .apply(&x, &mut qdata)
727     ///     .unwrap();
728     ///
729     /// // Mass operator
730     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
731     /// let op_mass = ceed
732     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
733     ///     .unwrap()
734     ///     .field("u", &ru, &bu, VectorOpt::Active)
735     ///     .unwrap()
736     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
737     ///     .unwrap()
738     ///     .field("v", &ru, &bu, VectorOpt::Active)
739     ///     .unwrap();
740     ///
741     /// // Diagonal
742     /// let mut diag = ceed.vector(ndofs).unwrap();
743     /// diag.set_value(1.0);
744     /// op_mass.linear_assemble_add_diagonal(&mut diag).unwrap();
745     ///
746     /// // Manual diagonal computation
747     /// let mut true_diag = ceed.vector(ndofs).unwrap();
748     /// for i in 0..ndofs {
749     ///     u.set_value(0.0);
750     ///     {
751     ///         let mut u_array = u.view_mut();
752     ///         u_array[i] = 1.;
753     ///     }
754     ///
755     ///     op_mass.apply(&u, &mut v).unwrap();
756     ///
757     ///     {
758     ///         let v_array = v.view_mut();
759     ///         let mut true_array = true_diag.view_mut();
760     ///         true_array[i] = v_array[i] + 1.0;
761     ///     }
762     /// }
763     ///
764     /// // Check
765     /// diag.view()
766     ///     .iter()
767     ///     .zip(true_diag.view().iter())
768     ///     .for_each(|(computed, actual)| {
769     ///         assert!(
770     ///             (*computed - *actual).abs() < 1e-15,
771     ///             "Diagonal entry incorrect"
772     ///         );
773     ///     });
774     /// ```
775     pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
776         self.op_core.linear_assemble_add_diagonal(assembled)
777     }
778 
779     /// Assemble the point block diagonal of a square linear Operator
780     ///
781     /// This overwrites a Vector with the point block diagonal of a linear
782     /// Operator.
783     ///
784     /// Note: Currently only non-composite Operators with a single field and
785     ///      composite Operators with single field sub-operators are supported.
786     ///
787     /// * `op`        - Operator to assemble QFunction
788     /// * `assembled` - Vector to store assembled CeedOperator point block
789     ///                   diagonal, provided in row-major form with an
790     ///                   `ncomp * ncomp` block at each node. The dimensions of
791     ///                   this vector are derived from the active vector for
792     ///                   the CeedOperator. The array has shape
793     ///                   `[nodes, component out, component in]`.
794     ///
795     /// ```
796     /// # use libceed::prelude::*;
797     /// # let ceed = libceed::Ceed::default_init();
798     /// let ne = 4;
799     /// let p = 3;
800     /// let q = 4;
801     /// let ncomp = 2;
802     /// let ndofs = p * ne - ne + 1;
803     ///
804     /// // Vectors
805     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
806     /// let mut qdata = ceed.vector(ne * q).unwrap();
807     /// qdata.set_value(0.0);
808     /// let mut u = ceed.vector(ncomp * ndofs).unwrap();
809     /// u.set_value(1.0);
810     /// let mut v = ceed.vector(ncomp * ndofs).unwrap();
811     /// v.set_value(0.0);
812     ///
813     /// // Restrictions
814     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
815     /// for i in 0..ne {
816     ///     indx[2 * i + 0] = i as i32;
817     ///     indx[2 * i + 1] = (i + 1) as i32;
818     /// }
819     /// let rx = ceed
820     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
821     ///     .unwrap();
822     /// let mut indu: Vec<i32> = vec![0; p * ne];
823     /// for i in 0..ne {
824     ///     indu[p * i + 0] = (2 * i) as i32;
825     ///     indu[p * i + 1] = (2 * i + 1) as i32;
826     ///     indu[p * i + 2] = (2 * i + 2) as i32;
827     /// }
828     /// let ru = ceed
829     ///     .elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)
830     ///     .unwrap();
831     /// let strides: [i32; 3] = [1, q as i32, q as i32];
832     /// let rq = ceed
833     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
834     ///     .unwrap();
835     ///
836     /// // Bases
837     /// let bx = ceed
838     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
839     ///     .unwrap();
840     /// let bu = ceed
841     ///     .basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)
842     ///     .unwrap();
843     ///
844     /// // Build quadrature data
845     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
846     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
847     ///     .unwrap()
848     ///     .field("dx", &rx, &bx, VectorOpt::Active)
849     ///     .unwrap()
850     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
851     ///     .unwrap()
852     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
853     ///     .unwrap()
854     ///     .apply(&x, &mut qdata)
855     ///     .unwrap();
856     ///
857     /// // Mass operator
858     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
859     ///     // Number of quadrature points
860     ///     let q = qdata.len();
861     ///
862     ///     // Iterate over quadrature points
863     ///     for i in 0..q {
864     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
865     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
866     ///     }
867     ///
868     ///     // Return clean error code
869     ///     0
870     /// };
871     ///
872     /// let qf_mass = ceed
873     ///     .q_function_interior(1, Box::new(mass_2_comp))
874     ///     .unwrap()
875     ///     .input("u", 2, EvalMode::Interp)
876     ///     .unwrap()
877     ///     .input("qdata", 1, EvalMode::None)
878     ///     .unwrap()
879     ///     .output("v", 2, EvalMode::Interp)
880     ///     .unwrap();
881     ///
882     /// let op_mass = ceed
883     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
884     ///     .unwrap()
885     ///     .field("u", &ru, &bu, VectorOpt::Active)
886     ///     .unwrap()
887     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
888     ///     .unwrap()
889     ///     .field("v", &ru, &bu, VectorOpt::Active)
890     ///     .unwrap();
891     ///
892     /// // Diagonal
893     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs).unwrap();
894     /// diag.set_value(0.0);
895     /// op_mass
896     ///     .linear_assemble_point_block_diagonal(&mut diag)
897     ///     .unwrap();
898     ///
899     /// // Manual diagonal computation
900     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs).unwrap();
901     /// for i in 0..ndofs {
902     ///     for j in 0..ncomp {
903     ///         u.set_value(0.0);
904     ///         {
905     ///             let mut u_array = u.view_mut();
906     ///             u_array[i + j * ndofs] = 1.;
907     ///         }
908     ///
909     ///         op_mass.apply(&u, &mut v).unwrap();
910     ///
911     ///         {
912     ///             let v_array = v.view_mut();
913     ///             let mut true_array = true_diag.view_mut();
914     ///             for k in 0..ncomp {
915     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
916     ///             }
917     ///         }
918     ///     }
919     /// }
920     ///
921     /// // Check
922     /// diag.view()
923     ///     .iter()
924     ///     .zip(true_diag.view().iter())
925     ///     .for_each(|(computed, actual)| {
926     ///         assert!(
927     ///             (*computed - *actual).abs() < 1e-15,
928     ///             "Diagonal entry incorrect"
929     ///         );
930     ///     });
931     /// ```
932     pub fn linear_assemble_point_block_diagonal(
933         &self,
934         assembled: &mut Vector,
935     ) -> crate::Result<i32> {
936         self.op_core.linear_assemble_point_block_diagonal(assembled)
937     }
938 
939     /// Assemble the point block diagonal of a square linear Operator
940     ///
941     /// This sums into a Vector with the point block diagonal of a linear
942     /// Operator.
943     ///
    /// Note: Currently only non-composite Operators with a single field and
    ///       composite Operators with single field sub-operators are supported.
946     ///
    /// * `assembled` - Vector to store assembled CeedOperator point block
    ///                   diagonal, provided in row-major form with an
    ///                   `ncomp * ncomp` block at each node. The dimensions of
    ///                   this vector are derived from the active vector for
    ///                   the CeedOperator. The array has shape
    ///                   `[nodes, component out, component in]`.
954     ///
955     /// ```
956     /// # use libceed::prelude::*;
957     /// # let ceed = libceed::Ceed::default_init();
958     /// let ne = 4;
959     /// let p = 3;
960     /// let q = 4;
961     /// let ncomp = 2;
962     /// let ndofs = p * ne - ne + 1;
963     ///
964     /// // Vectors
965     /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
966     /// let mut qdata = ceed.vector(ne * q).unwrap();
967     /// qdata.set_value(0.0);
968     /// let mut u = ceed.vector(ncomp * ndofs).unwrap();
969     /// u.set_value(1.0);
970     /// let mut v = ceed.vector(ncomp * ndofs).unwrap();
971     /// v.set_value(0.0);
972     ///
973     /// // Restrictions
974     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
975     /// for i in 0..ne {
976     ///     indx[2 * i + 0] = i as i32;
977     ///     indx[2 * i + 1] = (i + 1) as i32;
978     /// }
979     /// let rx = ceed
980     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
981     ///     .unwrap();
982     /// let mut indu: Vec<i32> = vec![0; p * ne];
983     /// for i in 0..ne {
984     ///     indu[p * i + 0] = (2 * i) as i32;
985     ///     indu[p * i + 1] = (2 * i + 1) as i32;
986     ///     indu[p * i + 2] = (2 * i + 2) as i32;
987     /// }
988     /// let ru = ceed
989     ///     .elem_restriction(ne, p, ncomp, ndofs, ncomp * ndofs, MemType::Host, &indu)
990     ///     .unwrap();
991     /// let strides: [i32; 3] = [1, q as i32, q as i32];
992     /// let rq = ceed
993     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
994     ///     .unwrap();
995     ///
996     /// // Bases
997     /// let bx = ceed
998     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
999     ///     .unwrap();
1000     /// let bu = ceed
1001     ///     .basis_tensor_H1_Lagrange(1, ncomp, p, q, QuadMode::Gauss)
1002     ///     .unwrap();
1003     ///
1004     /// // Build quadrature data
1005     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
1006     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
1007     ///     .unwrap()
1008     ///     .field("dx", &rx, &bx, VectorOpt::Active)
1009     ///     .unwrap()
1010     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
1011     ///     .unwrap()
1012     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
1013     ///     .unwrap()
1014     ///     .apply(&x, &mut qdata)
1015     ///     .unwrap();
1016     ///
1017     /// // Mass operator
1018     /// let mut mass_2_comp = |[u, qdata, ..]: QFunctionInputs, [v, ..]: QFunctionOutputs| {
1019     ///     // Number of quadrature points
1020     ///     let q = qdata.len();
1021     ///
1022     ///     // Iterate over quadrature points
1023     ///     for i in 0..q {
1024     ///         v[i + 0 * q] = u[i + 1 * q] * qdata[i];
1025     ///         v[i + 1 * q] = u[i + 0 * q] * qdata[i];
1026     ///     }
1027     ///
1028     ///     // Return clean error code
1029     ///     0
1030     /// };
1031     ///
1032     /// let qf_mass = ceed
1033     ///     .q_function_interior(1, Box::new(mass_2_comp))
1034     ///     .unwrap()
1035     ///     .input("u", 2, EvalMode::Interp)
1036     ///     .unwrap()
1037     ///     .input("qdata", 1, EvalMode::None)
1038     ///     .unwrap()
1039     ///     .output("v", 2, EvalMode::Interp)
1040     ///     .unwrap();
1041     ///
1042     /// let op_mass = ceed
1043     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
1044     ///     .unwrap()
1045     ///     .field("u", &ru, &bu, VectorOpt::Active)
1046     ///     .unwrap()
1047     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
1048     ///     .unwrap()
1049     ///     .field("v", &ru, &bu, VectorOpt::Active)
1050     ///     .unwrap();
1051     ///
1052     /// // Diagonal
1053     /// let mut diag = ceed.vector(ncomp * ncomp * ndofs).unwrap();
1054     /// diag.set_value(1.0);
1055     /// op_mass
1056     ///     .linear_assemble_add_point_block_diagonal(&mut diag)
1057     ///     .unwrap();
1058     ///
1059     /// // Manual diagonal computation
1060     /// let mut true_diag = ceed.vector(ncomp * ncomp * ndofs).unwrap();
1061     /// for i in 0..ndofs {
1062     ///     for j in 0..ncomp {
1063     ///         u.set_value(0.0);
1064     ///         {
1065     ///             let mut u_array = u.view_mut();
1066     ///             u_array[i + j * ndofs] = 1.;
1067     ///         }
1068     ///
1069     ///         op_mass.apply(&u, &mut v).unwrap();
1070     ///
1071     ///         {
1072     ///             let v_array = v.view_mut();
1073     ///             let mut true_array = true_diag.view_mut();
1074     ///             for k in 0..ncomp {
1075     ///                 true_array[i * ncomp * ncomp + k * ncomp + j] = v_array[i + k * ndofs];
1076     ///             }
1077     ///         }
1078     ///     }
1079     /// }
1080     ///
1081     /// // Check
1082     /// diag.view()
1083     ///     .iter()
1084     ///     .zip(true_diag.view().iter())
1085     ///     .for_each(|(computed, actual)| {
1086     ///         assert!(
1087     ///             (*computed - 1.0 - *actual).abs() < 1e-15,
1088     ///             "Diagonal entry incorrect"
1089     ///         );
1090     ///     });
1091     /// ```
1092     pub fn linear_assemble_add_point_block_diagonal(
1093         &self,
1094         assembled: &mut Vector,
1095     ) -> crate::Result<i32> {
1096         self.op_core
1097             .linear_assemble_add_point_block_diagonal(assembled)
1098     }
1099 
1100     /// Create a multigrid coarse Operator and level transfer Operators for a
1101     ///   given Operator, creating the prolongation basis from the fine and
1102     ///   coarse grid interpolation
1103     ///
1104     /// * `p_mult_fine`  - Lvector multiplicity in parallel gather/scatter
1105     /// * `rstr_coarse`  - Coarse grid restriction
1106     /// * `basis_coarse` - Coarse grid active vector basis
1107     ///
1108     /// ```
1109     /// # use libceed::prelude::*;
1110     /// # let ceed = libceed::Ceed::default_init();
1111     /// let ne = 15;
1112     /// let p_coarse = 3;
1113     /// let p_fine = 5;
1114     /// let q = 6;
1115     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1116     /// let ndofs_fine = p_fine * ne - ne + 1;
1117     ///
1118     /// // Vectors
1119     /// let x_array = (0..ne + 1)
1120     ///     .map(|i| 2.0 * i as f64 / ne as f64 - 1.0)
1121     ///     .collect::<Vec<f64>>();
1122     /// let x = ceed.vector_from_slice(&x_array).unwrap();
1123     /// let mut qdata = ceed.vector(ne * q).unwrap();
1124     /// qdata.set_value(0.0);
1125     /// let mut u_coarse = ceed.vector(ndofs_coarse).unwrap();
1126     /// u_coarse.set_value(1.0);
1127     /// let mut u_fine = ceed.vector(ndofs_fine).unwrap();
1128     /// u_fine.set_value(1.0);
1129     /// let mut v_coarse = ceed.vector(ndofs_coarse).unwrap();
1130     /// v_coarse.set_value(0.0);
1131     /// let mut v_fine = ceed.vector(ndofs_fine).unwrap();
1132     /// v_fine.set_value(0.0);
1133     /// let mut multiplicity = ceed.vector(ndofs_fine).unwrap();
1134     /// multiplicity.set_value(1.0);
1135     ///
1136     /// // Restrictions
1137     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1138     /// let rq = ceed
1139     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
1140     ///     .unwrap();
1141     ///
1142     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1143     /// for i in 0..ne {
1144     ///     indx[2 * i + 0] = i as i32;
1145     ///     indx[2 * i + 1] = (i + 1) as i32;
1146     /// }
1147     /// let rx = ceed
1148     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
1149     ///     .unwrap();
1150     ///
1151     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1152     /// for i in 0..ne {
1153     ///     for j in 0..p_coarse {
1154     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1155     ///     }
1156     /// }
1157     /// let ru_coarse = ceed
1158     ///     .elem_restriction(
1159     ///         ne,
1160     ///         p_coarse,
1161     ///         1,
1162     ///         1,
1163     ///         ndofs_coarse,
1164     ///         MemType::Host,
1165     ///         &indu_coarse,
1166     ///     )
1167     ///     .unwrap();
1168     ///
1169     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1170     /// for i in 0..ne {
1171     ///     for j in 0..p_fine {
1172     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1173     ///     }
1174     /// }
1175     /// let ru_fine = ceed
1176     ///     .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)
1177     ///     .unwrap();
1178     ///
1179     /// // Bases
1180     /// let bx = ceed
1181     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
1182     ///     .unwrap();
1183     /// let bu_coarse = ceed
1184     ///     .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)
1185     ///     .unwrap();
1186     /// let bu_fine = ceed
1187     ///     .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)
1188     ///     .unwrap();
1189     ///
1190     /// // Build quadrature data
1191     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
1192     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
1193     ///     .unwrap()
1194     ///     .field("dx", &rx, &bx, VectorOpt::Active)
1195     ///     .unwrap()
1196     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
1197     ///     .unwrap()
1198     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
1199     ///     .unwrap()
1200     ///     .apply(&x, &mut qdata)
1201     ///     .unwrap();
1202     ///
1203     /// // Mass operator
1204     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
1205     /// let op_mass_fine = ceed
1206     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
1207     ///     .unwrap()
1208     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)
1209     ///     .unwrap()
1210     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
1211     ///     .unwrap()
1212     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)
1213     ///     .unwrap();
1214     ///
1215     /// // Multigrid setup
1216     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine
1217     ///     .create_multigrid_level(&multiplicity, &ru_coarse, &bu_coarse)
1218     ///     .unwrap();
1219     ///
1220     /// // Coarse problem
1221     /// u_coarse.set_value(1.0);
1222     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap();
1223     ///
1224     /// // Check
1225     /// let sum: f64 = v_coarse.view().iter().sum();
1226     /// assert!(
1227     ///     (sum - 2.0).abs() < 1e-15,
1228     ///     "Incorrect interval length computed"
1229     /// );
1230     ///
1231     /// // Prolong
1232     /// op_prolong.apply(&u_coarse, &mut u_fine).unwrap();
1233     ///
1234     /// // Fine problem
1235     /// op_mass_fine.apply(&u_fine, &mut v_fine).unwrap();
1236     ///
1237     /// // Check
1238     /// let sum: f64 = v_fine.view().iter().sum();
1239     /// assert!(
1240     ///     (sum - 2.0).abs() < 1e-15,
1241     ///     "Incorrect interval length computed"
1242     /// );
1243     ///
1244     /// // Restrict
1245     /// op_restrict.apply(&v_fine, &mut v_coarse).unwrap();
1246     ///
1247     /// // Check
1248     /// let sum: f64 = v_coarse.view().iter().sum();
1249     /// assert!(
1250     ///     (sum - 2.0).abs() < 1e-15,
1251     ///     "Incorrect interval length computed"
1252     /// );
1253     /// ```
1254     pub fn create_multigrid_level(
1255         &self,
1256         p_mult_fine: &Vector,
1257         rstr_coarse: &ElemRestriction,
1258         basis_coarse: &Basis,
1259     ) -> crate::Result<(Operator, Operator, Operator)> {
1260         let mut ptr_coarse = std::ptr::null_mut();
1261         let mut ptr_prolong = std::ptr::null_mut();
1262         let mut ptr_restrict = std::ptr::null_mut();
1263         let ierr = unsafe {
1264             bind_ceed::CeedOperatorMultigridLevelCreate(
1265                 self.op_core.ptr,
1266                 p_mult_fine.ptr,
1267                 rstr_coarse.ptr,
1268                 basis_coarse.ptr,
1269                 &mut ptr_coarse,
1270                 &mut ptr_prolong,
1271                 &mut ptr_restrict,
1272             )
1273         };
1274         self.op_core.ceed.check_error(ierr)?;
1275         let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
1276         let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
1277         let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
1278         Ok((op_coarse, op_prolong, op_restrict))
1279     }
1280 
1281     /// Create a multigrid coarse Operator and level transfer Operators for a
1282     ///   given Operator with a tensor basis for the active basis
1283     ///
1284     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1285     /// * `rstr_coarse`   - Coarse grid restriction
1286     /// * `basis_coarse`  - Coarse grid active vector basis
1287     /// * `interp_c_to_f` - Matrix for coarse to fine
1288     ///
1289     /// ```
1290     /// # use libceed::prelude::*;
1291     /// # let ceed = libceed::Ceed::default_init();
1292     /// let ne = 15;
1293     /// let p_coarse = 3;
1294     /// let p_fine = 5;
1295     /// let q = 6;
1296     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1297     /// let ndofs_fine = p_fine * ne - ne + 1;
1298     ///
1299     /// // Vectors
1300     /// let x_array = (0..ne + 1)
1301     ///     .map(|i| 2.0 * i as f64 / ne as f64 - 1.0)
1302     ///     .collect::<Vec<f64>>();
1303     /// let x = ceed.vector_from_slice(&x_array).unwrap();
1304     /// let mut qdata = ceed.vector(ne * q).unwrap();
1305     /// qdata.set_value(0.0);
1306     /// let mut u_coarse = ceed.vector(ndofs_coarse).unwrap();
1307     /// u_coarse.set_value(1.0);
1308     /// let mut u_fine = ceed.vector(ndofs_fine).unwrap();
1309     /// u_fine.set_value(1.0);
1310     /// let mut v_coarse = ceed.vector(ndofs_coarse).unwrap();
1311     /// v_coarse.set_value(0.0);
1312     /// let mut v_fine = ceed.vector(ndofs_fine).unwrap();
1313     /// v_fine.set_value(0.0);
1314     /// let mut multiplicity = ceed.vector(ndofs_fine).unwrap();
1315     /// multiplicity.set_value(1.0);
1316     ///
1317     /// // Restrictions
1318     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1319     /// let rq = ceed
1320     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
1321     ///     .unwrap();
1322     ///
1323     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1324     /// for i in 0..ne {
1325     ///     indx[2 * i + 0] = i as i32;
1326     ///     indx[2 * i + 1] = (i + 1) as i32;
1327     /// }
1328     /// let rx = ceed
1329     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
1330     ///     .unwrap();
1331     ///
1332     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1333     /// for i in 0..ne {
1334     ///     for j in 0..p_coarse {
1335     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1336     ///     }
1337     /// }
1338     /// let ru_coarse = ceed
1339     ///     .elem_restriction(
1340     ///         ne,
1341     ///         p_coarse,
1342     ///         1,
1343     ///         1,
1344     ///         ndofs_coarse,
1345     ///         MemType::Host,
1346     ///         &indu_coarse,
1347     ///     )
1348     ///     .unwrap();
1349     ///
1350     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1351     /// for i in 0..ne {
1352     ///     for j in 0..p_fine {
1353     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1354     ///     }
1355     /// }
1356     /// let ru_fine = ceed
1357     ///     .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)
1358     ///     .unwrap();
1359     ///
1360     /// // Bases
1361     /// let bx = ceed
1362     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
1363     ///     .unwrap();
1364     /// let bu_coarse = ceed
1365     ///     .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)
1366     ///     .unwrap();
1367     /// let bu_fine = ceed
1368     ///     .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)
1369     ///     .unwrap();
1370     ///
1371     /// // Build quadrature data
1372     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
1373     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
1374     ///     .unwrap()
1375     ///     .field("dx", &rx, &bx, VectorOpt::Active)
1376     ///     .unwrap()
1377     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
1378     ///     .unwrap()
1379     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
1380     ///     .unwrap()
1381     ///     .apply(&x, &mut qdata)
1382     ///     .unwrap();
1383     ///
1384     /// // Mass operator
1385     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
1386     /// let op_mass_fine = ceed
1387     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
1388     ///     .unwrap()
1389     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)
1390     ///     .unwrap()
1391     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
1392     ///     .unwrap()
1393     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)
1394     ///     .unwrap();
1395     ///
1396     /// // Multigrid setup
1397     /// let mut interp_c_to_f: Vec<f64> = vec![0.; p_coarse * p_fine];
1398     /// {
1399     ///     let mut coarse = ceed.vector(p_coarse).unwrap();
1400     ///     let mut fine = ceed.vector(p_fine).unwrap();
1401     ///     let basis_c_to_f = ceed
1402     ///         .basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)
1403     ///         .unwrap();
1404     ///     for i in 0..p_coarse {
1405     ///         coarse.set_value(0.0);
1406     ///         {
1407     ///             let mut array = coarse.view_mut();
1408     ///             array[i] = 1.;
1409     ///         }
1410     ///         basis_c_to_f
1411     ///             .apply(
1412     ///                 1,
1413     ///                 TransposeMode::NoTranspose,
1414     ///                 EvalMode::Interp,
1415     ///                 &coarse,
1416     ///                 &mut fine,
1417     ///             )
1418     ///             .unwrap();
1419     ///         let array = fine.view();
1420     ///         for j in 0..p_fine {
1421     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1422     ///         }
1423     ///     }
1424     /// }
1425     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine
1426     ///     .create_multigrid_level_tensor_H1(&multiplicity, &ru_coarse, &bu_coarse, &interp_c_to_f)
1427     ///     .unwrap();
1428     ///
1429     /// // Coarse problem
1430     /// u_coarse.set_value(1.0);
1431     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap();
1432     ///
1433     /// // Check
1434     /// let sum: f64 = v_coarse.view().iter().sum();
1435     /// assert!(
1436     ///     (sum - 2.0).abs() < 1e-15,
1437     ///     "Incorrect interval length computed"
1438     /// );
1439     ///
1440     /// // Prolong
1441     /// op_prolong.apply(&u_coarse, &mut u_fine).unwrap();
1442     ///
1443     /// // Fine problem
1444     /// op_mass_fine.apply(&u_fine, &mut v_fine).unwrap();
1445     ///
1446     /// // Check
1447     /// let sum: f64 = v_fine.view().iter().sum();
1448     /// assert!(
1449     ///     (sum - 2.0).abs() < 1e-15,
1450     ///     "Incorrect interval length computed"
1451     /// );
1452     ///
1453     /// // Restrict
1454     /// op_restrict.apply(&v_fine, &mut v_coarse).unwrap();
1455     ///
1456     /// // Check
1457     /// let sum: f64 = v_coarse.view().iter().sum();
1458     /// assert!(
1459     ///     (sum - 2.0).abs() < 1e-15,
1460     ///     "Incorrect interval length computed"
1461     /// );
1462     /// ```
1463     pub fn create_multigrid_level_tensor_H1(
1464         &self,
1465         p_mult_fine: &Vector,
1466         rstr_coarse: &ElemRestriction,
1467         basis_coarse: &Basis,
1468         interpCtoF: &Vec<f64>,
1469     ) -> crate::Result<(Operator, Operator, Operator)> {
1470         let mut ptr_coarse = std::ptr::null_mut();
1471         let mut ptr_prolong = std::ptr::null_mut();
1472         let mut ptr_restrict = std::ptr::null_mut();
1473         let ierr = unsafe {
1474             bind_ceed::CeedOperatorMultigridLevelCreateTensorH1(
1475                 self.op_core.ptr,
1476                 p_mult_fine.ptr,
1477                 rstr_coarse.ptr,
1478                 basis_coarse.ptr,
1479                 interpCtoF.as_ptr(),
1480                 &mut ptr_coarse,
1481                 &mut ptr_prolong,
1482                 &mut ptr_restrict,
1483             )
1484         };
1485         self.op_core.ceed.check_error(ierr)?;
1486         let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
1487         let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
1488         let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
1489         Ok((op_coarse, op_prolong, op_restrict))
1490     }
1491 
1492     /// Create a multigrid coarse Operator and level transfer Operators for a
1493     ///   given Operator with a non-tensor basis for the active basis
1494     ///
1495     /// * `p_mult_fine`   - Lvector multiplicity in parallel gather/scatter
1496     /// * `rstr_coarse`   - Coarse grid restriction
1497     /// * `basis_coarse`  - Coarse grid active vector basis
1498     /// * `interp_c_to_f` - Matrix for coarse to fine
1499     ///
1500     /// ```
1501     /// # use libceed::prelude::*;
1502     /// # let ceed = libceed::Ceed::default_init();
1503     /// let ne = 15;
1504     /// let p_coarse = 3;
1505     /// let p_fine = 5;
1506     /// let q = 6;
1507     /// let ndofs_coarse = p_coarse * ne - ne + 1;
1508     /// let ndofs_fine = p_fine * ne - ne + 1;
1509     ///
1510     /// // Vectors
1511     /// let x_array = (0..ne + 1)
1512     ///     .map(|i| 2.0 * i as f64 / ne as f64 - 1.0)
1513     ///     .collect::<Vec<f64>>();
1514     /// let x = ceed.vector_from_slice(&x_array).unwrap();
1515     /// let mut qdata = ceed.vector(ne * q).unwrap();
1516     /// qdata.set_value(0.0);
1517     /// let mut u_coarse = ceed.vector(ndofs_coarse).unwrap();
1518     /// u_coarse.set_value(1.0);
1519     /// let mut u_fine = ceed.vector(ndofs_fine).unwrap();
1520     /// u_fine.set_value(1.0);
1521     /// let mut v_coarse = ceed.vector(ndofs_coarse).unwrap();
1522     /// v_coarse.set_value(0.0);
1523     /// let mut v_fine = ceed.vector(ndofs_fine).unwrap();
1524     /// v_fine.set_value(0.0);
1525     /// let mut multiplicity = ceed.vector(ndofs_fine).unwrap();
1526     /// multiplicity.set_value(1.0);
1527     ///
1528     /// // Restrictions
1529     /// let strides: [i32; 3] = [1, q as i32, q as i32];
1530     /// let rq = ceed
1531     ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
1532     ///     .unwrap();
1533     ///
1534     /// let mut indx: Vec<i32> = vec![0; 2 * ne];
1535     /// for i in 0..ne {
1536     ///     indx[2 * i + 0] = i as i32;
1537     ///     indx[2 * i + 1] = (i + 1) as i32;
1538     /// }
1539     /// let rx = ceed
1540     ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
1541     ///     .unwrap();
1542     ///
1543     /// let mut indu_coarse: Vec<i32> = vec![0; p_coarse * ne];
1544     /// for i in 0..ne {
1545     ///     for j in 0..p_coarse {
1546     ///         indu_coarse[p_coarse * i + j] = (i + j) as i32;
1547     ///     }
1548     /// }
1549     /// let ru_coarse = ceed
1550     ///     .elem_restriction(
1551     ///         ne,
1552     ///         p_coarse,
1553     ///         1,
1554     ///         1,
1555     ///         ndofs_coarse,
1556     ///         MemType::Host,
1557     ///         &indu_coarse,
1558     ///     )
1559     ///     .unwrap();
1560     ///
1561     /// let mut indu_fine: Vec<i32> = vec![0; p_fine * ne];
1562     /// for i in 0..ne {
1563     ///     for j in 0..p_fine {
1564     ///         indu_fine[p_fine * i + j] = (i + j) as i32;
1565     ///     }
1566     /// }
1567     /// let ru_fine = ceed
1568     ///     .elem_restriction(ne, p_fine, 1, 1, ndofs_fine, MemType::Host, &indu_fine)
1569     ///     .unwrap();
1570     ///
1571     /// // Bases
1572     /// let bx = ceed
1573     ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
1574     ///     .unwrap();
1575     /// let bu_coarse = ceed
1576     ///     .basis_tensor_H1_Lagrange(1, 1, p_coarse, q, QuadMode::Gauss)
1577     ///     .unwrap();
1578     /// let bu_fine = ceed
1579     ///     .basis_tensor_H1_Lagrange(1, 1, p_fine, q, QuadMode::Gauss)
1580     ///     .unwrap();
1581     ///
1582     /// // Build quadrature data
1583     /// let qf_build = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
1584     /// ceed.operator(&qf_build, QFunctionOpt::None, QFunctionOpt::None)
1585     ///     .unwrap()
1586     ///     .field("dx", &rx, &bx, VectorOpt::Active)
1587     ///     .unwrap()
1588     ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
1589     ///     .unwrap()
1590     ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
1591     ///     .unwrap()
1592     ///     .apply(&x, &mut qdata)
1593     ///     .unwrap();
1594     ///
1595     /// // Mass operator
1596     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
1597     /// let op_mass_fine = ceed
1598     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
1599     ///     .unwrap()
1600     ///     .field("u", &ru_fine, &bu_fine, VectorOpt::Active)
1601     ///     .unwrap()
1602     ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata)
1603     ///     .unwrap()
1604     ///     .field("v", &ru_fine, &bu_fine, VectorOpt::Active)
1605     ///     .unwrap();
1606     ///
1607     /// // Multigrid setup
1608     /// let mut interp_c_to_f: Vec<f64> = vec![0.; p_coarse * p_fine];
1609     /// {
1610     ///     let mut coarse = ceed.vector(p_coarse).unwrap();
1611     ///     let mut fine = ceed.vector(p_fine).unwrap();
1612     ///     let basis_c_to_f = ceed
1613     ///         .basis_tensor_H1_Lagrange(1, 1, p_coarse, p_fine, QuadMode::GaussLobatto)
1614     ///         .unwrap();
1615     ///     for i in 0..p_coarse {
1616     ///         coarse.set_value(0.0);
1617     ///         {
1618     ///             let mut array = coarse.view_mut();
1619     ///             array[i] = 1.;
1620     ///         }
1621     ///         basis_c_to_f
1622     ///             .apply(
1623     ///                 1,
1624     ///                 TransposeMode::NoTranspose,
1625     ///                 EvalMode::Interp,
1626     ///                 &coarse,
1627     ///                 &mut fine,
1628     ///             )
1629     ///             .unwrap();
1630     ///         let array = fine.view();
1631     ///         for j in 0..p_fine {
1632     ///             interp_c_to_f[j * p_coarse + i] = array[j];
1633     ///         }
1634     ///     }
1635     /// }
1636     /// let (op_mass_coarse, op_prolong, op_restrict) = op_mass_fine
1637     ///     .create_multigrid_level_H1(&multiplicity, &ru_coarse, &bu_coarse, &interp_c_to_f)
1638     ///     .unwrap();
1639     ///
1640     /// // Coarse problem
1641     /// u_coarse.set_value(1.0);
1642     /// op_mass_coarse.apply(&u_coarse, &mut v_coarse).unwrap();
1643     ///
1644     /// // Check
1645     /// let sum: f64 = v_coarse.view().iter().sum();
1646     /// assert!(
1647     ///     (sum - 2.0).abs() < 1e-15,
1648     ///     "Incorrect interval length computed"
1649     /// );
1650     ///
1651     /// // Prolong
1652     /// op_prolong.apply(&u_coarse, &mut u_fine).unwrap();
1653     ///
1654     /// // Fine problem
1655     /// op_mass_fine.apply(&u_fine, &mut v_fine).unwrap();
1656     ///
1657     /// // Check
1658     /// let sum: f64 = v_fine.view().iter().sum();
1659     /// assert!(
1660     ///     (sum - 2.0).abs() < 1e-15,
1661     ///     "Incorrect interval length computed"
1662     /// );
1663     ///
1664     /// // Restrict
1665     /// op_restrict.apply(&v_fine, &mut v_coarse).unwrap();
1666     ///
1667     /// // Check
1668     /// let sum: f64 = v_coarse.view().iter().sum();
1669     /// assert!(
1670     ///     (sum - 2.0).abs() < 1e-15,
1671     ///     "Incorrect interval length computed"
1672     /// );
1673     /// ```
1674     pub fn create_multigrid_level_H1(
1675         &self,
1676         p_mult_fine: &Vector,
1677         rstr_coarse: &ElemRestriction,
1678         basis_coarse: &Basis,
1679         interpCtoF: &[f64],
1680     ) -> crate::Result<(Operator, Operator, Operator)> {
1681         let mut ptr_coarse = std::ptr::null_mut();
1682         let mut ptr_prolong = std::ptr::null_mut();
1683         let mut ptr_restrict = std::ptr::null_mut();
1684         let ierr = unsafe {
1685             bind_ceed::CeedOperatorMultigridLevelCreateH1(
1686                 self.op_core.ptr,
1687                 p_mult_fine.ptr,
1688                 rstr_coarse.ptr,
1689                 basis_coarse.ptr,
1690                 interpCtoF.as_ptr(),
1691                 &mut ptr_coarse,
1692                 &mut ptr_prolong,
1693                 &mut ptr_restrict,
1694             )
1695         };
1696         self.op_core.ceed.check_error(ierr)?;
1697         let op_coarse = Operator::from_raw(self.op_core.ceed, ptr_coarse)?;
1698         let op_prolong = Operator::from_raw(self.op_core.ceed, ptr_prolong)?;
1699         let op_restrict = Operator::from_raw(self.op_core.ceed, ptr_restrict)?;
1700         Ok((op_coarse, op_prolong, op_restrict))
1701     }
1702 }
1703 
1704 // -----------------------------------------------------------------------------
1705 // Composite Operator
1706 // -----------------------------------------------------------------------------
1707 impl<'a> CompositeOperator<'a> {
1708     // Constructor
1709     pub fn create(ceed: &'a crate::Ceed) -> crate::Result<Self> {
1710         let mut ptr = std::ptr::null_mut();
1711         let ierr = unsafe { bind_ceed::CeedCompositeOperatorCreate(ceed.ptr, &mut ptr) };
1712         ceed.check_error(ierr)?;
1713         Ok(Self {
1714             op_core: OperatorCore { ceed, ptr },
1715         })
1716     }
1717 
    /// Apply Operator to a vector
    ///
    /// Applies the sub-operators of the composite Operator to `input`,
    /// placing the combined result in `output`. Prior contents of `output`
    /// are not retained; use [`CompositeOperator::apply_add`] to accumulate
    /// into `output` instead.
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector, overwritten with the result
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
    /// let mut qdata_mass = ceed.vector(ne * q).unwrap();
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q).unwrap();
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs).unwrap();
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs).unwrap();
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed
    ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
    ///     .unwrap();
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed
    ///     .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)
    ///     .unwrap();
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed
    ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
    ///     .unwrap();
    ///
    /// // Bases
    /// let bx = ceed
    ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
    ///     .unwrap();
    /// let bu = ceed
    ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
    ///     .unwrap();
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("dx", &rx, &bx, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
    ///     .unwrap()
    ///     .apply(&x, &mut qdata_mass)
    ///     .unwrap();
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild").unwrap();
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("dx", &rx, &bx, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
    ///     .unwrap()
    ///     .apply(&x, &mut qdata_diff)
    ///     .unwrap();
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("u", &ru, &bu, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)
    ///     .unwrap()
    ///     .field("v", &ru, &bu, VectorOpt::Active)
    ///     .unwrap();
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply").unwrap();
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("du", &ru, &bu, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)
    ///     .unwrap()
    ///     .field("dv", &ru, &bu, VectorOpt::Active)
    ///     .unwrap();
    ///
    /// let op_composite = ceed
    ///     .composite_operator()
    ///     .unwrap()
    ///     .sub_operator(&op_mass)
    ///     .unwrap()
    ///     .sub_operator(&op_diff)
    ///     .unwrap();
    ///
    /// v.set_value(0.0);
    /// op_composite.apply(&u, &mut v).unwrap();
    ///
    /// // Check
    /// let sum: f64 = v.view().iter().sum();
    /// assert!(
    ///     (sum - 2.0).abs() < 1e-15,
    ///     "Incorrect interval length computed"
    /// );
    /// ```
    pub fn apply(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper over the shared OperatorCore apply path
        self.op_core.apply(input, output)
    }
1842 
    /// Apply Operator to a vector and add result to output vector
    ///
    /// Applies the sub-operators of the composite Operator to `input` and
    /// sums the result into the existing contents of `output`.
    ///
    /// * `input`  - Input Vector
    /// * `output` - Output Vector; the result is added to its current values
    ///
    /// ```
    /// # use libceed::prelude::*;
    /// # let ceed = libceed::Ceed::default_init();
    /// let ne = 4;
    /// let p = 3;
    /// let q = 4;
    /// let ndofs = p * ne - ne + 1;
    ///
    /// // Vectors
    /// let x = ceed.vector_from_slice(&[-1., -0.5, 0.0, 0.5, 1.0]).unwrap();
    /// let mut qdata_mass = ceed.vector(ne * q).unwrap();
    /// qdata_mass.set_value(0.0);
    /// let mut qdata_diff = ceed.vector(ne * q).unwrap();
    /// qdata_diff.set_value(0.0);
    /// let mut u = ceed.vector(ndofs).unwrap();
    /// u.set_value(1.0);
    /// let mut v = ceed.vector(ndofs).unwrap();
    /// v.set_value(0.0);
    ///
    /// // Restrictions
    /// let mut indx: Vec<i32> = vec![0; 2 * ne];
    /// for i in 0..ne {
    ///     indx[2 * i + 0] = i as i32;
    ///     indx[2 * i + 1] = (i + 1) as i32;
    /// }
    /// let rx = ceed
    ///     .elem_restriction(ne, 2, 1, 1, ne + 1, MemType::Host, &indx)
    ///     .unwrap();
    /// let mut indu: Vec<i32> = vec![0; p * ne];
    /// for i in 0..ne {
    ///     indu[p * i + 0] = i as i32;
    ///     indu[p * i + 1] = (i + 1) as i32;
    ///     indu[p * i + 2] = (i + 2) as i32;
    /// }
    /// let ru = ceed
    ///     .elem_restriction(ne, 3, 1, 1, ndofs, MemType::Host, &indu)
    ///     .unwrap();
    /// let strides: [i32; 3] = [1, q as i32, q as i32];
    /// let rq = ceed
    ///     .strided_elem_restriction(ne, q, 1, q * ne, strides)
    ///     .unwrap();
    ///
    /// // Bases
    /// let bx = ceed
    ///     .basis_tensor_H1_Lagrange(1, 1, 2, q, QuadMode::Gauss)
    ///     .unwrap();
    /// let bu = ceed
    ///     .basis_tensor_H1_Lagrange(1, 1, p, q, QuadMode::Gauss)
    ///     .unwrap();
    ///
    /// // Build quadrature data
    /// let qf_build_mass = ceed.q_function_interior_by_name("Mass1DBuild").unwrap();
    /// ceed.operator(&qf_build_mass, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("dx", &rx, &bx, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
    ///     .unwrap()
    ///     .apply(&x, &mut qdata_mass)
    ///     .unwrap();
    ///
    /// let qf_build_diff = ceed.q_function_interior_by_name("Poisson1DBuild").unwrap();
    /// ceed.operator(&qf_build_diff, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("dx", &rx, &bx, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("weights", ElemRestrictionOpt::None, &bx, VectorOpt::None)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, VectorOpt::Active)
    ///     .unwrap()
    ///     .apply(&x, &mut qdata_diff)
    ///     .unwrap();
    ///
    /// // Application operator
    /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
    /// let op_mass = ceed
    ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("u", &ru, &bu, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_mass)
    ///     .unwrap()
    ///     .field("v", &ru, &bu, VectorOpt::Active)
    ///     .unwrap();
    ///
    /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply").unwrap();
    /// let op_diff = ceed
    ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)
    ///     .unwrap()
    ///     .field("du", &ru, &bu, VectorOpt::Active)
    ///     .unwrap()
    ///     .field("qdata", &rq, BasisOpt::Collocated, &qdata_diff)
    ///     .unwrap()
    ///     .field("dv", &ru, &bu, VectorOpt::Active)
    ///     .unwrap();
    ///
    /// let op_composite = ceed
    ///     .composite_operator()
    ///     .unwrap()
    ///     .sub_operator(&op_mass)
    ///     .unwrap()
    ///     .sub_operator(&op_diff)
    ///     .unwrap();
    ///
    /// v.set_value(1.0);
    /// op_composite.apply_add(&u, &mut v).unwrap();
    ///
    /// // Check
    /// let sum: f64 = v.view().iter().sum();
    /// assert!(
    ///     (sum - (2.0 + ndofs as f64)).abs() < 1e-15,
    ///     "Incorrect interval length computed"
    /// );
    /// ```
    pub fn apply_add(&self, input: &Vector, output: &mut Vector) -> crate::Result<i32> {
        // Thin wrapper over the shared OperatorCore apply-add path
        self.op_core.apply_add(input, output)
    }
1967 
1968     /// Add a sub-Operator to a Composite Operator
1969     ///
1970     /// * `subop` - Sub-Operator
1971     ///
1972     /// ```
1973     /// # use libceed::prelude::*;
1974     /// # let ceed = libceed::Ceed::default_init();
1975     /// let mut op = ceed.composite_operator().unwrap();
1976     ///
1977     /// let qf_mass = ceed.q_function_interior_by_name("MassApply").unwrap();
1978     /// let op_mass = ceed
1979     ///     .operator(&qf_mass, QFunctionOpt::None, QFunctionOpt::None)
1980     ///     .unwrap();
1981     /// op = op.sub_operator(&op_mass).unwrap();
1982     ///
1983     /// let qf_diff = ceed.q_function_interior_by_name("Poisson1DApply").unwrap();
1984     /// let op_diff = ceed
1985     ///     .operator(&qf_diff, QFunctionOpt::None, QFunctionOpt::None)
1986     ///     .unwrap();
1987     /// op = op.sub_operator(&op_diff).unwrap();
1988     /// ```
1989     #[allow(unused_mut)]
1990     pub fn sub_operator(mut self, subop: &Operator) -> crate::Result<Self> {
1991         let ierr =
1992             unsafe { bind_ceed::CeedCompositeOperatorAddSub(self.op_core.ptr, subop.op_core.ptr) };
1993         self.op_core.ceed.check_error(ierr)?;
1994         Ok(self)
1995     }
1996 
1997     /// Assemble the diagonal of a square linear Operator
1998     ///
1999     /// This overwrites a Vector with the diagonal of a linear Operator.
2000     ///
2001     /// Note: Currently only non-composite Operators with a single field and
2002     ///      composite Operators with single field sub-operators are supported.
2003     ///
2004     /// * `op`        - Operator to assemble QFunction
2005     /// * `assembled` - Vector to store assembled Operator diagonal
2006     pub fn linear_asssemble_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
2007         self.op_core.linear_assemble_add_diagonal(assembled)
2008     }
2009 
    /// Assemble the diagonal of a square linear Operator
    ///
    /// This sums into a Vector the diagonal of a linear Operator; existing
    /// contents of the Vector are retained.
    ///
    /// Note: Currently only non-composite Operators with a single field and
    ///      composite Operators with single field sub-operators are supported.
    ///
    /// * `assembled` - Vector to sum the assembled Operator diagonal into
    pub fn linear_assemble_add_diagonal(&self, assembled: &mut Vector) -> crate::Result<i32> {
        self.op_core.linear_assemble_add_diagonal(assembled)
    }
2023 
2024     /// Assemble the diagonal of a square linear Operator
2025     ///
2026     /// This overwrites a Vector with the diagonal of a linear Operator.
2027     ///
2028     /// Note: Currently only non-composite Operators with a single field and
2029     ///      composite Operators with single field sub-operators are supported.
2030     ///
2031     /// * `op`        - Operator to assemble QFunction
2032     /// * `assembled` - Vector to store assembled CeedOperator point block
2033     ///                   diagonal, provided in row-major form with an
2034     ///                   `ncomp * ncomp` block at each node. The dimensions of
2035     ///                   this vector are derived from the active vector for
2036     ///                   the CeedOperator. The array has shape
2037     ///                   `[nodes, component out, component in]`.
2038     pub fn linear_assemble_point_block_diagonal(
2039         &self,
2040         assembled: &mut Vector,
2041     ) -> crate::Result<i32> {
2042         self.op_core.linear_assemble_add_diagonal(assembled)
2043     }
2044 
2045     /// Assemble the diagonal of a square linear Operator
2046     ///
2047     /// This sums into a Vector with the diagonal of a linear Operator.
2048     ///
2049     /// Note: Currently only non-composite Operators with a single field and
2050     ///      composite Operators with single field sub-operators are supported.
2051     ///
2052     /// * `op`        - Operator to assemble QFunction
2053     /// * `assembled` - Vector to store assembled CeedOperator point block
2054     ///                   diagonal, provided in row-major form with an
2055     ///                   `ncomp * ncomp` block at each node. The dimensions of
2056     ///                   this vector are derived from the active vector for
2057     ///                   the CeedOperator. The array has shape
2058     ///                   `[nodes, component out, component in]`.
2059     pub fn linear_assemble_add_point_block_diagonal(
2060         &self,
2061         assembled: &mut Vector,
2062     ) -> crate::Result<i32> {
2063         self.op_core.linear_assemble_add_diagonal(assembled)
2064     }
2065 }
2066 
2067 // -----------------------------------------------------------------------------
2068