MFEM  v4.6.0
Finite element discretization library
elasticity_kernels.hpp
// Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#ifndef MFEM_ELASTICITY_KERNELS_HPP
#define MFEM_ELASTICITY_KERNELS_HPP

#include "kernel_helpers.hpp"
#include "linalg/vector.hpp"

using mfem::internal::tensor;
using mfem::internal::make_tensor;

namespace mfem
{

namespace ElasticityKernels
{

/**
 * @brief Apply the 3D elasticity kernel
 *
 * @tparam d1d Number of degrees of freedom in 1D.
 * @tparam q1d Number of quadrature points in 1D.
 * @tparam material_type Material type which adheres to the material type
 * interface. See examples in the materials directory.
 * @param ne Number of elements.
 * @param B_ Basis functions evaluated at quadrature points in column-major
 * layout q1d x d1d.
 * @param G_ Gradients of basis functions evaluated at quadrature points in
 * column-major layout q1d x d1d.
 * @param W_ Quadrature weights in column-major layout q1d x q1d x q1d.
 * @param Jacobian_ Jacobians of the element transformations at all quadrature
 * points in column-major layout q1d x q1d x q1d x sdim x dim x ne.
 * @param detJ_ Precomputed determinants of the Jacobians q1d x q1d x q1d x ne.
 * @param X_ Input vector d1d x d1d x d1d x vdim x ne.
 * @param Y_ Output vector d1d x d1d x d1d x vdim x ne.
 * @param material Material object.
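 *
 * As a minimal sketch (not part of this header), a material type only needs
 * to provide the methods the kernels call: Apply3D() uses stress(), while the
 * gradient kernels below use action_of_gradient() and gradient(). The type
 * name, moduli, and d1d/q1d values here are illustrative assumptions:
 * @code
 * struct ExampleLinearMaterial
 * {
 *    double lambda = 100.0, mu = 50.0;
 *
 *    // Small-strain linear elasticity: sigma = lambda tr(eps) I + 2 mu eps.
 *    MFEM_HOST_DEVICE tensor<double, 3, 3>
 *    stress(const tensor<double, 3, 3> &dudx) const
 *    {
 *       tensor<double, 3, 3> sigma{};
 *       const double divu = dudx[0][0] + dudx[1][1] + dudx[2][2];
 *       for (int i = 0; i < 3; i++)
 *       {
 *          for (int j = 0; j < 3; j++)
 *          {
 *             sigma[i][j] = mu * (dudx[i][j] + dudx[j][i]);
 *          }
 *          sigma[i][i] += lambda * divu;
 *       }
 *       return sigma;
 *    }
 *
 *    // The material is linear, so the action of dsigma/dudx on an increment
 *    // ddudx is just the stress evaluated at the increment.
 *    MFEM_HOST_DEVICE tensor<double, 3, 3>
 *    action_of_gradient(const tensor<double, 3, 3> &dudx,
 *                       const tensor<double, 3, 3> &ddudx) const
 *    {
 *       (void)dudx;
 *       return stress(ddudx);
 *    }
 * };
 *
 * // Hypothetical call site with d1d = 4, q1d = 5; B, G, W, Jacobian, detJ,
 * // X, Y are set up by the caller in the layouts documented above:
 * ElasticityKernels::Apply3D<4, 5>(ne, B, G, W, Jacobian, detJ, X, Y,
 *                                  ExampleLinearMaterial{});
 * @endcode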
 */
template <int d1d, int q1d, typename material_type> static inline
void Apply3D(const int ne,
             const Array<double> &B_,
             const Array<double> &G_,
             const Array<double> &W_,
             const Vector &Jacobian_,
             const Vector &detJ_,
             const Vector &X_, Vector &Y_,
             const material_type &material)
{
   static constexpr int dim = 3;
   KernelHelpers::CheckMemoryRestriction(d1d, q1d);

   const tensor<double, q1d, d1d> &B =
      make_tensor<q1d, d1d>([&](int i, int j) { return B_[i + q1d*j]; });

   const tensor<double, q1d, d1d> &G =
      make_tensor<q1d, d1d>([&](int i, int j) { return G_[i + q1d*j]; });

   const auto qweights = Reshape(W_.Read(), q1d, q1d, q1d);
   const auto J = Reshape(Jacobian_.Read(), q1d, q1d, q1d, dim, dim, ne);
   const auto detJ = Reshape(detJ_.Read(), q1d, q1d, q1d, ne);
   const auto U = Reshape(X_.Read(), d1d, d1d, d1d, dim, ne);
   auto force = Reshape(Y_.ReadWrite(), d1d, d1d, d1d, dim, ne);

   mfem::forall_3D(ne, q1d, q1d, q1d, [=] MFEM_HOST_DEVICE (int e)
   {
      // shared memory placeholders for temporary contraction results
      MFEM_SHARED tensor<double, 2, 3, q1d, q1d, q1d> smem;
      // Cauchy stress
      MFEM_SHARED_3D_BLOCK_TENSOR(invJ_sigma_detJw, double, q1d, q1d, q1d, dim, dim);
      // du/dxi
      MFEM_SHARED_3D_BLOCK_TENSOR(dudxi, double, q1d, q1d, q1d, dim, dim);

      const auto U_el = Reshape(&U(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGrad(B, G, smem, U_el, dudxi);

      MFEM_FOREACH_THREAD(qx, x, q1d)
      {
         MFEM_FOREACH_THREAD(qy, y, q1d)
         {
            MFEM_FOREACH_THREAD(qz, z, q1d)
            {
               auto invJqp = inv(make_tensor<dim, dim>(
                  [&](int i, int j) { return J(qx, qy, qz, i, j, e); }));

               auto dudx = dudxi(qz, qy, qx) * invJqp;

               // This represents the quadrature function operation in the
               // Finite Element Operator Decomposition, e.g. A_Q(x).
               auto sigma = material.stress(dudx);

               invJ_sigma_detJw(qx, qy, qz) =
                  invJqp * sigma * detJ(qx, qy, qz, e) * qweights(qx, qy, qz);
            }
         }
      }
      MFEM_SYNC_THREAD;

      // Accumulate the quadrature-point contributions into the output
      // element vector via the transposed gradient contraction.
      auto F = Reshape(&force(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGradTSum(B, G, smem, invJ_sigma_detJw, F);
   }); // for each element
}

/**
 * @brief Apply the 3D elasticity gradient kernel
 *
 * @tparam d1d Number of degrees of freedom in 1D.
 * @tparam q1d Number of quadrature points in 1D.
 * @tparam material_type Material type which adheres to the material type
 * interface. See examples in the materials directory.
 * @param ne Number of elements.
 * @param B_ Basis functions evaluated at quadrature points in column-major
 * layout q1d x d1d.
 * @param G_ Gradients of basis functions evaluated at quadrature points in
 * column-major layout q1d x d1d.
 * @param W_ Quadrature weights in column-major layout q1d x q1d x q1d.
 * @param Jacobian_ Jacobians of the element transformations at all quadrature
 * points in column-major layout q1d x q1d x q1d x sdim x dim x ne.
 * @param detJ_ Precomputed determinants of the Jacobians q1d x q1d x q1d x ne.
 * @param dU_ Input vector d1d x d1d x d1d x vdim x ne. Represents the current
 * iterate.
 * @param dF_ Output vector d1d x d1d x d1d x vdim x ne.
 * @param U_ Input vector d1d x d1d x d1d x vdim x ne. Represents the current
 * state, e.g. it is fixed during Newton iterations.
 * @param material Material object.
 * @param use_cache_ Use cached dsigma/dudx. Useful for linear solves between
 * Newton iterations.
 * @param recompute_cache_ Recompute and cache dsigma/dudx.
 * @param dsigma_cache_ Vector to use as memory for the cache of dsigma/dudx.
 * Size needed is ne x q1d x q1d x q1d x dim x dim x dim x dim.
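 *
 * As a minimal sketch of a call site (all names and the d1d/q1d choice are
 * illustrative assumptions; the material must provide gradient() when the
 * cache is used and action_of_gradient() otherwise):
 * @code
 * // dsigma_cache must hold ne x q1d x q1d x q1d x dim^4 doubles.
 * Vector dsigma_cache(ne * 5 * 5 * 5 * 3 * 3 * 3 * 3);
 *
 * // First application in a Newton step: recompute and store dsigma/dudx.
 * ElasticityKernels::ApplyGradient3D<4, 5>(ne, B, G, W, Jacobian, detJ,
 *                                          dU, dF, U, mat,
 *                                          true, true, dsigma_cache);
 *
 * // Subsequent linear-solver iterations: reuse the cached values.
 * ElasticityKernels::ApplyGradient3D<4, 5>(ne, B, G, W, Jacobian, detJ,
 *                                          dU, dF, U, mat,
 *                                          true, false, dsigma_cache);
 * @endcode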
 */
template <int d1d, int q1d, typename material_type> static inline
void ApplyGradient3D(const int ne,
                     const Array<double> &B_, const Array<double> &G_,
                     const Array<double> &W_, const Vector &Jacobian_,
                     const Vector &detJ_, const Vector &dU_, Vector &dF_,
                     const Vector &U_, const material_type &material,
                     const bool use_cache_, const bool recompute_cache_,
                     Vector &dsigma_cache_)
{
   static constexpr int dim = 3;
   KernelHelpers::CheckMemoryRestriction(d1d, q1d);

   const tensor<double, q1d, d1d> &B =
      make_tensor<q1d, d1d>([&](int i, int j) { return B_[i + q1d*j]; });

   const tensor<double, q1d, d1d> &G =
      make_tensor<q1d, d1d>([&](int i, int j) { return G_[i + q1d*j]; });

   const auto qweights = Reshape(W_.Read(), q1d, q1d, q1d);
   const auto J = Reshape(Jacobian_.Read(), q1d, q1d, q1d, dim, dim, ne);
   const auto detJ = Reshape(detJ_.Read(), q1d, q1d, q1d, ne);
   const auto dU = Reshape(dU_.Read(), d1d, d1d, d1d, dim, ne);
   auto force = Reshape(dF_.ReadWrite(), d1d, d1d, d1d, dim, ne);
   const auto U = Reshape(U_.Read(), d1d, d1d, d1d, dim, ne);

   auto dsigma_cache = Reshape(dsigma_cache_.ReadWrite(), ne, q1d, q1d, q1d,
                               dim, dim, dim, dim);

   mfem::forall_3D(ne, q1d, q1d, q1d, [=] MFEM_HOST_DEVICE (int e)
   {
      // shared memory placeholders for temporary contraction results
      MFEM_SHARED tensor<double, 2, 3, q1d, q1d, q1d> smem;
      // directional derivative of the Cauchy stress
      MFEM_SHARED tensor<double, q1d, q1d, q1d, dim, dim> invJ_dsigma_detJw;
      // du/dxi, ddu/dxi
      MFEM_SHARED_3D_BLOCK_TENSOR( dudxi, double, q1d, q1d, q1d, dim, dim);
      MFEM_SHARED_3D_BLOCK_TENSOR(ddudxi, double, q1d, q1d, q1d, dim, dim);

      const auto U_el = Reshape(&U(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGrad(B, G, smem, U_el, dudxi);

      const auto dU_el = Reshape(&dU(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGrad(B, G, smem, dU_el, ddudxi);

      MFEM_FOREACH_THREAD(qx, x, q1d)
      {
         MFEM_FOREACH_THREAD(qy, y, q1d)
         {
            MFEM_FOREACH_THREAD(qz, z, q1d)
            {
               auto invJqp = inv(make_tensor<dim, dim>(
                  [&](int i, int j) { return J(qx, qy, qz, i, j, e); }));

               auto dudx = dudxi(qz, qy, qx) * invJqp;
               auto ddudx = ddudxi(qz, qy, qx) * invJqp;

               if (use_cache_)
               {
                  // C = dsigma/dudx
                  tensor<double, dim, dim, dim, dim> C;

                  auto C_cache = make_tensor<dim, dim, dim, dim>(
                     [&](int i, int j, int k, int l)
                  { return dsigma_cache(e, qx, qy, qz, i, j, k, l); });

                  if (recompute_cache_)
                  {
                     C = material.gradient(dudx);
                     for (int i = 0; i < dim; i++)
                     {
                        for (int j = 0; j < dim; j++)
                        {
                           for (int k = 0; k < dim; k++)
                           {
                              for (int l = 0; l < dim; l++)
                              {
                                 dsigma_cache(e, qx, qy, qz, i, j, k, l) = C(i, j, k, l);
                              }
                           }
                        }
                     }
                     C_cache = C;
                  }
                  else
                  {
                     C = C_cache;
                  }
                  invJ_dsigma_detJw(qx, qy, qz) =
                     invJqp * ddot(C, ddudx) * detJ(qx, qy, qz, e) * qweights(qx, qy, qz);
               }
               else
               {
                  auto dsigma = material.action_of_gradient(dudx, ddudx);
                  invJ_dsigma_detJw(qx, qy, qz) =
                     invJqp * dsigma * detJ(qx, qy, qz, e) * qweights(qx, qy, qz);
               }
            }
         }
      }
      MFEM_SYNC_THREAD;
      auto F = Reshape(&force(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGradTSum(B, G, smem, invJ_dsigma_detJw, F);
   }); // for each element
}

/**
 * @brief Assemble the 3D elasticity gradient diagonal.
 *
 * @tparam d1d Number of degrees of freedom in 1D.
 * @tparam q1d Number of quadrature points in 1D.
 * @tparam material_type Material type which adheres to the material type
 * interface. See examples in the materials directory.
 * @param ne Number of elements.
 * @param B_ Basis functions evaluated at quadrature points in column-major
 * layout q1d x d1d.
 * @param G_ Gradients of basis functions evaluated at quadrature points in
 * column-major layout q1d x d1d.
 * @param W_ Quadrature weights in column-major layout q1d x q1d x q1d.
 * @param Jacobian_ Jacobians of the element transformations at all quadrature
 * points in column-major layout q1d x q1d x q1d x sdim x dim x ne.
 * @param detJ_ Precomputed determinants of the Jacobians q1d x q1d x q1d x ne.
 * @param X_ Input vector d1d x d1d x d1d x vdim x ne. Represents the current
 * state.
 * @param Ke_diag_memory Vector representing the memory needed for the diagonal
 * matrices. Size needed is d1d x d1d x d1d x dim x ne x dim.
 * @param material Material object.
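 *
 * As a minimal sketch of a call site (names and the d1d/q1d choice are
 * illustrative assumptions; the material must provide gradient()):
 * @code
 * constexpr int d1d = 4, q1d = 5;
 * // One dim x dim diagonal block per degree of freedom of each element.
 * Vector Ke_diag(d1d * d1d * d1d * 3 * ne * 3);
 * ElasticityKernels::AssembleGradientDiagonal3D<d1d, q1d>(
 *    ne, B, G, W, Jacobian, detJ, X, Ke_diag, mat);
 * @endcode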
 */
template <int d1d, int q1d, typename material_type> static inline
void AssembleGradientDiagonal3D(const int ne,
                                const Array<double> &B_,
                                const Array<double> &G_,
                                const Array<double> &W_,
                                const Vector &Jacobian_,
                                const Vector &detJ_,
                                const Vector &X_,
                                Vector &Ke_diag_memory,
                                const material_type &material)
{
   static constexpr int dim = 3;
   KernelHelpers::CheckMemoryRestriction(d1d, q1d);

   const tensor<double, q1d, d1d> &B =
      make_tensor<q1d, d1d>([&](int i, int j) { return B_[i + q1d*j]; });

   const tensor<double, q1d, d1d> &G =
      make_tensor<q1d, d1d>([&](int i, int j) { return G_[i + q1d*j]; });

   const auto qweights = Reshape(W_.Read(), q1d, q1d, q1d);
   const auto J = Reshape(Jacobian_.Read(), q1d, q1d, q1d, dim, dim, ne);
   const auto detJ = Reshape(detJ_.Read(), q1d, q1d, q1d, ne);
   const auto U = Reshape(X_.Read(), d1d, d1d, d1d, dim, ne);

   auto Ke_diag_m =
      Reshape(Ke_diag_memory.ReadWrite(), d1d, d1d, d1d, dim, ne, dim);

   mfem::forall_3D(ne, q1d, q1d, q1d, [=] MFEM_HOST_DEVICE (int e)
   {
      // shared memory placeholders for temporary contraction results
      MFEM_SHARED tensor<double, 2, 3, q1d, q1d, q1d> smem;

      // du/dxi
      MFEM_SHARED_3D_BLOCK_TENSOR(dudxi, double, q1d, q1d, q1d, dim, dim);
      MFEM_SHARED_3D_BLOCK_TENSOR(Ke_diag, double, d1d, d1d, d1d, dim, dim);

      const auto U_el = Reshape(&U(0, 0, 0, 0, e), d1d, d1d, d1d, dim);
      KernelHelpers::CalcGrad(B, G, smem, U_el, dudxi);

      MFEM_FOREACH_THREAD(qx, x, q1d)
      {
         MFEM_FOREACH_THREAD(qy, y, q1d)
         {
            MFEM_FOREACH_THREAD(qz, z, q1d)
            {
               const auto invJqp = inv(make_tensor<dim, dim>([&](int i, int j)
               {
                  return J(qx, qy, qz, i, j, e);
               }));

               const auto dudx = dudxi(qz, qy, qx) * invJqp;

               const auto dsigma_ddudx = material.gradient(dudx);

               const double JxW = detJ(qx, qy, qz, e) * qweights(qx, qy, qz);
               const auto dphidx = KernelHelpers::GradAllShapeFunctions(qx, qy, qz, B, G,
                                                                        invJqp);

               for (int dx = 0; dx < d1d; dx++)
               {
                  for (int dy = 0; dy < d1d; dy++)
                  {
                     for (int dz = 0; dz < d1d; dz++)
                     {
                        // phi_i * f(...) * phi_i
                        // dphidx_i dsigma_ddudx_ijkl dphidx_l
                        const auto phi_i = dphidx(dx, dy, dz);
                        const auto val = JxW * (phi_i * dsigma_ddudx * phi_i);
                        for (int l = 0; l < dim; l++)
                        {
                           for (int m = 0; m < dim; m++)
                           {
                              AtomicAdd(Ke_diag(dx, dy, dz, l, m), val[l][m]);
                           } // m
                        } // l
                     } // dz
                  } // dy
               } // dx
            } // qz
         } // qy
      } // qx
      MFEM_SYNC_THREAD;

      // Copy the accumulated diagonal blocks from shared memory into the
      // output layout d1d x d1d x d1d x dim x ne x dim.
      MFEM_FOREACH_THREAD(i, x, d1d)
      {
         MFEM_FOREACH_THREAD(j, y, d1d)
         {
            MFEM_FOREACH_THREAD(k, z, d1d)
            {
               for (int l = 0; l < dim; l++)
               {
                  for (int m = 0; m < dim; m++)
                  {
                     Ke_diag_m(i, j, k, l, e, m) = Ke_diag(i, j, k, l, m);
                  }
               }
            }
         }
      }
   }); // for each element
}

} // namespace ElasticityKernels

} // namespace mfem

#endif