LCOV - code coverage report
Current view: top level - tensors - mandel_notation.cxx (source / functions) Coverage Total Hit
Test: coverage.info Lines: 100.0 % 90 90
Test Date: 2025-06-29 01:25:44 Functions: 100.0 % 22 22

            Line data    Source code
       1              : // Copyright 2024, UChicago Argonne, LLC
       2              : // All Rights Reserved
       3              : // Software Name: NEML2 -- the New Engineering material Model Library, version 2
       4              : // By: Argonne National Laboratory
       5              : // OPEN SOURCE LICENSE (MIT)
       6              : //
       7              : // Permission is hereby granted, free of charge, to any person obtaining a copy
       8              : // of this software and associated documentation files (the "Software"), to deal
       9              : // in the Software without restriction, including without limitation the rights
      10              : // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      11              : // copies of the Software, and to permit persons to whom the Software is
      12              : // furnished to do so, subject to the following conditions:
      13              : //
      14              : // The above copyright notice and this permission notice shall be included in
      15              : // all copies or substantial portions of the Software.
      16              : //
      17              : // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      18              : // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      19              : // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      20              : // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      21              : // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      22              : // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
      23              : // THE SOFTWARE.
      24              : 
      25              : #include "neml2/tensors/mandel_notation.h"
      26              : #include "neml2/tensors/TensorCache.h"
      27              : #include "neml2/tensors/shape_utils.h"
      28              : 
      29              : namespace neml2
      30              : {
      31              : const Tensor &
      32          273 : full_to_mandel_map(const TensorOptions & opt)
      33              : {
      34            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      35            7 :   { return Tensor::create({0, 4, 8, 5, 2, 1}, opt); };
      36          273 :   thread_local TensorCache _ftmm(maker);
      37          546 :   return _ftmm(opt);
      38              : }
      39              : 
      40              : const Tensor &
      41          206 : mandel_to_full_map(const TensorOptions & opt)
      42              : {
      43            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      44           10 :   { return Tensor::create({0, 5, 4, 5, 1, 3, 4, 3, 2}, opt); };
      45          206 :   thread_local TensorCache _mtfm(maker);
      46          412 :   return _mtfm(opt);
      47              : }
      48              : 
      49              : const Tensor &
      50          273 : full_to_mandel_factor(const TensorOptions & opt)
      51              : {
      52            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      53            7 :   { return Tensor::create({1.0, 1.0, 1.0, sqrt2, sqrt2, sqrt2}, opt); };
      54          273 :   thread_local TensorCache _ftmf(maker);
      55          546 :   return _ftmf(opt);
      56              : }
      57              : 
      58              : const Tensor &
      59          206 : mandel_to_full_factor(const TensorOptions & opt)
      60              : {
      61            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      62              :   {
      63           10 :     return Tensor::create(
      64           11 :         {1.0, invsqrt2, invsqrt2, invsqrt2, 1.0, invsqrt2, invsqrt2, invsqrt2, 1.0}, opt);
      65            1 :   };
      66          206 :   thread_local TensorCache _mtff(maker);
      67          412 :   return _mtff(opt);
      68              : }
      69              : 
      70              : const Tensor &
      71           49 : full_to_skew_map(const TensorOptions & opt)
      72              : {
      73            4 :   auto maker = [](const TensorOptions & opt) -> Tensor { return Tensor::create({7, 2, 3}, opt); };
      74           49 :   thread_local TensorCache _ftsm(maker);
      75           98 :   return _ftsm(opt);
      76              : }
      77              : 
      78              : const Tensor &
      79           33 : skew_to_full_map(const TensorOptions & opt)
      80              : {
      81            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      82           10 :   { return Tensor::create({0, 2, 1, 2, 0, 0, 1, 0, 0}, opt); };
      83           33 :   thread_local TensorCache _stfm(maker);
      84           66 :   return _stfm(opt);
      85              : }
      86              : 
      87              : const Tensor &
      88           49 : full_to_skew_factor(const TensorOptions & opt)
      89              : {
      90            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
      91            4 :   { return Tensor::create({1.0, 1.0, 1.0}, opt); };
      92           49 :   thread_local TensorCache _ftsf(maker);
      93           98 :   return _ftsf(opt);
      94              : }
      95              : 
      96              : const Tensor &
      97           33 : skew_to_full_factor(const TensorOptions & opt)
      98              : {
      99            1 :   auto maker = [](const TensorOptions & opt) -> Tensor
     100           10 :   { return Tensor::create({0.0, -1.0, 1.0, 1.0, 0.0, -1.0, -1.0, 1.0, 0.0}, opt); };
     101           33 :   thread_local TensorCache _stff(maker);
     102           66 :   return _stff(opt);
     103              : }
     104              : 
/// Collapse a pair of (3,3) base axes of `full`, starting at base axis `dim`,
/// into a single reduced axis via a gather-and-scale:
///   - `rmap` (integer tensor, length = reduced size) selects which of the 9
///     flattened components land in each reduced slot;
///   - `rfactors` scales each reduced component (e.g. sqrt(2) for Mandel shears).
/// Batch dimensions of `full` are preserved unchanged.
/// NOTE(review): assumes the two axes being reduced are each of size 3
/// (they are flattened to 9 below) — callers in this file guarantee that.
Tensor
full_to_reduced(const Tensor & full, const Tensor & rmap, const Tensor & rfactors, Size dim)
{
  const auto & batch_shape = full.batch_sizes();
  auto batch_dim = full.batch_dim();
  auto trailing_dim = full.base_dim() - dim - 2; // 2 comes from the reduced axes (3,3)
  // Base shape before and after the pair of axes being reduced
  auto starting_shape = full.base_sizes().slice(0, dim);
  auto trailing_shape = full.base_sizes().slice(dim + 2);

  // Build an index expression that unsqueezes the 1-D map/factor tensors into
  // position `dim` of the base shape: `dim` leading None axes, then the data,
  // then one None per trailing base axis.
  indexing::TensorIndices net(dim, indexing::None);
  net.push_back(indexing::Ellipsis);
  net.insert(net.end(), trailing_dim, indexing::None);
  auto map_shape = utils::add_shapes(starting_shape, rmap.size(0), trailing_shape);
  auto map = rmap.index(net).expand(map_shape);
  auto factor = rfactors.index(net); // left broadcastable; no expand needed

  // gather requires the index tensor to match the input's batch shape, so
  // broadcast the map across all batch dimensions of `full`.
  auto batched_map = Tensor(map, 0).batch_expand_as(full);
  // Flatten the (3,3) pair to a single axis of 9, then pick the reduced
  // components along that axis (offset by batch_dim to address a base axis).
  auto reduced = at::gather(full.base_reshape(utils::add_shapes(starting_shape, 9, trailing_shape)),
                            batch_dim + dim,
                            batched_map);

  // Apply the per-component scale and restore the original batch shape.
  return Tensor(factor, 0) * Tensor(reduced, batch_shape);
}
     128              : 
/// Inverse of `full_to_reduced`: expand the reduced base axis of `reduced`
/// (at base position `dim`) back into a pair of (3,3) axes.
///   - `rmap` (integer tensor, length 9) says which reduced component fills
///     each of the 9 flattened full-tensor slots;
///   - `rfactors` (length 9) scales/signs each slot (e.g. 1/sqrt(2) for
///     Mandel shears, +-1/0 for the skew reconstruction).
/// Batch dimensions of `reduced` are preserved unchanged.
Tensor
reduced_to_full(const Tensor & reduced, const Tensor & rmap, const Tensor & rfactors, Size dim)
{
  const auto & batch_shape = reduced.batch_sizes();
  auto batch_dim = reduced.batch_dim();
  auto trailing_dim = reduced.base_dim() - dim - 1; // There's only 1 axis to unsqueeze
  // Base shape before and after the reduced axis being expanded
  auto starting_shape = reduced.base_sizes().slice(0, dim);
  auto trailing_shape = reduced.base_sizes().slice(dim + 1);

  // Unsqueeze the 1-D map/factor tensors into base position `dim`:
  // `dim` leading None axes, then the data, then one None per trailing axis.
  indexing::TensorIndices net(dim, indexing::None);
  net.push_back(indexing::Ellipsis);
  net.insert(net.end(), trailing_dim, indexing::None);
  auto map_shape = utils::add_shapes(starting_shape, rmap.size(0), trailing_shape);
  auto map = rmap.index(net).expand(map_shape);
  auto factor = rfactors.index(net); // left broadcastable; no expand needed

  // gather needs the index tensor to match the input's batch shape, so
  // broadcast the map across all batch dimensions of `reduced`. The gather
  // replicates each reduced component into its full-tensor slots, and the
  // factor applies the per-slot scale/sign in the same step.
  auto batched_map = Tensor(map, 0).batch_expand_as(reduced);
  auto full = Tensor(factor * at::gather(reduced, batch_dim + dim, batched_map), batch_shape);

  // Split the gathered axis of 9 back into the (3,3) pair.
  return full.base_reshape(utils::add_shapes(starting_shape, 3, 3, trailing_shape));
}
     150              : 
     151              : Tensor
     152          273 : full_to_mandel(const Tensor & full, Size dim)
     153              : {
     154              :   return full_to_reduced(full,
     155          273 :                          full_to_mandel_map(full.options().dtype(get_default_integer_dtype())),
     156          546 :                          full_to_mandel_factor(full.options()),
     157          819 :                          dim);
     158              : }
     159              : 
     160              : Tensor
     161          206 : mandel_to_full(const Tensor & mandel, Size dim)
     162              : {
     163              :   return reduced_to_full(mandel,
     164          206 :                          mandel_to_full_map(mandel.options().dtype(get_default_integer_dtype())),
     165          412 :                          mandel_to_full_factor(mandel.options()),
     166          618 :                          dim);
     167              : }
     168              : 
     169              : Tensor
     170           49 : full_to_skew(const Tensor & full, Size dim)
     171              : {
     172              :   return full_to_reduced(full,
     173           49 :                          full_to_skew_map(full.options().dtype(get_default_integer_dtype())),
     174           98 :                          full_to_skew_factor(full.options()),
     175          147 :                          dim);
     176              : }
     177              : 
     178              : Tensor
     179           33 : skew_to_full(const Tensor & skew, Size dim)
     180              : {
     181              :   return reduced_to_full(skew,
     182           33 :                          skew_to_full_map(skew.options().dtype(get_default_integer_dtype())),
     183           66 :                          skew_to_full_factor(skew.options()),
     184           99 :                          dim);
     185              : }
     186              : } // namespace neml2
        

Generated by: LCOV version 2.0-1