LCOV - code coverage report
Current view: top level - models - Assembler.cxx (source / functions)
Test:         coverage.info
Test Date:    2025-06-29 01:25:44

              Coverage    Total    Hit
Lines:          98.2 %      110    108
Functions:     100.0 %        6      6

            Line data    Source code
       1              : // Copyright 2024, UChicago Argonne, LLC
       2              : // All Rights Reserved
       3              : // Software Name: NEML2 -- the New Engineering material Model Library, version 2
       4              : // By: Argonne National Laboratory
       5              : // OPEN SOURCE LICENSE (MIT)
       6              : //
       7              : // Permission is hereby granted, free of charge, to any person obtaining a copy
       8              : // of this software and associated documentation files (the "Software"), to deal
       9              : // in the Software without restriction, including without limitation the rights
      10              : // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      11              : // copies of the Software, and to permit persons to whom the Software is
      12              : // furnished to do so, subject to the following conditions:
      13              : //
      14              : // The above copyright notice and this permission notice shall be included in
      15              : // all copies or substantial portions of the Software.
      16              : //
      17              : // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      18              : // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      19              : // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      20              : // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      21              : // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      22              : // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
      23              : // THE SOFTWARE.
      24              : 
      25              : #include "neml2/models/Assembler.h"
      26              : #include "neml2/tensors/functions/cat.h"
      27              : #include "neml2/misc/assertions.h"
      28              : 
      29              : namespace neml2
      30              : {
      31              : Tensor
      32         1755 : VectorAssembler::assemble_by_variable(const ValueMap & vals_dict) const
      33              : {
      34         1755 :   const auto vars = _axis.variable_names();
      35              : 
      36              :   // We need to know the dtype and device so that undefined tensors can be filled with zeros
      37         1755 :   auto options = TensorOptions();
      38         1755 :   bool options_defined = false;
      39              : 
      40              :   // Look up variable values from the given dictionary.
       41              :   // If a variable is not found, the tensor at that position remains undefined, and all undefined
       42              :   // tensors will later be filled with zeros.
      43         1755 :   std::vector<Tensor> vals(vars.size());
      44         6844 :   for (std::size_t i = 0; i < vars.size(); ++i)
      45              :   {
      46         5089 :     const auto val = vals_dict.find(_axis.qualify(vars[i]));
      47         5089 :     if (val != vals_dict.end())
      48              :     {
      49         5086 :       vals[i] = val->second.base_flatten();
      50         5086 :       neml_assert_dbg(vals[i].base_size(0) == _axis.variable_sizes()[i],
      51              :                       "Invalid size for variable ",
      52         5086 :                       vars[i],
      53              :                       ". Expected ",
      54         5086 :                       _axis.variable_sizes()[i],
      55              :                       ", got ",
      56         5086 :                       vals[i].base_size(0));
      57         5086 :       if (!options_defined)
      58              :       {
      59         1755 :         options = options.dtype(vals[i].dtype()).device(vals[i].device());
      60         1755 :         options_defined = true;
      61              :       }
      62              :     }
      63              :   }
      64              : 
      65         1755 :   neml_assert(options_defined, "No variable values found for assembly");
      66              : 
      67              :   // Expand defined tensors with the broadcast batch shape and fill undefined tensors with zeros.
      68         1755 :   const auto batch_sizes = utils::broadcast_batch_sizes(vals);
      69         6844 :   for (std::size_t i = 0; i < vars.size(); ++i)
      70         5089 :     if (vals[i].defined())
      71         5086 :       vals[i] = vals[i].batch_expand(batch_sizes);
      72              :     else
      73            3 :       vals[i] = Tensor::zeros(batch_sizes, _axis.variable_sizes()[i], options);
      74              : 
      75         3510 :   return base_cat(vals, -1);
      76         1755 : }
      77              : 
      78              : ValueMap
      79         1590 : VectorAssembler::split_by_variable(const Tensor & tensor) const
      80              : {
      81         1590 :   ValueMap ret;
      82              : 
      83         1590 :   const auto keys = _axis.variable_names();
      84         1590 :   const auto vals = tensor.split(_axis.variable_sizes(), -1);
      85              : 
      86         6231 :   for (std::size_t i = 0; i < keys.size(); ++i)
      87         4641 :     ret[_axis.qualify(keys[i])] = Tensor(vals[i], tensor.batch_sizes());
      88              : 
      89         3180 :   return ret;
      90         1590 : }
      91              : 
      92              : std::map<SubaxisName, Tensor>
      93            1 : VectorAssembler::split_by_subaxis(const Tensor & tensor) const
      94              : {
      95            1 :   std::map<SubaxisName, Tensor> ret;
      96              : 
      97            1 :   const auto keys = _axis.subaxis_names();
      98            1 :   const auto vals = tensor.split(_axis.subaxis_sizes(), -1);
      99              : 
     100            3 :   for (std::size_t i = 0; i < keys.size(); ++i)
     101            2 :     ret[_axis.qualify(keys[i])] = Tensor(vals[i], tensor.batch_sizes());
     102              : 
     103            2 :   return ret;
     104            1 : }
     105              : 
     106              : Tensor
     107         1247 : MatrixAssembler::assemble_by_variable(const DerivMap & vals_dict) const
     108              : {
     109         1247 :   const auto yvars = _yaxis.variable_names();
     110         1247 :   const auto xvars = _xaxis.variable_names();
     111              : 
     112              :   // We need to know the dtype and device so that undefined tensors can be filled with zeros
     113         1247 :   auto options = TensorOptions();
     114         1247 :   bool options_defined = false;
     115              : 
     116              :   // Assemble columns of each row
     117         1247 :   std::vector<Tensor> rows(yvars.size());
     118         4905 :   for (std::size_t i = 0; i < yvars.size(); ++i)
     119              :   {
     120         3658 :     const auto vals_row = vals_dict.find(_yaxis.qualify(yvars[i]));
     121         3658 :     if (vals_row == vals_dict.end())
     122            0 :       continue;
     123              : 
     124              :     // Look up variable values from the given dictionary.
      125              :     // If a variable is not found, the tensor at that position remains undefined, and all undefined
      126              :     // tensors will later be filled with zeros.
     127         3658 :     std::vector<Tensor> vals(xvars.size());
     128        14831 :     for (std::size_t j = 0; j < xvars.size(); ++j)
     129              :     {
     130        11173 :       const auto val = vals_row->second.find(_xaxis.qualify(xvars[j]));
     131        11173 :       if (val != vals_row->second.end())
     132              :       {
     133        11161 :         vals[j] = val->second;
     134        11161 :         neml_assert_dbg(vals[j].base_dim() == 2,
     135              :                         "During matrix assembly, found a tensor associated with variables ",
     136        11161 :                         yvars[i],
     137              :                         "/",
     138        11161 :                         xvars[j],
     139              :                         " with base dimension ",
     140        11161 :                         vals[j].base_dim(),
     141              :                         ". Expected base dimension of 2.");
     142        22322 :         neml_assert_dbg(vals[j].base_size(0) == _yaxis.variable_sizes()[i] &&
     143        11161 :                             vals[j].base_size(1) == _xaxis.variable_sizes()[j],
     144              :                         "Invalid tensor shape associated with variables ",
     145        11161 :                         yvars[i],
     146              :                         "/",
     147        11161 :                         xvars[j],
     148              :                         ". Expected base shape ",
     149        22322 :                         TensorShape{_yaxis.variable_sizes()[i], _xaxis.variable_sizes()[j]},
     150              :                         ", got ",
     151        11161 :                         vals[j].base_sizes());
     152        11161 :         if (!options_defined)
     153              :         {
     154         1247 :           options = options.dtype(vals[j].dtype()).device(vals[j].device());
     155         1247 :           options_defined = true;
     156              :         }
     157              :       }
     158              :     }
     159              : 
     160         3658 :     neml_assert(options_defined, "No variable values found for assembly");
     161              : 
     162              :     // Expand defined tensors with the broadcast batch shape and fill undefined tensors with zeros.
     163         3658 :     const auto batch_sizes = utils::broadcast_batch_sizes(vals);
     164        14831 :     for (std::size_t j = 0; j < xvars.size(); ++j)
     165        11173 :       if (vals[j].defined())
     166        11161 :         vals[j] = vals[j].batch_expand(batch_sizes);
     167              :       else
     168           24 :         vals[j] = Tensor::zeros(
     169           24 :             batch_sizes, {_yaxis.variable_sizes()[i], _xaxis.variable_sizes()[j]}, options);
     170              : 
     171         3658 :     rows[i] = base_cat(vals, -1);
     172         3658 :   }
     173              : 
     174              :   // Expand defined tensors with the broadcast batch shape and fill undefined tensors with zeros.
     175         1247 :   const auto batch_sizes = utils::broadcast_batch_sizes(rows);
     176         4905 :   for (std::size_t i = 0; i < yvars.size(); ++i)
     177         3658 :     if (rows[i].defined())
     178         3658 :       rows[i] = rows[i].batch_expand(batch_sizes);
     179              :     else
     180            0 :       rows[i] = Tensor::zeros(batch_sizes, {_yaxis.variable_sizes()[i], _xaxis.size()}, options);
     181              : 
     182         2494 :   return base_cat(rows, -2);
     183         1247 : }
     184              : 
     185              : DerivMap
     186            9 : MatrixAssembler::split_by_variable(const Tensor & tensor) const
     187              : {
     188            9 :   DerivMap ret;
     189              : 
     190            9 :   const auto yvars = _yaxis.variable_names();
     191            9 :   const auto xvars = _xaxis.variable_names();
     192              : 
     193            9 :   const auto rows = tensor.split(_yaxis.variable_sizes(), -2);
     194           30 :   for (std::size_t i = 0; i < yvars.size(); ++i)
     195              :   {
     196           21 :     const auto vals = rows[i].split(_xaxis.variable_sizes(), -1);
     197           60 :     for (std::size_t j = 0; j < xvars.size(); ++j)
     198           78 :       ret[_yaxis.qualify(yvars[i])][_xaxis.qualify(xvars[j])] =
     199          117 :           Tensor(vals[j], tensor.batch_sizes());
     200           21 :   }
     201              : 
     202           18 :   return ret;
     203            9 : }
     204              : 
     205              : std::map<SubaxisName, std::map<SubaxisName, Tensor>>
     206            3 : MatrixAssembler::split_by_subaxis(const Tensor & tensor) const
     207              : {
     208            3 :   std::map<SubaxisName, std::map<SubaxisName, Tensor>> ret;
     209              : 
     210            3 :   const auto ynames = _yaxis.subaxis_names();
     211            3 :   const auto xnames = _xaxis.subaxis_names();
     212              : 
     213            3 :   const auto rows = tensor.split(_yaxis.subaxis_sizes(), -2);
     214            6 :   for (std::size_t i = 0; i < ynames.size(); ++i)
     215              :   {
     216            3 :     const auto vals = rows[i].split(_xaxis.subaxis_sizes(), -1);
     217           15 :     for (std::size_t j = 0; j < xnames.size(); ++j)
     218           24 :       ret[_yaxis.qualify(ynames[i])][_xaxis.qualify(xnames[j])] =
     219           36 :           Tensor(vals[j], tensor.batch_sizes());
     220            3 :   }
     221              : 
     222            6 :   return ret;
     223            3 : }
     224              : } // namespace neml2
        

Generated by: LCOV version 2.0-1
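
Note: the listing above exercises a generic "look up, zero-fill, broadcast, concatenate" assembly pattern. The sketch below is a minimal, self-contained illustration of that pattern using plain std::vector<double> in place of neml2::Tensor. The names AxisVariable and assemble are hypothetical and not part of the NEML2 API, and batch broadcasting is intentionally omitted; this is only meant to clarify the zero-filling and concatenation steps measured above.

// Simplified sketch of the vector assembly pattern (hypothetical types; not NEML2 API).
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Each variable on the axis has a name and a flattened storage size.
struct AxisVariable
{
  std::string name;
  std::size_t size;
};

// Concatenate per-variable values into one flat vector, filling variables that are
// missing from the dictionary with zeros (mirroring assemble_by_variable above).
std::vector<double>
assemble(const std::vector<AxisVariable> & axis,
         const std::map<std::string, std::vector<double>> & vals_dict)
{
  std::vector<double> assembled;
  for (const auto & var : axis)
  {
    const auto it = vals_dict.find(var.name);
    if (it != vals_dict.end())
      assembled.insert(assembled.end(), it->second.begin(), it->second.end());
    else
      assembled.insert(assembled.end(), var.size, 0.0); // zero-fill the missing variable
  }
  return assembled;
}

int
main()
{
  // An axis with three variables of sizes 1, 3, and 2
  const std::vector<AxisVariable> axis = {{"state/a", 1}, {"state/b", 3}, {"state/c", 2}};

  // "state/c" is absent from the dictionary, so its slot is filled with zeros
  const std::map<std::string, std::vector<double>> vals = {{"state/a", {1.0}},
                                                           {"state/b", {2.0, 3.0, 4.0}}};

  for (const auto v : assemble(axis, vals))
    std::cout << v << ' '; // prints: 1 2 3 4 0 0
  std::cout << '\n';
}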