Line data Source code
1 : // Copyright 2024, UChicago Argonne, LLC
2 : // All Rights Reserved
3 : // Software Name: NEML2 -- the New Engineering material Model Library, version 2
4 : // By: Argonne National Laboratory
5 : // OPEN SOURCE LICENSE (MIT)
6 : //
7 : // Permission is hereby granted, free of charge, to any person obtaining a copy
8 : // of this software and associated documentation files (the "Software"), to deal
9 : // in the Software without restriction, including without limitation the rights
10 : // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 : // copies of the Software, and to permit persons to whom the Software is
12 : // furnished to do so, subject to the following conditions:
13 : //
14 : // The above copyright notice and this permission notice shall be included in
15 : // all copies or substantial portions of the Software.
16 : //
17 : // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 : // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 : // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 : // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 : // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 : // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 : // THE SOFTWARE.
24 :
25 : #include "neml2/tensors/mandel_notation.h"
26 : #include "neml2/tensors/TensorCache.h"
27 : #include "neml2/tensors/shape_utils.h"
28 :
29 : namespace neml2
30 : {
31 : const Tensor &
32 365 : full_to_mandel_map(const TensorOptions & opt)
33 : {
34 1 : auto maker = [](const TensorOptions & opt) -> Tensor
35 7 : { return Tensor::create({0, 4, 8, 5, 2, 1}, opt); };
36 365 : thread_local TensorCache _ftmm(maker);
37 730 : return _ftmm(opt);
38 : }
39 :
40 : const Tensor &
41 222 : mandel_to_full_map(const TensorOptions & opt)
42 : {
43 1 : auto maker = [](const TensorOptions & opt) -> Tensor
44 10 : { return Tensor::create({0, 5, 4, 5, 1, 3, 4, 3, 2}, opt); };
45 222 : thread_local TensorCache _mtfm(maker);
46 444 : return _mtfm(opt);
47 : }
48 :
49 : const Tensor &
50 365 : full_to_mandel_factor(const TensorOptions & opt)
51 : {
52 1 : auto maker = [](const TensorOptions & opt) -> Tensor
53 7 : { return Tensor::create({1.0, 1.0, 1.0, sqrt2, sqrt2, sqrt2}, opt); };
54 365 : thread_local TensorCache _ftmf(maker);
55 730 : return _ftmf(opt);
56 : }
57 :
58 : const Tensor &
59 222 : mandel_to_full_factor(const TensorOptions & opt)
60 : {
61 1 : auto maker = [](const TensorOptions & opt) -> Tensor
62 : {
63 10 : return Tensor::create(
64 11 : {1.0, invsqrt2, invsqrt2, invsqrt2, 1.0, invsqrt2, invsqrt2, invsqrt2, 1.0}, opt);
65 1 : };
66 222 : thread_local TensorCache _mtff(maker);
67 444 : return _mtff(opt);
68 : }
69 :
70 : const Tensor &
71 51 : full_to_skew_map(const TensorOptions & opt)
72 : {
73 4 : auto maker = [](const TensorOptions & opt) -> Tensor { return Tensor::create({7, 2, 3}, opt); };
74 51 : thread_local TensorCache _ftsm(maker);
75 102 : return _ftsm(opt);
76 : }
77 :
78 : const Tensor &
79 33 : skew_to_full_map(const TensorOptions & opt)
80 : {
81 1 : auto maker = [](const TensorOptions & opt) -> Tensor
82 10 : { return Tensor::create({0, 2, 1, 2, 0, 0, 1, 0, 0}, opt); };
83 33 : thread_local TensorCache _stfm(maker);
84 66 : return _stfm(opt);
85 : }
86 :
87 : const Tensor &
88 51 : full_to_skew_factor(const TensorOptions & opt)
89 : {
90 1 : auto maker = [](const TensorOptions & opt) -> Tensor
91 4 : { return Tensor::create({1.0, 1.0, 1.0}, opt); };
92 51 : thread_local TensorCache _ftsf(maker);
93 102 : return _ftsf(opt);
94 : }
95 :
96 : const Tensor &
97 33 : skew_to_full_factor(const TensorOptions & opt)
98 : {
99 1 : auto maker = [](const TensorOptions & opt) -> Tensor
100 10 : { return Tensor::create({0.0, -1.0, 1.0, 1.0, 0.0, -1.0, -1.0, 1.0, 0.0}, opt); };
101 33 : thread_local TensorCache _stff(maker);
102 66 : return _stff(opt);
103 : }
104 :
Tensor
full_to_reduced(const Tensor & full, const Tensor & rmap, const Tensor & rfactors, Size dim)
{
  // Contract two base axes of shape (3,3), starting at base dimension `dim`, into one
  // reduced axis: gather the entries selected by `rmap` (integer indices into the
  // flattened 9 entries) and scale them by `rfactors` (e.g. sqrt(2) on Mandel shears).
  const auto & batch_shape = full.batch_sizes();
  auto batch_dim = full.batch_dim();
  auto trailing_dim = full.base_dim() - dim - 2; // 2 comes from the reduced axes (3,3)
  auto starting_shape = full.base_sizes().slice(0, dim);
  auto trailing_shape = full.base_sizes().slice(dim + 2);

  // Unsqueeze rmap/rfactors with `dim` leading and `trailing_dim` trailing singleton
  // axes so they broadcast across the untouched base dimensions.
  indexing::TensorIndices net(dim, indexing::None);
  net.push_back(indexing::Ellipsis);
  net.insert(net.end(), trailing_dim, indexing::None);
  auto map_shape = utils::add_shapes(starting_shape, rmap.size(0), trailing_shape);
  auto map = rmap.index(net).expand(map_shape);
  auto factor = rfactors.index(net);

  // Flatten the (3,3) pair into a single axis of length 9 and gather the selected
  // components along it; the map is expanded over all batch dimensions of `full`.
  auto batched_map = Tensor(map, 0).batch_expand_as(full);
  auto reduced = at::gather(full.base_reshape(utils::add_shapes(starting_shape, 9, trailing_shape)),
                            batch_dim + dim,
                            batched_map);

  // Apply the per-component factors and reattach the original batch shape.
  return Tensor(factor, 0) * Tensor(reduced, batch_shape);
}
128 :
Tensor
reduced_to_full(const Tensor & reduced, const Tensor & rmap, const Tensor & rfactors, Size dim)
{
  // Inverse of full_to_reduced: expand the reduced axis at base dimension `dim` back
  // into a pair of (3,3) axes. `rmap` gives, for each of the 9 full entries, the
  // reduced component to copy; `rfactors` applies the per-entry scale/sign.
  const auto & batch_shape = reduced.batch_sizes();
  auto batch_dim = reduced.batch_dim();
  auto trailing_dim = reduced.base_dim() - dim - 1; // There's only 1 axis to unsqueeze
  auto starting_shape = reduced.base_sizes().slice(0, dim);
  auto trailing_shape = reduced.base_sizes().slice(dim + 1);

  // Unsqueeze rmap/rfactors so they broadcast across the untouched base dimensions.
  indexing::TensorIndices net(dim, indexing::None);
  net.push_back(indexing::Ellipsis);
  net.insert(net.end(), trailing_dim, indexing::None);
  auto map_shape = utils::add_shapes(starting_shape, rmap.size(0), trailing_shape);
  auto map = rmap.index(net).expand(map_shape);
  auto factor = rfactors.index(net);

  // Gather the 9 full entries from the reduced components (map expanded over all
  // batch dimensions), scale them, and reattach the original batch shape.
  auto batched_map = Tensor(map, 0).batch_expand_as(reduced);
  auto full = Tensor(factor * at::gather(reduced, batch_dim + dim, batched_map), batch_shape);

  // Reshape the gathered axis of length 9 back into the (3,3) pair.
  return full.base_reshape(utils::add_shapes(starting_shape, 3, 3, trailing_shape));
}
150 :
151 : Tensor
152 365 : full_to_mandel(const Tensor & full, Size dim)
153 : {
154 : return full_to_reduced(full,
155 365 : full_to_mandel_map(full.options().dtype(get_default_integer_dtype())),
156 730 : full_to_mandel_factor(full.options()),
157 1095 : dim);
158 : }
159 :
160 : Tensor
161 222 : mandel_to_full(const Tensor & mandel, Size dim)
162 : {
163 : return reduced_to_full(mandel,
164 222 : mandel_to_full_map(mandel.options().dtype(get_default_integer_dtype())),
165 444 : mandel_to_full_factor(mandel.options()),
166 666 : dim);
167 : }
168 :
169 : Tensor
170 51 : full_to_skew(const Tensor & full, Size dim)
171 : {
172 : return full_to_reduced(full,
173 51 : full_to_skew_map(full.options().dtype(get_default_integer_dtype())),
174 102 : full_to_skew_factor(full.options()),
175 153 : dim);
176 : }
177 :
178 : Tensor
179 33 : skew_to_full(const Tensor & skew, Size dim)
180 : {
181 : return reduced_to_full(skew,
182 33 : skew_to_full_map(skew.options().dtype(get_default_integer_dtype())),
183 66 : skew_to_full_factor(skew.options()),
184 99 : dim);
185 : }
186 : } // namespace neml2
|