92struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
94 typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
95 typedef typename XprType::Index Index;
96 typedef typename XprType::Scalar Scalar;
97 typedef typename XprType::CoeffReturnType CoeffReturnType;
98 typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
99 typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
100 typedef StorageMemory<CoeffReturnType, Device> Storage;
101 typedef typename Storage::Type EvaluatorPointerType;
103 static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
104 static const int NumDims = XprType::NumDims;
107 IsAligned = int(TensorEvaluator<LeftArgType, Device>::IsAligned) &
108 int(TensorEvaluator<RightArgType, Device>::IsAligned),
109 PacketAccess = int(TensorEvaluator<LeftArgType, Device>::PacketAccess) &
110 int(TensorEvaluator<RightArgType, Device>::PacketAccess),
111 BlockAccess = int(TensorEvaluator<LeftArgType, Device>::BlockAccess) &
112 int(TensorEvaluator<RightArgType, Device>::BlockAccess),
113 PreferBlockAccess = int(TensorEvaluator<LeftArgType, Device>::PreferBlockAccess) |
114 int(TensorEvaluator<RightArgType, Device>::PreferBlockAccess),
115 Layout = TensorEvaluator<LeftArgType, Device>::Layout,
116 RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
120 typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
121 typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
123 typedef typename TensorEvaluator<const RightArgType, Device>::TensorBlock
127 TensorEvaluator(
const XprType& op,
const Device& device) :
128 m_leftImpl(op.lhsExpression(), device),
129 m_rightImpl(op.rhsExpression(), device)
132 (
static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
133 static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
134 YOU_MADE_A_PROGRAMMING_MISTAKE);
137 EIGEN_DEVICE_FUNC
const Dimensions& dimensions()
const
142 return m_rightImpl.dimensions();
145 EIGEN_STRONG_INLINE
bool evalSubExprsIfNeeded(EvaluatorPointerType) {
146 eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
147 m_leftImpl.evalSubExprsIfNeeded(NULL);
152 return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
#ifdef EIGEN_USE_THREADS
  // Asynchronous counterpart of evalSubExprsIfNeeded: chains the two
  // evaluations via callbacks (lhs first, then rhs into the lhs buffer) and
  // forwards the rhs's need_assign flag to `done`.
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
      m_rightImpl.evalSubExprsIfNeededAsync(
          m_leftImpl.data(), [done](bool need_assign) { done(need_assign); });
    });
  }
#endif  // EIGEN_USE_THREADS
166 EIGEN_STRONG_INLINE
void cleanup() {
167 m_leftImpl.cleanup();
168 m_rightImpl.cleanup();
171 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalScalar(Index i) {
172 m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
174 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalPacket(Index i) {
176 const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ?
Aligned :
Unaligned;
177 const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ?
Aligned :
Unaligned;
178 m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
180 EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index)
const
182 return m_leftImpl.coeff(index);
184 template<
int LoadMode>
185 EIGEN_DEVICE_FUNC PacketReturnType packet(Index index)
const
187 return m_leftImpl.template packet<LoadMode>(index);
190 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
191 costPerCoeff(
bool vectorized)
const {
195 TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
196 return m_rightImpl.costPerCoeff(vectorized) +
198 numext::maxi(0.0, left.bytes_loaded() -
sizeof(CoeffReturnType)),
199 left.bytes_stored(), left.compute_cycles()) +
200 TensorOpCost(0,
sizeof(CoeffReturnType), 0, vectorized, PacketSize);
203 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
204 internal::TensorBlockResourceRequirements getResourceRequirements()
const {
205 return internal::TensorBlockResourceRequirements::merge(
206 m_leftImpl.getResourceRequirements(),
207 m_rightImpl.getResourceRequirements());
210 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void evalBlock(
211 TensorBlockDesc& desc, TensorBlockScratch& scratch) {
212 if (TensorEvaluator<LeftArgType, Device>::RawAccess &&
213 m_leftImpl.data() != NULL) {
216 desc.template AddDestinationBuffer<Layout>(
217 m_leftImpl.data() + desc.offset(),
218 internal::strides<Layout>(m_leftImpl.dimensions()));
221 RightTensorBlock block = m_rightImpl.block(desc, scratch,
true);
223 if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) {
224 m_leftImpl.writeBlock(desc, block);
#ifdef EIGEN_USE_SYCL
  // Binds placeholder accessors of both sub-evaluators to a SYCL command
  // group handler. Guarded: cl::sycl::handler only exists in SYCL builds.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_leftImpl.bind(cgh);
    m_rightImpl.bind(cgh);
  }
#endif
237 EIGEN_DEVICE_FUNC EvaluatorPointerType data()
const {
return m_leftImpl.data(); }
240 TensorEvaluator<LeftArgType, Device> m_leftImpl;
241 TensorEvaluator<RightArgType, Device> m_rightImpl;