10#include "mlir/Dialect/Affine/IR/AffineOps.h"
16 OpBuilder &builder, Location loc) {
17 auto inputs = op.getInputs();
18 auto outputs = op.getOutputs();
19 auto resultTensors = op.getResultTensors();
20 auto indexingMaps = op.getIndexingMapsAttr();
21 auto iteratorTypes = op.getIteratorTypesAttr();
23 auto genericOp = mlir::linalg::GenericOp::create(
24 builder, loc, TypeRange(resultTensors), ValueRange(inputs),
25 ValueRange(outputs), ArrayAttr(indexingMaps), ArrayAttr(iteratorTypes),
26 StringAttr(), StringAttr());
28 auto &body = genericOp.getRegion();
29 body.takeBody(op.getRegion());
41 if (a->isProperAncestor(b)) {
43 }
else if (b->isProperAncestor(a->getParentOp())) {
48 while (!a->getParentOp()->isAncestor(b))
51 while (!b->getParentOp()->isAncestor(a))
54 assert(a->getParentOp() == b->getParentOp());
56 if (a->getBlock() == b->getBlock()) {
57 return a->isBeforeInBlock(b);
59 return blockCmp(a->getBlock(), b->getBlock());
68 if (a->getParentOp()->isProperAncestor(b->getParentOp())) {
70 }
else if (b->getParentOp()->isProperAncestor(a->getParentOp())) {
74 if (a->getParentOp() == b->getParentOp()) {
75 return a->getRegionNumber() < b->getRegionNumber();
77 return opCmp(a->getParentOp(), b->getParentOp());
85 if (a->getParent()->isProperAncestor(b->getParent())) {
87 }
else if (b->getParent()->isProperAncestor(a->getParent())) {
91 if (a->getParent() == b->getParent()) {
94 for (
auto &bb : *b->getParent()) {
101 return regionCmp(a->getParent(), b->getParent());
109 auto ba = dyn_cast<BlockArgument>(a);
110 auto bb = dyn_cast<BlockArgument>(b);
117 if (ba.getOwner() == bb.getOwner()) {
118 return ba.getArgNumber() < bb.getArgNumber();
120 return blockCmp(ba.getOwner(), bb.getOwner());
123 OpResult ra = cast<OpResult>(a);
124 OpResult rb = cast<OpResult>(b);
126 if (ra.getOwner() == rb.getOwner()) {
127 return ra.getResultNumber() < rb.getResultNumber();
129 return opCmp(ra.getOwner(), rb.getOwner());
134 auto valTy = val.getType();
135 if (
auto valTensorTy = dyn_cast<TensorType>(valTy)) {
137 SmallVector<int64_t> out_shape = {width};
138 out_shape.append(valTensorTy.getShape().begin(),
139 valTensorTy.getShape().end());
140 auto outTy = valTensorTy.clone(out_shape);
142 }
else if (
auto valMemrefTy = dyn_cast<MemRefType>(valTy)) {
144 SmallVector<int64_t> out_shape = {width};
145 out_shape.append(valMemrefTy.getShape().begin(),
146 valMemrefTy.getShape().end());
147 auto outTy = valMemrefTy.clone(out_shape);
151 return RankedTensorType::get(width, valTy);
156 ArrayRef<Value> argList) {
157 int64_t width = argList.size();
159 mlir::Value out = enzyme::ConcatOp::create(builder, loc, out_type, argList);
164 Type argTy, Value val, int64_t index) {
166 IntegerAttr indexAttr = builder.getI64IntegerAttr(index);
167 Value out = enzyme::ExtractOp::create(builder, loc, argTy, val, indexAttr);
172 AffineMap map, ValueRange operands,
173 SmallVectorImpl<Value> &indices) {
174 for (
unsigned i = 0; i < map.getNumResults(); i++) {
175 indices.push_back(affine::AffineApplyOp::create(
176 builder, loc, map.getSubMap({i}), operands));
// Forward declarations (repaired from the extractor's detached signature
// index; each line was missing its terminating semicolon).
static mlir::linalg::GenericOp adjointToGeneric(enzyme::GenericAdjointOp &op,
                                                OpBuilder &builder,
                                                Location loc);
bool valueCmp(mlir::Value a, mlir::Value b);
Type getConcatType(Value val, int64_t width);
bool blockCmp(mlir::Block *a, mlir::Block *b);
Value getConcatValue(OpBuilder &builder, Location loc, ArrayRef<Value> argList);
bool opCmp(mlir::Operation *a, mlir::Operation *b);
bool regionCmp(mlir::Region *a, mlir::Region *b);
void computeAffineIndices(OpBuilder &builder, Location loc, AffineMap map,
                          ValueRange operands, SmallVectorImpl<Value> &indices);
Value getExtractValue(OpBuilder &builder, Location loc, Type argTy, Value val,
                      int64_t index);