//===- ArmSMEToSCF.cpp - Convert ArmSME to SCF dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of ArmSME operations to SCF.
//
//===----------------------------------------------------------------------===//
#include "mlir/Conversion/ArmSMEToSCF/ArmSMEToSCF.h"

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/ArmSME/IR/ArmSME.h"
#include "mlir/Dialect/ArmSME/Utils/Utils.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
#define GEN_PASS_DEF_CONVERTARMSMETOSCF
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

using namespace mlir;

namespace {
/// Returns adjusted (1-D or 2-D) `indices` for a tile slice as follows:
///   rank 1: (indices[0] + (tileSliceIndex * tileSliceNumElts))
///   rank 2: (indices[0] + tileSliceIndex, indices[1])
SmallVector<Value, 2> getMemrefIndices(ValueRange indices, unsigned rank,
                                       Value tileSliceIndex,
                                       Value tileSliceNumElts, Location loc,
                                       PatternRewriter &rewriter) {
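  // Sketch of the body, reconstructed from the comment above; the upstream
  // implementation may differ in detail.
  assert((rank == 1 || rank == 2) && "memref has unexpected rank!");
  SmallVector<Value, 2> outIndices;

  // For a rank-1 memref the tile slice index must be scaled by the number of
  // elements per tile slice to get an element offset.
  Value tileSliceOffset = tileSliceIndex;
  if (rank == 1)
    tileSliceOffset =
        rewriter.create<arith::MulIOp>(loc, tileSliceOffset, tileSliceNumElts);

  Value baseIndexPlusTileSliceOffset =
      rewriter.create<arith::AddIOp>(loc, indices[0], tileSliceOffset);
  outIndices.push_back(baseIndexPlusTileSliceOffset);

  if (rank == 2)
    outIndices.push_back(indices[1]);

  return outIndices;
}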

/// Creates an scf.for for the load/store of an ArmSME tile.
FailureOr<scf::ForOp> createLoadStoreForOverTileSlices(
    PatternRewriter &rewriter, Location loc, VectorType tileType,
    ValueRange memrefIndices, int memrefRank, Value mask, Value initTile,
    function_ref<Value(/*index=*/Value, ValueRange, /*predicate=*/Value,
                       /*currentTile=*/Value)>
        makeLoopBody) {
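  // Sketch based on the lowerings documented below: loop from 0 to the number
  // of tile slices (or the number of masked rows, if a mask is given), adjust
  // the memref indices per slice, and delegate the per-slice load/store to
  // `makeLoopBody`. Builder argument orders for the arith/scf/vector ops are
  // taken from their definitions; treat this as an assumption, not the
  // authoritative upstream body.

  // Only masks defined by 'vector.create_mask' are currently supported.
  vector::CreateMaskOp createMaskOp;
  if (mask) {
    createMaskOp = mask.getDefiningOp<vector::CreateMaskOp>();
    if (!createMaskOp)
      return rewriter.notifyMatchFailure(
          loc, "unsupported mask op, only 'vector.create_mask' is "
               "currently supported");
  }

  // Number of tile slices in the (scalable) tile: minNumElts * vscale.
  auto minTileSlices = rewriter.create<arith::ConstantIndexOp>(
      loc, arm_sme::getSMETileSliceMinNumElts(tileType.getElementType()));
  auto vscale =
      rewriter.create<vector::VectorScaleOp>(loc, rewriter.getIndexType());
  auto numTileSlices =
      rewriter.create<arith::MulIOp>(loc, minTileSlices, vscale);
  auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
  auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);

  // 1-D predicate type for a single tile slice, e.g. vector<[4]xi1>.
  auto predicateType =
      VectorType::get({tileType.getDimSize(1)}, rewriter.getI1Type(),
                      /*scalableDims=*/{true});

  Value upperBound;
  Value predicate;
  if (createMaskOp) {
    // Masked: only loop over the rows covered by the mask and predicate each
    // slice on the number of masked columns.
    Value numRows = createMaskOp.getOperands()[0];
    Value numCols = createMaskOp.getOperands()[1];
    upperBound = rewriter.create<arith::MinSIOp>(loc, numRows, numTileSlices);
    predicate = rewriter.create<vector::CreateMaskOp>(loc, predicateType,
                                                      ValueRange{numCols});
  } else {
    // Unmasked: loop over all tile slices with an all-true predicate.
    upperBound = numTileSlices;
    predicate = rewriter.create<arith::ConstantOp>(
        loc, DenseElementsAttr::get(predicateType, true));
  }

  bool hasCarriedArgs = bool(initTile);
  auto forOp = rewriter.create<scf::ForOp>(
      loc, lowerBound, upperBound, step,
      hasCarriedArgs ? ValueRange{initTile} : ValueRange{});
  rewriter.setInsertionPointToStart(forOp.getBody());

  Value tileSliceIndex = forOp.getInductionVar();
  auto adjustedIndices = getMemrefIndices(
      memrefIndices, memrefRank, tileSliceIndex, numTileSlices, loc, rewriter);
  Value nextTile = makeLoopBody(
      tileSliceIndex, adjustedIndices, predicate,
      /*currentTile=*/hasCarriedArgs ? Value(forOp.getRegionIterArgs()[0])
                                     : Value{});

  assert(bool(nextTile) == hasCarriedArgs &&
         "loop body must yield a tile iff one is carried");
  if (nextTile)
    rewriter.create<scf::YieldOp>(loc, nextTile);

  return forOp;
}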

FailureOr<scf::ForOp> createLoadStoreForOverTileSlices(
    PatternRewriter &rewriter, Location loc, VectorType tileType,
    ValueRange memrefIndices, int memrefRank, Value mask,
    function_ref<void(/*index=*/Value, ValueRange, /*predicate=*/Value)>
        makeLoopBody) {
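  // Sketch: wrapper for lowerings that carry no tile value through the loop
  // (e.g. stores). Forwards to the overload above with no initial tile and
  // adapts the callback.
  return createLoadStoreForOverTileSlices(
      rewriter, loc, tileType, memrefIndices, memrefRank, mask,
      /*initTile=*/Value{},
      [&](Value index, ValueRange adjustedIndices, Value predicate,
          Value /*currentTile*/) -> Value {
        makeLoopBody(index, adjustedIndices, predicate);
        return Value{};
      });
}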

/// Lower `arm_sme.tile_load` without a mask, or with a mask and a zero pad.
///
///  With a mask:
///
///  BEFORE:
///  ```mlir
///  %pad = arith.constant 0 : i32
///  %mask = vector.create_mask %num_rows, %num_cols : vector<[4]x[4]xi1>
///  %tile = arm_sme.tile_load %src[%c0, %c0], %pad, %mask :
///    memref<?x?xi32>, vector<[4]x[4]xi32>
///  ```
///
///  AFTER:
///  ```mlir
///  %init_tile = arm_sme.zero : vector<[4]x[4]xi32>
///  %mask_cols = vector.create_mask %num_cols : vector<[4]xi1>
///  %loop_rows = arith.minsi %num_rows, %svl_s : index
///  %tile = scf.for %tile_slice_idx = %c0 to %loop_rows step %c1
///                iter_args(%iter_tile = %init_tile) -> (vector<[4]x[4]xi32>) {
///    %tile_update = arm_sme.load_tile_slice
///      %src[%tile_slice_idx, %c0], %mask_cols, %iter_tile, %tile_slice_idx :
///      memref<?x?xi32>, vector<[4]xi1>, vector<[4]x[4]xi32>
///    scf.yield %tile_update : vector<[4]x[4]xi32>
///  }
///  ```
///
/// Without a mask the lowering is almost identical; the only differences are
/// that %mask_cols becomes an all-true mask and %loop_rows becomes %svl_s.
///
/// NOTE: Only masks defined by a 'vector.create_mask' op are currently
/// supported.
struct TileLoadOpConversion : public OpRewritePattern<arm_sme::TileLoadOp> {
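  using OpRewritePattern<arm_sme::TileLoadOp>::OpRewritePattern;

  // Sketch of the rewrite, following the documentation above. Accessor names
  // (getBase/getIndices/getPadding/getMask/getLayout/getVectorType/
  // getMemRefType) and the 'arm_sme.load_tile_slice' builder argument order
  // are assumptions taken from the ArmSME op definitions.
  LogicalResult matchAndRewrite(arm_sme::TileLoadOp tileLoadOp,
                                PatternRewriter &rewriter) const override {
    auto loc = tileLoadOp.getLoc();
    auto tileType = tileLoadOp.getVectorType();
    auto mask = tileLoadOp.getMask();

    if (mask) {
      // A non-zero pad with a mask is handled by the next pattern.
      auto pad = tileLoadOp.getPadding();
      if (!matchPattern(pad, m_Zero()) && !matchPattern(pad, m_AnyZeroFloat()))
        return rewriter.notifyMatchFailure(
            tileLoadOp, "op has non-zero pad, needs non-zero pad pattern");
      if (!mask.getDefiningOp<vector::CreateMaskOp>())
        return rewriter.notifyMatchFailure(
            tileLoadOp, "unsupported mask op, only 'vector.create_mask' is "
                        "currently supported");
    }

    // Zero-initialize the tile: rows skipped by the (masked) loop must hold
    // the zero padding value.
    auto initTile = rewriter.create<arm_sme::ZeroOp>(loc, tileType);

    // Loop over the tile slices, loading each one with
    // 'arm_sme.load_tile_slice' and accumulating into the tile.
    auto forOp = createLoadStoreForOverTileSlices(
        rewriter, loc, tileType, tileLoadOp.getIndices(),
        tileLoadOp.getMemRefType().getRank(), mask, initTile,
        [&](Value tileSliceIndex, ValueRange memrefIndices, Value predicate,
            Value currentTile) -> Value {
          return rewriter.create<arm_sme::LoadTileSliceOp>(
              loc, tileType, tileLoadOp.getBase(), predicate, currentTile,
              memrefIndices, tileSliceIndex, tileLoadOp.getLayout());
        });
    if (failed(forOp))
      return failure();

    // Replace 'arm_sme.tile_load' with the loop result (the loaded tile).
    rewriter.replaceOp(tileLoadOp, forOp->getResults());
    return success();
  }
};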

/// Lower `arm_sme.tile_load` with mask and non-zero pad.
///
///  BEFORE:
///  ```mlir
///  %mask = vector.create_mask %num_rows, %num_cols : vector<[4]x[4]xi1>
///  %tile = arm_sme.tile_load %src[%c0, %c0], %pad, %mask :
///    memref<?x?xi32>, vector<[4]x[4]xi32>
///  ```
///
///  AFTER:
///  ```mlir
///  ...
///  %pad_1d = vector.splat %pad : vector<[4]xi32>
///  %tile = scf.for %tile_slice_idx = %c0 to %svl_s step %c1
///                iter_args(%iter_tile = %init_tile) -> (vector<[4]x[4]xi32>) {
///    ...
///    %mask_1d = vector.create_mask <combined_mask> : vector<[4]xi1>
///    %slice = vector.maskedload %base[%tile_slice_idx, %c0], %mask_1d, %pad_1d
///      : memref<?x?xi32>, vector<[4]xi1>,
///        vector<[4]xi32> into vector<[4]xi32>
///    // Insert slice into tile
///    %tile_update = arm_sme.insert_tile_slice
///      %slice, %iter_tile[%tile_slice_idx] :
///      vector<[4]xi32> into vector<[4]x[4]xi32>
///    scf.yield %tile_update : vector<[4]x[4]xi32>
///  }
///  ```
struct TileLoadOpWithMaskAndPadNonZeroConversion
    : public OpRewritePattern<arm_sme::TileLoadOp> {
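  using OpRewritePattern<arm_sme::TileLoadOp>::OpRewritePattern;

  // Sketch following the documentation above: loop over *all* tile slices,
  // combine the row predicate with the column mask, load each slice with a
  // padded 'vector.maskedload', and insert it into the tile. The
  // 'arm_sme.get_tile' / 'arm_sme.insert_tile_slice' builder argument orders
  // are assumptions taken from the op definitions.
  LogicalResult matchAndRewrite(arm_sme::TileLoadOp tileLoadOp,
                                PatternRewriter &rewriter) const override {
    auto loc = tileLoadOp.getLoc();
    auto tileType = tileLoadOp.getVectorType();
    auto elementType = tileType.getElementType();

    auto mask = tileLoadOp.getMask();
    if (!mask)
      return rewriter.notifyMatchFailure(
          tileLoadOp, "op has no mask, needs unmasked pattern");

    auto pad = tileLoadOp.getPadding();
    if (matchPattern(pad, m_Zero()) || matchPattern(pad, m_AnyZeroFloat()))
      return rewriter.notifyMatchFailure(
          tileLoadOp, "op has zero pad, needs zero pad pattern");

    auto createMaskOp = mask.getDefiningOp<vector::CreateMaskOp>();
    if (!createMaskOp)
      return rewriter.notifyMatchFailure(
          tileLoadOp, "unsupported mask op, only 'vector.create_mask' is "
                      "currently supported");
    Value numRows = createMaskOp.getOperands()[0];
    Value numCols = createMaskOp.getOperands()[1];

    // The loop visits (and fully overwrites) every slice, so an uninitialized
    // tile is a sufficient starting value.
    Value initTile = rewriter.create<arm_sme::GetTileOp>(loc, tileType);

    // Loop bounds: 0 to the number of tile slices (min elts * vscale).
    auto minTileSlices = rewriter.create<arith::ConstantIndexOp>(
        loc, arm_sme::getSMETileSliceMinNumElts(elementType));
    auto vscale =
        rewriter.create<vector::VectorScaleOp>(loc, rewriter.getIndexType());
    auto numTileSlices =
        rewriter.create<arith::MulIOp>(loc, minTileSlices, vscale);
    auto c0 = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    auto c1 = rewriter.create<arith::ConstantIndexOp>(loc, 1);

    auto forOp = rewriter.create<scf::ForOp>(loc, c0, numTileSlices, c1,
                                             ValueRange{initTile});
    rewriter.setInsertionPointToStart(forOp.getBody());
    Value tileSliceIndex = forOp.getInductionVar();
    Value currentTile = forOp.getRegionIterArgs()[0];

    // Combine the row and column masks: the slice mask covers %num_cols
    // elements for active rows and 0 elements (i.e. all pad) otherwise.
    auto rowIsActive = rewriter.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::slt, tileSliceIndex, numRows);
    Value numElts =
        rewriter.create<arith::SelectOp>(loc, rowIsActive, numCols, c0);
    auto sliceType = VectorType::get({tileType.getDimSize(1)}, elementType,
                                     /*scalableDims=*/{true});
    auto sliceMaskType = VectorType::get(
        {tileType.getDimSize(1)}, rewriter.getI1Type(), /*scalableDims=*/{true});
    auto sliceMask = rewriter.create<vector::CreateMaskOp>(loc, sliceMaskType,
                                                           ValueRange{numElts});

    // Load the slice, with the pad as pass-through for inactive lanes.
    auto memrefIndices = getMemrefIndices(
        tileLoadOp.getIndices(), tileLoadOp.getMemRefType().getRank(),
        tileSliceIndex, numTileSlices, loc, rewriter);
    auto pad1D = rewriter.create<vector::SplatOp>(loc, sliceType, pad);
    auto loadSlice = rewriter.create<vector::MaskedLoadOp>(
        loc, sliceType, tileLoadOp.getBase(), memrefIndices, sliceMask,
        /*passthru=*/pad1D);

    // Insert the slice into the tile and carry the updated tile.
    auto tileUpdate = rewriter.create<arm_sme::InsertTileSliceOp>(
        loc, tileType, loadSlice, currentTile, tileSliceIndex,
        tileLoadOp.getLayout());
    rewriter.create<scf::YieldOp>(loc, tileUpdate.getResult());

    rewriter.setInsertionPointAfter(forOp);
    rewriter.replaceOp(tileLoadOp, forOp.getResult(0));
    return success();
  }
};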

/// Lower `arm_sme.tile_store` to a loop over the tile slices and store each
/// slice using `arm_sme.store_tile_slice`.
///
///  BEFORE:
///  ```mlir
///  arm_sme.tile_store %tile, %dest[%c0, %c0] layout<vertical>
///    : memref<?x?xi32>, vector<[4]x[4]xi32>
///  ```
///
///  AFTER:
///  ```mlir
///  %vscale = vector.vscale
///  %c0 = arith.constant 0 : index
///  %c1 = arith.constant 1 : index
///  %min_svl_s = arith.constant 4 : index
///  %svl_s = arith.muli %min_svl_s, %vscale : index
///  scf.for %tile_slice_idx = %c0 to %svl_s step %c1 {
///    arm_sme.store_tile_slice %tile, %tile_slice_idx, %dest[%tile_slice_idx],
///      layout<vertical> : memref<?x?xi32>, vector<[4]x[4]xi32>
///  }
///  ```
struct TileStoreOpConversion : public OpRewritePattern<arm_sme::TileStoreOp> {
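  using OpRewritePattern<arm_sme::TileStoreOp>::OpRewritePattern;

  // Sketch: reuse the tile-slice loop helper (no carried tile value) and emit
  // one 'arm_sme.store_tile_slice' per slice. Accessor names and the store
  // builder argument order are assumptions taken from the op definitions.
  LogicalResult matchAndRewrite(arm_sme::TileStoreOp tileStoreOp,
                                PatternRewriter &rewriter) const override {
    auto loc = tileStoreOp.getLoc();
    auto tileType = tileStoreOp.getVectorType();

    auto forOp = createLoadStoreForOverTileSlices(
        rewriter, loc, tileType, tileStoreOp.getIndices(),
        tileStoreOp.getMemRefType().getRank(), tileStoreOp.getMask(),
        [&](Value tileSliceIndex, ValueRange memrefIndices, Value predicate) {
          rewriter.create<arm_sme::StoreTileSliceOp>(
              loc, tileStoreOp.getValueToStore(), tileSliceIndex, predicate,
              tileStoreOp.getBase(), memrefIndices, tileStoreOp.getLayout());
        });
    if (failed(forOp))
      return failure();

    rewriter.eraseOp(tileStoreOp);
    return success();
  }
};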

} // namespace

void mlir::populateArmSMEToSCFConversionPatterns(RewritePatternSet &patterns) {
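  // Register the conversion patterns defined above.
  patterns.add<TileLoadOpConversion, TileLoadOpWithMaskAndPadNonZeroConversion,
               TileStoreOpConversion>(patterns.getContext());
}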

namespace {

struct ConvertArmSMEToSCFPass
    : public impl::ConvertArmSMEToSCFBase<ConvertArmSMEToSCFPass> {
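  // Sketch of the pass driver: mark the ops lowered here as illegal and run a
  // partial conversion with the patterns above. The exact legality
  // configuration upstream may differ.
  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    ConversionTarget target(getContext());
    populateArmSMEToSCFConversionPatterns(patterns);
    target.addLegalDialect<arm_sme::ArmSMEDialect, vector::VectorDialect,
                           arith::ArithDialect, scf::SCFDialect>();
    target.addIllegalOp<arm_sme::TileLoadOp, arm_sme::TileStoreOp>();
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};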

} // namespace

std::unique_ptr<Pass> mlir::createConvertArmSMEToSCFPass() {
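  return std::make_unique<ConvertArmSMEToSCFPass>();
}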