llvm/lldb/test/API/linux/aarch64/mte_tag_access/main.c

#include <arm_acle.h>
#include <asm/hwcap.h>
#include <asm/mman.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

// This file uses ACLE intrinsics as detailed in:
// https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics?lang=en
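// The intrinsics used below each map onto a single MTE instruction:
//   __arm_mte_set_tag           -> stg  (store allocation tag)
//   __arm_mte_increment_tag     -> addg (increment the logical tag)
//   __arm_mte_create_random_tag -> irg  (insert random tag)
//   __arm_mte_ptrdiff           -> subp (subtract, ignoring tag bits)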

// mmap a single page with the given protection flags, exiting on failure so
// that a failed mapping aborts the test early instead of crashing it later.
char *checked_mmap(size_t page_size, int prot) {
  char *ptr = mmap(0, page_size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ptr == MAP_FAILED)
    exit(1);
  return ptr;
}

int main(int argc, char const *argv[]) {
  // We assume that the test runner has checked we're on an MTE system

  // Enable the tagged address ABI with synchronous tag check faults, so a
  // tag mismatch raises SIGSEGV immediately.
  if (prctl(PR_SET_TAGGED_ADDR_CTRL,
            PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                // Allow all 16 tag values to be generated by the addg
                // instruction that __arm_mte_increment_tag produces.
                (0xffff << PR_MTE_TAG_SHIFT),
            0, 0, 0)) {
    return 1;
  }

  size_t page_size = sysconf(_SC_PAGESIZE);

  // We're going to mmap pages in this order:
  // <high address>
  // MTE read/write
  // MTE read/write executable
  // non MTE
  // MTE read only
  // <low address>
  //
  // This means that the first two MTE pages end up next to each other.
  // Since the second one is also executable, it gets its own entry in
  // /proc/<pid>/smaps despite the two being adjacent.
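  // PROT_MTE (from <asm/mman.h>) requests allocation tag storage for a
  // mapping; without it the pages have no tags to read or set.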
  int mte_prot = PROT_READ | PROT_MTE;

  char *mte_buf_2 = checked_mmap(page_size, mte_prot | PROT_WRITE);
  char *mte_buf = checked_mmap(page_size, mte_prot | PROT_WRITE | PROT_EXEC);
  // We expect the kernel to place the second mapping one page below the
  // first, so that the two are adjacent.
  if (mte_buf_2 - mte_buf != page_size)
    return 1;

  char *non_mte_buf = checked_mmap(page_size, PROT_READ);
  char *mte_read_only = checked_mmap(page_size, mte_prot);

  // Target value for "memory find" testing. The count of 4 copies the
  // characters of "LLDB" but no trailing NUL.
  strncpy(mte_buf + 128, "LLDB", 4);

  // Set incrementing allocation tags up to the end of the first page. mmap
  // returns untagged pointers, so mte_buf starts with logical tag 0.
  char *tagged_ptr = mte_buf;
  // __arm_mte_ptrdiff ignores the tag bits when subtracting the addresses
  while (__arm_mte_ptrdiff(tagged_ptr, mte_buf) < page_size) {
    // Set the allocation tag for this granule
    __arm_mte_set_tag(tagged_ptr);
    // Advance by 16 bytes, the MTE granule size, and increment the logical
    // tag by 1. Earlier we allowed all tag values, so this gives us an
    // incrementing pattern 0-0xF wrapping back to 0.
    tagged_ptr = __arm_mte_increment_tag(tagged_ptr + 16, 1);
  }
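  // From this point, an ordinary load or store through a pointer whose
  // logical tag does not match a granule's allocation tag raises a
  // synchronous SIGSEGV (per PR_MTE_TCF_SYNC above).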

  // Tag the original pointer with 9. The second argument is an exclusion
  // mask, so ~(1 << 9) allows only tag 9 to be chosen.
  mte_buf = __arm_mte_create_random_tag(mte_buf, ~(1 << 9));
  // Use a different tag so that mte_buf_alt_tag > mte_buf if the tag bits
  // are not removed before the addresses are compared.
  char *mte_buf_alt_tag = __arm_mte_create_random_tag(mte_buf, ~(1 << 10));

  // The memory tag manager should be removing the whole top byte, not just
  // the tag bits. So fill bits 63-60 with something non-zero, so that we
  // fail if only the tag bits are removed.
#define SET_TOP_NIBBLE(ptr, value)                                             \
  (char *)((size_t)(ptr) | ((size_t)((value) & 0xf) << 60))
  // mte_buf_alt_tag's top nibble is > mte_buf's, to check that lldb removes
  // the whole top byte, not just the tag bits, when making ranges.
  mte_buf = SET_TOP_NIBBLE(mte_buf, 0xA);
  mte_buf_alt_tag = SET_TOP_NIBBLE(mte_buf_alt_tag, 0xB);
  mte_buf_2 = SET_TOP_NIBBLE(mte_buf_2, 0xC);
  mte_read_only = SET_TOP_NIBBLE(mte_read_only, 0xD);
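  // The MTE logical tag lives in bits 59-56, so setting bits 63-60 leaves
  // it intact. mte_buf, for example, now has tag 9 and top nibble 0xA.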

// lldb's top level commands should remove all non-address bits, including
// pointer authentication signatures. pacdza signs ptr with PAC data key A
// (zero modifier); the signature goes in bits other than the top byte. The
// "+r" constraint is needed because pacdza modifies its operand in place.
#define sign_ptr(ptr) __asm__ __volatile__("pacdza %0" : "+r"(ptr))
  sign_ptr(mte_buf);
  sign_ptr(mte_buf_alt_tag);
  sign_ptr(mte_buf_2);
  sign_ptr(mte_read_only);
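  // mte_buf and mte_buf_alt_tag now carry MTE logical tags, and all four
  // signed pointers carry a non-zero top nibble plus a PAC signature. lldb
  // must strip all of these non-address bits to get the raw addresses.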

  // Breakpoint here
  return 0;
}