chromium/third_party/breakpad/breakpad/src/client/linux/minidump_writer/linux_dumper.cc

// Copyright 2010 Google LLC
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// linux_dumper.cc: Implement google_breakpad::LinuxDumper.
// See linux_dumper.h for details.

// This code deals with the mechanics of getting information about a crashed
// process. Since this code may run in a compromised address space, the same
// rules apply as detailed at the top of minidump_writer.h: no libc calls and
// use the alternative allocator.
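// Concretely, that means string and memory helpers come from
// common/linux/linux_libc_support.h (my_strlen, my_strncmp, my_strlcpy, ...),
// system calls go through the sys_* wrappers from linux_syscall_support.h,
// and dynamic storage is backed by the crash-time PageAllocator (for example
// via wasteful_vector) rather than the default heap.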

#ifdef HAVE_CONFIG_H
#include <config.h>  // Must come first
#endif

#include "client/linux/minidump_writer/linux_dumper.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <limits.h>
#include <stddef.h>
#include <string.h>

#include "client/linux/minidump_writer/line_reader.h"
#include "common/linux/elfutils.h"
#include "common/linux/file_id.h"
#include "common/linux/linux_libc_support.h"
#include "common/linux/memory_mapped_file.h"
#include "common/linux/safe_readlink.h"
#include "google_breakpad/common/minidump_exception_linux.h"
#include "third_party/lss/linux_syscall_support.h"

using google_breakpad::elf::FileID;

#if defined(__ANDROID__)

// Android packed relocations definitions are not yet available from the
// NDK header files, so we have to provide them manually here.
#ifndef DT_LOOS
#define DT_LOOS 0x6000000d
#endif
#ifndef DT_ANDROID_REL
static const int DT_ANDROID_REL = DT_LOOS + 2;
#endif
#ifndef DT_ANDROID_RELA
static const int DT_ANDROID_RELA = DT_LOOS + 4;
#endif
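// With DT_LOOS == 0x6000000d, these work out to 0x6000000f (DT_ANDROID_REL)
// and 0x60000011 (DT_ANDROID_RELA), the dynamic tags emitted by Android's
// relocation packer.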

#endif  // __ANDROID__

static const char kMappedFileUnsafePrefix[] = "/dev/ashmem/";
static const char kDeletedSuffix[] = " (deleted)";

inline static bool IsMappedFileOpenUnsafe(
    const google_breakpad::MappingInfo& mapping) {}

namespace google_breakpad {

namespace {

bool MappingContainsAddress(const MappingInfo& mapping, uintptr_t address) {}

#if defined(__CHROMEOS__)

// Recover memory mappings before writing dump on ChromeOS
//
// On Linux, breakpad relies on /proc/[pid]/maps to associate addresses with
// symbols. ChromeOS' hugepage implementation replaces some segments with
// anonymous private pages, a restriction of the Linux kernel implementation
// at the time of writing. As a result, breakpad can no longer symbolize
// addresses that fall in text segments replaced with hugepages.
//
// This postprocessing step tries to recover those mappings. Because hugepages
// are always inserted between .text sections, it infers the names and offsets
// of the anonymous segments by looking at the segments that immediately
// precede and follow them.
//
// For example, a text segment before hugepage optimization
//   02001000-03002000 r-xp /opt/google/chrome/chrome
//
// can be broken into
//   02001000-02200000 r-xp /opt/google/chrome/chrome
//   02200000-03000000 r-xp
//   03000000-03002000 r-xp /opt/google/chrome/chrome
//
// For more details, see:
// crbug.com/628040 ChromeOS' use of hugepages confuses crash symbolization

// Copied from CrOS' hugepage implementation, which is unlikely to change.
// The hugepage size is 2M.
const unsigned int kHpageShift = 21;
const size_t kHpageSize = (1 << kHpageShift);
const size_t kHpageMask = (~(kHpageSize - 1));
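// For illustration: with kHpageShift == 21 these constants describe 2 MiB
// pages, and (addr & kHpageMask) == addr holds exactly when addr is
// 2 MiB-aligned, which is how the matching code below tests alignment of
// sizes and start addresses.
static_assert(kHpageSize == 2 * 1024 * 1024, "hugepages are 2 MiB");
static_assert(~kHpageMask == kHpageSize - 1,
              "the mask clears exactly the low kHpageShift bits");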

// Find and merge anonymous r-xp segments with surrounding named segments.
// There are two cases:

// Case 1: curr, next
//   curr is anonymous
//   curr is r-xp
//   curr.size >= 2M
//   curr.size is a multiple of 2M.
//   next is backed by some file.
//   curr and next are contiguous.
//   offset(next) == sizeof(curr)
void TryRecoverMappings(MappingInfo* curr, MappingInfo* next) {
  // Merged segments are marked with size = 0.
  if (curr->size == 0 || next->size == 0)
    return;

  if (curr->size >= kHpageSize &&
      curr->exec &&
      (curr->size & kHpageMask) == curr->size &&
      (curr->start_addr & kHpageMask) == curr->start_addr &&
      curr->name[0] == '\0' &&
      next->name[0] != '\0' &&
      curr->start_addr + curr->size == next->start_addr &&
      curr->size == next->offset) {

    // matched
    my_strlcpy(curr->name, next->name, NAME_MAX);
    if (next->exec) {
      // (curr, next)
      curr->size += next->size;
      next->size = 0;
    }
  }
}
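// For example (illustrative numbers), given the adjacent pair
//   02200000-03000000 r-xp                                     (curr, anon)
//   03000000-03002000 r-xp 00e00000 /opt/google/chrome/chrome  (next)
// curr is 2 MiB-aligned, a multiple of 2 MiB in size (14 MiB), contiguous
// with next, and next's file offset equals curr's size, so curr takes next's
// name and, because next is also executable, the two merge into a single
// 02200000-03002000 mapping.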

// Case 2: prev, curr, next
//   curr is anonymous
//   curr is r-xp
//   curr.size >= 2M
//   curr.size is a multiple of 2M.
//   next and prev are backed by the same file.
//   prev, curr and next are contiguous.
//   offset(next) == offset(prev) + sizeof(prev) + sizeof(curr)
void TryRecoverMappings(MappingInfo* prev, MappingInfo* curr,
                        MappingInfo* next) {
  // Merged segments are marked with size = 0.
  if (prev->size == 0 || curr->size == 0 || next->size == 0)
    return;

  if (curr->size >= kHpageSize &&
      curr->exec &&
      (curr->size & kHpageMask) == curr->size &&
      (curr->start_addr & kHpageMask) == curr->start_addr &&
      curr->name[0] == '\0' &&
      next->name[0] != '\0' &&
      curr->start_addr + curr->size == next->start_addr &&
      prev->start_addr + prev->size == curr->start_addr &&
      my_strncmp(prev->name, next->name, NAME_MAX) == 0 &&
      next->offset == prev->offset + prev->size + curr->size) {

    // matched
    my_strlcpy(curr->name, prev->name, NAME_MAX);
    if (prev->exec) {
      curr->offset = prev->offset;
      curr->start_addr = prev->start_addr;
      if (next->exec) {
        // (prev, curr, next)
        curr->size += prev->size + next->size;
        prev->size = 0;
        next->size = 0;
      } else {
        // (prev, curr), next
        curr->size += prev->size;
        prev->size = 0;
      }
    } else {
      curr->offset = prev->offset + prev->size;
      if (next->exec) {
        // prev, (curr, next)
        curr->size += next->size;
        next->size = 0;
      } else {
        // prev, curr, next
      }
    }
  }
}
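// Applied to the example at the top of this section, the three chrome
// segments (named r-xp, anonymous r-xp, named r-xp) match this case: the
// anonymous middle segment takes the name, offset and start address of the
// first segment, and since all three are executable they collapse back into
// the single 02001000-03002000 r-xp /opt/google/chrome/chrome mapping.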

// mappings_ is sorted except for the first entry.
// This function tries to merge segments into the first entry,
// then checks the remaining, sorted entries.
// See LinuxDumper::EnumerateMappings().
void CrOSPostProcessMappings(wasteful_vector<MappingInfo*>& mappings) {
  // Find the candidate "next" to first segment, which is the only one that
  // could be out-of-order.
  size_t l = 1;
  size_t r = mappings.size();
  size_t next = mappings.size();
  while (l < r) {
    int m = (l + r) / 2;
    if (mappings[m]->start_addr > mappings[0]->start_addr)
      r = next = m;
    else
      l = m + 1;
  }
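  // |next| is now the index of the first in-order entry whose start address
  // exceeds mappings[0]'s, i.e. one past the position where the out-of-order
  // first entry belongs; the rotate below moves mappings[0] to index next - 1.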

  // The range that contains the entry point is
  // [first_start_addr, first_end_addr).
  size_t first_start_addr = mappings[0]->start_addr;
  size_t first_end_addr = mappings[0]->start_addr + mappings[0]->size;

  // Put the out-of-order segment in order.
  std::rotate(mappings.begin(), mappings.begin() + 1, mappings.begin() + next);

  // Iterate through normal, sorted cases.
  // Normal case 1.
  for (size_t i = 0; i < mappings.size() - 1; i++)
    TryRecoverMappings(mappings[i], mappings[i + 1]);

  // Normal case 2.
  for (size_t i = 0; i < mappings.size() - 2; i++)
    TryRecoverMappings(mappings[i], mappings[i + 1], mappings[i + 2]);

  // Drop the merged (size == 0) segments and compact the rest in place.
  size_t f, e;
  for (f = e = 0; e < mappings.size(); e++)
    if (mappings[e]->size > 0)
      mappings[f++] = mappings[e];
  mappings.resize(f);

  // The entry point is in the first mapping. We want to find the location
  // of the entry point after merging segments. To do this, we find the
  // mapping that covers the first mapping from the original mapping list.
  // If that mapping is not already at the beginning, we move it there via
  // a right rotate using reverse iterators.
  for (l = 0; l < mappings.size(); l++) {
    if (mappings[l]->start_addr <= first_start_addr
        && (mappings[l]->start_addr + mappings[l]->size >= first_end_addr))
      break;
  }
  if (l > 0) {
    r = mappings.size();
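    // For example, with four mappings [A, B, C, D] and l == 2 (C covers the
    // original first mapping), the reverse-iterator rotate below yields
    // [C, A, B, D]: a right rotate of the first l + 1 entries.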
    std::rotate(mappings.rbegin() + r - l - 1, mappings.rbegin() + r - l,
                mappings.rend());
  }
}

#endif  // __CHROMEOS__

}  // namespace

// All interesting auxv entry types are below AT_SYSINFO_EHDR
#define AT_MAX AT_SYSINFO_EHDR

LinuxDumper::LinuxDumper(pid_t pid, const char* root_prefix) {}

LinuxDumper::~LinuxDumper() {}

bool LinuxDumper::Init() {}

bool LinuxDumper::LateInit() {}

bool
LinuxDumper::ElfFileIdentifierForMapping(const MappingInfo& mapping,
                                         bool member,
                                         unsigned int mapping_id,
                                         wasteful_vector<uint8_t>& identifier) {}

void LinuxDumper::SetCrashInfoFromSigInfo(const siginfo_t& siginfo) {}

const char* LinuxDumper::GetCrashSignalString() const {}

bool LinuxDumper::GetMappingAbsolutePath(const MappingInfo& mapping,
                                         char path[PATH_MAX]) const {}

namespace {
// Find the shared object name (SONAME) by examining the ELF information
// for |mapping|. If the SONAME is found, copy it into the passed buffer
// |soname| and return true. The size of the buffer is |soname_size|.
// The SONAME will be truncated if it is too long to fit in the buffer.
bool ElfFileSoName(const LinuxDumper& dumper,
    const MappingInfo& mapping, char* soname, size_t soname_size) {}
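// A sketch of the usual flow for the helper above: skip mappings for which
// IsMappedFileOpenUnsafe() returns true, resolve the on-disk path with
// dumper.GetMappingAbsolutePath(), map the file read-only with
// MemoryMappedFile at the mapping's offset, and then look for a DT_SONAME
// entry in the ELF dynamic section (see common/linux/elfutils.h).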

}  // namespace


void LinuxDumper::GetMappingEffectiveNameAndPath(const MappingInfo& mapping,
                                                 char* file_path,
                                                 size_t file_path_size,
                                                 char* file_name,
                                                 size_t file_name_size) {}

bool LinuxDumper::ReadAuxv() {}

bool LinuxDumper::EnumerateMappings() {}

#if defined(__ANDROID__)

bool LinuxDumper::GetLoadedElfHeader(uintptr_t start_addr, ElfW(Ehdr)* ehdr) {
  CopyFromProcess(ehdr, pid_,
                  reinterpret_cast<const void*>(start_addr),
                  sizeof(*ehdr));
  return my_memcmp(&ehdr->e_ident, ELFMAG, SELFMAG) == 0;
}

void LinuxDumper::ParseLoadedElfProgramHeaders(ElfW(Ehdr)* ehdr,
                                               uintptr_t start_addr,
                                               uintptr_t* min_vaddr_ptr,
                                               uintptr_t* dyn_vaddr_ptr,
                                               size_t* dyn_count_ptr) {
  uintptr_t phdr_addr = start_addr + ehdr->e_phoff;

  const uintptr_t max_addr = UINTPTR_MAX;
  uintptr_t min_vaddr = max_addr;
  uintptr_t dyn_vaddr = 0;
  size_t dyn_count = 0;

  for (size_t i = 0; i < ehdr->e_phnum; ++i) {
    ElfW(Phdr) phdr;
    CopyFromProcess(&phdr, pid_,
                    reinterpret_cast<const void*>(phdr_addr),
                    sizeof(phdr));
    if (phdr.p_type == PT_LOAD && phdr.p_vaddr < min_vaddr) {
      min_vaddr = phdr.p_vaddr;
    }
    if (phdr.p_type == PT_DYNAMIC) {
      dyn_vaddr = phdr.p_vaddr;
      dyn_count = phdr.p_memsz / sizeof(ElfW(Dyn));
    }
    phdr_addr += sizeof(phdr);
  }

  *min_vaddr_ptr = min_vaddr;
  *dyn_vaddr_ptr = dyn_vaddr;
  *dyn_count_ptr = dyn_count;
}

bool LinuxDumper::HasAndroidPackedRelocations(uintptr_t load_bias,
                                              uintptr_t dyn_vaddr,
                                              size_t dyn_count) {
  uintptr_t dyn_addr = load_bias + dyn_vaddr;
  for (size_t i = 0; i < dyn_count; ++i) {
    ElfW(Dyn) dyn;
    CopyFromProcess(&dyn, pid_,
                    reinterpret_cast<const void*>(dyn_addr),
                    sizeof(dyn));
    if (dyn.d_tag == DT_ANDROID_REL || dyn.d_tag == DT_ANDROID_RELA) {
      return true;
    }
    dyn_addr += sizeof(dyn);
  }
  return false;
}

uintptr_t LinuxDumper::GetEffectiveLoadBias(ElfW(Ehdr)* ehdr,
                                            uintptr_t start_addr) {
  uintptr_t min_vaddr = 0;
  uintptr_t dyn_vaddr = 0;
  size_t dyn_count = 0;
  ParseLoadedElfProgramHeaders(ehdr, start_addr,
                               &min_vaddr, &dyn_vaddr, &dyn_count);
  // If |min_vaddr| is non-zero and we find Android packed relocation tags,
  // return the effective load bias.
  if (min_vaddr != 0) {
    const uintptr_t load_bias = start_addr - min_vaddr;
    if (HasAndroidPackedRelocations(load_bias, dyn_vaddr, dyn_count)) {
      return load_bias;
    }
  }
  // Either |min_vaddr| is zero, or it is non-zero but we did not find the
  // expected Android packed relocations tags.
  return start_addr;
}

void LinuxDumper::LatePostprocessMappings() {
  for (size_t i = 0; i < mappings_.size(); ++i) {
    // Only consider exec mappings that indicate a file path was mapped, and
    // where the ELF header indicates a mapped shared library.
    MappingInfo* mapping = mappings_[i];
    if (!(mapping->exec && mapping->name[0] == '/')) {
      continue;
    }
    ElfW(Ehdr) ehdr;
    if (!GetLoadedElfHeader(mapping->start_addr, &ehdr)) {
      continue;
    }
    if (ehdr.e_type == ET_DYN) {
      // Compute the effective load bias for this mapped library, and update
      // the mapping to hold that rather than |start_addr|, at the same time
      // adjusting |size| to account for the change in |start_addr|. Where
      // the library does not contain Android packed relocations,
      // GetEffectiveLoadBias() returns |start_addr| and the mapping entry
      // is not changed.
      const uintptr_t load_bias = GetEffectiveLoadBias(&ehdr,
                                                       mapping->start_addr);
      mapping->size += mapping->start_addr - load_bias;
      mapping->start_addr = load_bias;
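      // For example (illustrative values): a library mapped at start_addr
      // 0xd0002000 whose lowest PT_LOAD has p_vaddr 0x2000, and which carries
      // packed relocations, gets load_bias 0xd0000000; the mapping grows by
      // 0x2000 bytes and start_addr becomes the bias.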
    }
  }
}

#endif  // __ANDROID__

// Get information about the stack, given the stack pointer. We don't try to
// walk the stack since we might not have all the information needed to
// unwind. So we just grab up to 32k of stack.
bool LinuxDumper::GetStackInfo(const void** stack, size_t* stack_len,
                               uintptr_t int_stack_pointer) {}
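// In outline, GetStackInfo rounds the stack pointer down to a page boundary,
// finds the mapping that contains it via FindMapping(), and returns a window
// starting there whose length is the smaller of 32 KiB and the distance to
// the end of that mapping.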

void LinuxDumper::SanitizeStackCopy(uint8_t* stack_copy, size_t stack_len,
                                    uintptr_t stack_pointer,
                                    uintptr_t sp_offset) {}

bool LinuxDumper::StackHasPointerToMapping(const uint8_t* stack_copy,
                                           size_t stack_len,
                                           uintptr_t sp_offset,
                                           const MappingInfo& mapping) {}

// Find the mapping which the given memory address falls in.
const MappingInfo* LinuxDumper::FindMapping(const void* address) const {}

// Find the mapping which the given memory address falls in. Uses the
// unadjusted mapping address range from the kernel, rather than the
// biased range.
const MappingInfo* LinuxDumper::FindMappingNoBias(uintptr_t address) const {}

bool LinuxDumper::HandleDeletedFileInMapping(char* path) const {}

}  // namespace google_breakpad