/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/detail/MemoryIdler.h>

#include <algorithm>
#include <cassert>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstring>
#include <stdexcept>
#include <utility>

#include <folly/GLog.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/memory/MallctlHelper.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/PThread.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
#include <folly/system/Pid.h>
#include <folly/system/ThreadId.h>

// NOTE: the original macro invocation here was missing its arguments; the
// flag name, default, and help text below are a best-effort reconstruction.
FOLLY_GFLAGS_DEFINE_bool(
    folly_memory_idler_purge_arenas,
    true,
    "if enabled, folly memory-idler purges jemalloc arenas on thread idle");

namespace folly {
namespace detail {

AtomicStruct<std::chrono::steady_clock::duration>
    MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));

bool MemoryIdler::isUnmapUnusedStackAvailable() noexcept {
  // The original body was empty; this is a best-effort reconstruction.
  // Unmapping is only attempted from non-main threads: the main thread's
  // stack is grown and accounted for by the kernel rather than by pthreads,
  // so madvising it away is not safe.  The main thread is detected by
  // comparing the OS thread id with the process id.
  return getOSThreadID() != static_cast<uint64_t>(get_cached_pid());
}

void MemoryIdler::flushLocalMallocCaches() {
  // The original body was empty; this is a best-effort reconstruction.  When
  // running under jemalloc, return this thread's cached allocations to its
  // arena (and optionally purge the arena) so an idle thread does not pin
  // freed memory.
  if (!usingJEMalloc() || !mallctl || !mallctlnametomib || !mallctlbymib) {
    return;
  }
  try {
    // Flush the thread-local allocation cache back to the arena.  Errors
    // (e.g. tcache disabled) are non-fatal; this is only an optimization.
    mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);

    if (FLAGS_folly_memory_idler_purge_arenas) {
      // Ask jemalloc to purge unused dirty pages from the arena this thread
      // allocates from.
      unsigned arena = 0;
      mallctlRead("thread.arena", &arena);
      size_t mib[3];
      size_t mibLen = 3;
      if (mallctlnametomib("arena.0.purge", mib, &mibLen) == 0) {
        mib[1] = static_cast<size_t>(arena);
        mallctlbymib(mib, mibLen, nullptr, nullptr, nullptr, 0);
      }
    }
  } catch (const std::runtime_error& ex) {
    FB_LOG_ONCE(WARNING) << ex.what();
  }
}
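
// Typical use (a sketch; it mirrors the idle-wait path described in
// MemoryIdler.h rather than prescribing one): a thread that is about to
// block first waits for defaultIdleTimeout, and only then gives memory back
// before parking indefinitely.  waitWithTimeout/waitForever below are
// hypothetical stand-ins for the caller's own blocking primitives.
//
//   auto timeout = MemoryIdler::defaultIdleTimeout.load();
//   if (!waitWithTimeout(timeout)) {
//     MemoryIdler::flushLocalMallocCaches();
//     MemoryIdler::unmapUnusedStack(MemoryIdler::kDefaultStackToRetain);
//     waitForever();
//   }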

// Stack madvise isn't Linux or glibc specific, but the system calls
// and arithmetic (and bug compatibility) are not portable.  The set of
// platforms could be increased if it was useful.
#if defined(__GLIBC__) && defined(__linux__) && !FOLLY_MOBILE && \
    !FOLLY_SANITIZE_ADDRESS

static thread_local uintptr_t tls_stackLimit;
static thread_local size_t tls_stackSize;

static size_t pageSize() {
  static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
  return s_pageSize;
}

static void fetchStackLimits() {
  int err;
  pthread_attr_t attr;
  if ((err = pthread_getattr_np(pthread_self(), &attr))) {
    // some restricted environments can't access /proc
    FB_LOG_ONCE(ERROR) << "pthread_getaddr_np failed errno=" << err;
    tls_stackSize = 1;
    return;
  }
  SCOPE_EXIT {
    pthread_attr_destroy(&attr);
  };

  void* addr;
  size_t rawSize;
  if ((err = pthread_attr_getstack(&attr, &addr, &rawSize))) {
    // unexpected, but it is better to continue in prod than do nothing
    FB_LOG_ONCE(ERROR) << "pthread_attr_getstack error " << err;
    assert(false);
    tls_stackSize = 1;
    return;
  }
  if (rawSize >= (1ULL << 32)) {
    // Avoid unmapping huge swaths of memory if there is an insane
    // stack size.  The boundary of sanity is somewhat arbitrary: 4GB.
    //
    // If we went into /proc to find the actual contiguous mapped pages
    // before unmapping we wouldn't care about the stack size at all,
    // but our current strategy is to unmap the entire range that might
    // be used for the stack even if it hasn't been fully faulted-in.
    //
    // Very large stack size is a bug (hence the assert), but we can
    // carry on if we are in prod.
    FB_LOG_ONCE(ERROR) << "pthread_attr_getstack returned insane stack size "
                       << rawSize;
    assert(false);
    tls_stackSize = 1;
    return;
  }
  assert(addr != nullptr);
  assert(
      0 < PTHREAD_STACK_MIN &&
      rawSize >= static_cast<size_t>(PTHREAD_STACK_MIN));

  // glibc subtracts guard page from stack size, even though pthread docs
  // seem to imply the opposite
  size_t guardSize;
  if (pthread_attr_getguardsize(&attr, &guardSize) != 0) {
    guardSize = 0;
  }
  assert(rawSize > guardSize);

  // stack goes down, so guard page adds to the base addr
  tls_stackLimit = reinterpret_cast<uintptr_t>(addr) + guardSize;
  tls_stackSize = rawSize - guardSize;
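
  // For example (a sketch, not measured): with an 8 MiB pthread stack and a
  // 4 KiB guard page reported via the attr, the usable region is taken to
  // begin one page above addr, so tls_stackSize comes out to 8 MiB - 4 KiB.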

  assert((tls_stackLimit & (pageSize() - 1)) == 0);
}

// Returns an address inside the current stack frame, used as an
// approximation of the stack pointer.  FOLLY_NOINLINE ensures the marker
// lives in this function's own frame rather than being folded into the
// caller's.
FOLLY_NOINLINE static uintptr_t getStackPtr() {
  char marker;
  auto rv = reinterpret_cast<uintptr_t>(&marker);
  return rv;
}

void MemoryIdler::unmapUnusedStack(size_t retain) {
  if (!isUnmapUnusedStackAvailable()) {
    return;
  }

  if (tls_stackSize == 0) {
    fetchStackLimits();
  }
  if (tls_stackSize <= std::max(static_cast<size_t>(1), retain)) {
    // covers both missing stack info, and impossibly large retain
    return;
  }

  auto sp = getStackPtr();
  assert(sp >= tls_stackLimit);
  assert(sp - tls_stackLimit < tls_stackSize);

  auto end = (sp - retain) & ~(pageSize() - 1);
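  // Worked example (assuming 4 KiB pages): with sp = 0x7f00000a1234 and
  // retain = 0x8000, end rounds down to the page boundary 0x7f0000099000;
  // everything from tls_stackLimit up to end is eligible for MADV_DONTNEED,
  // while the top 32 KiB-plus of the stack stays resident.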
  if (end <= tls_stackLimit) {
    // no pages are eligible for unmapping
    return;
  }

  size_t len = end - tls_stackLimit;
  assert((len & (pageSize() - 1)) == 0);
  if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
    // It is likely that the stack vma hasn't been fully grown.  In this
    // case madvise will apply dontneed to the present vmas, then return
    // errno of ENOMEM.
    // If thread stack pages are backed by locked or huge pages, madvise will
    // fail with EINVAL. (EINVAL may also be returned if the address or length
    // are bad.) Warn in debug mode, since MemoryIdler may not function as
    // expected.
    // We can also get an EAGAIN, theoretically.
    PLOG_IF(WARNING, kIsDebug && errno == EINVAL) << "madvise failed";
    assert(errno == EAGAIN || errno == ENOMEM || errno == EINVAL);
  }
}

#else

void MemoryIdler::unmapUnusedStack(size_t /* retain */) {}

#endif

} // namespace detail
} // namespace folly