folly/folly/test/DeterministicSchedule.h

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <assert.h>
#include <errno.h>

#include <atomic>
#include <functional>
#include <list>
#include <mutex>
#include <queue>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <glog/logging.h>

#include <folly/ScopeGuard.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/detail/Futex.h>
#include <folly/lang/CustomizationPoint.h>
#include <folly/synchronization/AtomicNotification.h>
#include <folly/synchronization/detail/AtomicUtils.h>
#include <folly/synchronization/test/Semaphore.h>

namespace folly {
namespace test {

// This is ugly, but better perf for DeterministicAtomic translates
// directly to more states explored and tested
#define FOLLY_TEST_DSCHED_VLOG(...)

/* signatures of user-defined auxiliary functions */
using AuxAct = std::function<void(bool)>;
using AuxChk = std::function<void(uint64_t)>;

struct DSchedThreadId {};

class DSchedTimestamp {};

class ThreadTimestamps {};

struct ThreadInfo {};

class ThreadSyncVar {};

/**
 * DeterministicSchedule coordinates the inter-thread communication of a
 * set of threads under test, so that despite concurrency the execution is
 * the same every time.  It works by stashing a reference to the schedule
 * in a thread-local variable, then blocking all but one thread at a time.
 *
 * In order for DeterministicSchedule to work, it needs to intercept
 * all inter-thread communication.  To do this you should use
 * DeterministicAtomic<T> instead of std::atomic<T>, create threads
 * using DeterministicSchedule::thread() instead of the std::thread
 * constructor, DeterministicSchedule::join(thr) instead of thr.join(),
 * and access semaphores via the helper functions in DeterministicSchedule.
 * Locks are not yet supported, although they would be easy to add with
 * the same strategy as the mapping of Sem::wait.
 *
 * The actual schedule is defined by a function from n -> [0,n). At
 * each step, the function will be given the number of active threads
 * (n), and it returns the index of the thread that should be run next.
 * Invocations of the scheduler function will be serialized, but will
 * occur from multiple threads.  A good starting schedule is uniform(0).
 */
class DeterministicSchedule {};
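
// Example usage (an illustrative sketch with a hypothetical runCounterTest;
// it relies on the uniform()/thread()/join() members mentioned in the comment
// above, and assumes the constructor takes the scheduler function):
//
//   void runCounterTest() {
//     using namespace folly::test;
//     // Install a reproducible schedule for this thread and its children.
//     DeterministicSchedule sched(DeterministicSchedule::uniform(0));
//     DeterministicAtomic<int> counter{0};
//     std::vector<std::thread> children;
//     for (int i = 0; i < 4; ++i) {
//       // Threads must be created via DeterministicSchedule::thread() ...
//       children.push_back(
//           DeterministicSchedule::thread([&] { counter.fetch_add(1); }));
//     }
//     for (auto& t : children) {
//       // ... and joined via DeterministicSchedule::join().
//       DeterministicSchedule::join(t);
//     }
//     CHECK_EQ(4, counter.load());
//   }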

/**
 * DeterministicAtomic<T> is a drop-in replacement for std::atomic<T> that
 * cooperates with DeterministicSchedule.
 */
template <
    typename T,
    typename Schedule = DeterministicSchedule,
    template <typename> class Atom = std::atomic>
struct DeterministicAtomicImpl {};

template <typename T>
using DeterministicAtomic = DeterministicAtomicImpl<T, DeterministicSchedule>;
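
// Illustrative sketch (hypothetical Counter type, not from folly): code under
// test is typically templated on its atomic type, so DeterministicAtomic can
// be swapped in without touching the production configuration:
//
//   template <template <typename> class Atom = std::atomic>
//   struct Counter {
//     Atom<int> value{0};
//     void bump() { value.fetch_add(1, std::memory_order_relaxed); }
//   };
//
//   Counter<> prodCounter;                                  // std::atomic
//   Counter<folly::test::DeterministicAtomic> testCounter;  // under test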

/* Futex extensions for DeterministicSchedule based Futexes */
int futexWakeImpl(
    const detail::Futex<test::DeterministicAtomic>* futex,
    int count,
    uint32_t wakeMask);
detail::FutexResult futexWaitImpl(
    const detail::Futex<test::DeterministicAtomic>* futex,
    uint32_t expected,
    std::chrono::system_clock::time_point const* absSystemTime,
    std::chrono::steady_clock::time_point const* absSteadyTime,
    uint32_t waitMask);

/* Generic futex extensions to allow sharing between DeterministicAtomic and
 * BufferedDeterministicAtomic. */
template <template <typename> class Atom>
int deterministicFutexWakeImpl(
    const detail::Futex<Atom>* futex,
    std::mutex& futexLock,
    std::unordered_map<
        const detail::Futex<Atom>*,
        std::list<std::pair<uint32_t, bool*>>>& futexQueues,
    int count,
    uint32_t wakeMask);

template <template <typename> class Atom>
detail::FutexResult deterministicFutexWaitImpl(
    const detail::Futex<Atom>* futex,
    std::mutex& futexLock,
    std::unordered_map<
        const detail::Futex<Atom>*,
        std::list<std::pair<uint32_t, bool*>>>& futexQueues,
    uint32_t expected,
    std::chrono::system_clock::time_point const* absSystemTimeout,
    std::chrono::steady_clock::time_point const* absSteadyTimeout,
    uint32_t waitMask);
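
// Sketch (illustrative only; the futexLock and futexQueues statics are
// assumptions about the implementation side): the DeterministicAtomic-specific
// futexWakeImpl declared above can forward to the generic helper together with
// its own lock and wait-queue map, and BufferedDeterministicAtomic can do the
// same with its own state:
//
//   static std::mutex futexLock;
//   static std::unordered_map<
//       const detail::Futex<test::DeterministicAtomic>*,
//       std::list<std::pair<uint32_t, bool*>>>
//       futexQueues;
//
//   int futexWakeImpl(
//       const detail::Futex<test::DeterministicAtomic>* futex,
//       int count,
//       uint32_t wakeMask) {
//     return deterministicFutexWakeImpl<test::DeterministicAtomic>(
//         futex, futexLock, futexQueues, count, wakeMask);
//   }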

/**
 * Implementations of the atomic_wait API for DeterministicAtomic.  These are
 * no-ops here, which should not make a difference for a correct
 * implementation, because threads are required to have atomic operations
 * around waits and wakes.
 */
template <typename Integer>
void tag_invoke(
    cpo_t<atomic_wait>, const DeterministicAtomic<Integer>*, Integer) {}
template <typename Integer, typename Clock, typename Duration>
std::cv_status tag_invoke(
    cpo_t<atomic_wait_until>,
    const DeterministicAtomic<Integer>*,
    Integer,
    const std::chrono::time_point<Clock, Duration>&) {
  return std::cv_status::no_timeout;
}
template <typename Integer>
void tag_invoke(
    cpo_t<atomic_notify_one>, const DeterministicAtomic<Integer>*) {}
template <typename Integer>
void tag_invoke(
    cpo_t<atomic_notify_all>, const DeterministicAtomic<Integer>*) {}

/**
 * DeterministicMutex is a drop-in replacement for std::mutex that
 * cooperates with DeterministicSchedule.
 */
struct DeterministicMutex {};
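
// Illustrative sketch (hypothetical Registry type, not from folly): as a
// drop-in std::mutex replacement, DeterministicMutex provides the standard
// lock()/unlock() interface, so it works with std::lock_guard and can be
// substituted in code that is parameterized on its mutex type:
//
//   template <typename Mutex = std::mutex>
//   struct Registry {
//     Mutex lock;
//     std::vector<int> items;
//     void add(int v) {
//       std::lock_guard<Mutex> g(lock);
//       items.push_back(v);
//     }
//   };
//
//   Registry<folly::test::DeterministicMutex> testRegistry;  // under test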
} // namespace test

template <>
Getcpu::Func AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc();

} // namespace folly