chromium/base/task/single_thread_task_executor_unittest.cc

// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/single_thread_task_executor.h"

#include <stddef.h>
#include <stdint.h>

#include <optional>
#include <string>
#include <vector>

#include "base/compiler_specific.h"
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_pump_for_io.h"
#include "base/message_loop/message_pump_type.h"
#include "base/pending_task.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/current_thread.h"
#include "base/task/single_thread_task_runner.h"
#include "base/task/task_observer.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/test/bind.h"
#include "base/test/gtest_util.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/test_simple_task_runner.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequence_local_storage_slot.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/android/java_handler_thread.h"
#include "base/android/jni_android.h"
#include "base/test/android/java_handler_thread_helpers.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/message_loop/message_pump_win.h"
#include "base/process/memory.h"
#include "base/win/current_module.h"
#include "base/win/message_window.h"
#include "base/win/scoped_handle.h"
#endif

using ::testing::IsNull;
using ::testing::NotNull;

namespace base {

// TODO(darin): Platform-specific MessageLoop tests should be grouped together
// to avoid chopping this file up with so many #ifdefs.

namespace {

class Foo : public RefCounted<Foo> {};

// This function runs slowly to simulate a large amount of work being done.
static void SlowFunc(TimeDelta pause,
                     int* quit_counter,
                     base::OnceClosure quit_closure) {}

// This function records the time when Run was called in a TimeTicks object,
// which is useful for building a variety of SingleThreadTaskExecutor tests.
static void RecordRunTimeFunc(TimeTicks* run_time,
                              int* quit_counter,
                              base::OnceClosure quit_closure) {}

enum TaskType {};

// Saves the order in which the tasks executed.
struct TaskItem {};

std::ostream& operator<<(std::ostream& os, TaskType type) {}

std::ostream& operator<<(std::ostream& os, const TaskItem& item) {}

class TaskList {};

class DummyTaskObserver : public TaskObserver {};

// A method which reposts itself |depth| times.
void RecursiveFunc(TaskList* order, int cookie, int depth) {}

void QuitFunc(TaskList* order, int cookie, base::OnceClosure quit_closure) {}

#if BUILDFLAG(IS_WIN)

void SubPumpFunc(OnceClosure on_done) {
  CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop
      allow_nestable_tasks;
  MSG msg;
  while (::GetMessage(&msg, NULL, 0, 0)) {
    ::TranslateMessage(&msg);
    ::DispatchMessage(&msg);
  }
  std::move(on_done).Run();
}

const wchar_t kMessageBoxTitle[] = L"SingleThreadTaskExecutor Unit Test";

// Functions like ::MessageBox() implicitly start a "modal message loop". Modal
// dialog boxes, common controls (like OpenFile) and the StartDoc printing
// function can all cause implicit message loops.
void MessageBoxFunc(TaskList* order, int cookie, bool is_reentrant) {
  order->RecordStart(MESSAGEBOX, cookie);
  std::optional<CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop>
      maybe_allow_nesting;
  if (is_reentrant)
    maybe_allow_nesting.emplace();
  ::MessageBox(NULL, L"Please wait...", kMessageBoxTitle, MB_OK);
  order->RecordEnd(MESSAGEBOX, cookie);
}

// Will end the MessageBox.
void EndDialogFunc(TaskList* order, int cookie) {
  order->RecordStart(ENDDIALOG, cookie);
  HWND window = GetActiveWindow();
  if (window != NULL) {
    EXPECT_NE(::EndDialog(window, IDCONTINUE), 0);
    // Cheap way to signal that the window wasn't found if RecordEnd() isn't
    // called.
    order->RecordEnd(ENDDIALOG, cookie);
  }
}

// A method which posts a RecursiveFunc that will want to run while
// ::MessageBox() is active.
void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
                      HANDLE event,
                      bool expect_window,
                      TaskList* order,
                      bool message_box_is_reentrant,
                      base::OnceClosure quit_closure) {
  task_runner->PostTask(FROM_HERE, BindOnce(&RecursiveFunc, order, 1, 2));
  task_runner->PostTask(
      FROM_HERE, BindOnce(&MessageBoxFunc, order, 2, message_box_is_reentrant));
  task_runner->PostTask(FROM_HERE, BindOnce(&RecursiveFunc, order, 3, 2));
  // The trick here is that for nested task processing, this task will be
  // run _inside_ the MessageBox message loop, dismissing the MessageBox
  // without the user ever seeing it.
  // For non-nested task processing, this will execute _after_ the MessageBox
  // has been dismissed by the code below, where expect_window is true.
  task_runner->PostTask(FROM_HERE, BindOnce(&EndDialogFunc, order, 4));
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&QuitFunc, order, 5, std::move(quit_closure)));

  // Make sure every task is posted before the main thread starts running its
  // message loop.
  ASSERT_TRUE(SetEvent(event));

  // Poll for the MessageBox. Don't do this at home! Polling this fast means
  // you would never even notice that a MessageBox was shown.
  for (; expect_window;) {
    HWND window = ::FindWindowW(L"#32770", kMessageBoxTitle);
    if (window) {
      // Dismiss it.
      for (;;) {
        HWND button = ::FindWindowExW(window, NULL, L"Button", NULL);
        if (button != NULL) {
          EXPECT_EQ(0, ::SendMessage(button, WM_LBUTTONDOWN, 0, 0));
          EXPECT_EQ(0, ::SendMessage(button, WM_LBUTTONUP, 0, 0));
          break;
        }
      }
      break;
    }
  }
}

#endif  // BUILDFLAG(IS_WIN)

void Post128KTasksThenQuit(SingleThreadTaskRunner* executor_task_runner,
                           TimeTicks begin_ticks,
                           TimeTicks last_post_ticks,
                           TimeDelta slowest_delay,
                           OnceClosure on_done,
                           int num_posts_done = 0) {}

#if BUILDFLAG(IS_WIN)

class TestIOHandler : public MessagePumpForIO::IOHandler {
 public:
  TestIOHandler(const wchar_t* name, HANDLE signal);

  void OnIOCompleted(MessagePumpForIO::IOContext* context,
                     DWORD bytes_transfered,
                     DWORD error) override;

  void Init();
  OVERLAPPED* context() { return &context_.overlapped; }
  DWORD size() { return sizeof(buffer_); }

 private:
  char buffer_[48];
  MessagePumpForIO::IOContext context_;
  HANDLE signal_;
  win::ScopedHandle file_;
};

TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal)
    : MessagePumpForIO::IOHandler(FROM_HERE), signal_(signal) {
  memset(buffer_, 0, sizeof(buffer_));

  file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
                       FILE_FLAG_OVERLAPPED, NULL));
  EXPECT_TRUE(file_.is_valid());
}

void TestIOHandler::Init() {
  CurrentIOThread::Get()->RegisterIOHandler(file_.get(), this);

  DWORD read;
  EXPECT_FALSE(ReadFile(file_.get(), buffer_, size(), &read, context()));
  EXPECT_EQ(static_cast<DWORD>(ERROR_IO_PENDING), GetLastError());
}

void TestIOHandler::OnIOCompleted(MessagePumpForIO::IOContext* context,
                                  DWORD bytes_transfered,
                                  DWORD error) {
  ASSERT_TRUE(context == &context_);
  ASSERT_TRUE(SetEvent(signal_));
}

void RunTest_IOHandler() {
  win::ScopedHandle callback_called(CreateEvent(NULL, TRUE, FALSE, NULL));
  ASSERT_TRUE(callback_called.is_valid());

  const wchar_t* kPipeName = L"\\\\.\\pipe\\iohandler_pipe";
  win::ScopedHandle server(
      CreateNamedPipe(kPipeName, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
  ASSERT_TRUE(server.is_valid());

  Thread thread("IOHandler test");
  Thread::Options options;
  options.message_pump_type = MessagePumpType::IO;
  ASSERT_TRUE(thread.StartWithOptions(std::move(options)));

  TestIOHandler handler(kPipeName, callback_called.get());
  thread.task_runner()->PostTask(
      FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler)));
  // Make sure the thread runs and sleeps for lack of work.
  PlatformThread::Sleep(Milliseconds(100));

  const char buffer[] = "Hello there!";
  DWORD written;
  EXPECT_TRUE(WriteFile(server.get(), buffer, sizeof(buffer), &written, NULL));

  DWORD result = WaitForSingleObject(callback_called.get(), 1000);
  EXPECT_EQ(WAIT_OBJECT_0, result);

  thread.Stop();
}

#endif  // BUILDFLAG(IS_WIN)

}  // namespace

//-----------------------------------------------------------------------------
// Each test is run against each type of SingleThreadTaskExecutor.  That way we
// are sure that SingleThreadTaskExecutor works properly in all configurations.
// Of course, in some cases, a unit test may only be for a particular type of
// loop.

class SingleThreadTaskExecutorTypedTest
    : public ::testing::TestWithParam<MessagePumpType> {};
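
// The fixture body above is elided. As an illustrative sketch (not part of the
// original suite), a typed test typically constructs its executor from the
// pump type under test via GetParam(); the test name and body below are
// assumptions added for illustration only.
TEST_P(SingleThreadTaskExecutorTypedTest, IllustrativePostTaskAndQuit) {
  SingleThreadTaskExecutor executor(GetParam());
  RunLoop run_loop;
  bool task_ran = false;
  // Post a task to the executor's task runner and spin the RunLoop until the
  // task quits it.
  executor.task_runner()->PostTask(FROM_HERE, BindLambdaForTesting([&] {
                                     task_ran = true;
                                     run_loop.Quit();
                                   }));
  run_loop.Run();
  EXPECT_TRUE(task_ran);
}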

TEST_P(SingleThreadTaskExecutorTypedTest, PostTask) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_Basic) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InDelayOrder) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder_2) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder_3) {}

TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_SharedTimer) {}

namespace {

// This is used to inject a test point for recording the destructor calls for
// Closure objects sent to MessageLoop::PostTask(). The usage is awkward
// because we are trying to hook the actual destruction, which is not a common
// operation.
class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {};
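
// The probe above is elided; here is a minimal illustrative sketch (an
// assumption, not the real class) of the technique the comment describes: a
// refcounted object bound into a closure, whose destructor records that the
// pending task was destroyed.
class IllustrativeDeletionProbe
    : public RefCounted<IllustrativeDeletionProbe> {
 public:
  explicit IllustrativeDeletionProbe(bool* was_deleted)
      : was_deleted_(was_deleted) {}

  // Bound as the task body, e.g.:
  //   runner->PostTask(FROM_HERE,
  //                    BindOnce(&IllustrativeDeletionProbe::Run,
  //                             MakeRefCounted<IllustrativeDeletionProbe>(
  //                                 &was_deleted)));
  void Run() {}

 private:
  friend class RefCounted<IllustrativeDeletionProbe>;
  ~IllustrativeDeletionProbe() { *was_deleted_ = true; }

  const raw_ptr<bool> was_deleted_;
};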

}  // namespace

// TODO(darin): SingleThreadTaskExecutor does not support deleting all tasks in
// the destructor.
// Fails, http://crbug.com/50272.
TEST_P(SingleThreadTaskExecutorTypedTest, DISABLED_EnsureDeletion) {}

// TODO(darin): SingleThreadTaskExecutor does not support deleting all tasks in
// the destructor.
// Fails, http://crbug.com/50272.
TEST_P(SingleThreadTaskExecutorTypedTest, DISABLED_EnsureDeletion_Chain) {}

namespace {

void NestingFunc(int* depth, base::OnceClosure quit_closure) {}

}  // namespace

TEST_P(SingleThreadTaskExecutorTypedTest, Nesting) {}

TEST_P(SingleThreadTaskExecutorTypedTest, Recursive) {}

namespace {

void OrderedFunc(TaskList* order, int cookie) {}

}  // namespace

// Tests that non-nestable tasks run in FIFO order if there are no nested
// loops.
TEST_P(SingleThreadTaskExecutorTypedTest, NonNestableWithNoNesting) {}

namespace {

void FuncThatPumps(TaskList* order, int cookie) {}

void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {}

}  // namespace

// Tests that non-nestable tasks don't run while nested-loop code is on the
// call stack.
TEST_P(SingleThreadTaskExecutorTypedTest, NonNestableDelayedInNestedLoop) {}

namespace {

void FuncThatRuns(TaskList* order, int cookie, RunLoop* run_loop) {}

void FuncThatQuitsNow(base::OnceClosure quit_closure) {}

}  // namespace

// Tests that RunLoop::Quit only quits the corresponding MessageLoop::Run.
TEST_P(SingleThreadTaskExecutorTypedTest, QuitNow) {}

// Tests that RunLoop::Quit only quits the corresponding MessageLoop::Run.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitTop) {}

// Tests that RunLoop::Quit only quits the corresponding MessageLoop::Run.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitNested) {}

// Quits current loop and immediately runs a nested loop.
void QuitAndRunNestedLoop(TaskList* order,
                          int cookie,
                          RunLoop* outer_run_loop,
                          RunLoop* nested_run_loop) {}

// Test that we can run nested loop after quitting the current one.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopNestedAfterQuit) {}

// Tests that RunLoop::Quit only quits the corresponding MessageLoop::Run.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitBogus) {}

// Tests that RunLoop::Quit only quits the corresponding MessageLoop::Run.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitDeep) {}

// Tests that RunLoop::Quit works before RunWithID.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderBefore) {}

// Tests that RunLoop::Quit works during RunWithID.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderDuring) {}

// Tests that RunLoop::Quit works after RunWithID.
TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderAfter) {}

// Regression test for crbug.com/170904 where posting tasks recursively caused
// the message loop to hang in MessagePumpGLib, due to the buffer of the
// internal pipe becoming full. Test all SingleThreadTaskExecutor types to
// ensure this issue does not exist in other MessagePumps.
//
// On Linux, the pipe buffer size is 64KiB by default. The bug caused one byte
// to accumulate in the pipe per two posts, so we need to repeat the post 128K
// times to reproduce it.
#if BUILDFLAG(IS_CHROMEOS)
// TODO(crbug.com/40754898): This test is unreasonably slow on CrOS and flakily
// times out (100x slower than other platforms which take < 1s to complete
// it).
#define MAYBE_RecursivePostsDoNotFloodPipe \
  DISABLED_RecursivePostsDoNotFloodPipe
#else
#define MAYBE_RecursivePostsDoNotFloodPipe RecursivePostsDoNotFloodPipe
#endif
TEST_P(SingleThreadTaskExecutorTypedTest, MAYBE_RecursivePostsDoNotFloodPipe) {}
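
// The test body above is elided; below is a minimal illustrative sketch (not
// the real test) of the recursive-posting pattern the comment above describes:
// a task that re-posts itself from within its own run. Repeated ~128K times,
// this overflowed a 64KiB pipe buffer when the pump leaked one byte per two
// posts. The function name and parameters here are assumptions.
namespace {

void IllustrativeRecursivePost(int remaining_posts, OnceClosure on_done) {
  if (remaining_posts == 0) {
    std::move(on_done).Run();
    return;
  }
  // Re-post from within the currently running task, exercising the pump's
  // "work begets work" path on every iteration. A test might drive this as:
  //   RunLoop loop;
  //   IllustrativeRecursivePost(128 * 1024, loop.QuitClosure());
  //   loop.Run();
  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, BindOnce(&IllustrativeRecursivePost, remaining_posts - 1,
                          std::move(on_done)));
}

}  // namespace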

TEST_P(SingleThreadTaskExecutorTypedTest,
       ApplicationTasksAllowedInNativeNestedLoopAtTopLevel) {}

// Nestable tasks shouldn't be allowed to run reentrantly by default (regression
// test for https://crbug.com/754112).
TEST_P(SingleThreadTaskExecutorTypedTest, NestableTasksDisallowedByDefault) {}

TEST_P(SingleThreadTaskExecutorTypedTest,
       NestableTasksProcessedWhenRunLoopAllows) {}

TEST_P(SingleThreadTaskExecutorTypedTest, IsIdleForTesting) {}

TEST_P(SingleThreadTaskExecutorTypedTest, IsIdleForTestingNonNestableTask) {}

INSTANTIATE_TEST_SUITE_P(All,
                         SingleThreadTaskExecutorTypedTest,
                         ::testing::Values(MessagePumpType::DEFAULT,
                                           MessagePumpType::UI,
                                           MessagePumpType::IO));

#if BUILDFLAG(IS_WIN)

// Verifies that the SingleThreadTaskExecutor ignores WM_QUIT, rather than
// quitting. Users of SingleThreadTaskExecutor typically expect to control when
// their RunLoops stop Run()ning explicitly, via QuitClosure() etc (see
// https://crbug.com/720078).
TEST(SingleThreadTaskExecutorTest, WmQuitIsIgnored) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  // Post a WM_QUIT message to the current thread.
  ::PostQuitMessage(0);

  // Post a task to the current thread, with a small delay to make it less
  // likely that we process the posted task before looking for WM_* messages.
  bool task_was_run = false;
  RunLoop run_loop;
  executor.task_runner()->PostDelayedTask(
      FROM_HERE,
      BindOnce(
          [](bool* flag, OnceClosure closure) {
            *flag = true;
            std::move(closure).Run();
          },
          &task_was_run, run_loop.QuitClosure()),
      TestTimeouts::tiny_timeout());

  // Run the loop, and ensure that the posted task is processed before we quit.
  run_loop.Run();
  EXPECT_TRUE(task_was_run);
}

TEST(SingleThreadTaskExecutorTest, PostDelayedTask_SharedTimer_SubPump) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  // Test that the interval of the timer, used to run the next delayed task, is
  // set to a value corresponding to when the next delayed task should run.

  // By setting num_tasks to 1, we ensure that the first task to run causes the
  // run loop to exit.
  int num_tasks = 1;
  TimeTicks run_time;

  RunLoop run_loop;

  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));

  // This very delayed task should never run.
  executor.task_runner()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks,
               run_loop.QuitWhenIdleClosure()),
      Seconds(1000));

  // This slightly delayed task should run from within SubPumpFunc.
  executor.task_runner()->PostDelayedTask(
      FROM_HERE, BindOnce(&::PostQuitMessage, 0), Milliseconds(10));

  Time start_time = Time::Now();

  run_loop.Run();
  EXPECT_EQ(1, num_tasks);

  // Ensure that we ran in far less time than the slower timer.
  TimeDelta total_time = Time::Now() - start_time;
  EXPECT_GT(5000, total_time.InMilliseconds());

  // In case both timers somehow run at nearly the same time, sleep a little
  // and then run all pending tasks to force them both to have run. This merely
  // encourages any flakiness to show itself.
  PlatformThread::Sleep(Milliseconds(100));
  RunLoop().RunUntilIdle();

  EXPECT_TRUE(run_time.is_null());
}

namespace {

// When the associated WM_TIMER fires, this handler posts an application task
// to quit the native loop.
bool QuitOnSystemTimer(UINT message,
                       WPARAM wparam,
                       LPARAM lparam,
                       LRESULT* result) {
  if (message == static_cast<UINT>(WM_TIMER)) {
    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
        FROM_HERE, BindOnce(&::PostQuitMessage, 0));
  }
  *result = 0;
  return true;
}

// When the associated WM_TIMER fires, this handler posts a delayed application
// task to quit the native loop.
bool DelayedQuitOnSystemTimer(UINT message,
                              WPARAM wparam,
                              LPARAM lparam,
                              LRESULT* result) {
  if (message == static_cast<UINT>(WM_TIMER)) {
    SingleThreadTaskRunner::GetCurrentDefault()->PostDelayedTask(
        FROM_HERE, BindOnce(&::PostQuitMessage, 0), Milliseconds(10));
  }
  *result = 0;
  return true;
}

}  // namespace

// This is a regression test for
// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125
// See below for the delayed task version.
TEST(SingleThreadTaskExecutorTest, PostImmediateTaskFromSystemPump) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  RunLoop run_loop;

  // A native message window to generate a system message which invokes
  // QuitOnSystemTimer() when the native timer fires.
  win::MessageWindow local_message_window;
  local_message_window.Create(BindRepeating(&QuitOnSystemTimer));
  ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));

  // The first task will enter a native message loop. This test then verifies
  // that the pump is able to run an immediate application task after the native
  // pump went idle.
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));

  // Test success is determined by not hanging in this Run() call.
  run_loop.Run();
}

// This is a regression test for
// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125 This
// is the delayed task equivalent of the above PostImmediateTaskFromSystemPump
// test.
//
// As a reminder of how this works, here's the sequence of events in this test:
//  1) Test start:
//       work_deduplicator.cc(24): BindToCurrentThread
//       work_deduplicator.cc(34): OnWorkRequested
//       thread_controller_with_message_pump_impl.cc(237) : DoWork
//       work_deduplicator.cc(50): OnWorkStarted
//  2) SubPumpFunc entered:
//       message_loop_unittest.cc(278): SubPumpFunc
//  3) ScopedAllowApplicationTasksInNativeNestedLoop triggers nested
//     ScheduleWork: work_deduplicator.cc(34): OnWorkRequested
//  4) Nested system loop starts and pumps internal kMsgHaveWork:
//       message_loop_unittest.cc(282): SubPumpFunc : Got Message
//       message_pump_win.cc(302): HandleWorkMessage
//       thread_controller_with_message_pump_impl.cc(237) : DoWork
//  5) Attempt to DoWork(), there's nothing to do, NextWorkInfo indicates delay.
//       work_deduplicator.cc(50): OnWorkStarted
//       work_deduplicator.cc(58): WillCheckForMoreWork
//       work_deduplicator.cc(67): DidCheckForMoreWork
//  6) Return control to HandleWorkMessage() which schedules native timer
//     and goes to sleep (no kMsgHaveWork in native queue).
//       message_pump_win.cc(328): HandleWorkMessage ScheduleNativeTimer
//  7) Native timer fires and posts the delayed application task:
//       message_loop_unittest.cc(282): SubPumpFunc : Got Message
//       message_loop_unittest.cc(1581): DelayedQuitOnSystemTimer
//  !! This is the critical step verified by this test. Since the
//     ThreadController is idle after (6), it won't be invoked again and thus
//     won't get a chance to return a NextWorkInfo that indicates the next
//     delay. A native timer is thus required to have SubPumpFunc handle it.
//       work_deduplicator.cc(42): OnDelayedWorkRequested
//       message_pump_win.cc(129): ScheduleDelayedWork
//  9) The scheduled native timer fires and runs application task binding
//     ::PostQuitMessage :
//       message_loop_unittest.cc(282) SubPumpFunc : Got Message
//       work_deduplicator.cc(50): OnWorkStarted
//       thread_controller_with_message_pump_impl.cc(237) : DoWork
//  10) SequenceManager updates delay to none and notifies
//      (TODO(scheduler-dev): Could remove this step but WorkDeduplicator knows
//                            to ignore at least):
//       work_deduplicator.cc(42): OnDelayedWorkRequested
//  11) Nested application task completes and SubPumpFunc unwinds:
//       work_deduplicator.cc(58): WillCheckForMoreWork
//       work_deduplicator.cc(67): DidCheckForMoreWork
//  12) ~ScopedAllowApplicationTasksInNativeNestedLoop() makes sure
//      WorkDeduplicator knows we're back in DoWork() (not relevant in this test
//      but important overall). work_deduplicator.cc(50): OnWorkStarted
//  13) Application task which ran SubPumpFunc completes and test finishes.
//       work_deduplicator.cc(67): DidCheckForMoreWork
TEST(SingleThreadTaskExecutorTest, PostDelayedTaskFromSystemPump) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  RunLoop run_loop;

  // A native message window to generate a system message which invokes
  // DelayedQuitOnSystemTimer() when the native timer fires.
  win::MessageWindow local_message_window;
  local_message_window.Create(BindRepeating(&DelayedQuitOnSystemTimer));
  ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));

  // The first task will enter a native message loop. This test then verifies
  // that the pump is able to run a delayed application task after the native
  // pump went idle.
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));

  // Test success is determined by not hanging in this Run() call.
  run_loop.Run();
}

TEST(SingleThreadTaskExecutorTest, WmQuitIsVisibleToSubPump) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  // Regression test for https://crbug.com/888559. When processing a
  // kMsgHaveWork we peek and remove the next message and dispatch that ourself,
  // to minimize impact of these messages on message-queue processing. If we
  // received kMsgHaveWork dispatched by a nested pump (e.g. ::GetMessage()
  // loop) then there is a risk that the next message is that loop's WM_QUIT
  // message, which must be processed directly by ::GetMessage() for the loop
  // to actually quit. This test verifies that exiting on WM_QUIT works as
  // expected even if it happens to immediately follow a kMsgHaveWork in the
  // queue.

  RunLoop run_loop;

  // This application task will enter the subpump.
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));

  // This application task will post a native WM_QUIT.
  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));

  // The presence of this application task means that the pump will see a
  // non-empty queue after processing the previous application task (which
  // posted the WM_QUIT) and hence will repost a kMsgHaveWork message in the
  // native event queue. Without the fix to https://crbug.com/888559, this would
  // previously result in the subpump processing kMsgHaveWork and it stealing
  // the WM_QUIT message, leaving the test hung in the subpump.
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());

  // Test success is determined by not hanging in this Run() call.
  run_loop.Run();
}

TEST(SingleThreadTaskExecutorTest,
     RepostingWmQuitDoesntStarveUpcomingNativeLoop) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  // This test ensures that application tasks are being processed by the native
  // subpump despite the kMsgHaveWork event having already been consumed by the
  // time the subpump is entered. This is subtly enforced by
  // CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop which
  // will ScheduleWork() upon construction (and if it's absent, the
  // SingleThreadTaskExecutor shouldn't process application tasks so
  // kMsgHaveWork is irrelevant). Note: This test also fails prior to the fix
  // for https://crbug.com/888559 (in fact, the last two tasks are sufficient as
  // a regression test), probably because of a dangling kMsgHaveWork recreating
  // the effect from
  // SingleThreadTaskExecutorTest.NativeMsgProcessingDoesntStealWmQuit.

  RunLoop run_loop;

  // This application task will post a native WM_QUIT which will be ignored
  // by the main message pump.
  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));

  // Make sure the pump does a few extra cycles and processes (ignores) the
  // WM_QUIT.
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());

  // This application task will enter the subpump.
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));

  // Post an application task that will post WM_QUIT to the nested loop. The
  // test will hang if the subpump doesn't process application tasks as it
  // should.
  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));

  // Test success is determined by not hanging in this Run() call.
  run_loop.Run();
}

// TODO(crbug.com/40595757): Enable once multiple layers of nested loops work.
TEST(SingleThreadTaskExecutorTest,
     DISABLED_UnwindingMultipleSubPumpsDoesntStarveApplicationTasks) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);

  // Regression test for https://crbug.com/890016.
  // Tests that the subpump is still processing application tasks after
  // unwinding from nested subpumps (i.e. that they didn't consume the last
  // kMsgHaveWork).

  RunLoop run_loop;

  // Enter multiple levels of nested subpumps.
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
  executor.task_runner()->PostTask(FROM_HERE,
                                   BindOnce(&SubPumpFunc, DoNothing()));
  executor.task_runner()->PostTask(FROM_HERE,
                                   BindOnce(&SubPumpFunc, DoNothing()));

  // Quit two layers (with tasks in between to allow each quit to be handled
  // before continuing -- ::PostQuitMessage() sets a bit, it's not a real
  // queued message:
  // https://blogs.msdn.microsoft.com/oldnewthing/20051104-33/?p=33453).
  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());
  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());
  executor.task_runner()->PostTask(FROM_HERE, DoNothing());

  bool last_task_ran = false;
  executor.task_runner()->PostTask(
      FROM_HERE, BindOnce([](bool* to_set) { *to_set = true; },
                          Unretained(&last_task_ran)));

  executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));

  run_loop.Run();

  EXPECT_TRUE(last_task_ran);
}

namespace {

// A side effect of this test is the generation of a beep. Sorry.
void RunTest_NestingDenial2(MessagePumpType message_pump_type) {
  SingleThreadTaskExecutor executor(message_pump_type);
  base::RunLoop loop;
  Thread worker("NestingDenial2_worker");
  Thread::Options options;
  options.message_pump_type = message_pump_type;
  ASSERT_TRUE(worker.StartWithOptions(std::move(options)));
  TaskList order;
  win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
  worker.task_runner()->PostTask(
      FROM_HERE,
      BindOnce(&RecursiveFuncWin, SingleThreadTaskRunner::GetCurrentDefault(),
               event.get(), true, &order, false, loop.QuitWhenIdleClosure()));
  // Let the other thread execute.
  WaitForSingleObject(event.get(), INFINITE);
  loop.Run();

  ASSERT_EQ(17u, order.Size());
  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
  EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
  EXPECT_EQ(order.Get(3), TaskItem(MESSAGEBOX, 2, false));
  EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 3, false));
  // When EndDialogFunc is processed, the window is already dismissed, hence no
  // "end" entry.
  EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, true));
  EXPECT_EQ(order.Get(7), TaskItem(QUITMESSAGELOOP, 5, true));
  EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, false));
  EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, false));
  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, false));
  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, false));
  EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, false));
}

}  // namespace

// This test occasionally hangs; it would need to be turned into an
// interactive_ui_test. See http://crbug.com/44567.
TEST(SingleThreadTaskExecutorTest, DISABLED_NestingDenial2) {
  RunTest_NestingDenial2(MessagePumpType::DEFAULT);
  RunTest_NestingDenial2(MessagePumpType::UI);
  RunTest_NestingDenial2(MessagePumpType::IO);
}

// A side effect of this test is the generation of a beep. Sorry. This test
// also needs to process windows messages on the current thread.
TEST(SingleThreadTaskExecutorTest, NestingSupport2) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);
  base::RunLoop loop;
  Thread worker("NestingSupport2_worker");
  Thread::Options options;
  options.message_pump_type = MessagePumpType::UI;
  ASSERT_TRUE(worker.StartWithOptions(std::move(options)));
  TaskList order;
  win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
  worker.task_runner()->PostTask(
      FROM_HERE,
      BindOnce(&RecursiveFuncWin, SingleThreadTaskRunner::GetCurrentDefault(),
               event.get(), false, &order, true, loop.QuitWhenIdleClosure()));
  // Let the other thread execute.
  WaitForSingleObject(event.get(), INFINITE);
  loop.Run();

  ASSERT_EQ(18u, order.Size());
  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
  EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
  // Note that this executes in the MessageBox modal loop.
  EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, false));
  EXPECT_EQ(order.Get(5), TaskItem(ENDDIALOG, 4, true));
  EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, false));
  EXPECT_EQ(order.Get(7), TaskItem(MESSAGEBOX, 2, false));
  /* The order can subtly change here. The reason is that when RecursiveFunc(1)
     is called on the main thread, if it happens to run before the
     PostTask(FROM_HERE, BindOnce(&QuitFunc)) call is reached, the order of
     task execution can change. We don't care about the exact order anyway.
  EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, true));
  EXPECT_EQ(order.Get(9), TaskItem(QUITMESSAGELOOP, 5, false));
  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
  */
  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 3, false));
  EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, true));
  EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 1, false));
  EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, true));
  EXPECT_EQ(order.Get(17), TaskItem(RECURSIVE, 3, false));
}

#endif  // BUILDFLAG(IS_WIN)

#if BUILDFLAG(IS_WIN)
TEST(SingleThreadTaskExecutorTest, IOHandler) {
  RunTest_IOHandler();
}
#endif  // BUILDFLAG(IS_WIN)

namespace {
// Injects a test point for recording the destructor calls for Closure objects
// sent to MessageLoop::PostTask(). The usage is awkward because we are trying
// to hook the actual destruction, which is not a common operation.
class DestructionObserverProbe : public RefCounted<DestructionObserverProbe> {};

class MLDestructionObserver : public CurrentThread::DestructionObserver {};
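
// The observer above is elided; this is a minimal illustrative sketch (an
// assumption, not the real class) of the CurrentThread::DestructionObserver
// API: register it via CurrentThread::Get()->AddDestructionObserver() and its
// WillDestroyCurrentMessageLoop() override fires when the executor backing the
// current thread is torn down.
class IllustrativeDestructionObserver
    : public CurrentThread::DestructionObserver {
 public:
  explicit IllustrativeDestructionObserver(bool* destroyed)
      : destroyed_(destroyed) {}

  // CurrentThread::DestructionObserver:
  void WillDestroyCurrentMessageLoop() override { *destroyed_ = true; }

 private:
  const raw_ptr<bool> destroyed_;
};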

}  // namespace

TEST(SingleThreadTaskExecutorTest, DestructionObserverTest) {}

// Verify that SingleThreadTaskExecutor sets ThreadMainTaskRunner::current()
// and that it posts tasks on that message loop.
TEST(SingleThreadTaskExecutorTest, ThreadMainTaskRunner) {}

TEST(SingleThreadTaskExecutorTest, type) {}

#if BUILDFLAG(IS_WIN)
void EmptyFunction() {}

void PostMultipleTasks() {
  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, base::BindOnce(&EmptyFunction));
  SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, base::BindOnce(&EmptyFunction));
}

static const int kSignalMsg = WM_USER + 2;
// this feels wrong
static base::RunLoop* g_loop_to_quit_from_message_handler = nullptr;
void PostWindowsMessage(HWND message_hwnd) {
  PostMessage(message_hwnd, kSignalMsg, 0, 2);
}

void EndTest(bool* did_run, HWND hwnd) {
  *did_run = true;
  PostMessage(hwnd, WM_CLOSE, 0, 0);
}

int kMyMessageFilterCode = 0x5002;

LRESULT CALLBACK TestWndProcThunk(HWND hwnd,
                                  UINT message,
                                  WPARAM wparam,
                                  LPARAM lparam) {
  if (message == WM_CLOSE)
    EXPECT_TRUE(DestroyWindow(hwnd));
  if (message != kSignalMsg)
    return DefWindowProc(hwnd, message, wparam, lparam);

  switch (lparam) {
    case 1:
      // First, we post a task that will post multiple no-op tasks to make sure
      // that the pump's incoming task queue does not become empty during the
      // test.
      SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
          FROM_HERE, base::BindOnce(&PostMultipleTasks));
      // Next, we post a task that posts a windows message to trigger the second
      // stage of the test.
      SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
          FROM_HERE, base::BindOnce(&PostWindowsMessage, hwnd));
      break;
    case 2:
      // Since we're about to enter a modal loop, tell the message loop that we
      // intend to nest tasks.
      CurrentThread::ScopedAllowApplicationTasksInNativeNestedLoop
          allow_nestable_tasks;
      bool did_run = false;
      SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
          FROM_HERE, base::BindOnce(&EndTest, &did_run, hwnd));
      // Run a nested windows-style message loop and verify that our task runs.
      // If it doesn't, then we'll loop here until the test times out.
      MSG msg;
      while (GetMessage(&msg, 0, 0, 0)) {
        if (!CallMsgFilter(&msg, kMyMessageFilterCode))
          DispatchMessage(&msg);
        // If this message is a WM_CLOSE, explicitly exit the modal loop.
        // Posting a WM_QUIT should handle this, but unfortunately
        // MessagePumpWin eats WM_QUIT messages even when running inside a modal
        // loop.
        if (msg.message == WM_CLOSE)
          break;
      }
      EXPECT_TRUE(did_run);

      g_loop_to_quit_from_message_handler->QuitWhenIdle();

      break;
  }
  return 0;
}

TEST(SingleThreadTaskExecutorTest, AlwaysHaveUserMessageWhenNesting) {
  SingleThreadTaskExecutor executor(MessagePumpType::UI);
  RunLoop loop;

  HINSTANCE instance = CURRENT_MODULE();
  WNDCLASSEX wc = {0};
  wc.cbSize = sizeof(wc);
  wc.lpfnWndProc = TestWndProcThunk;
  wc.hInstance = instance;
  wc.lpszClassName = L"SingleThreadTaskExecutorTest_HWND";
  ATOM atom = RegisterClassEx(&wc);
  ASSERT_TRUE(atom);

  g_loop_to_quit_from_message_handler = &loop;
  HWND message_hwnd = CreateWindow(MAKEINTATOM(atom), 0, 0, 0, 0, 0, 0,
                                   HWND_MESSAGE, 0, instance, 0);

  ASSERT_TRUE(message_hwnd) << GetLastError();

  ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));

  loop.Run();

  ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
  g_loop_to_quit_from_message_handler = nullptr;
}
#endif  // BUILDFLAG(IS_WIN)

TEST(SingleThreadTaskExecutorTest,
     ApplicationTasksAllowedInNativeNestedLoopExplicitlyInScope) {}

// Verify that tasks posted to and code running in the scope of the same
// SingleThreadTaskExecutor access the same SequenceLocalStorage values.
TEST(SingleThreadTaskExecutorTest, SequenceLocalStorageSetGet) {}

// Verify that tasks posted to and code running in different MessageLoops access
// different SequenceLocalStorage values.
TEST(SingleThreadTaskExecutorTest, SequenceLocalStorageDifferentMessageLoops) {}

namespace {

class PostTaskOnDestroy {};
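
// The class above is elided; this is a minimal illustrative sketch (an
// assumption, not the real class) of the pattern the test below exercises: an
// object owned by a queued task whose destructor posts yet another task, so
// destroying the executor with such tasks still pending exercises the
// post-during-shutdown path.
class IllustrativePostTaskOnDestroy {
 public:
  IllustrativePostTaskOnDestroy() = default;
  ~IllustrativePostTaskOnDestroy() {
    // Posting here may happen while the executor is tearing down its queues.
    SingleThreadTaskRunner::GetCurrentDefault()->PostTask(FROM_HERE,
                                                          DoNothing());
  }
};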

}  // namespace

// Test that SingleThreadTaskExecutor destruction handles a task's destructor
// posting another task.
TEST(SingleThreadTaskExecutorDestructionTest,
     DestroysFineWithPostTaskOnDestroy) {}

}  // namespace base