MB-23750: Fix toggling ephemeral ejection policy 94/76394/11
authorJames Harrison <00jamesh@gmail.com>
Thu, 6 Apr 2017 12:23:38 +0000 (13:23 +0100)
committerDave Rigby <daver@couchbase.com>
Tue, 11 Apr 2017 11:06:03 +0000 (11:06 +0000)
If going

auto_delete -> fail_new_data -> auto_delete

the itemPager task was rescheduled, but as it had previously been
cancelled the state remained TASK_DEAD, and so ExecutorThread::run
behaves as if the task has just been cancelled and needs cleaning up.
By resetting scheduled tasks back to TASK_RUNNING if they are dead, this
can be avoided.

Change-Id: Id007a15fdaeb80a79828da0cade031a424e653cf
Reviewed-on: http://review.couchbase.org/76394
Tested-by: Build Bot <build@couchbase.com>
Reviewed-by: Dave Rigby <daver@couchbase.com>
src/syncobject.h
src/taskqueue.cc
tests/module_tests/executorpool_test.cc
tests/module_tests/executorpool_test.h

index d2dbaba..9aec8f5 100644 (file)
@@ -40,6 +40,11 @@ public:
         cond.wait(lock);
     }
 
+    template <class Predicate>
+    void wait(std::unique_lock<std::mutex>& lock, Predicate pred) {
+        cond.wait(lock, pred);
+    }
+
     void wait_for(std::unique_lock<std::mutex>& lock,
                   const double secs) {
         cond.wait_for(lock, std::chrono::milliseconds(int64_t(secs * 1000.0)));
index 90f6f2a..8b94799 100644 (file)
@@ -210,6 +210,10 @@ void TaskQueue::_schedule(ExTask &task) {
     {
         LockHolder lh(mutex);
 
+        // If we are rescheduling a previously cancelled task, we should reset
+        // the task state to the initial value of running.
+        task->setState(TASK_RUNNING, TASK_DEAD);
+
         futureQueue.push(task);
 
         LOG(EXTENSION_LOG_DEBUG,
index d00eb78..f92feba 100644 (file)
@@ -230,3 +230,32 @@ TEST_F(ExecutorPoolDynamicWorkerTest, new_worker_naming_test) {
     EXPECT_TRUE(pool->threadExists("writer_worker_0"));
     EXPECT_TRUE(pool->threadExists("writer_worker_1"));
 }
+
+/* Make sure that a task that has run once and been cancelled can be
+ * rescheduled and will run again properly.
+ */
+TEST_F(ExecutorPoolDynamicWorkerTest, reschedule_dead_task) {
+    size_t runCount{0};
+
+    ExTask task = new LambdaTask(taskable, TaskId::ItemPager, 0, true, [&] {
+        ++runCount;
+        return false;
+    });
+
+    ASSERT_EQ(TASK_RUNNING, task->getState())
+            << "Initial task state should be RUNNING";
+
+    pool->schedule(task);
+    pool->waitForEmptyTaskLocator();
+
+    EXPECT_EQ(TASK_DEAD, task->getState())
+            << "Task has completed and been cleaned up, state should be DEAD";
+
+    pool->schedule(task);
+    pool->waitForEmptyTaskLocator();
+
+    EXPECT_EQ(TASK_DEAD, task->getState())
+            << "Task has completed and been cleaned up, state should be DEAD";
+
+    EXPECT_EQ(2, runCount);
+}
index 7dcfb33..e7cdc33 100644 (file)
@@ -25,6 +25,7 @@
 #include <executorthread.h>
 #include <gtest/gtest.h>
 #include <taskable.h>
+#include <thread>
 #include "thread_gate.h"
 
 class MockTaskable : public Taskable {
@@ -89,6 +90,14 @@ public:
         return std::find(names.begin(), names.end(), name) != names.end();
     }
 
+    /** Waits indefinitely for the taskLocator to become empty, indicating all
+     * tasks have been cancelled and cleaned up.
+     */
+    void waitForEmptyTaskLocator() {
+        std::unique_lock<std::mutex> lh(tMutex);
+        tMutex.wait(lh, [this] { return taskLocator.empty(); });
+    }
+
     ~TestExecutorPool() = default;
 };