Below is a simplified version of my scheduler. A single thread processes packaged_tasks in the order they were enqueued.
- execute: schedules a task and waits for its completion
- schedule: enqueues a task and immediately returns to the user
- drain: simply enqueues an empty task, with the idea being that once we get to it, we know all previously scheduled commands have executed
The question: are there any guarantees here that drain won't be optimized away? I've tested this sample program with several compilers under -O3 and none of them optimize the empty lambda call out. Is there enough observable behaviour (manipulation of the mTasks list, synchronization barriers, etc.) that this drain implementation is valid? What does the standard say?
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <future>
#include <list>
#include <mutex>
#include <thread>

using namespace std::chrono_literals;

class Scheduler {
public:
    void processTasks() {
        // Runs on a separate thread
        // ... some synchronization here ignored (sketched at the end of the post)
        while (!mTasks.empty()) {
            auto& tsk = mTasks.front();
            tsk();
            mTasks.pop_front();
        }
    }

    // Enqueue under the lock and wake the worker thread.
    void addTask(std::packaged_task<void(void)> task) {
        std::unique_lock lck(mMutex);
        mTasks.emplace_back(std::move(task));
        mCond.notify_one();
    }

    // Wrap the callable in a packaged_task and hand its future back to the caller.
    auto schedule(std::function<void(void)> func) {
        std::packaged_task<void(void)> task{std::move(func)};
        auto fut = task.get_future();
        addTask(std::move(task));
        return fut;
    }

    // Schedule and block until the task has run.
    void execute(std::function<void(void)> func) {
        auto fut = schedule(std::move(func));
        fut.get();
    }

    void fireAndForget(std::function<void(void)> func) {
        schedule(std::move(func));
    }

    // Empty task: once it has run, everything enqueued before it has run too.
    void drain() {
        execute([] {});
    }

    std::list<std::packaged_task<void(void)>> mTasks;
    std::mutex mMutex;
    std::condition_variable mCond;
};
int main() {
    Scheduler sched;
    std::jthread executor{&Scheduler::processTasks, &sched};

    int counter = 0;
    sched.fireAndForget([&] {
        counter++;
        std::this_thread::sleep_for(1s);
    });
    sched.fireAndForget([&] {
        counter++;
        std::this_thread::sleep_for(1s);
    });
    sched.fireAndForget([&] {
        counter++;
        std::this_thread::sleep_for(1s);
    });

    sched.drain();
    assert(counter == 3);
    return 0;
}
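
For reference, the synchronization elided from processTasks is just an ordinary mutex plus condition-variable wait. A minimal sketch of what it could look like inside Scheduler (assuming an extra bool mDone shutdown flag, which is not part of the simplified code above):

// Sketch only: processTasks with the elided synchronization spelled out.
// Assumes an additional member "bool mDone = false;" that is set under
// mMutex (followed by mCond.notify_one()) when the scheduler shuts down.
void processTasks() {
    std::unique_lock lck(mMutex);
    for (;;) {
        // Sleep until there is work or shutdown has been requested.
        mCond.wait(lck, [&] { return mDone || !mTasks.empty(); });
        if (mTasks.empty())
            break;                 // mDone set and queue fully drained
        auto tsk = std::move(mTasks.front());
        mTasks.pop_front();
        lck.unlock();              // run the task outside the lock
        tsk();
        lck.lock();
    }
}

The exact shape of that wait loop shouldn't matter for the question; the point is only that there is real mutex/condition-variable traffic around every task, which is part of what I'm asking about with respect to observable behaviour.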