#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H
#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H

// Used directly by this header: std::atomic, std::atomic_thread_fence, uint64_t.
#include <atomic>
#include <cstdint>

#include "./InternalHeaderCheck.h"
56 EventCount(MaxSizeVector<Waiter>& waiters) : state_(kStackMask), waiters_(waiters) {
57 eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1);
60 EventCount(
const EventCount&) =
delete;
61 void operator=(
const EventCount&) =
delete;
65 eigen_plain_assert(state_.load() == kStackMask);
72 uint64_t state = state_.load(std::memory_order_relaxed);
75 uint64_t newstate = state + kWaiterInc;
77 if (state_.compare_exchange_weak(state, newstate, std::memory_order_seq_cst))
return;
82 void CommitWait(Waiter* w) {
83 eigen_plain_assert((w->epoch & ~kEpochMask) == 0);
84 w->state = Waiter::kNotSignaled;
85 const uint64_t me = (w - &waiters_[0]) | w->epoch;
86 uint64_t state = state_.load(std::memory_order_seq_cst);
88 CheckState(state,
true);
90 if ((state & kSignalMask) != 0) {
92 newstate = state - kWaiterInc - kSignalInc;
95 newstate = ((state & kWaiterMask) - kWaiterInc) | me;
96 w->next.store(state & (kStackMask | kEpochMask), std::memory_order_relaxed);
99 if (state_.compare_exchange_weak(state, newstate, std::memory_order_acq_rel)) {
100 if ((state & kSignalMask) == 0) {
101 w->epoch += kEpochInc;
111 uint64_t state = state_.load(std::memory_order_relaxed);
113 CheckState(state,
true);
114 uint64_t newstate = state - kWaiterInc;
119 if (((state & kWaiterMask) >> kWaiterShift) == ((state & kSignalMask) >> kSignalShift)) newstate -= kSignalInc;
120 CheckState(newstate);
121 if (state_.compare_exchange_weak(state, newstate, std::memory_order_acq_rel))
return;
127 void Notify(
bool notifyAll) {
128 std::atomic_thread_fence(std::memory_order_seq_cst);
129 uint64_t state = state_.load(std::memory_order_acquire);
132 const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
133 const uint64_t signals = (state & kSignalMask) >> kSignalShift;
135 if ((state & kStackMask) == kStackMask && waiters == signals)
return;
139 newstate = (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask;
140 }
else if (signals < waiters) {
142 newstate = state + kSignalInc;
145 Waiter* w = &waiters_[state & kStackMask];
146 uint64_t next = w->next.load(std::memory_order_relaxed);
147 newstate = (state & (kWaiterMask | kSignalMask)) | next;
149 CheckState(newstate);
150 if (state_.compare_exchange_weak(state, newstate, std::memory_order_acq_rel)) {
151 if (!notifyAll && (signals < waiters))
return;
152 if ((state & kStackMask) == kStackMask)
return;
153 Waiter* w = &waiters_[state & kStackMask];
154 if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed);
// state_ layout (64 bits total):
// - low kWaiterBits: stack of committed waiters — an index into waiters_,
//   with kStackMask (all ones) meaning "empty stack";
// - next kWaiterBits: count of threads in pre-wait state;
// - next kWaiterBits: count of pending signals;
// - remaining high bits: ABA epoch for the stack (kept per-Waiter and
//   advanced on every push to defeat the ABA problem).
static const uint64_t kWaiterBits = 14;
static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
static const uint64_t kWaiterShift = kWaiterBits;
static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift;
static const uint64_t kWaiterInc = 1ull << kWaiterShift;
static const uint64_t kSignalShift = 2 * kWaiterBits;
static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) << kSignalShift;
static const uint64_t kSignalInc = 1ull << kSignalShift;
static const uint64_t kEpochShift = 3 * kWaiterBits;
static const uint64_t kEpochBits = 64 - kEpochShift;
static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
static const uint64_t kEpochInc = 1ull << kEpochShift;
185 friend class EventCount;
193 EIGEN_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> next{kStackMask};
197 unsigned state{kNotSignaled};
201 static void CheckState(uint64_t state,
bool waiter =
false) {
202 static_assert(kEpochBits >= 20,
"not enough bits to prevent ABA problem");
203 const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
204 const uint64_t signals = (state & kSignalMask) >> kSignalShift;
205 eigen_plain_assert(waiters >= signals);
206 eigen_plain_assert(waiters < (1 << kWaiterBits) - 1);
207 eigen_plain_assert(!waiter || waiters > 0);
212 void Park(Waiter* w) {
213 EIGEN_MUTEX_LOCK lock(w->mu);
214 while (w->state != Waiter::kSignaled) {
215 w->state = Waiter::kWaiting;
220 void Unpark(Waiter* w) {
221 for (Waiter* next; w; w = next) {
222 uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask;
223 next = wnext == kStackMask ? nullptr : &waiters_[internal::convert_index<size_t>(wnext)];
226 EIGEN_MUTEX_LOCK lock(w->mu);
228 w->state = Waiter::kSignaled;
231 if (state == Waiter::kWaiting) w->cv.notify_one();
235 std::atomic<uint64_t> state_;
236 MaxSizeVector<Waiter>& waiters_;
// All symbols above belong to the Eigen namespace (ThreadPool module);
// the stray documentation-generator text previously here was extraction residue.