// PRIVATE CLASS METHODS
template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                          ::usePostAlwaysSignalsMitigation()
{
    // When 'true', every 'post' pessimistically signals 'd_waitCondition';
    // when 'false', 'post' signals only on the empty-to-non-empty
    // transition (see 'post' below).

    static const bool s_postAlwaysSignals = false;

    return s_postAlwaysSignals;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bsls::Types::Int64
FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                           ::disabledGeneration(Int64 state)
{
    return state & k_DISABLED_GEN_MASK;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bsls::Types::Int64
FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                 ::getValueRaw(Int64 state)
{
    return (state >> k_AVAILABLE_SHIFT) - (state & k_BLOCKED_MASK);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                ::hasAvailable(Int64 state)
{
    return k_AVAILABLE_INC <= state;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                            ::hasBlockedThread(Int64 state)
{
    return 0 != (state & k_BLOCKED_MASK);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                  ::isDisabled(Int64 state)
{
    // the semaphore is disabled when the disabled generation is odd

    return 0 != (state & k_DISABLED_GEN_INC);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                       ::willHaveBlockedThread(Int64 state)
{
    return (state >> k_AVAILABLE_SHIFT) < (state & k_BLOCKED_MASK);
}
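
// The helpers above decode the single 'Int64' state word 'd_state'.  The
// 'k_*' constants are defined by the class (not shown in this section); the
// layout they imply is, illustratively:
//..
//  +--------------------------+---------------------+-------------------+
//  | available count          | disabled generation | blocked count     |
//  +--------------------------+---------------------+-------------------+
//  63              k_AVAILABLE_SHIFT      k_DISABLED_GEN_SHIFT          0
//..
// 'state >> k_AVAILABLE_SHIFT' recovers the available count,
// 'state & k_BLOCKED_MASK' the number of blocked threads, and the
// generation's low-order bit ('k_DISABLED_GEN_INC') is set exactly when the
// semaphore is disabled.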

// PRIVATE MANIPULATORS
template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                    ::timedWaitSlowPath(const bsls::TimeInterval& absTime,
                                        const Int64               initialState)
{
    int rv = e_SUCCESS;

    // a change in the disabled generation indicates 'disable' was invoked

    const Int64 disabledGen = disabledGeneration(initialState);

    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    if (willHaveBlockedThread(state)) {
        LockGuard<MUTEX> guard(&d_waitMutex);

        // register this thread as blocked, undoing the optimistic decrement
        // of the available count made in 'timedWait'

        state = ATOMIC_OP::addInt64NvAcqRel(
                                           &d_state,
                                           k_AVAILABLE_INC + k_BLOCKED_INC);

        while (   !hasAvailable(state)
               && disabledGen == disabledGeneration(state)) {
            int rv = d_waitCondition.timedWait(&d_waitMutex, absTime);
            if (0 != rv) {
                ATOMIC_OP::addInt64AcqRel(&d_state, -k_BLOCKED_INC);
                return -1 == rv ? e_TIMED_OUT : e_FAILED;            // RETURN
            }
            state = ATOMIC_OP::getInt64Acquire(&d_state);
        }

        if (hasAvailable(state)) {
            // consume a resource and deregister this blocked thread

            state = ATOMIC_OP::addInt64NvAcqRel(
                                          &d_state,
                                          -(k_AVAILABLE_INC + k_BLOCKED_INC));
        }
        else {
            // the semaphore was disabled; deregister this blocked thread

            ATOMIC_OP::addInt64AcqRel(&d_state, -k_BLOCKED_INC);
            rv = e_DISABLED;
        }

        if (   hasAvailable(state)
            && !isDisabled(state)
            && hasBlockedThread(state)) {
            d_waitCondition.signal();
        }
    }
    else if (   hasAvailable(state)
             && !isDisabled(state)
             && hasBlockedThread(state)) {
        // the optimistic decrement in 'timedWait' succeeded after all; wake
        // another waiter since resources remain

        LockGuard<MUTEX> guard(&d_waitMutex);

        d_waitCondition.signal();
    }

    return rv;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                       ::waitSlowPath(const Int64 initialState)
{
    // This method mirrors 'timedWaitSlowPath', using an untimed wait.

    int rv = e_SUCCESS;

    const Int64 disabledGen = disabledGeneration(initialState);

    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    if (willHaveBlockedThread(state)) {
        LockGuard<MUTEX> guard(&d_waitMutex);

        // register this thread as blocked, undoing the optimistic decrement
        // of the available count made in 'wait'

        state = ATOMIC_OP::addInt64NvAcqRel(
                                           &d_state,
                                           k_AVAILABLE_INC + k_BLOCKED_INC);

        while (   !hasAvailable(state)
               && disabledGen == disabledGeneration(state)) {
            int rv = d_waitCondition.wait(&d_waitMutex);
            if (0 != rv) {
                ATOMIC_OP::addInt64AcqRel(&d_state, -k_BLOCKED_INC);
                return e_FAILED;                                     // RETURN
            }
            state = ATOMIC_OP::getInt64Acquire(&d_state);
        }

        if (hasAvailable(state)) {
            // consume a resource and deregister this blocked thread

            state = ATOMIC_OP::addInt64NvAcqRel(
                                          &d_state,
                                          -(k_AVAILABLE_INC + k_BLOCKED_INC));
        }
        else {
            // the semaphore was disabled; deregister this blocked thread

            ATOMIC_OP::addInt64AcqRel(&d_state, -k_BLOCKED_INC);
            rv = e_DISABLED;
        }

        if (   hasAvailable(state)
            && !isDisabled(state)
            && hasBlockedThread(state)) {
            d_waitCondition.signal();
        }
    }
    else if (   hasAvailable(state)
             && !isDisabled(state)
             && hasBlockedThread(state)) {
        LockGuard<MUTEX> guard(&d_waitMutex);

        d_waitCondition.signal();
    }

    return rv;
}
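
// Note on both slow paths: a waiter increments the blocked count (and
// undoes the fast path's optimistic decrement) only while holding
// 'd_waitMutex', and 'post' acquires and releases that same mutex before
// signaling.  A poster therefore either observes the waiter in the blocked
// count or the waiter observes the posted resource before blocking, which
// is what prevents lost wakeups.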

// CREATORS
template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
              ::FastPostSemaphoreImpl(bsls::SystemClockType::Enum clockType)
: d_waitMutex()
, d_waitCondition(clockType)
{
    ATOMIC_OP::initInt64(&d_state, 0);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
              ::FastPostSemaphoreImpl(int                         count,
                                      bsls::SystemClockType::Enum clockType)
: d_waitMutex()
, d_waitCondition(clockType)
{
    ATOMIC_OP::initInt64(&d_state, k_AVAILABLE_INC * count);
}

// MANIPULATORS
template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
void FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::disable()
{
    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    while (false == isDisabled(state)) {
        const Int64 expState = state;

        // increment the disabled generation to an odd (disabled) value

        Int64 newState = (state & ~k_DISABLED_GEN_MASK) |
                        ((state + k_DISABLED_GEN_INC) & k_DISABLED_GEN_MASK);

        state = ATOMIC_OP::testAndSwapInt64AcqRel(&d_state,
                                                  expState,
                                                  newState);

        if (expState == state) {
            // wake all blocked threads

            {
                LockGuard<MUTEX> guard(&d_waitMutex);
            }
            d_waitCondition.broadcast();

            // wait for the blocked threads to be released

            state = newState;
            while (isDisabled(state) && willHaveBlockedThread(state)) {
                THREADUTIL::yield();
                state = ATOMIC_OP::getInt64Acquire(&d_state);
            }

            return;                                                  // RETURN
        }
    }
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
void FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::enable()
{
    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    while (isDisabled(state)) {
        if (willHaveBlockedThread(state)) {
            // let threads blocked during the disabled period finish waking

            THREADUTIL::yield();
            state = ATOMIC_OP::getInt64Acquire(&d_state);
        }
        else {
            const Int64 expState = state;

            // increment the disabled generation to an even (enabled) value

            Int64 newState = (state & ~k_DISABLED_GEN_MASK) |
                        ((state + k_DISABLED_GEN_INC) & k_DISABLED_GEN_MASK);

            state = ATOMIC_OP::testAndSwapInt64AcqRel(&d_state,
                                                      expState,
                                                      newState);

            if (expState == state) {
                return;                                              // RETURN
            }
        }
    }
}
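
// 'disable' and 'enable' each advance the disabled generation by
// 'k_DISABLED_GEN_INC', so the generation's low-order bit gives the current
// mode (odd means disabled) and the full generation value lets a waiter in
// the slow paths detect, via 'disabledGen', that a disable occurred while
// it was waiting.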

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
void FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::post()
{
    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, k_AVAILABLE_INC);

    if ((   usePostAlwaysSignalsMitigation()
         || k_AVAILABLE_INC == (state & k_AVAILABLE_MASK))
        && !isDisabled(state)
        && hasBlockedThread(state)) {
        {
            // Acquiring and releasing 'd_waitMutex' ensures a waiter has
            // either observed the posted resource or is blocked awaiting
            // the signal below.

            LockGuard<MUTEX> guard(&d_waitMutex);
        }
        d_waitCondition.signal();
    }
}
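
// Note that, unless the always-signal mitigation is in effect, 'post'
// signals only when its increment produces an available count of exactly
// one (the empty-to-non-empty transition); a woken waiter that consumes a
// resource re-signals when further resources and blocked threads remain
// (see the slow paths above), so later posts need not signal again.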

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
void FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                            ::post(int value)
{
    Int64 v = k_AVAILABLE_INC * value;

    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, v);

    if ((   usePostAlwaysSignalsMitigation()
         || v == (state & k_AVAILABLE_MASK))
        && !isDisabled(state)
        && hasBlockedThread(state)) {
        {
            // see 'post' above for why the mutex is acquired and released

            LockGuard<MUTEX> guard(&d_waitMutex);
        }
        d_waitCondition.signal();
    }
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
void FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                      ::postWithRedundantSignal(int value,
                                                                int available,
                                                                int blocked)
{
    Int64 v = k_AVAILABLE_INC * value;

    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, v);

    // signal also when the caller-supplied 'available' and 'blocked'
    // thresholds are met, accepting a possibly redundant signal

    if ((   usePostAlwaysSignalsMitigation()
         || v == (state & k_AVAILABLE_MASK)
         || (   k_AVAILABLE_INC * available <= (state & k_AVAILABLE_MASK)
             && blocked <= (state & k_BLOCKED_MASK)))
        && !isDisabled(state)
        && hasBlockedThread(state)) {
        {
            // see 'post' above for why the mutex is acquired and released

            LockGuard<MUTEX> guard(&d_waitMutex);
        }
        d_waitCondition.signal();

        // flag, via the review facility, signals sent solely because of the
        // caller-supplied thresholds

        BSLS_REVIEW((
                        usePostAlwaysSignalsMitigation()
                     || v == (state & k_AVAILABLE_MASK))
                    && "redundant signal sent");
    }
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                    ::take(int maximumToTake)
{
    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);
    Int64 expState;
    Int64 count;

    do {
        expState = state;

        count = getValueRaw(state);

        if (count <= 0) {
            return 0;                                                // RETURN
        }
        if (maximumToTake < count) {
            count = maximumToTake;
        }

        state = ATOMIC_OP::testAndSwapInt64AcqRel(
                                            &d_state,
                                            expState,
                                            state - k_AVAILABLE_INC * count);
    } while (state != expState);

    return static_cast<int>(count);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::takeAll()
{
    return take(INT_MAX);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                               ::timedWait(const bsls::TimeInterval& absTime)
{
    // optimistically consume a resource

    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, -k_AVAILABLE_INC);

    if (isDisabled(state)) {
        // undo the consumption; the semaphore is disabled

        ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC);
        return e_DISABLED;                                           // RETURN
    }

    if (willHaveBlockedThread(state)) {
        return timedWaitSlowPath(absTime, state);                    // RETURN
    }

    return e_SUCCESS;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::tryWait()
{
    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, -k_AVAILABLE_INC);

    if (isDisabled(state)) {
        ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC);
        return e_DISABLED;                                           // RETURN
    }

    if (willHaveBlockedThread(state)) {
        // no resource is available; undo the optimistic decrement

        ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC);
        return e_WOULD_BLOCK;                                        // RETURN
    }

    return e_SUCCESS;
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>::wait()
{
    // optimistically consume a resource

    Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state, -k_AVAILABLE_INC);

    if (isDisabled(state)) {
        ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC);
        return e_DISABLED;                                           // RETURN
    }

    if (willHaveBlockedThread(state)) {
        return waitSlowPath(state);                                  // RETURN
    }

    return e_SUCCESS;
}

// ACCESSORS
template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bsls::SystemClockType::Enum
FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                         ::clockType() const
{
    return d_waitCondition.clockType();
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                  ::getDisabledState() const
{
    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    return static_cast<int>((state & k_DISABLED_GEN_MASK)
                                                    >> k_DISABLED_GEN_SHIFT);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
int FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                          ::getValue() const
{
    Int64 count = getValueRaw(ATOMIC_OP::getInt64Acquire(&d_state));

    return static_cast<int>(count > 0 ? count : 0);
}

template <class ATOMIC_OP, class MUTEX, class CONDITION, class THREADUTIL>
inline
bool FastPostSemaphoreImpl<ATOMIC_OP, MUTEX, CONDITION, THREADUTIL>
                                                        ::isDisabled() const
{
    Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);

    return isDisabled(state);
}
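
// A minimal usage sketch (illustrative only; the template parameters shown
// are the ones 'bslmt::FastPostSemaphore' is expected to supply):
//..
//  typedef bslmt::FastPostSemaphoreImpl<bsls::AtomicOperations,
//                                       bslmt::Mutex,
//                                       bslmt::Condition,
//                                       bslmt::ThreadUtil> Semaphore;
//
//  Semaphore sem(0);                   // no resources initially available
//
//  // producer thread:
//  sem.post();                         // make one resource available
//
//  // consumer thread:
//  if (Semaphore::e_SUCCESS != sem.wait()) {
//      // the semaphore was disabled (or the wait failed)
//  }
//..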