BDE 4.14.0 Production release
bdlcc_singleconsumerqueueimpl.h
1/// @file bdlcc_singleconsumerqueueimpl.h
2///
3/// The content of this file has been pre-processed for Doxygen.
4///
5
6
7// bdlcc_singleconsumerqueueimpl.h -*-C++-*-
8
9#ifndef INCLUDED_BDLCC_SINGLECONSUMERQUEUEIMPL
10#define INCLUDED_BDLCC_SINGLECONSUMERQUEUEIMPL
11
12#include <bsls_ident.h>
13BSLS_IDENT("$Id: $")
14
15/// @defgroup bdlcc_singleconsumerqueueimpl bdlcc_singleconsumerqueueimpl
16/// @brief Provide a testable thread-aware single consumer queue of values.
17/// @addtogroup bdl
18/// @{
19/// @addtogroup bdlcc
20/// @{
21/// @addtogroup bdlcc_singleconsumerqueueimpl
22/// @{
23///
24/// <h1> Outline </h1>
25/// * <a href="#bdlcc_singleconsumerqueueimpl-purpose"> Purpose</a>
26/// * <a href="#bdlcc_singleconsumerqueueimpl-classes"> Classes </a>
27/// * <a href="#bdlcc_singleconsumerqueueimpl-description"> Description </a>
28/// * <a href="#bdlcc_singleconsumerqueueimpl-allocator-requirements"> Allocator Requirements </a>
29/// * <a href="#bdlcc_singleconsumerqueueimpl-exception-safety"> Exception Safety </a>
30/// * <a href="#bdlcc_singleconsumerqueueimpl-move-semantics-in-c-03"> Move Semantics in C++03 </a>
31/// * <a href="#bdlcc_singleconsumerqueueimpl-memory-usage"> Memory Usage </a>
32/// * <a href="#bdlcc_singleconsumerqueueimpl-warning-synchronization-required-on-destruction"> WARNING: Synchronization Required on Destruction </a>
33/// * <a href="#bdlcc_singleconsumerqueueimpl-usage"> Usage </a>
34///
35/// # Purpose {#bdlcc_singleconsumerqueueimpl-purpose}
36/// Provide a testable thread-aware single consumer queue of values.
37///
38/// # Classes {#bdlcc_singleconsumerqueueimpl-classes}
39///
40/// - bdlcc::SingleConsumerQueueImpl: thread-aware single consumer `TYPE` queue
41///
42/// # Description {#bdlcc_singleconsumerqueueimpl-description}
43/// This component defines a type,
44/// `bdlcc::SingleConsumerQueueImpl`, that provides an efficient, thread-aware
45/// queue of values assuming a single consumer (the use of `popFront`,
46/// `tryPopFront`, and `removeAll` is done by one thread or a group of threads
47/// using external synchronization). The behavior of the methods `popFront`,
48/// `tryPopFront`, and `removeAll` is undefined unless the use is by a single
49/// consumer. This class is ideal for synchronization and communication between
50/// threads in a producer-consumer model when there is only one consumer thread.
51///
52/// The queue provides `pushBack` and `popFront` methods for pushing data into
53/// the queue and popping data from the queue. The queue will allocate memory
54/// as necessary to accommodate `pushBack` invocations (`pushBack` will never
55/// block and is provided for consistency with other containers). When the
56/// queue is empty, the `popFront` methods block until data appears in the
57/// queue. Non-blocking methods `tryPushBack` and `tryPopFront` are also
58/// provided. The `tryPopFront` method fails immediately, returning a non-zero
59/// value, if the queue is empty.
60///
61/// The queue may be placed into an "enqueue disabled" state using the
62/// `disablePushBack` method. When disabled, `pushBack` and `tryPushBack` fail
63/// immediately and return an error code. The queue may be restored to normal
64/// operation with the `enablePushBack` method.
65///
66/// The queue may be placed into a "dequeue disabled" state using the
67/// `disablePopFront` method. When dequeue disabled, `popFront` and
68/// `tryPopFront` fail immediately and return an error code. Any threads
69/// blocked in `popFront` when the queue becomes dequeue disabled return
70/// immediately with an error code.
71///
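/// For illustration only (this sketch is not part of the original component
/// documentation), the enqueue-disabled state might be exercised as follows,
/// assuming `queue` is an instance of this template holding `int` values:
/// ```
/// queue.disablePushBack();
/// int rc = queue.tryPushBack(1);
/// assert(0 != rc);                 // fails with `e_DISABLED`
///
/// queue.enablePushBack();
/// rc = queue.tryPushBack(1);
/// assert(0 == rc);                 // enqueueing succeeds again
/// ```
///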
72/// ## Allocator Requirements {#bdlcc_singleconsumerqueueimpl-allocator-requirements}
73///
74///
75/// Access to the allocator supplied to the constructor is internally
76/// synchronized by this component. If allocations performed by this component
77/// must be synchronized with external allocations (performed outside of this
78/// component), that synchronization must be guaranteed by the user. Using a
79/// thread-safe allocator is the common way to satisfy this requirement.
80///
81/// ## Exception Safety {#bdlcc_singleconsumerqueueimpl-exception-safety}
82///
83///
84/// A `bdlcc::SingleConsumerQueueImpl` is exception neutral, and all of the
85/// methods of `bdlcc::SingleConsumerQueueImpl` provide the basic exception
86/// safety guarantee (see @ref bsldoc_glossary ).
87///
88/// ## Move Semantics in C++03 {#bdlcc_singleconsumerqueueimpl-move-semantics-in-c-03}
89///
90///
91/// Move-only types are supported by `bdlcc::SingleConsumerQueueImpl` on C++11
92/// platforms only (where `BSLMF_MOVABLEREF_USES_RVALUE_REFERENCES` is defined),
93/// and are not supported on C++03 platforms. Unfortunately, in C++03, there
94/// are user types where a `bslmf::MovableRef` will not safely degrade to an
95/// lvalue reference when a move constructor is not available (types providing a
96/// constructor template taking any type), so `bslmf::MovableRefUtil::move`
97/// cannot be used directly on a user supplied template type. See internal bug
98/// report 99039150 for more information.
99///
100/// ## Memory Usage {#bdlcc_singleconsumerqueueimpl-memory-usage}
101///
102///
103/// `bdlcc::SingleConsumerQueueImpl` is most efficient when dealing with small
104/// objects or fundamental types (as a thread-safe container, its methods pass
105/// objects *by* *value*). We recommend large objects be stored as
106/// shared-pointers (or possibly raw pointers).
107///
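/// For example (an illustrative sketch; `MyLargeType` is a hypothetical client
/// type), a queue of large objects might be declared to hold shared pointers
/// rather than the objects themselves:
/// ```
/// bdlcc::SingleConsumerQueueImpl<bsl::shared_ptr<MyLargeType>,
///                                bsls::AtomicOperations,
///                                bslmt::Mutex,
///                                bslmt::Condition> queue;
/// ```
///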
108/// ## WARNING: Synchronization Required on Destruction {#bdlcc_singleconsumerqueueimpl-warning-synchronization-required-on-destruction}
109///
110///
111/// The behavior for the destructor is undefined unless all access or
112/// modification of the object is completed prior to its destruction. Some form
113/// of synchronization, external to the component, is required to ensure the
114/// precondition on the destructor is met. For example, if two (or more)
115/// threads are manipulating a queue, it is *not* safe to anticipate the number
116/// of elements added to the queue, and destroy that queue immediately after the
117/// last element is popped (without additional synchronization) because one of
118/// the corresponding push functions may not have completed (push may, for
119/// instance, signal waiting threads after the element is considered added to
120/// the container).
121///
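/// A minimal sketch of one shutdown sequence that satisfies this precondition
/// (illustrative only; the producer and consumer threads are assumed to be
/// managed elsewhere):
/// ```
/// queue.disablePushBack();   // subsequent pushes fail immediately
/// // ... join all producer threads ...
///
/// queue.disablePopFront();   // releases a consumer blocked in `popFront`
/// // ... join the consumer thread ...
///
/// // all access has completed, so the queue may now be destroyed
/// ```
///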
122/// ## Usage {#bdlcc_singleconsumerqueueimpl-usage}
123///
124///
125/// There is no usage example for this component since it is not meant for
126/// direct client use.
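///
/// Nevertheless, for test drivers, a minimal sketch of instantiating this
/// template with its typical policy types (see the class description below)
/// might look like the following; the parameter choices are illustrative, not
/// a documented client-level interface:
/// ```
/// bdlcc::SingleConsumerQueueImpl<int,
///                                bsls::AtomicOperations,
///                                bslmt::Mutex,
///                                bslmt::Condition> queue;
///
/// queue.pushBack(42);
///
/// int value;
/// int rc = queue.popFront(&value);
/// assert(0 == rc && 42 == value);
/// ```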
127/// @}
128/** @} */
129/** @} */
130
131/** @addtogroup bdl
132 * @{
133 */
134/** @addtogroup bdlcc
135 * @{
136 */
137/** @addtogroup bdlcc_singleconsumerqueueimpl
138 * @{
139 */
140
141#include <bdlscm_version.h>
142
143#include <bdlma_infrequentdeleteblocklist.h>
144
145#include <bslalg_scalarprimitives.h>
146
147#include <bslma_usesbslmaallocator.h>
148
149#include <bslmf_movableref.h>
150#include <bslmf_nestedtraitdeclaration.h>
151
152#include <bslmt_lockguard.h>
153#include <bslmt_threadutil.h>
154
155#include <bsls_assert.h>
156#include <bsls_objectbuffer.h>
157#include <bsls_types.h>
158
159#include <bsl_cstddef.h>
160
161
162namespace bdlcc {
163
164 // ================================================
165 // class SingleConsumerQueueImpl_MarkReclaimProctor
166 // ================================================
167
168/// This class implements a proctor that, unless its `release` method has
169/// previously been invoked, automatically invokes `markReclaim` on a `NODE`
170/// upon destruction.
171///
172/// See @ref bdlcc_singleconsumerqueueimpl
173template <class TYPE, class NODE>
174class SingleConsumerQueueImpl_MarkReclaimProctor {
175
176 // DATA
177 TYPE *d_queue_p; // managed queue owning the managed node
178 NODE *d_node_p; // managed node
179
180 // NOT IMPLEMENTED
181 SingleConsumerQueueImpl_MarkReclaimProctor();
182 SingleConsumerQueueImpl_MarkReclaimProctor(
183 const SingleConsumerQueueImpl_MarkReclaimProctor&);
184 SingleConsumerQueueImpl_MarkReclaimProctor& operator=(
185 const SingleConsumerQueueImpl_MarkReclaimProctor&);
186
187 public:
188 // CREATORS
189
190 /// Create a `markReclaim` proctor managing the specified `node` of the
191 /// specified `queue`.
192 SingleConsumerQueueImpl_MarkReclaimProctor(TYPE *queue, NODE *node);
193
194 /// Destroy this object and, if `release` has not been invoked, invoke
195 /// the managed queue's `markReclaim` method with the managed node.
196 ~SingleConsumerQueueImpl_MarkReclaimProctor();
197
198 // MANIPULATORS
199
200 /// Release from management the queue and node currently managed by this
201 /// proctor. If no queue is being managed, this method has no effect.
202 void release();
203};
204
205 // ==============================================
206 // class SingleConsumerQueueImpl_PopCompleteGuard
207 // ==============================================
208
209/// This class implements a guard that automatically invokes `popComplete`
210/// on the managed queue upon destruction.
211///
212/// See @ref bdlcc_singleconsumerqueueimpl
213template <class TYPE>
214class SingleConsumerQueueImpl_PopCompleteGuard {
215
216 // DATA
217 TYPE *d_queue_p; // managed queue
218
219 // NOT IMPLEMENTED
220 SingleConsumerQueueImpl_PopCompleteGuard();
221 SingleConsumerQueueImpl_PopCompleteGuard(
222 const SingleConsumerQueueImpl_PopCompleteGuard&);
223 SingleConsumerQueueImpl_PopCompleteGuard& operator=(
224 const SingleConsumerQueueImpl_PopCompleteGuard&);
225
226 public:
227 // CREATORS
228
229 /// Create a `popComplete` guard managing the specified `queue`.
230 explicit
231 SingleConsumerQueueImpl_PopCompleteGuard(TYPE *queue);
232
233 /// Destroy this object and invoke the `popComplete` method on the
234 /// managed queue.
235 ~SingleConsumerQueueImpl_PopCompleteGuard();
236};
237
238 // ===============================================
239 // class SingleConsumerQueueImpl_AllocateLockGuard
240 // ===============================================
241
242/// This class implements a guard that automatically invokes
243/// `releaseAllocateLock` on the managed queue upon destruction.
244///
245/// See @ref bdlcc_singleconsumerqueueimpl
246template <class TYPE>
247class SingleConsumerQueueImpl_AllocateLockGuard {
248
249 // DATA
250 TYPE *d_queue_p; // managed queue
251
252 // NOT IMPLEMENTED
253 SingleConsumerQueueImpl_AllocateLockGuard();
254 SingleConsumerQueueImpl_AllocateLockGuard(
255 const SingleConsumerQueueImpl_AllocateLockGuard&);
256 SingleConsumerQueueImpl_AllocateLockGuard& operator=(
257 const SingleConsumerQueueImpl_AllocateLockGuard&);
258
259 public:
260 // CREATORS
261
262 /// Create a `releaseAllocateLock` guard managing the specified `queue`.
263 explicit SingleConsumerQueueImpl_AllocateLockGuard(TYPE *queue);
264
265 /// Destroy this object and invoke the managed queue's
266 /// `releaseAllocateLock` method.
267 ~SingleConsumerQueueImpl_AllocateLockGuard();
268};
269
270 // =============================
271 // class SingleConsumerQueueImpl
272 // =============================
273
274/// This class provides a thread-safe unbounded queue of values that assumes
275/// a single consumer thread.
276///
277/// The types `ATOMIC_OP`, `MUTEX`, and `CONDITION` are exposed for testing.
278/// Typical usage is with `bsls::AtomicOperations` for `ATOMIC_OP`,
279/// `bslmt::Mutex` for `MUTEX`, and `bslmt::Condition` for `CONDITION`.
280///
281/// See @ref bdlcc_singleconsumerqueueimpl
282template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
283class SingleConsumerQueueImpl {
284
285 // PRIVATE CONSTANTS
286 enum {
287 // These values are used for 'd_state' in 'Node'. A node is
288 // writable at creation and after a read completes (when the producers
289 // can write to the node). A node is readable after it is written
290 // (when the node can be read by the single consumer). The states
291 // in-between these two states (e.g., writing) are not needed by this
292 // implementation of the queue.
293
294 e_READABLE, // node can be read
295 e_WRITABLE, // node can be written
296 e_WRITABLE_AND_BLOCKED, // node can be written and has blocked reader
297 e_RECLAIM // node suffered exception while being written
298 };
299
300 static const bsl::size_t k_ALLOCATION_BATCH_SIZE = 8;
301 // number of nodes to allocate during a
302 // 'pushBack' when no nodes are available for
303 // reuse
304
305 // The queue's state is maintained in 'd_state', whose bits have the
306 // following meaning (and can be manipulated using the constants below):
307 //..
308 // 63 19 18 0
309 // +-----------+--+-----+
310 // | available |a | use |
311 // +-----------+--+-----+
312 //..
313 //
314 //: * available: the number of nodes available for use, which becomes
315 //: negative when an allocation is needed
316 //:
317 //: * a (allocate): a bit indicating a thread is holding the allocation
318 //: lock
319 //:
320 //: * use: number of threads attempting to use existing nodes
321 //
322 // The 'k_*_MASK' constants define the layout of the attributes, the
323 // 'k_*_INC' constants are used to modify the 'd_state' attributes, and the
324 // 'k_*_SHIFT' constants allow recovery of the stored value.
325 //
326 // See *Implementation* *Note* for further details.
327
328 static const bsls::Types::Int64 k_ALLOCATE_MASK = 0x0000000000080000LL;
329 static const bsls::Types::Int64 k_ALLOCATE_INC = 0x0000000000080000LL;
330 static const bsls::Types::Int64 k_USE_MASK = 0x000000000007ffffLL;
331 static const bsls::Types::Int64 k_USE_INC = 0x0000000000000001LL;
332 static const bsls::Types::Int64 k_AVAILABLE_MASK = 0xfffffffffff00000LL;
333 static const bsls::Types::Int64 k_AVAILABLE_INC = 0x0000000000100000LL;
334 static const int k_AVAILABLE_SHIFT = 20;
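    // For illustration, a 'd_state' value can be decomposed using the
    // constants above as follows (the local variable names are illustrative
    // only):
    //..
    //  bsls::Types::Int64 numAvailable = state >> k_AVAILABLE_SHIFT;
    //  bool               isAllocating = 0 != (state & k_ALLOCATE_MASK);
    //  bsls::Types::Int64 numInUse     = state &  k_USE_MASK;
    //..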
335
336 // PRIVATE TYPES
337 typedef typename ATOMIC_OP::AtomicTypes::Int AtomicInt;
338 typedef typename ATOMIC_OP::AtomicTypes::Int64 AtomicInt64;
339 typedef typename ATOMIC_OP::AtomicTypes::Uint AtomicUint;
340 typedef typename ATOMIC_OP::AtomicTypes::Pointer AtomicPointer;
341
342 template <class T>
343 struct QueueNode {
344 // PUBLIC DATA
345 bsls::ObjectBuffer<T> d_value; // stored value
346 AtomicInt d_state; // 'e_READABLE', 'e_WRITABLE', etc.
347 AtomicPointer d_next; // pointer to next node
348 };
349
350 typedef QueueNode<TYPE> Node;
351 typedef bdlma::InfrequentDeleteBlockList Allocator;
352 // DATA
353 // DATA
354 AtomicPointer d_nextWrite; // pointer to next write to node
355
356 AtomicPointer d_nextRead; // pointer to next read from node
357
358 MUTEX d_readMutex; // used with 'd_readCondition' to
359 // block until an element is
360 // available for popping
361
362 CONDITION d_readCondition; // condition variable for popping
363 // thread
364
365 MUTEX d_writeMutex; // during allocation, used to
366 // synchronize threads access to
367 // 'd_nextWrite'
368
369 AtomicInt64 d_capacity; // capacity of this queue
370
371 mutable MUTEX d_emptyMutex; // blocking point for consumer
372 // during 'waitUntilEmpty'
373
374 mutable CONDITION d_emptyCondition; // condition variable for consumer
375 // during 'waitUntilEmpty'
376
377 AtomicInt64 d_state; // bit pattern representing the
378 // state of the queue (see
379 // implementation notes)
380
381 AtomicUint d_popFrontDisabled; // is queue pop disabled and
382 // generation count; see
383 // *Implementation* *Note*
384
385 AtomicUint d_pushBackDisabled; // is queue push disabled and
386 // generation count; see
387 // *Implementation* *Note*
388
389 Allocator d_allocator; // allocator
390
391 // FRIENDS
392 friend class SingleConsumerQueueImpl_MarkReclaimProctor<
393 SingleConsumerQueueImpl<TYPE,
394 ATOMIC_OP,
395 MUTEX,
396 CONDITION>,
397 typename SingleConsumerQueueImpl<TYPE,
398 ATOMIC_OP,
399 MUTEX,
400 CONDITION>::Node >;
401
402 friend class SingleConsumerQueueImpl_PopCompleteGuard<
403 SingleConsumerQueueImpl<TYPE,
404 ATOMIC_OP,
405 MUTEX,
406 CONDITION> >;
407
408 friend class SingleConsumerQueueImpl_AllocateLockGuard<
409 SingleConsumerQueueImpl<TYPE,
410 ATOMIC_OP,
411 MUTEX,
412 CONDITION> >;
413
414 // PRIVATE CLASS METHODS
415
416 /// Return the available attribute from the specified `state`.
417 static bsls::Types::Int64 available(bsls::Types::Int64 state);
418
419 // PRIVATE MANIPULATORS
420
421 /// If the specified `value` does not have its lowest-order bit set to
422 /// the value of the specified `bitValue`, increment `value` until it
423 /// does. Note that this method is used to modify the generation counts
424 /// stored in `d_popFrontDisabled` and `d_pushBackDisabled`. See
425 /// *Implementation* *Note* for further details.
426 void incrementUntil(AtomicUint *value, unsigned int bitValue);
427
428 /// Mark the specified `node` as a node to be reclaimed.
429 void markReclaim(Node *node);
430
431 /// If the specified `destruct` is true, destruct the value stored in
432 /// `d_nextRead`. Mark `d_nextRead` writable, and if the queue is empty
433 /// then signal the queue empty condition. This method is used to
434 /// complete the reclamation of a node in the presence of an exception.
435 void popComplete(bool destruct);
436
437 /// Return a pointer to the node to assign the value being pushed into
438 /// this queue, or 0 if `isPushBackDisabled()`.
439 Node *pushBackHelper();
440
441 /// Remove the allocation lock indicator from `d_state`. This method is
442 /// intended to be used to remove the allocation lock indicator from
443 /// `d_state` when there is an exception during allocation and the
444 /// locked state is set (i.e., `pushBackHelper`).
445 void releaseAllocateLock();
446
447 // NOT IMPLEMENTED
448 SingleConsumerQueueImpl(const SingleConsumerQueueImpl&);
449 SingleConsumerQueueImpl& operator=(const SingleConsumerQueueImpl&);
450
451 public:
452 // TRAITS
453 BSLMF_NESTED_TRAIT_DECLARATION(SingleConsumerQueueImpl,
454 bslma::UsesBslmaAllocator);
455
456 // PUBLIC TYPES
457 typedef TYPE value_type; // The type for elements.
458
459 // PUBLIC CONSTANTS
460 enum {
461 e_SUCCESS = 0, // must be 0
462 e_EMPTY = -1,
463 e_DISABLED = -2
464 };
465
466 // CREATORS
467
468 /// Create a thread-aware queue. Optionally specify a `basicAllocator`
469 /// used to supply memory. If `basicAllocator` is 0, the currently
470 /// installed default allocator is used.
471 explicit
472 SingleConsumerQueueImpl(bslma::Allocator *basicAllocator = 0);
473
474 /// Create a thread-aware queue with, at least, the specified
475 /// `capacity`. Optionally specify a `basicAllocator` used to supply
476 /// memory. If `basicAllocator` is 0, the currently installed default
477 /// allocator is used.
478 explicit
479 SingleConsumerQueueImpl(bsl::size_t capacity,
480 bslma::Allocator *basicAllocator = 0);
481
482 /// Destroy this container. The behavior is undefined unless all access
483 /// or modification of the container has completed prior to this call.
484 ~SingleConsumerQueueImpl();
485
486 // MANIPULATORS
487
488 /// Remove the element from the front of this queue and load that
489 /// element into the specified `value`. If the queue is empty, block
490 /// until it is not empty. Return 0 on success, and a non-zero value
491 /// otherwise. Specifically, return `e_DISABLED` if
492 /// `isPopFrontDisabled()`. On failure, `value` is not changed.
493 /// Threads blocked due to the queue being empty will return
494 /// `e_DISABLED` if `disablePopFront` is invoked. The behavior is
495 /// undefined unless the invoker of this method is the single consumer.
496 int popFront(TYPE *value);
497
498 /// Append the specified `value` to the back of this queue. Return 0 on
499 /// success, and a non-zero value otherwise. Specifically, return
500 /// `e_DISABLED` if `isPushBackDisabled()`.
501 int pushBack(const TYPE& value);
502
503 /// Append the specified move-insertable `value` to the back of this
504 /// queue. `value` is left in a valid but unspecified state. Return 0
505 /// on success, and a non-zero value otherwise. Specifically, return
506 /// `e_DISABLED` if `isPushBackDisabled()`. On failure, `value` is not
507 /// changed.
508 int pushBack(bslmf::MovableRef<TYPE> value);
509
510 /// Remove all items currently in this queue. Note that this operation
511 /// is not atomic; if other threads are concurrently pushing items into
512 /// the queue the result of `numElements()` after this function returns
513 /// is not guaranteed to be 0. The behavior is undefined unless the
514 /// invoker of this method is the single consumer.
515 void removeAll();
516
517 /// Attempt to remove the element from the front of this queue without
518 /// blocking, and, if successful, load the specified `value` with the
519 /// removed element. Return 0 on success, and a non-zero value
520 /// otherwise. Specifically, return `e_DISABLED` if
521 /// `isPopFrontDisabled()`, and `e_EMPTY` if `!isPopFrontDisabled()` and
522 /// the queue was empty. On failure, `value` is not changed. The
523 /// behavior is undefined unless the invoker of this method is the
524 /// single consumer.
525 int tryPopFront(TYPE *value);
526
527 /// Append the specified `value` to the back of this queue. Return 0 on
528 /// success, and a non-zero value otherwise. Specifically, return
529 /// `e_DISABLED` if `isPushBackDisabled()`.
530 int tryPushBack(const TYPE& value);
531
532 /// Append the specified move-insertable `value` to the back of this
533 /// queue. `value` is left in a valid but unspecified state. Return 0
534 /// on success, and a non-zero value otherwise. Specifically, return
535 /// `e_DISABLED` if `isPushBackDisabled()`. On failure, `value` is not
536 /// changed.
537 int tryPushBack(bslmf::MovableRef<TYPE> value);
538
539 // Enqueue/Dequeue State
540
541 /// Disable dequeueing from this queue. All subsequent invocations of
542 /// `popFront` or `tryPopFront` will fail immediately. All blocked
543 /// invocations of `popFront` and `waitUntilEmpty` will fail
544 /// immediately. If the queue is already dequeue disabled, this method
545 /// has no effect.
546 void disablePopFront();
547
548 /// Disable enqueueing into this queue. All subsequent invocations of
549 /// `pushBack` or `tryPushBack` will fail immediately. All blocked
550 /// invocations of `pushBack` will fail immediately. If the queue is
551 /// already enqueue disabled, this method has no effect.
552 void disablePushBack();
553
554 /// Enable dequeueing. If the queue is not dequeue disabled, this call
555 /// has no effect.
556 void enablePopFront();
557
558 /// Enable enqueueing. If the queue is not enqueue disabled, this call has
559 /// no effect.
560 void enablePushBack();
561
562 // ACCESSORS
563
564 /// Return `true` if this queue is empty (has no elements), or `false`
565 /// otherwise.
566 bool isEmpty() const;
567
568 /// Return `true` if this queue is full (has no available capacity), or
569 /// `false` otherwise. Note that for unbounded queues, this method
570 /// always returns `false`.
571 bool isFull() const;
572
573 /// Return `true` if this queue is dequeue disabled, and `false`
574 /// otherwise. Note that the queue is created in the "dequeue enabled"
575 /// state.
576 bool isPopFrontDisabled() const;
577
578 /// Return `true` if this queue is enqueue disabled, and `false`
579 /// otherwise. Note that the queue is created in the "enqueue enabled"
580 /// state.
581 bool isPushBackDisabled() const;
582
583 /// Return the number of elements currently in this queue.
584 bsl::size_t numElements() const;
585
586 /// Block until all the elements in this queue are removed. Return 0 on
587 /// success, and a non-zero value otherwise. Specifically, return
588 /// `e_DISABLED` if `!isEmpty() && isPopFrontDisabled()`. A blocked
589 /// thread waiting for the queue to empty will return `e_DISABLED` if
590 /// `disablePopFront` is invoked.
591 int waitUntilEmpty() const;
592
593 // Aspects
594
595 /// Return the allocator used by this object to supply memory.
596 bslma::Allocator *allocator() const;
597};
598
599// ============================================================================
600// INLINE DEFINITIONS
601// ============================================================================
602
603 // ------------------------------------------------
604 // class SingleConsumerQueueImpl_MarkReclaimProctor
605 // ------------------------------------------------
606
607// CREATORS
608template <class TYPE, class NODE>
609SingleConsumerQueueImpl_MarkReclaimProctor<TYPE, NODE>::
610 SingleConsumerQueueImpl_MarkReclaimProctor(TYPE *queue, NODE *node)
611: d_queue_p(queue)
612, d_node_p(node)
613{
614}
615
616template <class TYPE, class NODE>
617SingleConsumerQueueImpl_MarkReclaimProctor<TYPE, NODE>::
618 ~SingleConsumerQueueImpl_MarkReclaimProctor()
619{
620 if (d_queue_p) {
621 d_queue_p->markReclaim(d_node_p);
622 }
623}
624
625// MANIPULATORS
626template <class TYPE, class NODE>
627void SingleConsumerQueueImpl_MarkReclaimProctor<TYPE, NODE>::release()
628{
629 d_queue_p = 0;
630}
631
632 // ----------------------------------------------
633 // class SingleConsumerQueueImpl_PopCompleteGuard
634 // ----------------------------------------------
635
636// CREATORS
637template <class TYPE>
638SingleConsumerQueueImpl_PopCompleteGuard<TYPE>::
639 SingleConsumerQueueImpl_PopCompleteGuard(TYPE *queue)
640: d_queue_p(queue)
641{
642}
643
644template <class TYPE>
645SingleConsumerQueueImpl_PopCompleteGuard<TYPE>::
646 ~SingleConsumerQueueImpl_PopCompleteGuard()
647{
648 d_queue_p->popComplete(true);
649}
650
651 // -----------------------------------------------
652 // class SingleConsumerQueueImpl_AllocateLockGuard
653 // -----------------------------------------------
654
655// CREATORS
656template <class TYPE>
657SingleConsumerQueueImpl_AllocateLockGuard<TYPE>::
658 SingleConsumerQueueImpl_AllocateLockGuard(TYPE *queue)
659: d_queue_p(queue)
660{
661}
662
663template <class TYPE>
664SingleConsumerQueueImpl_AllocateLockGuard<TYPE>::
665 ~SingleConsumerQueueImpl_AllocateLockGuard()
666{
667 d_queue_p->releaseAllocateLock();
668}
669
670 // -----------------------------
671 // class SingleConsumerQueueImpl
672 // -----------------------------
673
674// PRIVATE CLASS METHODS
675template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
676bsls::Types::Int64 SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
677 ::available(bsls::Types::Int64 state)
678{
679 return state >> k_AVAILABLE_SHIFT;
680}
681
682// PRIVATE MANIPULATORS
683template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
684void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
685 ::incrementUntil(AtomicUint *value, unsigned int bitValue)
686{
687 unsigned int state = ATOMIC_OP::getUintAcquire(value);
688 if (bitValue != (state & 1)) {
689 unsigned int expState;
690 do {
691 expState = state;
692 state = ATOMIC_OP::testAndSwapUintAcqRel(value,
693 state,
694 state + 1);
695 } while (state != expState && (bitValue == (state & 1)));
696 }
697}
698
699template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
700void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
701 ::markReclaim(Node *node)
702{
703 // A reclaimed node is used to denote an exception occurred and the node is
704 // currently invalid. A reclaimed node is considered removed and not
705 // counted as part of the capacity of the queue (thus ensuring
706 // 'numElements' is correct).
707
708 ATOMIC_OP::addInt64AcqRel(&d_capacity, -1);
709
710 int nodeState = ATOMIC_OP::swapIntAcqRel(&node->d_state, e_RECLAIM);
711 if (e_WRITABLE_AND_BLOCKED == nodeState) {
712 {
713 bslmt::LockGuard<MUTEX> guard(&d_readMutex);
714 }
715 d_readCondition.signal();
716 }
717}
718
719template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
720void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
721 ::popComplete(bool destruct)
722{
723 Node *nextRead =
724 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
725
726 if (destruct) {
727 nextRead->d_value.object().~TYPE();
728 }
729
730 ATOMIC_OP::setIntRelease(&nextRead->d_state, e_WRITABLE);
731
732 ATOMIC_OP::setPtrRelease(&d_nextRead,
733 ATOMIC_OP::getPtrAcquire(&nextRead->d_next));
734
735 bsls::Types::Int64 state = ATOMIC_OP::addInt64NvAcqRel(&d_state,
736 k_AVAILABLE_INC);
737
738 if (ATOMIC_OP::getInt64Acquire(&d_capacity) == available(state)) {
739 {
740 bslmt::LockGuard<MUTEX> guard(&d_emptyMutex);
741 }
742 d_emptyCondition.broadcast();
743 }
744}
745
746template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
747typename SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::Node *
748 SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
749 ::pushBackHelper()
750{
751 if (1 == (ATOMIC_OP::getUintAcquire(&d_pushBackDisabled) & 1)) {
752 return 0; // RETURN
753 }
754
755 // Fast path requires an available node ('-k_AVAILABLE_INC') and needs to
756 // indicate a thread is intending to use an existing node ('k_USE_INC').
757
758 bsls::Types::Int64 state = ATOMIC_OP::addInt64NvAcqRel(
759 &d_state,
760 k_USE_INC - k_AVAILABLE_INC);
761
762 if (state < 0 || 0 != (state & k_ALLOCATE_MASK)) {
763 // The determination to use an existing node was premature, undo the
764 // indication.
765
766 state = ATOMIC_OP::addInt64NvAcqRel(&d_state,
767 k_AVAILABLE_INC - k_USE_INC);
768
769 bsls::Types::Int64 expState;
770
771 do {
772 expState = state;
773 if (state >= k_AVAILABLE_INC && 0 == (state & k_ALLOCATE_MASK)) {
774 // There are now sufficient available nodes to reserve one. This
775 // can be due to an allocation completing or a node being made
776 // available by the consumer.
777
778 state = ATOMIC_OP::testAndSwapInt64AcqRel(
779 &d_state,
780 state,
781 state + k_USE_INC - k_AVAILABLE_INC);
782 }
783 else if ( 0 == ( (state >> k_AVAILABLE_SHIFT)
784 + (state & k_USE_MASK))
785 && 0 == (state & k_ALLOCATE_MASK)) {
786 // '-AVAILABLE == USE' indicates all threads will wait for the
787 // allocation, so attempt to become the allocating thread.
788 // Note that 'AVAILABLE < 0 && -AVAILABLE != USE' indicates the
789 // temporary state where threads are still completing the
790 // re-use of an existing node or there are available nodes to
791 // be re-used (nodes were allocated or made available by the
792 // consumer). If threads are still completing the re-use of an
793 // existing node, ownership of 'd_nextWrite' can not be
794 // guaranteed by setting the allocation lock.
795
796 state = ATOMIC_OP::testAndSwapInt64AcqRel(
797 &d_state,
798 state,
799 state + k_ALLOCATE_INC);
800 if (expState == state) {
801 // This thread is the only thread accessing 'd_nextWrite'.
802 // Allocate new nodes and insert them. The variables 'a'
803 // and 'b' in the below are pointers to 'Node' as per the
804 // following diagram.
805 //..
806 // d_nextWrite
807 // |
808 // V
809 // +---+ +---+
810 // --> | | --> | | -->
811 // +---+ +---+
812 // ^ ^
813 // | |
814 // a b
815 //..
816
817 SingleConsumerQueueImpl_AllocateLockGuard<
818 SingleConsumerQueueImpl<TYPE,
819 ATOMIC_OP,
820 MUTEX,
821 CONDITION> > guard(this);
822
823 Node *a = static_cast<Node *>(
824 ATOMIC_OP::getPtrAcquire(&d_nextWrite));
825
826 Node *b = static_cast<Node *>(
827 ATOMIC_OP::getPtrAcquire(&a->d_next));
828
829 Node *nodes = static_cast<Node *>(
830 d_allocator.allocate( sizeof(Node)
831 * k_ALLOCATION_BATCH_SIZE));
832 for (bsl::size_t i = 0;
833 i < k_ALLOCATION_BATCH_SIZE - 1;
834 ++i) {
835 Node *n = nodes + i;
836 ATOMIC_OP::initInt(&n->d_state, e_WRITABLE);
837 ATOMIC_OP::initPointer(&n->d_next, n + 1);
838 }
839 {
840 Node *n = nodes + k_ALLOCATION_BATCH_SIZE - 1;
841 ATOMIC_OP::initInt(&n->d_state, e_WRITABLE);
842 ATOMIC_OP::initPointer(&n->d_next, b);
843 }
844
845 ATOMIC_OP::setPtrRelease(&a->d_next, nodes);
846 ATOMIC_OP::setPtrRelease(&d_nextWrite, nodes);
847
848 ATOMIC_OP::addInt64AcqRel(&d_capacity,
849 k_ALLOCATION_BATCH_SIZE);
850
851 // Reserve one node for this thread and make the other
852 // nodes available to other threads.
853
854 ATOMIC_OP::addInt64AcqRel(
855 &d_state,
856 k_AVAILABLE_INC * (k_ALLOCATION_BATCH_SIZE - 1));
857
858 return a; // RETURN
859 }
860
861 expState = ~state; // cause the 'while' to fail and remain in
862 // this loop
863 }
864 else {
865 bslmt::ThreadUtil::yield();
866 state = ATOMIC_OP::getInt64Acquire(&d_state);
867
868 expState = ~state; // cause the 'while' to fail and remain in
869 // this loop
870 }
871 } while (state != expState);
872 }
873
874 Node *nextWrite =
875 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextWrite));
876
877 Node *expNextWrite;
878 do {
879 expNextWrite = nextWrite;
880 Node *next = static_cast<Node *>(ATOMIC_OP::getPtrAcquire(
881 &nextWrite->d_next));
882
883 nextWrite = static_cast<Node *>(ATOMIC_OP::testAndSwapPtrAcqRel(
884 &d_nextWrite,
885 nextWrite,
886 next));
887 } while (nextWrite != expNextWrite);
888
889 ATOMIC_OP::addInt64AcqRel(&d_state, -k_USE_INC);
890
891 return nextWrite;
892}
893
894template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
895inline
896void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
897 ::releaseAllocateLock()
898{
899 ATOMIC_OP::addInt64AcqRel(&d_state, -k_ALLOCATE_INC);
900}
901
902// CREATORS
903template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
904SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::
905 SingleConsumerQueueImpl(bslma::Allocator *basicAllocator)
906: d_readMutex()
907, d_readCondition()
908, d_writeMutex()
909, d_emptyMutex()
910, d_emptyCondition()
911, d_allocator(basicAllocator)
912{
913 ATOMIC_OP::initInt64(&d_capacity, 0);
914 ATOMIC_OP::initInt64(&d_state, 0);
915
916 ATOMIC_OP::initUint(&d_popFrontDisabled, 0);
917 ATOMIC_OP::initUint(&d_pushBackDisabled, 0);
918
919 ATOMIC_OP::initPointer(&d_nextWrite, 0);
920
921 Node *n = static_cast<Node *>(d_allocator.allocate(sizeof(Node)));
922 ATOMIC_OP::initInt(&n->d_state, e_WRITABLE);
923 ATOMIC_OP::initPointer(&n->d_next, n);
924
925 ATOMIC_OP::setPtrRelease(&d_nextWrite, n);
926 ATOMIC_OP::setPtrRelease(&d_nextRead, n);
927}
928
929template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
930SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::
931 SingleConsumerQueueImpl(bsl::size_t capacity,
932 bslma::Allocator *basicAllocator)
933: d_readMutex()
934, d_readCondition()
935, d_writeMutex()
936, d_emptyMutex()
937, d_emptyCondition()
938, d_allocator(basicAllocator)
939{
940 ATOMIC_OP::initInt64(&d_capacity, 0);
941 ATOMIC_OP::initInt64(&d_state, 0);
942
943 ATOMIC_OP::initUint(&d_popFrontDisabled, 0);
944 ATOMIC_OP::initUint(&d_pushBackDisabled, 0);
945
946 ATOMIC_OP::initPointer(&d_nextWrite, 0);
947
948 Node *nodes = static_cast<Node *>(d_allocator.allocate( sizeof(Node)
949 * (capacity + 1)));
950 for (bsl::size_t i = 0; i < capacity; ++i) {
951 Node *n = nodes + i;
952 ATOMIC_OP::initInt(&n->d_state, e_WRITABLE);
953 ATOMIC_OP::initPointer(&n->d_next, n + 1);
954 }
955 {
956 Node *n = nodes + capacity;
957 ATOMIC_OP::initInt(&n->d_state, e_WRITABLE);
958 ATOMIC_OP::initPointer(&n->d_next, nodes);
959 }
960
961 ATOMIC_OP::setPtrRelease(&d_nextWrite, nodes);
962 ATOMIC_OP::setPtrRelease(&d_nextRead, nodes);
963
964 ATOMIC_OP::addInt64AcqRel(&d_capacity, capacity);
965 ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC * capacity);
966}
967
968template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
969SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::
970 ~SingleConsumerQueueImpl()
971{
972 Node *end = static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextWrite));
973
974 if (end) {
975 Node *at = static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&end->d_next));
976
977 while (at != end) {
978 Node *next =
979 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&at->d_next));
980
981 if (e_READABLE == ATOMIC_OP::getIntAcquire(&at->d_state)) {
982 at->d_value.object().~TYPE();
983 }
984
985 at = next;
986 }
987
988 if (e_READABLE == ATOMIC_OP::getIntAcquire(&at->d_state)) {
989 at->d_value.object().~TYPE();
990 }
991 }
992}
993
994// MANIPULATORS
995template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
996int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::popFront(
997 TYPE *value)
998{
999 unsigned int generation = ATOMIC_OP::getUintAcquire(&d_popFrontDisabled);
1000 if (1 == (generation & 1)) {
1001 return e_DISABLED; // RETURN
1002 }
1003
1004 Node *nextRead =
1005 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
1006 int nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1007 do {
1008 // Note that 'e_WRITABLE_AND_BLOCKED != nodeState' since if the one
1009 // consumer sets this state, the one consumer waits until the node is
1010 // readable, and either the producer that signalled the consumer
1011 // changed the node state already, or the consumer will change the node
1012 // state in 'popComplete'.
1013
1014 if (e_WRITABLE == nodeState) {
1015 bslmt::ThreadUtil::yield();
1016 nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1017 if (e_WRITABLE == nodeState) {
1018 bslmt::LockGuard<MUTEX> guard(&d_readMutex);
1019 nodeState = ATOMIC_OP::swapIntAcqRel(&nextRead->d_state,
1020 e_WRITABLE_AND_BLOCKED);
1021 while (e_READABLE != nodeState && e_RECLAIM != nodeState) {
1022 if (generation !=
1023 ATOMIC_OP::getUintAcquire(&d_popFrontDisabled)) {
1024 return e_DISABLED; // RETURN
1025 }
1026 d_readCondition.wait(&d_readMutex);
1027 nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1028 }
1029 }
1030 }
1031 if (e_RECLAIM == nodeState) {
1032 ATOMIC_OP::addInt64AcqRel(&d_capacity, 1);
1033 popComplete(false);
1034 nextRead =
1035 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
1036 nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1037 }
1038 } while (e_RECLAIM == nodeState);
1039
1040 SingleConsumerQueueImpl_PopCompleteGuard<
1041 SingleConsumerQueueImpl<TYPE,
1042 ATOMIC_OP,
1043 MUTEX,
1044 CONDITION> > guard(this);
1045
1046#if defined(BSLMF_MOVABLEREF_USES_RVALUE_REFERENCES)
1047 *value = bslmf::MovableRefUtil::move(nextRead->d_value.object());
1048#else
1049 *value = nextRead->d_value.object();
1050#endif
1051
1052 return 0;
1053}
1054
1055template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1056int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::pushBack(
1057 const TYPE& value)
1058{
1059 Node *target = pushBackHelper();
1060
1061 if (0 == target) {
1062 return e_DISABLED; // RETURN
1063 }
1064
1065 SingleConsumerQueueImpl_MarkReclaimProctor<
1066 SingleConsumerQueueImpl<TYPE,
1067 ATOMIC_OP,
1068 MUTEX,
1069 CONDITION>,
1070 Node> proctor(this, target);
1071
1072 bslalg::ScalarPrimitives::copyConstruct(target->d_value.address(),
1073 value,
1074 allocator());
1075
1076 proctor.release();
1077
1078 int nodeState = ATOMIC_OP::swapIntAcqRel(&target->d_state, e_READABLE);
1079 if (e_WRITABLE_AND_BLOCKED == nodeState) {
1080 {
1081 bslmt::LockGuard<MUTEX> guard(&d_readMutex);
1082 }
1083 d_readCondition.signal();
1084 }
1085
1086 return 0;
1087}
1088
1089template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1090int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::pushBack(
1091 bslmf::MovableRef<TYPE> value)
1092{
1093 Node *target = pushBackHelper();
1094
1095 if (0 == target) {
1096 return e_DISABLED; // RETURN
1097 }
1098
1099 SingleConsumerQueueImpl_MarkReclaimProctor<
1100 SingleConsumerQueueImpl<TYPE,
1101 ATOMIC_OP,
1102 MUTEX,
1103 CONDITION>,
1104 Node> proctor(this, target);
1105
1106 TYPE& dummy = value;
1107 bslalg::ScalarPrimitives::moveConstruct(target->d_value.address(),
1108 dummy,
1109 allocator());
1110
1111 proctor.release();
1112
1113 int nodeState = ATOMIC_OP::swapIntAcqRel(&target->d_state, e_READABLE);
1114 if (e_WRITABLE_AND_BLOCKED == nodeState) {
1115 {
1116 bslmt::LockGuard<MUTEX> guard(&d_readMutex);
1117 }
1118 d_readCondition.signal();
1119 }
1120
1121 return 0;
1122}
1123
1124template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1125void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::removeAll()
1126{
1127 int count = 0;
1128
1129 bsls::Types::Int64 reclaim = 0;
1130
1131 Node *nextRead =
1132 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
1133 int nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1134 while (e_READABLE == nodeState || e_RECLAIM == nodeState) {
1135 if (e_READABLE == nodeState) {
1136 nextRead->d_value.object().~TYPE();
1137 }
1138 else {
1139 ++reclaim;
1140 }
1141 ATOMIC_OP::setIntRelease(&nextRead->d_state, e_WRITABLE);
1142 nextRead =
1143 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&nextRead->d_next));
1144 ATOMIC_OP::setPtrRelease(&d_nextRead, nextRead);
1145 nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1146 ++count;
1147 }
1148
1149 ATOMIC_OP::addInt64AcqRel(&d_capacity, reclaim);
1150 ATOMIC_OP::addInt64AcqRel(&d_state, k_AVAILABLE_INC * count);
1151
1152 {
1153 bslmt::LockGuard<MUTEX> guard(&d_emptyMutex);
1154 }
1155 d_emptyCondition.broadcast();
1156}
1157
1158template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1159int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::tryPopFront(
1160 TYPE *value)
1161{
1162 unsigned int generation = ATOMIC_OP::getUintAcquire(&d_popFrontDisabled);
1163 if (1 == (generation & 1)) {
1164 return e_DISABLED; // RETURN
1165 }
1166
1167 Node *nextRead =
1168 static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
1169 int nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1170
1171 while (e_RECLAIM == nodeState) {
1172 ATOMIC_OP::addInt64AcqRel(&d_capacity, 1);
1173 popComplete(false);
1174 nextRead = static_cast<Node *>(ATOMIC_OP::getPtrAcquire(&d_nextRead));
1175 nodeState = ATOMIC_OP::getIntAcquire(&nextRead->d_state);
1176 }
1177
1178 if (e_READABLE != nodeState) {
1179 return e_EMPTY; // RETURN
1180 }
1181
1182 SingleConsumerQueueImpl_PopCompleteGuard<
1183 SingleConsumerQueueImpl<TYPE,
1184 ATOMIC_OP,
1185 MUTEX,
1186 CONDITION> > guard(this);
1187
1188#if defined(BSLMF_MOVABLEREF_USES_RVALUE_REFERENCES)
1189 *value = bslmf::MovableRefUtil::move(nextRead->d_value.object());
1190#else
1191 *value = nextRead->d_value.object();
1192#endif
1193
1194 return 0;
1195}
1196
1197template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1198int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::tryPushBack(
1199 const TYPE& value)
1200{
1201 return pushBack(value);
1202}
1203
1204template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1205int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::tryPushBack(
1206 bslmf::MovableRef<TYPE> value)
1207{
1208 return pushBack(bslmf::MovableRefUtil::move(value));
1209}
1210
1211 // Enqueue/Dequeue State
1212
1213template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1214void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1215 ::disablePopFront()
1216{
1217 incrementUntil(&d_popFrontDisabled, 1);
1218
1219 {
1220 bslmt::LockGuard<MUTEX> guard(&d_readMutex);
1221 }
1222 d_readCondition.signal();
1223
1224 {
1225 bslmt::LockGuard<MUTEX> guard(&d_emptyMutex);
1226 }
1227 d_emptyCondition.broadcast();
1228}
1229
1230template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1231void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1232 ::disablePushBack()
1233{
1234 incrementUntil(&d_pushBackDisabled, 1);
1235}
1236
1237template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1238void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1239 ::enablePopFront()
1240{
1241 incrementUntil(&d_popFrontDisabled, 0);
1242}
1243
1244template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1245void SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1246 ::enablePushBack()
1247{
1248 incrementUntil(&d_pushBackDisabled, 0);
1249}
1250
1251// ACCESSORS
1252template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1253bool SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1254 ::isEmpty() const
1255{
1256 return ATOMIC_OP::getInt64Acquire(&d_capacity) ==
1257 available(ATOMIC_OP::getInt64Acquire(&d_state));
1258}
1259
1260template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1261bool SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::isFull() const
1262{
1263 return false;
1264}
1265
1266template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1267bool SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1268 ::isPopFrontDisabled() const
1269{
1270 return 1 == (ATOMIC_OP::getUintAcquire(&d_popFrontDisabled) & 1);
1271}
1272
1273template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1274bool SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1275 ::isPushBackDisabled() const
1276{
1277 return 1 == (ATOMIC_OP::getUintAcquire(&d_pushBackDisabled) & 1);
1278}
1279
1280template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1281bsl::size_t SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1282 ::numElements() const
1283{
1284 bsls::Types::Int64 avail = available(ATOMIC_OP::getInt64Acquire(&d_state));
1285 return static_cast<bsl::size_t>(
1286 avail > 0
1287 ? ATOMIC_OP::getInt64Acquire(&d_capacity) - avail
1288 : ATOMIC_OP::getInt64Acquire(&d_capacity));
1289}
1290
1291template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1292int SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>
1293 ::waitUntilEmpty() const
1294{
1295 unsigned int generation = ATOMIC_OP::getUintAcquire(&d_popFrontDisabled);
1296 if (1 == (generation & 1)) {
1297 return e_DISABLED; // RETURN
1298 }
1299
1300 bslmt::LockGuard<MUTEX> guard(&d_emptyMutex);
1301
1302 bsls::Types::Int64 state = ATOMIC_OP::getInt64Acquire(&d_state);
1303 while (ATOMIC_OP::getInt64Acquire(&d_capacity) != available(state)) {
1304 if (generation != ATOMIC_OP::getUintAcquire(&d_popFrontDisabled)) {
1305 return e_DISABLED; // RETURN
1306 }
1307 d_emptyCondition.wait(&d_emptyMutex);
1308 state = ATOMIC_OP::getInt64Acquire(&d_state);
1309 }
1310
1311 return 0;
1312}
1313
1314 // Aspects
1315
1316template <class TYPE, class ATOMIC_OP, class MUTEX, class CONDITION>
1317bslma::Allocator *SingleConsumerQueueImpl<TYPE, ATOMIC_OP, MUTEX, CONDITION>::
1318 allocator() const
1319{
1320 return d_allocator.allocator();
1321}
1322
1323} // close package namespace
1324
1325
1326#endif
1327
1328// ----------------------------------------------------------------------------
1329// Copyright 2019 Bloomberg Finance L.P.
1330//
1331// Licensed under the Apache License, Version 2.0 (the "License");
1332// you may not use this file except in compliance with the License.
1333// You may obtain a copy of the License at
1334//
1335// http://www.apache.org/licenses/LICENSE-2.0
1336//
1337// Unless required by applicable law or agreed to in writing, software
1338// distributed under the License is distributed on an "AS IS" BASIS,
1339// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1340// See the License for the specific language governing permissions and
1341// limitations under the License.
1342// ----------------------------- END-OF-FILE ----------------------------------
1343
1344/** @} */
1345/** @} */
1346/** @} */