14 #ifndef STXXL_CONTAINERS_STACK_HEADER
15 #define STXXL_CONTAINERS_STACK_HEADER
38 template <
class ValueType,
39 unsigned BlocksPerPage = 4,
46 enum { blocks_per_page = BlocksPerPage };
48 enum { block_size = BlockSize };
60 template <
class StackConfig>
64 typedef StackConfig
cfg;
71 blocks_per_page = cfg::blocks_per_page,
72 block_size = cfg::block_size
86 std::vector<bid_type>
bids;
97 current_element(NULL),
98 cache(blocks_per_page * 2),
99 front_page(cache.begin() + blocks_per_page),
100 back_page(cache.begin()),
103 bids.reserve(blocks_per_page);
111 std::swap(m_size, obj.
m_size);
114 std::swap(cache, obj.
cache);
117 std::swap(bids, obj.
bids);
129 template <
class StackType>
133 current_element(NULL),
134 cache(blocks_per_page * 2),
135 front_page(cache.begin() + blocks_per_page),
136 back_page(cache.begin()),
139 bids.reserve(blocks_per_page);
141 StackType stack_copy = stack_;
142 size_t sz = stack_copy.size();
144 std::vector<value_type> tmp(sz);
146 for (
size_t i = 0; i < sz; ++i) {
147 tmp[sz - i - 1] = stack_copy.top();
151 for (
size_t i = 0; i < sz; ++i)
158 block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
188 return (*current_element);
196 return (*current_element);
203 assert(cache_offset <= 2 * blocks_per_page * block_type::size);
206 if (
UNLIKELY(cache_offset == 2 * blocks_per_page * block_type::size))
210 bids.resize(bids.size() + blocks_per_page);
211 typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
212 block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
216 for (
int i = 0; i < blocks_per_page; ++i, ++cur_bid)
218 requests[i] = (back_page + i)->write(*cur_bid);
221 std::swap(back_page, front_page);
223 bids.reserve(bids.size() + blocks_per_page);
225 cache_offset = blocks_per_page * block_type::size + 1;
226 current_element = &((*front_page)[0]);
231 *current_element = val;
236 current_element = element(cache_offset);
237 *current_element = val;
246 assert(cache_offset <= 2 * blocks_per_page * block_type::size);
247 assert(cache_offset > 0);
250 if (
UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
257 typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
258 for (
int i = blocks_per_page - 1; i >= 0; --i)
260 requests[i] = (front_page + i)->read(*(--cur_bid));
264 std::swap(front_page, back_page);
266 cache_offset = blocks_per_page * block_type::size;
268 current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);
272 block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
273 bids.resize(bids.size() - blocks_per_page);
280 current_element = element((--cache_offset) - 1);
288 if (offset < blocks_per_page * block_type::size)
289 return &((*(back_page + offset / block_type::size))[offset % block_type::size]);
291 unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
292 return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
301 template <
class StackConfig>
312 blocks_per_page = cfg::blocks_per_page,
313 block_size = cfg::block_size
339 current_element(NULL),
340 cache(blocks_per_page * 2),
341 cache_buffers(cache.begin()),
342 overlap_buffers(cache.begin() + blocks_per_page),
343 requests(blocks_per_page),
346 bids.reserve(blocks_per_page);
356 std::swap(m_size, obj.
m_size);
359 std::swap(cache, obj.
cache);
363 std::swap(bids, obj.
bids);
375 template <
class StackType>
379 current_element(NULL),
380 cache(blocks_per_page * 2),
381 cache_buffers(cache.begin()),
382 overlap_buffers(cache.begin() + blocks_per_page),
383 requests(blocks_per_page),
386 bids.reserve(blocks_per_page);
388 StackType stack_copy = stack_;
389 size_t sz = stack_copy.size();
391 std::vector<value_type> tmp(sz);
393 for (
size_t i = 0; i < sz; ++i)
395 tmp[sz - i - 1] = stack_copy.top();
399 for (
size_t i = 0; i < sz; ++i)
407 if (requests[0].
get())
408 wait_all(requests.begin(), blocks_per_page);
412 block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
441 return (*current_element);
449 return (*current_element);
456 assert(cache_offset <= blocks_per_page * block_type::size);
459 if (
UNLIKELY(cache_offset == blocks_per_page * block_type::size))
463 bids.resize(bids.size() + blocks_per_page);
464 typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
465 block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
467 for (
int i = 0; i < blocks_per_page; ++i, ++cur_bid)
469 if (requests[i].
get())
472 requests[i] = (cache_buffers + i)->write(*cur_bid);
475 std::swap(cache_buffers, overlap_buffers);
477 bids.reserve(bids.size() + blocks_per_page);
480 current_element = &((*cache_buffers)[0]);
483 *current_element = val;
488 current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
489 *current_element = val;
498 assert(cache_offset <= blocks_per_page * block_type::size);
499 assert(cache_offset > 0);
502 if (
UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
506 if (requests[0].
get())
507 wait_all(requests.begin(), blocks_per_page);
509 std::swap(cache_buffers, overlap_buffers);
511 if (bids.size() > blocks_per_page)
514 typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
515 for (
int i = blocks_per_page - 1; i >= 0; --i)
516 requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
519 block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
520 bids.resize(bids.size() - blocks_per_page);
522 cache_offset = blocks_per_page * block_type::size;
524 current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);
531 current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
539 template <
class StackConfig>
550 blocks_per_page = cfg::blocks_per_page,
551 block_size = cfg::block_size
583 pref_aggr(prefetch_aggressiveness),
606 pref_aggr(prefetch_aggressiveness),
607 owned_pool(new
pool_type(p_pool_, w_pool_)),
620 std::swap(m_size, obj.
m_size);
622 std::swap(cache, obj.
cache);
623 std::swap(bids, obj.
bids);
627 std::swap(pool, obj.
pool);
640 const int_type bids_size = bids.size();
643 for (i = bids_size - 1; i >= last_pref; --i)
646 pool->invalidate(bids[i]);
648 typename std::vector<bid_type>::iterator cur = bids.begin();
649 typename std::vector<bid_type>::const_iterator end = bids.end();
650 for ( ; cur != end; ++cur)
664 block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
694 assert(cache_offset <= block_type::size);
696 if (
UNLIKELY(cache_offset == block_type::size))
698 STXXL_VERBOSE2(
"grow_shrink_stack2::push(" << val <<
") growing, size: " << m_size);
700 bids.resize(bids.size() + 1);
701 typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
702 block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
703 pool->write(cache, bids.back());
704 cache = pool->steal();
705 const int_type bids_size = bids.size();
707 for (
int_type i = bids_size - 2; i >= last_pref; --i)
710 pool->invalidate(bids[i]);
714 (*cache)[cache_offset] = val;
718 assert(cache_offset > 0);
719 assert(cache_offset <= block_type::size);
727 assert(cache_offset > 0);
728 assert(cache_offset <= block_type::size);
729 return (*cache)[cache_offset - 1];
737 assert(cache_offset > 0);
738 assert(cache_offset <= block_type::size);
739 return (*cache)[cache_offset - 1];
748 assert(cache_offset > 0);
749 assert(cache_offset <= block_type::size);
750 if (
UNLIKELY(cache_offset == 1 && (!bids.empty())))
752 STXXL_VERBOSE2(
"grow_shrink_stack2::pop() shrinking, size = " << m_size);
756 pool->read(cache, last_block)->wait();
757 block_manager::get_instance()->delete_block(last_block);
759 cache_offset = block_type::size + 1;
776 if (pref_aggr > new_p)
778 const int_type bids_size = bids.size();
780 for (
int_type i = bids_size - new_p - 1; i >= last_pref; --i)
783 pool->invalidate(bids[i]);
802 const int_type bids_size = bids.size();
804 for (
int_type i = bids_size - 1; i >= last_pref; --i)
814 template <
unsigned_type CritSize,
class ExternalStack,
class InternalStack>
818 typedef typename ExternalStack::cfg
cfg;
824 blocks_per_page = cfg::blocks_per_page,
825 block_size = cfg::block_size
832 enum { critical_size = CritSize };
839 template <
class StackType>
874 bool internal()
const
876 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
877 return (int_impl != NULL);
882 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
883 return (ext_impl != NULL);
894 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
895 return (int_impl) ? int_impl->empty() : ext_impl->empty();
900 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
901 return (int_impl) ?
size_type(int_impl->size()) : ext_impl->size();
913 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
914 return (int_impl) ? int_impl->top() : ext_impl->top();
921 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
922 return (int_impl) ? int_impl->top() : ext_impl->top();
929 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
934 if (
UNLIKELY(int_impl->size() == critical_size))
950 assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
1002 unsigned BlocksPerPage = 4,
1005 class IntStackType = std::stack<ValueType>,
1032 template <
class StackConfig>
1039 template <
class StackConfig>
1046 template <
class StackConfig>
1053 template <stxxl::
unsigned_type CritSize,
class ExternalStack,
class InternalStack>
1062 #endif // !STXXL_CONTAINERS_STACK_HEADER
cfg::size_type size_type
type for sizes (64-bit)
simple_vector< block_type >::iterator front_page
void swap(normal_stack &obj)
External stack container. Introduction to stack container: see STXXL Stack tutorial. Design and Internals of stack container: see Stack. Conservative implementation. Fits best if your access pattern consists of irregularly mixed pushes and pops. For semantics of the methods see documentation of the STL std::stack. To gain full bandwidth of disks, StackConfig::BlocksPerPage must be >= the number of disks
#define STXXL_DEFAULT_BLOCK_SIZE(type)
value_type * current_element
std::vector< bid_type > bids
simple_vector< block_type > cache
A stack that migrates from internal memory to external when its size exceeds a certain threshold...
virtual ~migrating_stack()
cfg::size_type size_type
type for sizes (64-bit)
simple_vector< block_type >::iterator cache_buffers
void push(const value_type &val)
Inserts an element at the top of the stack. Postconditions: size() is incremented by 1...
value_type & top()
Return mutable reference to the element at the top of the stack. Precondition: stack is not empty()...
IF< Externality==migrating, migrating_stack< MigrCritSize, ExtStackType, IntStackType >, ExtStackType >::result MigrOrNotStackType
stack_config_generator< ValueType, BlocksPerPage, BlockSize, AllocStr, SizeType > cfg
cfg::alloc_strategy alloc_strategy_type
unsigned long long int uint64
IF< Behaviour==grow_shrink, grow_shrink_stack< cfg >, grow_shrink_stack2< cfg > >::result GrShrTp
grow_shrink_stack(const StackType &stack_)
Copy-construction from another stack of any type.
BID< block_size > bid_type
bool external() const
Returns true if current implementation is external, otherwise false.
size_type size() const
Returns the number of elements contained in the stack.
alloc_strategy_type alloc_strategy
value_type * element(unsigned_type offset)
BID< block_size > bid_type
virtual ~grow_shrink_stack2()
void push(const value_type &val)
Inserts an element at the top of the stack. Postconditions: size() is incremented by 1...
value_type & top()
Return mutable reference to the element at the top of the stack. Precondition: stack is not empty()...
cfg::alloc_strategy alloc_strategy_type
#define STXXL_DEFAULT_ALLOC_STRATEGY
size_type size() const
Returns the number of elements contained in the stack.
size_type size() const
Returns the number of elements contained in the stack.
migrating_stack()
Default constructor: creates empty stack.
#define STXXL_VERBOSE2(x)
cfg::value_type value_type
type of the elements stored in the stack
cfg::alloc_strategy alloc_strategy_type
#define STXXL_DEPRECATED(x)
typed_block< block_size, value_type > block_type
type of block used in disk-memory transfers
alloc_strategy_type alloc_strategy
void set_prefetch_aggr(unsigned_type new_p)
Sets level of prefetch aggressiveness (number of blocks from the prefetch pool used for prefetching)...
ext_stack_type * ext_impl
const value_type & top() const
Return constant reference to the element at the top of the stack. Precondition: stack is not empty()...
void pop()
Removes the element at the top of the stack. Precondition: stack is not empty(). Postcondition: size(...
IF< Externality==internal, IntStackType, MigrOrNotStackType >::result result
typed_block< block_size, value_type > block_type
type of block used in disk-memory transfers
Implements dynamically resizable buffered writing pool.
simple_vector< block_type > cache
simple_vector< block_type >::iterator back_page
const value_type & top() const
Return constant reference to the element at the top of the stack. Precondition: stack is not empty()...
Implements dynamically resizable prefetching pool.
Block containing elements of fixed length.
int_stack_type * int_impl
value_type * current_element
std::vector< bid_type > bids
#define STXXL_VERBOSE3(x)
bool empty() const
Returns true if the stack is empty.
Implements dynamically resizable buffered writing and prefetched reading pool.
choose_int_types< my_pointer_size >::int_type int_type
void swap(grow_shrink_stack2 &obj)
iterator begin()
return mutable iterator to first element
void swap(grow_shrink_stack &obj)
grow_shrink_stack2(pool_type &pool_, unsigned_type prefetch_aggressiveness=0)
Default constructor: creates empty stack. The stack will use the read_write_pool for prefetching and ...
const Type & STXXL_MAX(const Type &a, const Type &b)
std::vector< bid_type > bids
virtual ~grow_shrink_stack()
bool empty() const
Returns true if the stack is empty.
#define STXXL_BEGIN_NAMESPACE
void push(const value_type &val)
Inserts an element at the top of the stack. Postconditions: size() is incremented by 1...
read_write_pool< block_type > pool_type
void rehint()
hint the last pref_aggr external blocks.
unsigned_type get_prefetch_aggr() const
Returns number of blocks used for prefetching.
cfg::value_type value_type
type of the elements stored in the stack
void pop()
Removes the element at the top of the stack. Precondition: stack is not empty(). Postcondition: size(...
const value_type & top() const
Return constant reference to the element at the top of the stack. Precondition: stack is not empty()...
bool empty() const
Returns true if the stack is empty.
cfg::size_type size_type
type for sizes (64-bit)
const value_type & top() const
Return constant reference to the element at the top of the stack. Precondition: stack is not empty()...
normal_stack()
Default constructor: creates empty stack.
bool empty() const
Returns true if the stack is empty.
Efficient implementation that uses prefetching and overlapping using (shared) buffer pools...
size_type size() const
Returns the number of elements contained in the stack.
Simpler non-growing vector without initialization.
unsigned_type cache_offset
BID< block_size > bid_type
void wait_all(RequestIterator reqs_begin, RequestIterator reqs_end)
Collection of functions to track statuses of a number of requests.
alloc_strategy_type alloc_strategy
simple_vector< request_ptr > requests
normal_stack(const StackType &stack_)
Copy-construction from another stack of any type.
choose_int_types< my_pointer_size >::unsigned_type unsigned_type
value_type & top()
Return mutable reference to the element at the top of the stack. Precondition: stack is not empty()...
value_type & top()
Return mutable reference to the element at the top of the stack. Precondition: stack is not empty()...
unsigned_type cache_offset
InternalStack int_stack_type
Stack type generator Introduction to stack container: see STXXL Stack tutorial. Design and Internals of stack container: see Stack.
void swap(migrating_stack &obj)
IF template metaprogramming statement.
void pop()
Removes the element at the top of the stack. Precondition: stack is not empty(). Postcondition: size(...
Efficient implementation that uses prefetching and overlapping using internal buffers.
cfg::size_type size_type
type for sizes (64-bit)
ExternalStack ext_stack_type
cfg::value_type value_type
type of the elements stored in the stack
cfg::value_type value_type
type of the elements stored in the stack
simple_vector< block_type >::iterator overlap_buffers
void push(const value_type &val)
Inserts an element at the top of the stack. Postconditions: size() is incremented by 1...
typed_block< block_size, value_type > block_type
type of block used in disk-memory transfers
grow_shrink_stack()
Default constructor: creates empty stack.
#define STXXL_PRETTY_FUNCTION_NAME
void pop()
Removes the element at the top of the stack. Precondition: stack is not empty(). Postcondition: size(...
#define STXXL_END_NAMESPACE
unsigned_type cache_offset
IF< Behaviour==normal, normal_stack< cfg >, GrShrTp >::result ExtStackType