#ifndef STXXL_STACK_HEADER
#define STXXL_STACK_HEADER

#include <stack>
#include <vector>

#include <stxxl/bits/deprecated.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/mng.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/common/simple_vector.h>
#include <stxxl/bits/common/tmeta.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/write_pool.h>
#include <stxxl/bits/mng/prefetch_pool.h>
__STXXL_BEGIN_NAMESPACE
template <
    class ValTp,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64>
struct stack_config_generator
{
    typedef ValTp value_type;
    enum { blocks_per_page = BlocksPerPage };
    typedef AllocStr alloc_strategy;
    enum { block_size = BlkSz };
    typedef SzTp size_type;
};
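
// Illustrative usage sketch (not part of the original header): a config type
// generated here is passed as the single template argument of one of the
// stack implementations below, e.g.
//
//     typedef stxxl::stack_config_generator<int, 4> my_cfg;
//     stxxl::normal_stack<my_cfg> my_stack;
//
// Most client code uses the STACK_GENERATOR metafunction at the end of this
// header instead of instantiating a config directly.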
//! \brief External stack container.
template <class Config_>
class normal_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;
private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator front_page;
    typename simple_vector<block_type>::iterator back_page;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;
public:
    normal_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }
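
    // Layout note: the cache holds two pages of blocks_per_page blocks each.
    // back_page starts at cache.begin(), front_page one page later; push()
    // and pop() swap the two iterators so one page can be written out or
    // read back while the other one continues to serve element accesses.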
    void swap(normal_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(front_page, obj.front_page);
        std::swap(back_page, obj.back_page);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }
    //! \brief Construction from a stack.
    //! \param stack_ stack object (external or internal; it must provide a
    //! copy constructor, \c top(), \c pop() and \c size())
    template <class stack_type>
    normal_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        // unload the copy in reverse, so tmp holds bottom-to-top order
        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }

        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~normal_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);

        if (cache_offset == 2 * blocks_per_page * block_type::size)  // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            simple_vector<request_ptr> requests(blocks_per_page);

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                requests[i] = (back_page + i)->write(*cur_bid);
            }

            std::swap(back_page, front_page);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = blocks_per_page * block_type::size + 1;
            current_element = &((*front_page)[0]);
            ++size_;

            wait_all(requests.begin(), blocks_per_page);

            *current_element = val;

            return;
        }

        current_element = element(cache_offset);
        *current_element = val;
        ++cache_offset;
        ++size_;
    }
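
    // Overflow path above: once all 2 * blocks_per_page cached blocks are
    // full, the back page is posted for writing to freshly allocated blocks
    // and the page roles are swapped; cache_offset is rebased to one page
    // plus the just-pushed element. wait_all() runs before storing val so
    // the reused page is not modified while its writes are still in flight.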
    void pop()
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        assert(cache_offset > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)  // cache underflow
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            simple_vector<request_ptr> requests(blocks_per_page);

            typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
            for (int i = blocks_per_page - 1; i >= 0; --i)
            {
                requests[i] = (front_page + i)->read(*(--cur_bid));
            }

            std::swap(front_page, back_page);

            cache_offset = blocks_per_page * block_type::size;
            current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);
            --size_;

            wait_all(requests.begin(), blocks_per_page);

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            return;
        }

        current_element = element((--cache_offset) - 1);
        --size_;
    }
private:
    value_type * element(unsigned_type offset)
    {
        if (offset < blocks_per_page * block_type::size)
            return &((*(back_page + offset / block_type::size))[offset % block_type::size]);

        unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
        return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
    }
};
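
// element() maps a logical cache offset to a buffer and an in-buffer index:
// offsets below one page (blocks_per_page * block_type::size elements) land
// in the back page, larger offsets are rebased into the front page. E.g.
// with blocks_per_page = 4 and block_type::size = 1024, offset 5000 yields
// unbiased_offset = 904, i.e. front page block 0, position 904.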
//! \brief Efficient implementation that uses prefetching and overlapping
//! using internal buffers.
template <class Config_>
class grow_shrink_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;
private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator cache_buffers;
    typename simple_vector<block_type>::iterator overlap_buffers;
    simple_vector<request_ptr> requests;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;
public:
    grow_shrink_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }
    void swap(grow_shrink_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(cache_buffers, obj.cache_buffers);
        std::swap(overlap_buffers, obj.overlap_buffers);
        std::swap(requests, obj.requests);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }
    //! \brief Construction from a stack.
    //! \param stack_ stack object (external or internal; it must provide a
    //! copy constructor, \c top(), \c pop() and \c size())
    template <class stack_type>
    grow_shrink_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        // unload the copy in reverse, so tmp holds bottom-to-top order
        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }

        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~grow_shrink_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        try
        {
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= blocks_per_page * block_type::size);

        if (cache_offset == blocks_per_page * block_type::size)  // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                // wait for a possibly pending request on this buffer
                // before posting a new write from it
                if (requests[i].get())
                    requests[i]->wait();

                requests[i] = (cache_buffers + i)->write(*cur_bid);
            }

            std::swap(cache_buffers, overlap_buffers);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = 0;
            current_element = &((*cache_buffers)[0]);
            ++size_;

            *current_element = val;
            ++cache_offset;

            return;
        }

        current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
        *current_element = val;
        ++cache_offset;
        ++size_;
    }
    void pop()
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        assert(cache_offset > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)  // cache underflow
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);

            std::swap(cache_buffers, overlap_buffers);

            if (bids.size() > blocks_per_page)
            {
                STXXL_VERBOSE2("prefetching, size: " << size_);
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
                for (int i = blocks_per_page - 1; i >= 0; --i)
                    requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
            }

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            cache_offset = blocks_per_page * block_type::size;
            current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);
            --size_;

            return;
        }

        unsigned_type cur_offset = (--cache_offset) - 1;
        current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
        --size_;
    }
};
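
// Unlike normal_stack, grow_shrink_stack keeps its write requests pending
// across operations (the requests vector) and already issues reads for the
// next page inside pop(), so phases of repeated pushes followed by repeated
// pops overlap almost all block I/O with computation.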
//! \brief Efficient implementation that uses prefetching and overlapping
//! using (shared) buffer pools.
template <class Config_>
class grow_shrink_stack2 : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;
    typedef read_write_pool<block_type> pool_type;
private:
    size_type size_;
    unsigned_type cache_offset;
    block_type * cache;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;
    unsigned_type pref_aggr;
    pool_type * owned_pool;
    pool_type * pool;
public:
    //! \brief Constructs stack.
    //! \param pool_ block write/prefetch pool
    //! \param prefetch_aggressiveness number of blocks that will be used from the prefetch pool
    grow_shrink_stack2(pool_type & pool_,
                       unsigned_type prefetch_aggressiveness = 0) :
        size_(0),
        cache_offset(0),
        cache(pool_.steal()),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(NULL),
        pool(&pool_)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }
    //! \brief Constructs stack (deprecated; use the \c pool_type constructor).
    //! \param p_pool_ prefetch pool that will be used for block prefetching
    //! \param w_pool_ write pool that will be used for block writing
    //! \param prefetch_aggressiveness number of blocks that will be used from the prefetch pool
    _STXXL_DEPRECATED(
    grow_shrink_stack2(prefetch_pool<block_type> & p_pool_,
                       write_pool<block_type> & w_pool_,
                       unsigned_type prefetch_aggressiveness = 0)) :
        size_(0),
        cache_offset(0),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(new pool_type(p_pool_, w_pool_)),
        pool(owned_pool)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
        cache = owned_pool->steal();
    }
    void swap(grow_shrink_stack2 & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(cache, obj.cache);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
        std::swap(pref_aggr, obj.pref_aggr);
        std::swap(owned_pool, obj.owned_pool);
        std::swap(pool, obj.pool);
    }
    virtual ~grow_shrink_stack2()
    {
        STXXL_VERBOSE2("grow_shrink_stack2::~grow_shrink_stack2()");
        try
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            int_type i;
            for (i = bids_size - 1; i >= last_pref; --i)
            {
                // clean the prefetch buffer
                pool->invalidate(bids[i]);
            }
            typename std::vector<bid_type>::iterator cur = bids.begin();
            typename std::vector<bid_type>::const_iterator end = bids.end();
            for ( ; cur != end; ++cur)
            {
                block_type * b = NULL;
                // ... (handling of partially written blocks elided in this excerpt)
            }
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
        delete owned_pool;
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    void push(const value_type & val)
    {
        STXXL_VERBOSE3("grow_shrink_stack2::push(" << val << ")");
        assert(cache_offset <= block_type::size);

        if (cache_offset == block_type::size)  // cache overflow
        {
            STXXL_VERBOSE2("grow_shrink_stack2::push(" << val << ") growing, size: " << size_);

            bids.resize(bids.size() + 1);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            pool->write(cache, bids.back());
            cache = pool->steal();
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr) - 1, (int_type)0);
            for (int_type i = bids_size - 2; i >= last_pref; --i)
            {
                // clean prefetch buffers
                pool->invalidate(bids[i]);
            }
            cache_offset = 0;
        }
        (*cache)[cache_offset] = val;
        ++size_;
        ++cache_offset;

        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
    }
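
    // Design note: grow_shrink_stack2 privately caches only a single block.
    // A filled cache block is handed to the shared pool for asynchronous
    // writing and immediately replaced by a clean block stolen from the
    // pool, so push() normally does not block on I/O.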
    value_type & top()
    {
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }
    const value_type & top() const
    {
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }
    void pop()
    {
        STXXL_VERBOSE3("grow_shrink_stack2::pop()");
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        if (cache_offset == 1 && (!bids.empty()))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::pop() shrinking, size = " << size_);

            bid_type last_block = bids.back();
            bids.pop_back();
            pool->read(cache, last_block)->wait();
            block_manager::get_instance()->delete_block(last_block);
            rehint();
            cache_offset = block_type::size + 1;
        }
        --cache_offset;
        --size_;
    }
    //! \brief Sets level of prefetch aggressiveness (number of blocks from
    //! the prefetch pool used for prefetching).
    //! \param new_p new value for the prefetch aggressiveness
    void set_prefetch_aggr(unsigned_type new_p)
    {
        if (pref_aggr > new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            for (int_type i = bids_size - new_p - 1; i >= last_pref; --i)
            {
                // invalidate blocks that are no longer prefetched
                pool->invalidate(bids[i]);
            }
        }
        pref_aggr = new_p;
        rehint();
    }
    //! \brief Returns number of blocks used for prefetching.
    unsigned_type get_prefetch_aggr() const
    {
        return pref_aggr;
    }

private:
    //! \brief Hint the next pref_aggr external blocks so the pool prefetches them.
    void rehint()
    {
        const int_type bids_size = bids.size();
        const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
        for (int_type i = bids_size - 1; i >= last_pref; --i)
        {
            pool->hint(bids[i]);  // prefetch
        }
    }
};
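
// Illustrative usage sketch (not from the original header; pool sizes are
// arbitrary example values):
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::external,
//                                    stxxl::grow_shrink2>::result stack_type;
//     stack_type::pool_type pool(8, 8);  // prefetch and write buffers
//     stack_type st(pool);
//     st.push(42);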
//! \brief A stack that migrates from internal memory to external when its
//! size exceeds a certain threshold.
template <unsigned_type CritSize, class ExternalStack, class InternalStack>
class migrating_stack : private noncopyable
{
public:
    typedef typename ExternalStack::cfg cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef InternalStack int_stack_type;
    typedef ExternalStack ext_stack_type;

private:
    enum { critical_size = CritSize };

    int_stack_type * int_impl;
    ext_stack_type * ext_impl;
    // construction from a stack: not yet implemented
    template <class stack_type>
    migrating_stack(const stack_type & stack_);

public:
    migrating_stack() : int_impl(new int_stack_type()), ext_impl(NULL) { }

    void swap(migrating_stack & obj)
    {
        std::swap(int_impl, obj.int_impl);
        std::swap(ext_impl, obj.ext_impl);
    }
    //! \brief Returns true if the current implementation is internal, otherwise false.
    bool internal() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl != NULL);
    }
    //! \brief Returns true if the current implementation is external, otherwise false.
    bool external() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (ext_impl != NULL);
    }

    bool empty() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->empty() : ext_impl->empty();
    }
    size_type size() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? size_type(int_impl->size()) : ext_impl->size();
    }
    value_type & top()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    const value_type & top() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    void push(const value_type & val)
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
        {
            int_impl->push(val);
            if (int_impl->size() == critical_size)
            {
                // migrate to the external implementation
                ext_impl = new ext_stack_type(*int_impl);
                delete int_impl;
                int_impl = NULL;
            }
        }
        else
            ext_impl->push(val);
    }
    void pop()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            int_impl->pop();
        else
            ext_impl->pop();
    }

    virtual ~migrating_stack()
    {
        delete int_impl;
        delete ext_impl;
    }
};
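
// Migration happens at most once, when the internal stack reaches
// critical_size: the external stack is then built from the internal one via
// its converting constructor (an O(n) copy) and all further operations are
// delegated to it. The stack never migrates back to internal memory.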
enum stack_externality { external, migrating, internal };
enum stack_behaviour { normal, grow_shrink, grow_shrink2 };
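
// stack_externality selects where the elements live (always external,
// migrating to external memory beyond a size threshold, or purely internal);
// stack_behaviour selects the external implementation (normal_stack,
// grow_shrink_stack or grow_shrink_stack2).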
//! \brief Stack type generator.
template <
    class ValTp,
    stack_externality Externality = external,
    stack_behaviour Behaviour = normal,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
    class IntStackTp = std::stack<ValTp>,
    unsigned_type MigrCritSize = (2 * BlocksPerPage * BlkSz),
    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64
    >
class STACK_GENERATOR
{
    typedef stack_config_generator<ValTp, BlocksPerPage, BlkSz, AllocStr, SzTp> cfg;

    typedef typename IF<Behaviour == grow_shrink,
                        grow_shrink_stack<cfg>,
                        grow_shrink_stack2<cfg> >::result GrShrTp;
    typedef typename IF<Behaviour == normal, normal_stack<cfg>, GrShrTp>::result ExtStackTp;
    typedef typename IF<Externality == migrating,
                        migrating_stack<MigrCritSize, ExtStackTp, IntStackTp>, ExtStackTp>::result MigrOrNotStackTp;

public:
    typedef typename IF<Externality == internal, IntStackTp, MigrOrNotStackTp>::result result;
};
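
// Illustrative usage sketch (not part of the original header):
//
//     typedef stxxl::STACK_GENERATOR<double>::result ext_stack;
//     ext_stack st;
//     st.push(3.14);
//     double x = st.top();
//     st.pop();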
__STXXL_END_NAMESPACE
namespace std
{
    template <class Config_>
    void swap(stxxl::normal_stack<Config_> & a,
              stxxl::normal_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack<Config_> & a,
              stxxl::grow_shrink_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack2<Config_> & a,
              stxxl::grow_shrink_stack2<Config_> & b)
    {
        a.swap(b);
    }

    template <stxxl::unsigned_type CritSize, class ExternalStack, class InternalStack>
    void swap(stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & a,
              stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & b)
    {
        a.swap(b);
    }
}
#endif // !STXXL_STACK_HEADER