/***************************************************************************
 *  include/stxxl/bits/containers/stack.h
 *
 *  Part of the STXXL. See http://stxxl.sourceforge.net
 *
 *  Copyright (C) 2003-2004 Roman Dementiev <[email protected]>
 *  Copyright (C) 2009, 2010 Andreas Beckmann <[email protected]>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_STACK_HEADER
#define STXXL_STACK_HEADER

#include <stack>
#include <vector>

#include <stxxl/bits/deprecated.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/mng.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/common/simple_vector.h>
#include <stxxl/bits/common/tmeta.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/write_pool.h>
#include <stxxl/bits/mng/prefetch_pool.h>


__STXXL_BEGIN_NAMESPACE

template <class ValTp,
          unsigned BlocksPerPage = 4,
          unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
          class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
          class SzTp = stxxl::int64>
struct stack_config_generator
{
    typedef ValTp value_type;
    enum { blocks_per_page = BlocksPerPage };
    typedef AllocStr alloc_strategy;
    enum { block_size = BlkSz };
    typedef SzTp size_type;
};

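// Usage sketch (illustrative, not part of this header): assembling a custom
// configuration and handing it to one of the stack implementations below.
// The typedef names are made up for the example; everything else is defined
// in this file.
//
//     typedef stxxl::stack_config_generator<
//         double,                             // value type
//         4,                                  // blocks per page
//         STXXL_DEFAULT_BLOCK_SIZE(double)    // block size in bytes
//         > my_cfg;
//     typedef stxxl::normal_stack<my_cfg> my_stack;
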
//! \brief External stack container
template <class Config_>
class normal_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator front_page;
    typename simple_vector<block_type>::iterator back_page;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;

public:
    normal_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(normal_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(front_page, obj.front_page);
        std::swap(back_page, obj.back_page);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack
    template <class stack_type>
    normal_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~normal_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (cache_offset == 2 * blocks_per_page * block_type::size) // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            simple_vector<request_ptr> requests(blocks_per_page);

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                requests[i] = (back_page + i)->write(*cur_bid);
            }

            std::swap(back_page, front_page);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = blocks_per_page * block_type::size + 1;
            current_element = &((*front_page)[0]);
            ++size_;

            wait_all(requests.begin(), blocks_per_page);

            *current_element = val;

            return;
        }

        current_element = element(cache_offset);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            simple_vector<request_ptr> requests(blocks_per_page);

            {
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
                for (int i = blocks_per_page - 1; i >= 0; --i)
                {
                    requests[i] = (front_page + i)->read(*(--cur_bid));
                }
            }

            std::swap(front_page, back_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);

            wait_all(requests.begin(), blocks_per_page);

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            return;
        }

        --size_;

        current_element = element((--cache_offset) - 1);
    }

private:
    value_type * element(unsigned_type offset)
    {
        if (offset < blocks_per_page * block_type::size)
            return &((*(back_page + offset / block_type::size))[offset % block_type::size]);

        unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
        return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
    }
};

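// Minimal usage sketch for normal_stack (illustrative; assumes STXXL disks
// are configured via the usual .stxxl config file). The config and variable
// names are made up for the example.
//
//     typedef stxxl::stack_config_generator<int, 4> int_cfg;
//     stxxl::normal_stack<int_cfg> s;
//     for (int i = 0; i < 1000000; ++i)
//         s.push(i);
//     while (!s.empty()) {
//         int x = s.top();
//         s.pop();
//     }
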
//! \brief Efficient implementation that uses prefetching and overlapping using internal buffers
template <class Config_>
class grow_shrink_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator cache_buffers;
    typename simple_vector<block_type>::iterator overlap_buffers;
    simple_vector<request_ptr> requests;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;

public:
    grow_shrink_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(grow_shrink_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(cache_buffers, obj.cache_buffers);
        std::swap(overlap_buffers, obj.overlap_buffers);
        std::swap(requests, obj.requests);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack
    template <class stack_type>
    grow_shrink_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~grow_shrink_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        try
        {
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (cache_offset == blocks_per_page * block_type::size) // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                if (requests[i].get())
                    requests[i]->wait();

                requests[i] = (cache_buffers + i)->write(*cur_bid);
            }

            std::swap(cache_buffers, overlap_buffers);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = 1;
            current_element = &((*cache_buffers)[0]);
            ++size_;

            *current_element = val;

            return;
        }

        current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);

            std::swap(cache_buffers, overlap_buffers);

            if (bids.size() > blocks_per_page)
            {
                STXXL_VERBOSE2("prefetching, size: " << size_);
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
                for (int i = blocks_per_page - 1; i >= 0; --i)
                    requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
            }

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);

            return;
        }

        --size_;
        unsigned_type cur_offset = (--cache_offset) - 1;
        current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
    }
};

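// grow_shrink_stack pays off when the access pattern is a long run of pushes
// followed by a long run of pops: evicted pages are written out and the pages
// needed next are prefetched, both overlapped with computation. A minimal
// sketch (the typedef name is made up; the STACK_GENERATOR below selects this
// implementation via the grow_shrink behaviour tag):
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::external,
//                                    stxxl::grow_shrink>::result gs_stack;
//     gs_stack s;
//     for (int i = 0; i < 1000000; ++i)
//         s.push(i);          // grow phase: full pages written asynchronously
//     while (!s.empty())
//         s.pop();            // shrink phase: next pages prefetched
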
//! \brief Efficient implementation that uses prefetching and overlapping using (shared) buffers pools
template <class Config_>
class grow_shrink_stack2 : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,     // stack of this type has only one page
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    typedef read_write_pool<block_type> pool_type;

    size_type size_;
    unsigned_type cache_offset;
    block_type * cache;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;
    unsigned_type pref_aggr;
    pool_type * owned_pool;
    pool_type * pool;

public:
    //! \brief Constructs stack
    grow_shrink_stack2(
        pool_type & pool_,
        unsigned_type prefetch_aggressiveness = 0) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(NULL),
        pool(&pool_)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    //! \brief Constructs stack
    _STXXL_DEPRECATED(grow_shrink_stack2(
        prefetch_pool<block_type> & p_pool_,
        write_pool<block_type> & w_pool_,
        unsigned_type prefetch_aggressiveness = 0)) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(new pool_type(p_pool_, w_pool_)),
        pool(owned_pool)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    void swap(grow_shrink_stack2 & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(cache, obj.cache);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
        std::swap(pref_aggr, obj.pref_aggr);
        std::swap(owned_pool, obj.owned_pool);
        std::swap(pool, obj.pool);
    }

    virtual ~grow_shrink_stack2()
    {
        try
        {
            STXXL_VERBOSE2("grow_shrink_stack2::~grow_shrink_stack2()");
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            int_type i;
            for (i = bids_size - 1; i >= last_pref; --i)
            {
                // clean the prefetch buffer
                pool->invalidate(bids[i]);
            }
            typename std::vector<bid_type>::iterator cur = bids.begin();
            typename std::vector<bid_type>::const_iterator end = bids.end();
            for ( ; cur != end; ++cur)
            {
                // FIXME: read_write_pool needs something like cancel_write(bid)
                block_type * b = NULL; // w_pool.steal(*cur);
                if (b)
                {
                    pool->add(cache);   // return buffer
                    cache = b;
                }
            }
            delete cache;
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
        delete owned_pool;
    }

    size_type size() const
    {
        return size_;
    }

    bool empty() const
    {
        return (!size_);
    }

    void push(const value_type & val)
    {
        STXXL_VERBOSE3("grow_shrink_stack2::push(" << val << ")");
        assert(cache_offset <= block_type::size);

        if (cache_offset == block_type::size)
        {
            STXXL_VERBOSE2("grow_shrink_stack2::push(" << val << ") growing, size: " << size_);

            bids.resize(bids.size() + 1);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            pool->write(cache, bids.back());
            cache = pool->steal();
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr) - 1, (int_type)0);
            for (int_type i = bids_size - 2; i >= last_pref; --i)
            {
                // clean prefetch buffers
                pool->invalidate(bids[i]);
            }
            cache_offset = 0;
        }
        (*cache)[cache_offset] = val;
        ++size_;
        ++cache_offset;

        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
    }

    value_type & top()
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    const value_type & top() const
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    void pop()
    {
        STXXL_VERBOSE3("grow_shrink_stack2::pop()");
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        if (cache_offset == 1 && (!bids.empty()))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::pop() shrinking, size = " << size_);

            bid_type last_block = bids.back();
            bids.pop_back();
            pool->read(cache, last_block)->wait();
            block_manager::get_instance()->delete_block(last_block);
            rehint();
            cache_offset = block_type::size + 1;
        }

        --cache_offset;
        --size_;
    }

    //! \brief Sets level of prefetch aggressiveness (number of blocks from the prefetch pool used for prefetching)
    void set_prefetch_aggr(unsigned_type new_p)
    {
        if (pref_aggr > new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            for (int_type i = bids_size - new_p - 1; i >= last_pref; --i)
            {
                // clean prefetch buffers
                pool->invalidate(bids[i]);
            }
        }
        pref_aggr = new_p;
        rehint();
    }

    //! \brief Returns number of blocks used for prefetching
    unsigned_type get_prefetch_aggr() const
    {
        return pref_aggr;
    }

private:
    //! \brief give prefetch hints for the last pref_aggr external blocks
    void rehint()
    {
        const int_type bids_size = bids.size();
        const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
        for (int_type i = bids_size - 1; i >= last_pref; --i)
        {
            pool->hint(bids[i]);    // prefetch
        }
    }
};

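// grow_shrink_stack2 keeps only a single cached page and takes its buffers
// from a shared read_write_pool, which makes it attractive when many stacks
// are alive at once. A minimal sketch (typedef and variable names are made
// up; the pool sizes and the (prefetch, write) argument order are assumptions
// about read_write_pool's constructor):
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::external,
//                                    stxxl::grow_shrink2>::result gs2_stack;
//     stxxl::read_write_pool<gs2_stack::block_type> pool(8, 8);
//     gs2_stack s(pool, 4);   // use up to 4 pool blocks for prefetching
//     s.push(42);
//     s.pop();
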
//! \brief A stack that migrates from internal memory to external when its size exceeds a certain threshold
template <unsigned_type CritSize, class ExternalStack, class InternalStack>
class migrating_stack : private noncopyable
{
public:
    typedef typename ExternalStack::cfg cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef InternalStack int_stack_type;
    typedef ExternalStack ext_stack_type;

private:
    enum { critical_size = CritSize };

    int_stack_type * int_impl;
    ext_stack_type * ext_impl;

    // not implemented yet
    template <class stack_type>
    migrating_stack(const stack_type & stack_);

public:
    migrating_stack() : int_impl(new int_stack_type()), ext_impl(NULL) { }

    void swap(migrating_stack & obj)
    {
        std::swap(int_impl, obj.int_impl);
        std::swap(ext_impl, obj.ext_impl);
    }

    //! \brief Returns true if current implementation is internal, otherwise false
    bool internal() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return int_impl;
    }
    //! \brief Returns true if current implementation is external, otherwise false
    bool external() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return ext_impl;
    }

    bool empty() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->empty() : ext_impl->empty();
    }
    size_type size() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? size_type(int_impl->size()) : ext_impl->size();
    }
    value_type & top()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    const value_type & top() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    void push(const value_type & val)
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
        {
            int_impl->push(val);
            if (int_impl->size() == critical_size)
            {
                // migrate to external stack
                ext_impl = new ext_stack_type(*int_impl);
                delete int_impl;
                int_impl = NULL;
            }
        }
        else
            ext_impl->push(val);
    }
    void pop()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            int_impl->pop();
        else
            ext_impl->pop();
    }
    virtual ~migrating_stack()
    {
        delete int_impl;
        delete ext_impl;
    }
};

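// The migrating stack starts out as a plain internal stack and copies its
// contents into the external implementation once critical_size is reached,
// so small stacks never touch disk. A minimal sketch using the generator
// below (the typedef and variable names are made up):
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::migrating>::result mig_stack;
//     mig_stack s;
//     s.push(1);              // served by std::stack while small
//     assert(s.internal());
//     s.pop();
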
enum stack_externality { external, migrating, internal };
enum stack_behaviour { normal, grow_shrink, grow_shrink2 };

//! \brief Stack type generator
template <
    class ValTp,
    stack_externality Externality = external,
    stack_behaviour Behaviour = normal,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),

    class IntStackTp = std::stack<ValTp>,
    unsigned_type MigrCritSize = (2 * BlocksPerPage * BlkSz),

    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64
    >
class STACK_GENERATOR
{
    typedef stack_config_generator<ValTp, BlocksPerPage, BlkSz, AllocStr, SzTp> cfg;

    typedef typename IF<Behaviour == grow_shrink,
                        grow_shrink_stack<cfg>,
                        grow_shrink_stack2<cfg> >::result GrShrTp;
    typedef typename IF<Behaviour == normal, normal_stack<cfg>, GrShrTp>::result ExtStackTp;
    typedef typename IF<Externality == migrating,
                        migrating_stack<MigrCritSize, ExtStackTp, IntStackTp>, ExtStackTp>::result MigrOrNotStackTp;

public:
    typedef typename IF<Externality == internal, IntStackTp, MigrOrNotStackTp>::result result;
};

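// Typical instantiations of the generator (illustrative; the typedef names
// are made up). The defaults yield an external normal_stack of the given
// value type:
//
//     typedef stxxl::STACK_GENERATOR<double>::result ext_stack;
//     typedef stxxl::STACK_GENERATOR<double, stxxl::internal>::result int_stack;
//     typedef stxxl::STACK_GENERATOR<double, stxxl::external,
//                                    stxxl::grow_shrink>::result gs_stack;
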
__STXXL_END_NAMESPACE


namespace std
{
    template <class Config_>
    void swap(stxxl::normal_stack<Config_> & a,
              stxxl::normal_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack<Config_> & a,
              stxxl::grow_shrink_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack2<Config_> & a,
              stxxl::grow_shrink_stack2<Config_> & b)
    {
        a.swap(b);
    }

    template <stxxl::unsigned_type CritSize, class ExternalStack, class InternalStack>
    void swap(stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & a,
              stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & b)
    {
        a.swap(b);
    }
}

#endif // !STXXL_STACK_HEADER
// vim: et:ts=4:sw=4