STXXL  1.4-dev
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
block_scheduler.h
Go to the documentation of this file.
1 /***************************************************************************
2  * include/stxxl/bits/mng/block_scheduler.h
3  *
4  * Part of the STXXL. See http://stxxl.sourceforge.net
5  *
6  * Copyright (C) 2010-2011 Raoul Steffen <[email protected]>
7  *
8  * Distributed under the Boost Software License, Version 1.0.
9  * (See accompanying file LICENSE_1_0.txt or copy at
10  * http://www.boost.org/LICENSE_1_0.txt)
11  **************************************************************************/
12 
13 #ifndef STXXL_MNG_BLOCK_SCHEDULER_HEADER
14 #define STXXL_MNG_BLOCK_SCHEDULER_HEADER
15 
16 #include <stack>
17 #include <queue>
18 #include <limits>
19 
23 
25 
26 //! Virtualization of a block of data.
27 //! Holds information for allocating and swapping. To use in cooperation with block_scheduler.
28 //!
29 //! A swappable_block can be uninitialized, i.e. it holds no data.
30 //! When access is required, is has to be acquired first, and released afterwards, so it can be swapped in and out as required.
31 //! If the stored data is no longer needed, it can get uninitialized, freeing both internal and external memory.
32 //! \tparam ValueType type of contained objects (POD with no references to internal memory).
33 //! \tparam BlockSize Number of objects in one block.
34 //! BlockSize*sizeof(ValueType) must be divisible by 4096.
35 template <typename ValueType, unsigned BlockSize>
37 {
38 protected:
39  static const unsigned_type raw_block_size = BlockSize * sizeof(ValueType);
40 
41 public:
44 
45 protected:
46  external_block_type external_data; //!external_data.valid if no associated space on disk
47  internal_block_type* internal_data; //NULL if there is no internal memory reserved
48  bool dirty;
50 
52 
54  { block_manager::get_instance()->new_block(striping(), external_data, ++disk_allocation_offset); }
55 
57  {
58  block_manager::get_instance()->delete_block(external_data);
59  external_data = external_block_type(); // make invalid
60  }
61 
62 public:
63  //! Create in uninitialized state.
65  : external_data() /*!valid*/, internal_data(0), dirty(false), reference_count(0) { }
66 
68 
69  //! If it has an internal_block. The internal_block implicitly holds valid data.
70  bool is_internal() const
71  { return (internal_data != NULL); }
72 
73  //! If the external_block does not hold valid data.
74  bool is_dirty() const
75  { return dirty; }
76 
77  //! If it has an external_block.
78  bool has_external_block() const
79  { return external_data.valid(); }
80 
81  //! If it has an external_block that holds valid data.
82  bool is_external() const
83  { return has_external_block() && ! is_dirty(); }
84 
85  //! If it is acquired.
86  bool is_acquired() const
87  { return reference_count > 0; }
88 
89  //! If it holds an internal_block but does not need it.
90  bool is_evictable() const
91  { return ! is_acquired() && is_internal(); }
92 
93  //! If it has some valid data (in- or external).
94  bool is_initialized() const
95  { return is_internal() || is_external(); }
96 
97  //! Invalidate external data if true.
98  //! \return is_dirty()
99  bool make_dirty_if(const bool make_dirty)
100  {
101  assert(is_acquired());
102  return dirty = make_dirty || dirty;
103  }
104 
105  //! Acquire the block, i.e. add a reference. Has to be internal.
106  //! \return A reference to the data-block.
108  {
109  assert(is_internal());
110  ++reference_count;
111  return *internal_data;
112  }
113 
114  //! Release the block, i.e. subduct a reference. Has to be acquired.
115  void release()
116  {
117  assert(is_acquired());
118  --reference_count;
119  }
120 
121  //! Get a reference to the data-block. Has to be acquired.
123  {
124  assert(is_acquired());
125  return *internal_data;
126  }
127 
128  //! Get a reference to the data-block. Has to be acquired.
130  {
131  assert(is_acquired());
132  return *internal_data;
133  }
134 
135  //! Fill block with default data, is supposed to be overwritten by subclass. Has to be internal.
136  void fill_default() { }
137 
138  //! Read asyncronusly from external_block to internal_block. Has to be internal and have an external_block.
139  //! \return A request pointer to the I/O.
141  {
142  assert(is_internal());
143  assert(has_external_block());
144  #ifdef RW_VERBOSE
145  STXXL_MSG("reading block");
146  #endif
147  dirty = false;
148  return internal_data->read(external_data, on_cmpl);
149  }
150 
151  //! Read synchronously from external_block to internal_block. Has to be internal and have an external_block.
152  void read_sync()
153  { read_async()->wait(); }
154 
155  //! Write asyncronusly from internal_block to external_block if necessary.
156  //! \return A request pointer to the I/O, an invalid request pointer if not necessary.
158  {
159  if (! is_dirty())
160  return request_ptr();
161  if (! has_external_block())
162  get_external_block();
163  #ifdef RW_VERBOSE
164  STXXL_MSG("writing block");
165  #endif
166  dirty = false;
167  return internal_data->write(external_data, on_cmpl);
168  }
169 
170  //! Write synchronously from internal_block to external_block if necessary.
171  void clean_sync()
172  {
173  request_ptr rp = clean_async();
174  if (rp.valid())
175  rp->wait();
176  }
177 
178  //! Attach an internal_block, making the block internal. Has to be not internal.
180  {
181  assert(! is_internal());
182  internal_data = iblock;
183  }
184 
185  //! Detach the internal_block. Writes to external_block if necessary. Has to be evictable.
186  //! \return A pointer to the internal_block.
188  {
189  assert(is_evictable());
190  clean_sync();
191  internal_block_type* iblock = internal_data;
192  internal_data = 0;
193  return iblock;
194  }
195 
196  //! Bring the block in uninitialized state, freeing external and internal memory.
197  //! Returns a pointer to the internal_block, NULL if it had none.
198  //! \return A pointer to the freed internal_block, NULL if it had none.
200  {
201  assert(! is_acquired());
202  dirty = false;
203  // free external_block (so that it becomes invalid and the disk-space can be used again)
204  if (has_external_block())
205  free_external_block();
206  // free internal_block
207  internal_block_type* iblock = internal_data;
208  internal_data = 0;
209  return iblock;
210  }
211 
212  //! Set the external_block that holds the swappable_block's data. The block gets initialized with it.
213  //! \param eblock The external_block holding initial data.
215  {
216  assert(! is_initialized());
217  external_data = eblock;
218  }
219 
220  //! Extract the swappable_blocks data in an external_block. The block gets uninitialized.
221  //! \return The external_block that holds the swappable_block's data.
223  {
224  assert(! is_internal());
225  external_block_type eblock = external_data;
226  external_data = external_block_type();
227  return eblock;
228  }
229 };
230 
231 template <typename ValueType, unsigned BlockSize>
232 unsigned_type swappable_block<ValueType, BlockSize>::disk_allocation_offset = 0;
233 
234 template <class SwappableBlockType>
236 
237 template <class SwappableBlockType>
239 
240 //! Schedules swapping of blocks and provides blocks for temporary storage.
241 //!
242 //! In simple mode, it tries to save I/Os through caching only.
243 //! In simulation mode, it records access patterns into a prediction sequence.
244 //! The prediction sequence can then be used for prefetching in the (offline) execute mode.
245 //! This will only work for algorithms with deterministic, data oblivious access patterns.
246 //! In simulation mode, no I/O is performed; the data provided is accessible but undefined.
247 //! In execute mode, it does caching, prefetching, and possibly other optimizations.
248 //! \tparam SwappableBlockType Type of swappable_blocks to manage. Can be some specialized subclass.
249 template <class SwappableBlockType>
251 {
252 protected:
253  // tuning-parameter: To acquire blocks, internal memory has to be allocated.
254  // This constant limits the number of internal_blocks allocated at once.
256 
258 
259 public:
260  typedef typename SwappableBlockType::internal_block_type internal_block_type;
261  typedef typename SwappableBlockType::external_block_type external_block_type;
262  typedef typename std::vector<SwappableBlockType>::size_type swappable_block_identifier_type;
263 
264  /*/! Mode the block scheduler currently works in
265  enum mode
266  {
267  online, //serve requests immediately, without any prediction, LRU caching
268  simulation, //record prediction sequence only, do not serve requests, (returned blocks must not be accessed)
269  offline_lfd, //serve requests based on prediction sequence, using longest-forward-distance caching
270  offline_lfd_prefetch //serve requests based on prediction sequence, using longest-forward-distance caching, and prefetching
271  };*/
272 
273 public:
274  // -------- prediction_sequence -------
276  {
283  op_extract_external_block
284  };
285 
287  {
291 
294  : op(op), id(id), time(time) { }
295  };
296 
297  typedef std::list<prediction_sequence_element> prediction_sequence_type;
298  // ---- end prediction_sequence -------
299 
300 protected:
301  template <class SBT>
303 
306  //! Stores pointers to arrays of internal_blocks. Used to deallocate them only.
307  std::stack<internal_block_type*> internal_blocks_blocks;
308  //! holds free internal_blocks with attributes reset.
309  std::stack<internal_block_type*> free_internal_blocks;
310  //! temporary blocks that will not be needed after algorithm termination.
311  mutable std::vector<SwappableBlockType> swappable_blocks;
312  //! holds indices of free swappable_blocks with attributes reset.
313  std::priority_queue<swappable_block_identifier_type, std::vector<swappable_block_identifier_type>,
314  std::greater<swappable_block_identifier_type> > free_swappable_blocks;
317 
318  //! Get an internal_block from the freelist or a newly allocated one if available.
319  //! \return Pointer to the internal_block. NULL if none available.
321  {
322  if (! free_internal_blocks.empty())
323  {
324  // => there are internal_blocks in the free-list
325  internal_block_type* iblock = free_internal_blocks.top();
326  free_internal_blocks.pop();
327  return iblock;
328  }
329  else if (remaining_internal_blocks > 0)
330  {
331  // => more internal_blocks can be allocated
332  int_type num_blocks = std::min(max_internal_blocks_alloc_at_once, remaining_internal_blocks);
333  remaining_internal_blocks -= num_blocks;
334  internal_block_type* iblocks = new internal_block_type[num_blocks];
335  internal_blocks_blocks.push(iblocks);
336  for (int_type i = num_blocks - 1; i > 0; --i)
337  free_internal_blocks.push(iblocks + i);
338  return iblocks;
339  }
340  else
341  {
342  // => no internal_block available
343  return 0;
344  }
345  }
346 
347  //! Return an internal_block to the freelist.
349  { free_internal_blocks.push(iblock); }
350 
351 public:
352  //! Create a block_scheduler with empty prediction sequence in simple mode.
353  //! \param max_internal_memory Amount of internal memory (in bytes) the scheduler is allowed to use for acquiring, prefetching and caching.
354  explicit block_scheduler(const int_type max_internal_memory)
355  : max_internal_blocks(div_ceil(max_internal_memory, sizeof(internal_block_type))),
356  remaining_internal_blocks(max_internal_blocks),
357  bm(block_manager::get_instance()),
358  algo(0)
359  {
361  }
362 
364  {
365  delete algo;
366  int_type num_freed_internal_blocks = 0;
367  if (free_swappable_blocks.size() != swappable_blocks.size())
368  {
369  // => not all swappable_blocks are free, at least deinitialize them
370  STXXL_ERRMSG("not all swappable_blocks are free, those not acquired will be deinitialized");
371  // evictable_blocks would suffice
372  for (typename std::vector<SwappableBlockType>::iterator it = swappable_blocks.begin();
373  it != swappable_blocks.end(); ++it)
374  {
375  if (! it->is_acquired() && it->deinitialize())
376  // count internal_blocks that get freed
377  num_freed_internal_blocks++;
378  }
379  }
380  if (int_type nlost = (max_internal_blocks - remaining_internal_blocks)
381  - (free_internal_blocks.size() + num_freed_internal_blocks)) {
382  STXXL_ERRMSG(nlost << " internal_blocks are lost. They will get deallocated.");
383  }
384  while (! internal_blocks_blocks.empty())
385  {
386  delete[] internal_blocks_blocks.top();
387  internal_blocks_blocks.pop();
388  }
389  }
390 
391  //! Acquire the given block.
392  //! Has to be in pairs with release. Pairs may be nested and interleaved.
393  //! \return Reference to the block's data.
394  //! param sbid Swappable block to acquire.
395  internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
396  { return algo->acquire(sbid, uninitialized); }
397 
398  //! Release the given block.
399  //! Has to be in pairs with acquire. Pairs may be nested and interleaved.
400  //! \param sbid Swappable block to release.
401  //! \param dirty If the data has been changed, invalidating possible data in external storage.
402  void release(const swappable_block_identifier_type sbid, const bool dirty)
403  { algo->release(sbid, dirty); }
404 
405  //! Drop all data in the given block, freeing in- and external memory.
407  { algo->deinitialize(sbid); }
408 
409  //! Initialize the swappable_block with the given external_block.
410  //!
411  //! It will use the the external_block for swapping and take care about
412  //! it's deallocation. Has to be uninitialized.
413  //! \param sbid identifier to the swappable_block
414  //! \param eblock external_block a.k.a. bid
416  { algo->initialize(sbid, eblock); }
417 
418  //! Deinitialize the swappable_block and return it's contents in an external_block.
419  //!
420  //! \param sbid identifier to the swappable_block
421  //! \return external_block a.k.a. bid
423  { return algo->extract_external_block(sbid); }
424 
425  //! check if the swappable_block is initialized.
426  //! \param sbid identifier to the swappable_block
427  //! \return if the swappable_block is initialized
429  { return algo->is_initialized(sbid); }
430 
431  //! Record a timestep in the prediction sequence to seperate consecutive
432  //! acquire rsp. release-operations. Has an effect only in simulation mode.
434  { algo->explicit_timestep(); }
435 
436  //! Get a const reference to given block's data. Block has to be already acquired.
437  //! \param sbid Swappable block to access.
439  { return swappable_blocks[sbid].get_internal_block(); }
440 
441  //! Allocate an uninitialized swappable_block.
442  //! \return An identifier of the block.
444  {
446  if (free_swappable_blocks.empty())
447  {
448  // create new swappable_block
449  sbid = swappable_blocks.size();
450  swappable_blocks.resize(sbid + 1);
451  algo->swappable_blocks_resize(sbid + 1);
452  }
453  else
454  {
455  // take swappable_block from freelist
456  sbid = free_swappable_blocks.top();
457  free_swappable_blocks.pop();
458  }
459  return sbid;
460  }
461 
462  //! Free given no longer used temporary swappable_block.
463  //! \param sbid Temporary swappable_block to free.
465  {
466  deinitialize(sbid);
467  free_swappable_blocks.push(sbid);
468  }
469 
470  //! Returns if simulation mode is on, i.e. if a prediction sequence is being recorded.
471  //! \return If simulation mode is on.
472  bool is_simulating() const
473  { return algo->is_simulating(); }
474 
475  //! Switch the used algorithm, e.g. to simulation etc..
476  //! \param new_algo Pointer to the new algorithm object. Has to be instantiated to the block scheduler (or the old algorithm object).
477  //! \return Pointer to the old algorithm object.
479  {
480  assert(&new_algo->bs == this);
482  algo = new_algo;
483  return old_algo;
484  }
485 
486  //! Return the current algorithm.
488  {
489  return algo;
490  }
491 
492  //! Get the prediction_sequence.
493  //! \return reference to the prediction_sequence
495  { return algo->get_prediction_sequence(); }
496 
497  void flush()
498  {
499  std::vector<request_ptr> requests;
500  while (! algo->evictable_blocks_empty())
501  {
502  swappable_block_identifier_type sbid = algo->evictable_blocks_pop();
503  request_ptr rq = swappable_blocks[sbid].clean_async();
504  if (rq.valid())
505  requests.push_back(rq);
506  return_free_internal_block(swappable_blocks[sbid].detach_internal_block());
507  }
508  for (typename std::vector<request_ptr>::reverse_iterator it = requests.rbegin();
509  it != requests.rend(); ++it)
510  {
511  (*it)->wait();
512  }
513  }
514 };
515 
516 template <class SwappableBlockType>
517 const int_type block_scheduler<SwappableBlockType>::max_internal_blocks_alloc_at_once = 128;
518 
519 //! Interface of a block scheduling algorithm.
520 template <class SwappableBlockType>
521 class block_scheduler_algorithm : private noncopyable
522 {
523 protected:
530 
531 public:
533 
534 protected:
535  std::vector<SwappableBlockType>& swappable_blocks;
537 
539  { return bs.algo; }
540 
541  //! Get an internal_block from the block_scheduler.
542  //! \return Pointer to the internal_block. NULL if none available.
544  { return bs.get_free_internal_block(); }
545 
546  //! Return an internal_block to the block_scheduler.
548  { bs.return_free_internal_block(iblock); }
549 
550 public:
552  : bs(bs),
553  swappable_blocks(bs.swappable_blocks)
554  { }
555 
557  : bs(old->bs),
558  swappable_blocks(bs.swappable_blocks)
559  { }
560 
562 
563  virtual bool evictable_blocks_empty() = 0;
564  virtual swappable_block_identifier_type evictable_blocks_pop() = 0;
566 
567  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false) = 0;
568  virtual void release(swappable_block_identifier_type sbid, const bool dirty) = 0;
569  virtual void deinitialize(swappable_block_identifier_type sbid) = 0;
570  virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock) = 0;
571  virtual external_block_type extract_external_block(swappable_block_identifier_type sbid) = 0;
572 
573  virtual bool is_initialized(const swappable_block_identifier_type sbid) const
574  { return swappable_blocks[sbid].is_initialized(); }
575 
576  virtual void explicit_timestep() { }
577  virtual bool is_simulating() const
578  { return false; }
580  { return prediction_sequence; }
581 };
582 
583 //! Block scheduling algorithm caching via the least recently used policy (online).
584 template <class SwappableBlockType>
585 class block_scheduler_algorithm_online_lru : public block_scheduler_algorithm<SwappableBlockType>
586 {
587 protected:
593 
594  using block_scheduler_algorithm_type::bs;
595  using block_scheduler_algorithm_type::swappable_blocks;
596  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
597  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
598  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
599 
600  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
602 
604  {
605  // try to get a free internal_block
606  if (internal_block_type* iblock = get_free_internal_block_from_block_scheduler())
607  return iblock;
608  // evict block
609  assert(! evictable_blocks.empty()); // fails it there is not enough memory available
610  return swappable_blocks[evictable_blocks.pop()].detach_internal_block();
611  }
612 
614  { return_free_internal_block_to_block_scheduler(iblock); }
615 
616  void init()
617  {
618  if (get_algorithm_from_block_scheduler())
619  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
620  evictable_blocks.insert(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
621  }
622 
623 public:
626  { init(); }
627 
630  { init(); }
631 
633  {
634  if (! evictable_blocks.empty())
635  STXXL_ERRMSG("Destructing block_scheduler_algorithm_online that still holds evictable blocks. They get deinitialized.");
636  while (! evictable_blocks.empty())
637  {
638  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.pop()];
639  if (internal_block_type* iblock = sblock.deinitialize())
640  return_free_internal_block(iblock);
641  }
642  }
643 
644  virtual bool evictable_blocks_empty()
645  { return evictable_blocks.empty(); }
646 
648  { return evictable_blocks.pop(); }
649 
650  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
651  {
652  SwappableBlockType& sblock = swappable_blocks[sbid];
653  /* acquired => internal -> increase reference count
654  internal but not acquired -> remove from evictable_blocks, increase reference count
655  not internal => uninitialized or external -> get internal_block, increase reference count
656  uninitialized -> fill with default value
657  external -> read */
658  if (sblock.is_internal())
659  {
660  if (! sblock.is_acquired())
661  // not acquired yet -> remove from evictable_blocks
662  evictable_blocks.erase(sbid);
663  sblock.acquire();
664  }
665  else if (sblock.is_initialized())
666  {
667  // => external but not internal
668  //get internal_block
669  sblock.attach_internal_block(get_free_internal_block());
670  if (! uninitialized)
671  //load block synchronously
672  sblock.read_sync();
673  sblock.acquire();
674  }
675  else
676  {
677  // => ! sblock.is_initialized()
678  //get internal_block
679  sblock.attach_internal_block(get_free_internal_block());
680  sblock.acquire();
681  //initialize new block
682  if (! uninitialized)
683  sblock.fill_default();
684  }
685  return sblock.get_internal_block();
686  }
687 
688  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
689  {
690  SwappableBlockType& sblock = swappable_blocks[sbid];
691  sblock.make_dirty_if(dirty);
692  sblock.release();
693  if (! sblock.is_acquired())
694  {
695  if (sblock.is_dirty() || sblock.is_external())
696  // => evictable, put in pq
697  evictable_blocks.insert(sbid);
698  else
699  // => uninitialized, release internal block and put it in freelist
700  return_free_internal_block(sblock.detach_internal_block());
701  }
702  }
703 
705  {
706  SwappableBlockType& sblock = swappable_blocks[sbid];
707  if (sblock.is_evictable())
708  evictable_blocks.erase(sbid);
709  if (internal_block_type* iblock = sblock.deinitialize())
710  return_free_internal_block(iblock);
711  }
712 
714  {
715  SwappableBlockType& sblock = swappable_blocks[sbid];
716  sblock.initialize(eblock);
717  }
718 
720  {
721  SwappableBlockType& sblock = swappable_blocks[sbid];
722  if (sblock.is_evictable())
723  evictable_blocks.erase(sbid);
724  if (sblock.is_internal())
725  return_free_internal_block(sblock.detach_internal_block());
726  return sblock.extract_external_block();
727  }
728 };
729 
730 //! Pseudo block scheduling algorithm only recording the request sequence.
731 template <class SwappableBlockType>
733 {
734 protected:
742 
743  using block_scheduler_algorithm_type::bs;
744  using block_scheduler_algorithm_type::prediction_sequence;
745  using block_scheduler_algorithm_type::swappable_blocks;
746  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
747  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
748  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
749 
750  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
751  std::stack<swappable_block_identifier_type> evictable_blocks;
754  std::vector<int_type> reference_counts;
756 
758  { return_free_internal_block_to_block_scheduler(iblock); }
759 
760  void init()
761  {
762  if (get_algorithm_from_block_scheduler())
763  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
764  evictable_blocks.push(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
765  for (swappable_block_identifier_type i = 0; i < reference_counts.size(); ++i)
766  reference_counts[i] = swappable_blocks[i].is_initialized();
767  }
768 
769 public:
772  time_count(0),
773  last_op_release(false),
774  reference_counts(swappable_blocks.size())
775  { init(); }
776 
779  time_count(0),
780  last_op_release(false),
781  reference_counts(swappable_blocks.size())
782  { init(); }
783 
785  {
786  if (! evictable_blocks.empty())
787  STXXL_ERRMSG("Destructing block_scheduler_algorithm_record_prediction_sequence that still holds evictable blocks. They get deinitialized.");
788  while (! evictable_blocks.empty())
789  {
790  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.top()];
791  if (internal_block_type* iblock = sblock.deinitialize())
792  return_free_internal_block(iblock);
793  evictable_blocks.pop();
794  }
795  }
796 
797  virtual bool evictable_blocks_empty()
798  { return evictable_blocks.empty(); }
799 
801  {
802  swappable_block_identifier_type sbid = evictable_blocks.top();
803  evictable_blocks.pop();
804  return sbid;
805  }
806 
807  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
808  {
809  ++reference_counts[sbid];
810  last_op_release = false;
811  if (uninitialized)
812  prediction_sequence.push_back(
813  prediction_sequence_element_type(block_scheduler_type::op_acquire_uninitialized, sbid, time_count)
814  );
815  else
816  prediction_sequence.push_back(
817  prediction_sequence_element_type(block_scheduler_type::op_acquire, sbid, time_count)
818  );
819  return dummy_block;
820  }
821 
822  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
823  {
824  --reference_counts[sbid] += dirty;
825  time_count += ! last_op_release;
826  last_op_release = true;
827  if (dirty)
828  prediction_sequence.push_back(
829  prediction_sequence_element_type(block_scheduler_type::op_release_dirty, sbid, time_count)
830  );
831  else
832  prediction_sequence.push_back(
833  prediction_sequence_element_type(block_scheduler_type::op_release, sbid, time_count)
834  );
835  }
836 
838  {
839  reference_counts[sbid] = false;
840  prediction_sequence.push_back(
841  prediction_sequence_element_type(block_scheduler_type::op_deinitialize, sbid, time_count)
842  );
843  }
844 
846  {
847  reference_counts[sbid] = true;
848  prediction_sequence.push_back(
849  prediction_sequence_element_type(block_scheduler_type::op_initialize, sbid, time_count)
850  );
851  }
852 
854  {
855  reference_counts[sbid] = false;
856  prediction_sequence.push_back(
857  prediction_sequence_element_type(block_scheduler_type::op_extract_external_block, sbid, time_count)
858  );
859  return external_block_type();
860  }
861 
863  {
864  reference_counts.resize(size, 0);
865  }
866 
867  virtual bool is_initialized(const swappable_block_identifier_type sbid) const
868  {
869  return reference_counts[sbid] > 0;
870  }
871 
872  virtual void explicit_timestep()
873  { ++time_count; }
874 
875  virtual bool is_simulating() const
876  { return true; }
877 };
878 
879 //! Block scheduling algorithm caching via the longest forward distance policy (offline).
880 template <class SwappableBlockType>
882 {
883 protected:
891 
892  using block_scheduler_algorithm_type::bs;
893  using block_scheduler_algorithm_type::swappable_blocks;
894  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
895  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
896  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
897 
898  class priority
899  {
901 
902  public:
903  priority(const SwappableBlockType& sblock, const std::pair<bool, time_type>& t)
904  {
905  // p larger => evict earlier
906  if (t.first)
907  {
908  // most significant: next use
909  p = unsigned_type(t.second) << 2;
910  // less significant: not dirty
911  p |= unsigned_type(! sblock.is_dirty()) << 1;
912  // less significant: has external_block
913  p |= unsigned_type(sblock.has_external_block()) << 0;
914  }
915  else
916  {
917  // most significant: next use
919  // less significant: next operation: extract > accessed no more > deinitialize
920  p |= unsigned_type(t.second) << 0;
921  }
922  }
923 
924  // less => evict earlier
925  bool operator < (const priority& right) const
926  { return p > right.p; }
927  };
928 
929  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
931  /*!
932  * Stores for the sequence of releases extracted from the prediction_sequence:
933  * (true, timestamp of the blocks next acquire) if it is acquired next
934  * (false, 0) if it is deinitialized next
935  * (false, 1) if it is not accessed any more
936  * (false, 2) if it is extracted next
937  * (false, 3) if it is initialized next
938  */
939  std::deque<std::pair<bool, time_type> > next_use;
940 
942  {
943  // try to get a free internal_block
944  if (internal_block_type* iblock = get_free_internal_block_from_block_scheduler())
945  return iblock;
946  // evict block
947  assert(! evictable_blocks.empty()); // fails it there is not enough memory available
948  return swappable_blocks[evictable_blocks.pop()].detach_internal_block();
949  }
950 
952  { return_free_internal_block_to_block_scheduler(iblock); }
953 
955  {
956  std::vector<std::pair<bool, time_type> >
957  blocks_next_acquire(swappable_blocks.size(), std::make_pair(false, 1));
958  if (old_algo)
959  {
960  // precomputations for priorities: init next_acquires
961  const prediction_sequence_type& ps = old_algo->get_prediction_sequence();
962  for (typename prediction_sequence_type::const_reverse_iterator it = ps.rbegin(); it != ps.rend(); ++it)
963  {
964  switch (it->op)
965  {
966  case (block_scheduler_type::op_acquire):
967  case (block_scheduler_type::op_acquire_uninitialized):
968  blocks_next_acquire[it->id] = std::make_pair(true, it->time);
969  break;
970  case (block_scheduler_type::op_release):
971  case (block_scheduler_type::op_release_dirty):
972  next_use.push_front(blocks_next_acquire[it->id]);
973  break;
974  case (block_scheduler_type::op_deinitialize):
975  blocks_next_acquire[it->id] = std::make_pair(false, 0);
976  break;
977  case (block_scheduler_type::op_initialize):
978  blocks_next_acquire[it->id] = std::make_pair(false, 3);
979  break;
980  case (block_scheduler_type::op_extract_external_block):
981  blocks_next_acquire[it->id] = std::make_pair(false, 2);
982  break;
983  }
984  }
985  }
986  if (get_algorithm_from_block_scheduler())
987  {
988  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
989  {
990  // insert already evictable blocks with the right priority
991  const swappable_block_identifier_type sbid = get_algorithm_from_block_scheduler()->evictable_blocks_pop();
992  evictable_blocks.insert(sbid, priority(swappable_blocks[sbid], blocks_next_acquire[sbid]));
993  }
994  }
995  }
996 
997 public:
1000  { init(get_algorithm_from_block_scheduler()); }
1001 
1002  // It is possible to keep an old simulation-algorithm object and reuse it's prediction sequence
1005  { init(old); }
1006 
1008  {
1009  if (! evictable_blocks.empty())
1010  STXXL_ERRMSG("Destructing block_scheduler_algorithm_offline_lfd that still holds evictable blocks. They get deinitialized.");
1011  while (! evictable_blocks.empty())
1012  {
1013  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.pop()];
1014  if (internal_block_type* iblock = sblock.deinitialize())
1015  return_free_internal_block(iblock);
1016  }
1017  }
1018 
1019  virtual bool evictable_blocks_empty()
1020  { return evictable_blocks.empty(); }
1021 
1023  { return evictable_blocks.pop(); }
1024 
1025  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
1026  {
1027  SwappableBlockType& sblock = swappable_blocks[sbid];
1028  /* acquired => internal -> increase reference count
1029  internal but not acquired -> remove from evictable_blocks, increase reference count
1030  not intern => uninitialized or external -> get internal_block, increase reference count
1031  uninitialized -> fill with default value
1032  external -> read */
1033  if (sblock.is_internal())
1034  {
1035  if (! sblock.is_acquired())
1036  // not acquired yet -> remove from evictable_blocks
1037  evictable_blocks.erase(sbid);
1038  sblock.acquire();
1039  }
1040  else if (sblock.is_initialized())
1041  {
1042  // => external but not internal
1043  //get internal_block
1044  sblock.attach_internal_block(get_free_internal_block());
1045  if (! uninitialized)
1046  //load block synchronously
1047  sblock.read_sync();
1048  sblock.acquire();
1049  }
1050  else
1051  {
1052  // => ! sblock.is_initialized()
1053  //get internal_block
1054  sblock.attach_internal_block(get_free_internal_block());
1055  sblock.acquire();
1056  //initialize new block
1057  if (! uninitialized)
1058  sblock.fill_default();
1059  }
1060  return sblock.get_internal_block();
1061  }
1062 
1063  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
1064  {
1065  if (next_use.empty())
1066  {
1067  STXXL_ERRMSG("block_scheduler_algorithm_offline_lfd got release-request but prediction sequence ended. Switching to block_scheduler_algorithm_online.");
1068  // switch algorithm
1069  block_scheduler_algorithm_type* new_algo, * old_algo;
1071  old_algo = bs.switch_algorithm_to(new_algo);
1072  // redirect call
1073  new_algo->release(sbid, dirty);
1074  // delete self
1075  delete old_algo;
1076  return;
1077  }
1078  SwappableBlockType& sblock = swappable_blocks[sbid];
1079  sblock.make_dirty_if(dirty);
1080  sblock.release();
1081  if (! sblock.is_acquired())
1082  {
1083  if (sblock.is_dirty() || sblock.is_external())
1084  // => evictable, put in pq
1085  evictable_blocks.insert(sbid, priority(swappable_blocks[sbid], next_use.front()));
1086  else
1087  // => uninitialized, release internal block and put it in freelist
1088  return_free_internal_block(sblock.detach_internal_block());
1089  }
1090  next_use.pop_front();
1091  }
1092 
1094  {
1095  SwappableBlockType& sblock = swappable_blocks[sbid];
1096  if (sblock.is_evictable())
1097  evictable_blocks.erase(sbid);
1098  if (internal_block_type* iblock = sblock.deinitialize())
1099  return_free_internal_block(iblock);
1100  }
1101 
1103  {
1104  SwappableBlockType& sblock = swappable_blocks[sbid];
1105  sblock.initialize(eblock);
1106  }
1107 
1109  {
1110  SwappableBlockType& sblock = swappable_blocks[sbid];
1111  if (sblock.is_evictable())
1112  evictable_blocks.erase(sbid);
1113  if (sblock.is_internal())
1114  return_free_internal_block(sblock.detach_internal_block());
1115  return sblock.extract_external_block();
1116  }
1117 };
1118 
1119 //! Block scheduling algorithm caching via the least recently used policy
1120 //! (offline), and prefetching in addition.
1121 template <class SwappableBlockType>
1123 {
1124 protected:
1125  struct scheduled_block_meta;
1127 
1136  typedef typename std::vector<SwappableBlockType>::iterator swappable_blocks_iterator;
1137 
1138  typedef std::map<swappable_block_identifier_type, scheduled_block_meta> scheduled_blocks_type;
1139  typedef typename scheduled_blocks_type::iterator scheduled_blocks_iterator;
1140  typedef typename scheduled_blocks_type::reference scheduled_blocks_reference;
1141  typedef std::map<swappable_block_identifier_type, write_read_request*> write_scheduled_blocks_type;
1142  typedef typename write_scheduled_blocks_type::iterator write_scheduled_blocks_iterator;
1143  typedef typename write_scheduled_blocks_type::reference write_scheduled_blocks_reference;
1144 
1145  using block_scheduler_algorithm_type::bs;
1146  using block_scheduler_algorithm_type::swappable_blocks;
1147  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
1148  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
1149  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
1150  using block_scheduler_algorithm_type::prediction_sequence;
1151 
1153  {
1154  bool write_done_soon; // set by read_after_write, checked by schedule_read()
1155  bool shall_read; // checked by read_after_write, set by schedule_read()
1156  swappable_blocks_iterator block_to_start_read; // used by read_after_write, set by schedule_read()
1157  scheduled_blocks_iterator taker; // read_req set by read_after_write
1158  request_ptr write_req; // completes with read_after_write
1159 
1161  : write_done_soon(false), shall_read(false), block_to_start_read(), taker(), write_req(0) { }
1162  };
1163 
1165  {
1167 
1169  : wrr(write_read_req) { }
1170 
1171  void operator () (request*)
1172  {
1173  wrr->write_done_soon = true;
1174  if (wrr->shall_read)
1175  wrr->taker->second.read_req = wrr->block_to_start_read->read_async();
1176  }
1177  };
1178 
1180  {
1182  std::pair<bool, swappable_block_identifier_type> giver;
1184  std::deque<block_scheduler_operation> operations; // invariant: not empty; front: last scheduled operation, back: upcoming operation
1185 
1187  : reserved_iblock(0),
1188  giver(false, 0),
1189  read_req(0),
1190  operations()
1191  { operations.push_front(op); }
1192  };
1193 
1194  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
1195  std::set<swappable_block_identifier_type> free_evictable_blocks;
1196  std::set<swappable_block_identifier_type> scheduled_evictable_blocks;
1197 
1198  //! Holds not internal swappable_blocks, whose next access has already been scheduled.
1200 
1201  //! Holds swappable_blocks, whose internal block has been taken away but the clean did not finish yet.
1203  typename prediction_sequence_type::iterator next_op_to_schedule;
1204 
1205  //! Schedule an internal, possibly dirty swappable_block to write.
1206  //!
1207  //! The block becomes not dirty. if it was dirty, an entry in write_scheduled_blocks is made referencing the write_read_request.
1208  //! \param sbid block to write
1209  //! \return pointer to the write_read_request
1211  {
1212  SwappableBlockType& sblock = swappable_blocks[sbid];
1214  wrr->write_req = sblock.clean_async(read_after_write(wrr));
1215  if (wrr->write_req.valid())
1216  {
1217  bool t = write_scheduled_blocks.insert(std::make_pair(sbid, wrr)).second;
1218  STXXL_ASSERT(t);
1219  return wrr;
1220  }
1221  else
1222  {
1223  delete wrr;
1224  return 0;
1225  }
1226  }
1227 
1228  //! Try to interrupt a read scheduled in a write_read_request.
1229  //!
1230  //! side-effect: possibly erases entry from write_scheduled_blocks, so the iterator writing_block may become invalid
1231  //! \return if successful
1233  {
1234  // stop read
1235  writing_block->second->shall_read = false;
1236  // check if stopped
1237  if (! writing_block->second->write_done_soon)
1238  return true;
1239  // => possibly to late
1240  scheduled_blocks_reference taker = *writing_block->second->taker;
1241  // wait
1242  wait_on_write(writing_block);
1243  // check if read started
1244  if (taker.second.read_req.valid())
1245  // => read started, to late
1246  return false;
1247  else
1248  // => just in time
1249  return true;
1250  }
1251 
1252  //! Schedule an internal and external block to read.
1253  //!
1254  //! If the giver is still writing, schedule read via its write_read_request.
1256  {
1257  // first check if block_to_read is still writing. do not read before write finished
1258  // wait_on_write(block_to_read->first);
1259 
1260  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->first);
1261  if (it != write_scheduled_blocks.end())
1262  {
1263  scheduled_blocks_iterator other_block_to_read = it->second->taker;
1264  // check if scheduled to read
1265  if (it->second->shall_read)
1266  {
1267  if (try_interrupt_read(it))
1268  {
1269  // => interrupted, swap internal_blocks
1270  std::swap(other_block_to_read->second.giver, block_to_read->second.giver);
1271  if (other_block_to_read->second.giver.first)
1272  {
1273  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(other_block_to_read->second.giver.second);
1274  if (it != write_scheduled_blocks.end())
1275  it->second->taker = other_block_to_read;
1276  else
1277  other_block_to_read->second.giver.first = false;
1278  }
1279  if (block_to_read->second.giver.first)
1280  {
1281  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->second.giver.second);
1282  if (it != write_scheduled_blocks.end())
1283  it->second->taker = block_to_read;
1284  else
1285  block_to_read->second.giver.first = false;
1286  }
1287 
1288  internal_block_type* tmp_iblock = swappable_blocks[block_to_read->first].detach_internal_block();
1289  swappable_blocks[block_to_read->first].attach_internal_block(
1290  swappable_blocks[other_block_to_read->first].detach_internal_block());
1291  swappable_blocks[other_block_to_read->first].attach_internal_block(tmp_iblock);
1292  // => this block has its internal_block back, no need to read
1293  // reschedule other
1294  schedule_read(other_block_to_read);
1295  return;
1296  }
1297  // else => read already started, but write done -> read this
1298  }
1299  else
1300  {
1301  // => no read scheduled, swap internal_blocks
1302  std::swap(other_block_to_read->second.giver, block_to_read->second.giver);
1303  if (other_block_to_read->second.giver.first)
1304  {
1305  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(other_block_to_read->second.giver.second);
1306  if (it != write_scheduled_blocks.end())
1307  it->second->taker = other_block_to_read;
1308  else
1309  other_block_to_read->second.giver.first = false;
1310  }
1311  if (block_to_read->second.giver.first)
1312  {
1313  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->second.giver.second);
1314  if (it != write_scheduled_blocks.end())
1315  it->second->taker = block_to_read;
1316  else
1317  block_to_read->second.giver.first = false;
1318  }
1319 
1320  internal_block_type* tmp_iblock = swappable_blocks[block_to_read->first].detach_internal_block();
1321  swappable_blocks[block_to_read->first].attach_internal_block(
1322  other_block_to_read->second.reserved_iblock);
1323  other_block_to_read->second.reserved_iblock = tmp_iblock;
1324  // => this block has its internal_block back, no need to read
1325  return;
1326  }
1327  }
1328 
1329  // schedule block_to_read to read
1330  if (block_to_read->second.giver.first)
1331  {
1332  write_scheduled_blocks_iterator writing_block = write_scheduled_blocks.find(block_to_read->second.giver.second);
1333  if (writing_block != write_scheduled_blocks.end())
1334  {
1335  // => there is a write scheduled
1336  // tell the completion handler that we want a read
1337  writing_block->second->block_to_start_read = swappable_blocks.begin() + block_to_read->first;
1338  writing_block->second->taker = block_to_read;
1339  writing_block->second->shall_read = true;
1340  // and check if it is not to late
1341  if (writing_block->second->write_done_soon)
1342  {
1343  // => the completion handler may have missed our wish to read
1344  // so wait for it to finish and check
1345  wait_on_write(writing_block);
1346  block_to_read->second.giver.first = false;
1347  if (block_to_read->second.read_req.valid())
1348  // read scheduled
1349  return;
1350  }
1351  else
1352  // read scheduled
1353  return;
1354  }
1355  else
1356  block_to_read->second.giver.first = false;
1357  }
1358  // => read could not be scheduled through the completion handler
1359  block_to_read->second.read_req = swappable_blocks[block_to_read->first].read_async();
1360  }
1361 
1362  //! Wait for the write to finish.
1363  //!
1364  //! side-effect: erases entry from write_scheduled_blocks
1366  {
1367  writing_block->second->write_req->wait();
1368  delete writing_block->second;
1369  write_scheduled_blocks.erase(writing_block);
1370  }
1371 
1372  //! Wait for the write to finish.
1373  //!
1374  //! side-effect: erases entry from write_scheduled_blocks
1376  {
1377  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(writing_block);
1378  if (it != write_scheduled_blocks.end())
1379  wait_on_write(it);
1380  }
1381 
1382  //! Wait for the write of the giver to finish.
1383  //!
1384  //! side-effect: erases entry from write_scheduled_blocks
1385  void wait_on_write(const scheduled_blocks_iterator& schedule_meta)
1386  {
1387  if (schedule_meta->second.giver.first)
1388  {
1389  wait_on_write(schedule_meta->second.giver.second);
1390  schedule_meta->second.giver.first = false;
1391  }
1392  }
1393 
1394  //! Wait for the read to finish.
1395  //!
1396  //! side-effect: erases entry for the write of the giver from write_scheduled_blocks
1397  void wait_on_read(const scheduled_blocks_iterator& schedule_meta)
1398  {
1399  wait_on_write(schedule_meta);
1400  if (schedule_meta->second.read_req.valid())
1401  {
1402  schedule_meta->second.read_req->wait();
1403  schedule_meta->second.read_req = 0;
1404  }
1405  }
1406 
1407  //! Wait for the write of the giver to finish and return reserved internal_block.
1408  //!
1409  //! side-effect: erases entry for the write of the giver from write_scheduled_blocks
1411  {
1412  wait_on_write(schedule_meta);
1413  internal_block_type* r = schedule_meta->second.reserved_iblock;
1414  schedule_meta->second.reserved_iblock = 0;
1415  return r;
1416  }
1417 
1418  bool shall_keep_internal_block(const scheduled_blocks_iterator& schedule_meta, const bool ignore_first = true) const
1419  {
1420  // returns true iif there is an acquire or acquire_uninitialized scheduled or there is a deinitialize scheduled and the block is dirty
1421  for (typename std::deque<block_scheduler_operation>::reverse_iterator
1422  rit = schedule_meta->second.operations.rbegin() + ignore_first;
1423  rit != schedule_meta->second.operations.rend(); ++rit)
1424  {
1425  switch (*rit)
1426  {
1427  case block_scheduler_type::op_acquire:
1428  case block_scheduler_type::op_acquire_uninitialized:
1429  return true;
1430  break;
1431  case block_scheduler_type::op_release:
1432  case block_scheduler_type::op_release_dirty:
1433  break;
1434  case block_scheduler_type::op_deinitialize:
1435  if (swappable_blocks[schedule_meta->first].is_dirty()) return true; break;
1436  case block_scheduler_type::op_initialize:
1437  case block_scheduler_type::op_extract_external_block:
1438  break;
1439  }
1440  }
1441  return false;
1442  }
1443 
1444  // assumes the current operation to be still in operations
1445  bool shall_be_cleaned(const scheduled_blocks_iterator& schedule_meta) const
1446  {
1447  // returns true iif there is an extract_external_block scheduled and no release_dirty, deinitialize or initialize before
1448  for (typename std::deque<block_scheduler_operation>::reverse_iterator
1449  rit = schedule_meta->second.operations.rbegin() + 1;
1450  rit != schedule_meta->second.operations.rend(); ++rit)
1451  {
1452  switch (*rit)
1453  {
1454  case block_scheduler_type::op_acquire:
1455  case block_scheduler_type::op_acquire_uninitialized:
1456  case block_scheduler_type::op_release:
1457  break;
1458  case block_scheduler_type::op_release_dirty:
1459  case block_scheduler_type::op_deinitialize:
1460  case block_scheduler_type::op_initialize:
1461  return false;
1462  break;
1463  case block_scheduler_type::op_extract_external_block:
1464  return true;
1465  break;
1466  }
1467  }
1468  return false;
1469  }
1470 
1471  bool shall_be_read(const scheduled_blocks_iterator& schedule_meta, const bool ignore_first = true) const
1472  {
1473  // returns true iif there is an acquire scheduled next and the block is initialized
1474  return swappable_blocks[schedule_meta->first].is_initialized()
1475  && schedule_meta->second.operations.rbegin() + ignore_first != schedule_meta->second.operations.rend()
1476  && *(schedule_meta->second.operations.rbegin() + ignore_first) == block_scheduler_type::op_acquire;
1477  }
1478 
1480  {
1481  schedule_meta->second.operations.pop_back();
1482  if (schedule_meta->second.operations.empty())
1483  {
1484  assert(! schedule_meta->second.giver.first);
1485  scheduled_blocks.erase(schedule_meta);
1486  }
1487  }
1488 
1489  block_scheduler_algorithm_type * give_up(std::string err_msg = "detected some error in the prediction sequence")
1490  {
1491  STXXL_ERRMSG("block_scheduler_algorithm_offline_lru_prefetching: " << err_msg << ". Switching to block_scheduler_algorithm_online.");
1492  // switch algorithm
1495  // and delete self
1496  delete bs.switch_algorithm_to(new_algo);
1497  return new_algo;
1498  }
1499 
1501  { return_free_internal_block_to_block_scheduler(iblock); }
1502 
1504  {
1505  while (next_op_to_schedule != prediction_sequence.end())
1506  {
1507  // list operation in scheduled_blocks
1508  std::pair<scheduled_blocks_iterator, bool> ins_res = scheduled_blocks.insert(
1509  std::make_pair(next_op_to_schedule->id, next_op_to_schedule->op));
1510  scheduled_blocks_iterator schedule_meta = ins_res.first;
1511  if (! ins_res.second)
1512  schedule_meta->second.operations.push_front(next_op_to_schedule->op);
1513  SwappableBlockType& sblock = swappable_blocks[next_op_to_schedule->id];
1514 
1515  // do appropriate preparations
1516  if (next_op_to_schedule->op == block_scheduler_type::op_acquire
1517  || next_op_to_schedule->op == block_scheduler_type::op_acquire_uninitialized)
1518  {
1519  if (sblock.is_internal())
1520  {
1521  if (free_evictable_blocks.erase(next_op_to_schedule->id))
1522  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1523  }
1524  else
1525  {
1526  if (! schedule_meta->second.reserved_iblock)
1527  {
1528  // => needs internal_block -> try to get one
1529  // -> try to get one from block_scheduler
1530  schedule_meta->second.reserved_iblock = get_free_internal_block_from_block_scheduler();
1531  if (! schedule_meta->second.reserved_iblock)
1532  {
1533  // -> try to get one by evicting
1534  if (free_evictable_blocks.empty())
1535  {
1536  // => can not schedule acquire
1537  // remove operation from scheduled_blocks
1538  if (ins_res.second)
1539  scheduled_blocks.erase(ins_res.first);
1540  else
1541  schedule_meta->second.operations.pop_front();
1542  // stop scheduling
1543  return;
1544  }
1545  swappable_block_identifier_type giver = pop_begin(free_evictable_blocks);
1546  {
1547  assert(scheduled_blocks.find(giver) == scheduled_blocks.end() ||
1548  !shall_keep_internal_block(scheduled_blocks.find(giver), false));
1549  }
1550  write_read_request* wrr = schedule_write(giver);
1551  schedule_meta->second.giver.first = (wrr != NULL);
1552  schedule_meta->second.giver.second = giver;
1553  schedule_meta->second.reserved_iblock = swappable_blocks[giver].detach_internal_block();
1554  if (wrr)
1555  wrr->taker = schedule_meta;
1556  }
1557  // read if desired
1558  if (shall_be_read(schedule_meta, false))
1559  {
1560  // => there is no operation scheduled for this block before this acquire and it is initialized
1561  // -> start prefetching now
1562  sblock.attach_internal_block(schedule_meta->second.reserved_iblock);
1563  schedule_meta->second.reserved_iblock = 0;
1564  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1565  schedule_read(schedule_meta);
1566  }
1567  }
1568  }
1569  }
1570  else if (next_op_to_schedule->op == block_scheduler_type::op_deinitialize)
1571  {
1572  if (sblock.is_dirty())
1573  if (free_evictable_blocks.erase(next_op_to_schedule->id))
1574  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1575  }
1576 
1577  ++next_op_to_schedule;
1578  }
1579  for (typename std::set<swappable_block_identifier_type>::iterator it = free_evictable_blocks.begin();
1580  it != free_evictable_blocks.end(); ++it)
1581  {
1582  if (! write_scheduled_blocks.count(*it))
1583  schedule_write(*it);
1584  }
1585  }
1586 
1588  {
1589  if (old_algo)
1590  // copy prediction sequence
1591  prediction_sequence = old_algo->get_prediction_sequence();
1592  next_op_to_schedule = prediction_sequence.begin();
1593  if (get_algorithm_from_block_scheduler())
1594  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
1595  free_evictable_blocks.insert(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
1596  schedule_next_operations();
1597  }
1598 
1599  void deinit()
1600  {
1601  // TODO remove
1602  if (! scheduled_blocks.empty())
1603  STXXL_MSG("deinit while scheduled_blocks not empty");
1604  if (! scheduled_evictable_blocks.empty())
1605  STXXL_MSG("deinit while scheduled_evictable_blocks not empty");
1606 
1607  // empty scheduled_blocks
1608  free_evictable_blocks.insert(scheduled_evictable_blocks.begin(), scheduled_evictable_blocks.end());
1609  //for (typename std::set<swappable_block_identifier_type>::iterator it = scheduled_evictable_blocks.begin();
1610  // it != scheduled_evictable_blocks.end(); ++it)
1611  // free_evictable_blocks.insert(*it);
1612  scheduled_evictable_blocks.clear();
1613  while (! scheduled_blocks.empty())
1614  {
1615  scheduled_blocks_iterator it = scheduled_blocks.begin();
1616  wait_on_read(it);
1617  if (it->second.reserved_iblock)
1618  return_free_internal_block(it->second.reserved_iblock);
1619  scheduled_blocks.erase(it);
1620  }
1621  while (! write_scheduled_blocks.empty())
1622  {
1623  write_scheduled_blocks_iterator it = write_scheduled_blocks.begin();
1624  wait_on_write(it);
1625  }
1626  }
1627 
1628 public:
1631  { init(get_algorithm_from_block_scheduler()); }
1632 
1633  // It is possible to keep an old simulation-algorithm object and reuse it's prediction sequence
1636  { init(old); }
1637 
1639  {
1640  deinit();
1641  if (! free_evictable_blocks.empty())
1642  STXXL_ERRMSG("Destructing block_scheduler_algorithm_offline_lru_prefetching that still holds evictable blocks. They get deinitialized.");
1643  while (! free_evictable_blocks.empty())
1644  {
1645  SwappableBlockType& sblock = swappable_blocks[pop_begin(free_evictable_blocks)];
1646  if (internal_block_type* iblock = sblock.deinitialize())
1647  return_free_internal_block(iblock);
1648  }
1649  }
1650 
1651  virtual bool evictable_blocks_empty()
1652  {
1653  deinit();
1654  return free_evictable_blocks.empty();
1655  }
1656 
1658  { return pop_begin(free_evictable_blocks); }
1659 
1660  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
1661  {
1662  assert(! prediction_sequence.empty());
1663  assert(prediction_sequence.front().op ==
1664  ((uninitialized) ? block_scheduler_type::op_acquire_uninitialized : block_scheduler_type::op_acquire));
1665  assert(prediction_sequence.front().id == sbid);
1666  prediction_sequence.pop_front();
1667  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1668  assert(schedule_meta != scheduled_blocks.end()); // acquire not scheduled or out of internal_blocks (i.e. not enough internal memory)
1669  assert(schedule_meta->second.operations.back() ==
1670  ((uninitialized) ? block_scheduler_type::op_acquire_uninitialized : block_scheduler_type::op_acquire)); // acquire not scheduled or out of internal_blocks (i.e. not enough internal memory)
1671 
1672  SwappableBlockType& sblock = swappable_blocks[sbid];
1673  /* acquired => internal -> increase reference count
1674  internal but not acquired -> remove from scheduled_evictable_blocks, increase reference count
1675  not internal => uninitialized or external -> get internal_block, increase reference count
1676  uninitialized -> fill with default value
1677  external -> read */
1678  if (sblock.is_internal())
1679  {
1680  if (! sblock.is_acquired())
1681  {
1682  // not acquired yet -> remove from scheduled_evictable_blocks
1683  size_t t = scheduled_evictable_blocks.erase(sbid);
1684  STXXL_ASSERT(t != 0);
1685  wait_on_read(schedule_meta);
1686  }
1687  sblock.acquire();
1688  }
1689  else
1690  {
1691  assert(uninitialized || ! sblock.is_initialized()); // initialized blocks should be scheduled to read and thus internal
1692  //get internal_block
1693  sblock.attach_internal_block(get_ready_block(schedule_meta));
1694  sblock.acquire();
1695  //initialize new block
1696  if (! uninitialized)
1697  sblock.fill_default();
1698  }
1699 
1700  operation_done(schedule_meta);
1701  return sblock.get_internal_block();
1702  }
1703 
1704  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
1705  {
1706  assert(! prediction_sequence.empty());
1707  assert(prediction_sequence.front().op ==
1708  ((dirty) ? block_scheduler_type::op_release_dirty : block_scheduler_type::op_release));
1709  assert(prediction_sequence.front().id == sbid);
1710  prediction_sequence.pop_front();
1711  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1712  assert(schedule_meta != scheduled_blocks.end());
1713  assert(schedule_meta->second.operations.back() ==
1714  ((dirty) ? block_scheduler_type::op_release_dirty : block_scheduler_type::op_release));
1715 
1716  SwappableBlockType& sblock = swappable_blocks[sbid];
1717  sblock.make_dirty_if(dirty);
1718  sblock.release();
1719  if (! sblock.is_acquired())
1720  {
1721  if (sblock.is_dirty() || sblock.is_external())
1722  {
1723  // => evictable
1724  if (shall_keep_internal_block(schedule_meta))
1725  {
1726  // => swappable_block shall keep its internal_block
1727  scheduled_evictable_blocks.insert(sbid);
1728  if (shall_be_cleaned(schedule_meta))
1729  schedule_write(sbid);
1730  }
1731  else
1732  {
1733  // give block to scheduler
1734  free_evictable_blocks.insert(sbid);
1735  if (next_op_to_schedule != prediction_sequence.end())
1736  schedule_next_operations();
1737  else {
1738  if (! write_scheduled_blocks.count(sbid))
1739  schedule_write(sbid);
1740  }
1741  }
1742  }
1743  else
1744  {
1745  // => uninitialized
1746  if (shall_keep_internal_block(schedule_meta))
1747  // => swappable_block shall keep its internal_block
1748  schedule_meta->second.reserved_iblock = sblock.detach_internal_block();
1749  else
1750  {
1751  // release internal block and give it to prefetcher
1752  return_free_internal_block(sblock.detach_internal_block());
1753  if (next_op_to_schedule != prediction_sequence.end())
1754  schedule_next_operations();
1755  }
1756  }
1757  }
1758  operation_done(schedule_meta);
1759  }
1760 
1762  {
1763  assert(! prediction_sequence.empty());
1764  assert(prediction_sequence.front().op == block_scheduler_type::op_deinitialize);
1765  assert(prediction_sequence.front().id == sbid);
1766  prediction_sequence.pop_front();
1767  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1768  assert(schedule_meta != scheduled_blocks.end());
1769  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_deinitialize);
1770 
1771  SwappableBlockType& sblock = swappable_blocks[sbid];
1772  if (sblock.is_evictable())
1773  {
1774  size_t t;
1775  if (shall_keep_internal_block(schedule_meta, false))
1776  {
1777  t = scheduled_evictable_blocks.erase(sbid);
1778  if (t == 0) {
1779  STXXL_ERRMSG("dirty block not scheduled on deinitialize");
1780  t = free_evictable_blocks.erase(sbid);
1781  }
1782  }
1783  else
1784  t = free_evictable_blocks.erase(sbid);
1785  assert(t != 0);
1786  }
1787  if (internal_block_type* iblock = sblock.deinitialize())
1788  {
1789  if (shall_keep_internal_block(schedule_meta))
1790  // => swappable_block shall keep its internal_block
1791  schedule_meta->second.reserved_iblock = iblock;
1792  else
1793  {
1794  // release internal block and give it to prefetcher
1795  return_free_internal_block(iblock);
1796  if (next_op_to_schedule != prediction_sequence.end())
1797  schedule_next_operations();
1798  }
1799  }
1800  operation_done(schedule_meta);
1801  }
1802 
1804  {
1805  assert(! prediction_sequence.empty());
1806  assert(prediction_sequence.front().op == block_scheduler_type::op_initialize);
1807  assert(prediction_sequence.front().id == sbid);
1808  prediction_sequence.pop_front();
1809  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1810  assert(schedule_meta != scheduled_blocks.end());
1811  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_initialize);
1812 
1813  SwappableBlockType& sblock = swappable_blocks[sbid];
1814  sblock.initialize(eblock);
1815  if (shall_be_read(schedule_meta))
1816  {
1817  sblock.attach_internal_block(schedule_meta->second.reserved_iblock);
1818  schedule_meta->second.reserved_iblock = 0;
1819  scheduled_evictable_blocks.insert(sbid);
1820  schedule_read(schedule_meta);
1821  }
1822  operation_done(schedule_meta);
1823  }
1824 
1826  {
1827  assert(! prediction_sequence.empty());
1828  assert(prediction_sequence.front().op == block_scheduler_type::op_extract_external_block);
1829  assert(prediction_sequence.front().id == sbid);
1830  prediction_sequence.pop_front();
1831  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1832  assert(schedule_meta != scheduled_blocks.end());
1833  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_extract_external_block);
1834 
1835  SwappableBlockType& sblock = swappable_blocks[sbid];
1836  wait_on_write(sbid);
1837  operation_done(schedule_meta);
1838  return sblock.extract_external_block();
1839  }
1840 };
1841 
1843 
1844 #endif // !STXXL_MNG_BLOCK_SCHEDULER_HEADER
internal_block_type * detach_internal_block()
Detach the internal_block. Writes to external_block if necessary. Has to be evictable.
addressable_fifo_queue< swappable_block_identifier_type > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
void wait_on_read(const scheduled_blocks_iterator &schedule_meta)
Wait for the read to finish.
#define STXXL_ASSERT(condition)
Definition: verbose.h:220
block_scheduler_algorithm_type * give_up(std::string err_msg="detected some error in the prediction sequence")
compat::remove_const< Integral >::type div_ceil(Integral n, Integral2 d)
Definition: utils.h:199
bool shall_be_cleaned(const scheduled_blocks_iterator &schedule_meta) const
const int_type max_internal_blocks
block_scheduler_type::internal_block_type internal_block_type
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
virtual void swappable_blocks_resize(swappable_block_identifier_type size)
internal_block_type & get_internal_block(const swappable_block_identifier_type sbid) const
Get a const reference to given block&#39;s data. Block has to be already acquired.
bool is_initialized(const swappable_block_identifier_type sbid) const
check if the swappable_block is initialized.
void free_swappable_block(const swappable_block_identifier_type sbid)
Free given no longer used temporary swappable_block.
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
virtual bool is_initialized(const swappable_block_identifier_type sbid) const
std::map< swappable_block_identifier_type, write_read_request * > write_scheduled_blocks_type
Completion handler class (Loki-style).
scheduled_blocks_type scheduled_blocks
Holds not internal swappable_blocks, whose next access has already been scheduled.
scheduled_blocks_type::reference scheduled_blocks_reference
internal_block_type * get_free_internal_block_from_block_scheduler()
Get an internal_block from the block_scheduler.
Block manager class.
Definition: block_manager.h:61
void deinitialize(const swappable_block_identifier_type sbid)
Drop all data in the given block, freeing in- and external memory.
external_block_type external_data
block_scheduler_algorithm< SwappableBlockType > block_scheduler_algorithm_type
block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
virtual void initialize(swappable_block_identifier_type sbid, external_block_type)
SwappableBlockType::internal_block_type internal_block_type
block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
virtual bool is_simulating() const
internal_block_type * get_ready_block(const scheduled_blocks_iterator &schedule_meta)
Wait for the write of the giver to finish and return reserved internal_block.
std::stack< internal_block_type * > internal_blocks_blocks
Stores pointers to arrays of internal_blocks. Used to deallocate them only.
block_scheduler_algorithm< SwappableBlockType > block_scheduler_algorithm_type
std::deque< std::pair< bool, time_type > > next_use
Stores for the sequence of releases extracted from the prediction_sequence: (true, timestamp of the blocks next acquire) if it is acquired next (false, 0) if it is deinitialized next (false, 1) if it is not accessed any more (false, 2) if it is extracted next (false, 3) if it is initialized next.
block_scheduler_algorithm_offline_lru_prefetching(block_scheduler_algorithm_type *old)
Container::value_type pop_begin(Container &c)
Definition: utils.h:303
void initialize(const swappable_block_identifier_type sbid, external_block_type eblock)
Initialize the swappable_block with the given external_block.
block_scheduler_algorithm_offline_lfd(block_scheduler_algorithm_type *old)
void read_sync()
Read synchronously from external_block to internal_block. Has to be internal and have an external_blo...
void clean_sync()
Write synchronously from internal_block to external_block if necessary.
An internal priority queue that allows removing elements addressed with (a copy of) themselves...
std::vector< SwappableBlockType > swappable_blocks
temporary blocks that will not be needed after algorithm termination.
void init(block_scheduler_algorithm_type *old_algo)
void explicit_timestep()
Record a timestep in the prediction sequence to seperate consecutive acquire rsp. release-operations...
static uint_pair min()
return an uint_pair instance containing the smallest value possible
Definition: uint_types.h:234
request_ptr read_async(completion_handler on_cmpl=completion_handler())
Read asyncronusly from external_block to internal_block. Has to be internal and have an external_bloc...
bool has_external_block() const
If it has an external_block.
static const int_type max_internal_blocks_alloc_at_once
virtual const prediction_sequence_type & get_prediction_sequence() const
block_scheduler_algorithm_online_lru(block_scheduler_algorithm_type *old)
internal_block_type * internal_data
external_data.valid if no associated space on disk
prediction_sequence_type prediction_sequence
block_scheduler_algorithm_offline_lfd(block_scheduler_type &bs)
block_scheduler_type::time_type time_type
block_scheduler_type::prediction_sequence_type prediction_sequence_type
Virtualization of a block of data. Holds information for allocating and swapping. To use in cooperati...
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
bool valid() const
test for a non-NULL pointer
Definition: counting_ptr.h:138
bool is_simulating() const
Returns if simulation mode is on, i.e. if a prediction sequence is being recorded.
block_scheduler< SwappableBlockType > block_scheduler_type
std::stack< swappable_block_identifier_type > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
Block scheduling algorithm caching via the least recently used policy (offline), and prefetching in a...
internal_block_type::bid_type external_block_type
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
prediction_sequence_type::iterator next_op_to_schedule
void return_free_internal_block(internal_block_type *iblock)
block_scheduler< SwappableBlockType > block_scheduler_type
internal_block_type * deinitialize()
Bring the block in uninitialized state, freeing external and internal memory. Returns a pointer to th...
void initialize(external_block_type eblock)
Set the external_block that holds the swappable_block&#39;s data. The block gets initialized with it...
Block scheduling algorithm caching via the least recently used policy (online).
virtual void deinitialize(swappable_block_identifier_type sbid)
write_scheduled_blocks_type::iterator write_scheduled_blocks_iterator
void init(block_scheduler_algorithm_type *old_algo)
block_scheduler_algorithm * get_algorithm_from_block_scheduler()
block_scheduler_type::external_block_type external_block_type
SwappableBlockType::external_block_type external_block_type
bool is_initialized() const
If it has some valid data (in- or external).
block_scheduler_algorithm_type::time_type time_type
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
block_scheduler_type::prediction_sequence_type prediction_sequence_type
Block containing elements of fixed length.
Definition: typed_block.h:237
counting_ptr< request > request_ptr
A reference counting pointer for request.
Definition: request.h:113
virtual void wait(bool measure_time=true)=0
Suspends calling thread until completion of the request.
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
block_scheduler_type::external_block_type external_block_type
internal_block_type & acquire()
Acquire the block, i.e. add a reference. Has to be internal.
void operation_done(scheduled_blocks_iterator &schedule_meta)
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
block_scheduler_algorithm_simulation(block_scheduler_algorithm_type *old)
virtual bool is_initialized(const swappable_block_identifier_type sbid) const
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
choose_int_types< my_pointer_size >::int_type int_type
Definition: types.h:63
std::priority_queue< swappable_block_identifier_type, std::vector< swappable_block_identifier_type >, std::greater< swappable_block_identifier_type > > free_swappable_blocks
holds indices of free swappable_blocks with attributes reset.
bool try_interrupt_read(const write_scheduled_blocks_iterator &writing_block)
Try to interrupt a read scheduled in a write_read_request.
bool shall_be_read(const scheduled_blocks_iterator &schedule_meta, const bool ignore_first=true) const
block_scheduler_type::external_block_type external_block_type
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
internal_block_type * get_free_internal_block()
Get an internal_block from the freelist or a newly allocated one if available.
block_scheduler< SwappableBlockType > block_scheduler_type
virtual void deinitialize(swappable_block_identifier_type sbid)
bool is_acquired() const
If it is acquired.
addressable_priority_queue< swappable_block_identifier_type, priority > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
Striping disk allocation scheme functor.
Definition: block_alloc.h:41
swappable_block_identifier_type allocate_swappable_block()
Allocate an uninitialized swappable_block.
std::set< swappable_block_identifier_type > free_evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
void wait_on_write(const write_scheduled_blocks_iterator &writing_block)
Wait for the write to finish.
block_scheduler_type::external_block_type external_block_type
Pseudo block scheduling algorithm only recording the request sequence.
bool operator<(const uint_pair &b) const
less-than comparison operator
Definition: uint_types.h:204
block_scheduler_algorithm_simulation(block_scheduler_type &bs)
#define STXXL_BEGIN_NAMESPACE
Definition: namespace.h:16
write_scheduled_blocks_type::reference write_scheduled_blocks_reference
std::vector< SwappableBlockType >::size_type swappable_block_identifier_type
const internal_block_type & get_internal_block() const
Get a reference to the data-block. Has to be acquired.
block_scheduler(const int_type max_internal_memory)
Create a block_scheduler with empty prediction sequence in simple mode.
block_scheduler_type::block_scheduler_operation block_scheduler_operation
write_read_request * schedule_write(const swappable_block_identifier_type sbid)
Schedule an internal, possibly dirty swappable_block to write.
std::map< swappable_block_identifier_type, scheduled_block_meta > scheduled_blocks_type
block_scheduler_algorithm(block_scheduler_algorithm *old)
bool shall_keep_internal_block(const scheduled_blocks_iterator &schedule_meta, const bool ignore_first=true) const
virtual void deinitialize(swappable_block_identifier_type sbid)
virtual swappable_block_identifier_type evictable_blocks_pop()
const prediction_sequence_type & get_prediction_sequence() const
Get the prediction_sequence.
request_ptr clean_async(completion_handler on_cmpl=completion_handler())
Write asyncronusly from internal_block to external_block if necessary.
std::list< prediction_sequence_element > prediction_sequence_type
static uint_pair max()
return an uint_pair instance containing the largest value possible
Definition: uint_types.h:241
virtual swappable_block_identifier_type evictable_blocks_pop()
void return_free_internal_block_to_block_scheduler(internal_block_type *iblock)
Return an internal_block to the block_scheduler.
block_scheduler_type::prediction_sequence_type prediction_sequence_type
virtual swappable_block_identifier_type evictable_blocks_pop()
typed_block< raw_block_size, ValueType > internal_block_type
block_scheduler_type::internal_block_type internal_block_type
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
virtual void swappable_blocks_resize(swappable_block_identifier_type)
void fill_default()
Fill block with default data, is supposed to be overwritten by subclass. Has to be internal...
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
void schedule_read(scheduled_blocks_iterator block_to_read)
Schedule an internal and external block to read.
virtual void deinitialize(swappable_block_identifier_type sbid)
virtual void release(swappable_block_identifier_type sbid, const bool dirty)=0
bool is_dirty() const
If the external_block does not hold valid data.
swappable_block()
Create in uninitialized state.
block_scheduler_type::internal_block_type internal_block_type
Block scheduling algorithm caching via the longest forward distance policy (offline).
#define STXXL_ERRMSG(x)
Definition: verbose.h:94
block_scheduler_algorithm_online_lru(block_scheduler_type &bs)
void attach_internal_block(internal_block_type *iblock)
Attach an internal_block, making the block internal. Has to be not internal.
block_scheduler_algorithm< SwappableBlockType > * algo
std::set< swappable_block_identifier_type > scheduled_evictable_blocks
std::vector< SwappableBlockType >::iterator swappable_blocks_iterator
block_scheduler< SwappableBlockType > block_scheduler_type
void wait_on_write(const swappable_block_identifier_type &writing_block)
Wait for the write to finish.
block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
bool is_external() const
If it has an external_block that holds valid data.
choose_int_types< my_pointer_size >::unsigned_type unsigned_type
Definition: types.h:64
priority(const SwappableBlockType &sblock, const std::pair< bool, time_type > &t)
block_scheduler_algorithm(block_scheduler_type &bs)
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
static unsigned_type disk_allocation_offset
std::vector< SwappableBlockType > & swappable_blocks
void return_free_internal_block(internal_block_type *iblock)
#define STXXL_MSG(x)
Definition: verbose.h:73
block_scheduler_type::internal_block_type internal_block_type
block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
void release()
Release the block, i.e. subduct a reference. Has to be acquired.
void return_free_internal_block(internal_block_type *iblock)
Return an internal_block to the freelist.
prediction_sequence_element(block_scheduler_operation op, swappable_block_identifier_type id, int_type time)
void release(const swappable_block_identifier_type sbid, const bool dirty)
Release the given block. Has to be in pairs with acquire. Pairs may be nested and interleaved...
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
Schedules swapping of blocks and provides blocks for temporary storage.
block_scheduler_algorithm_type::time_type time_type
write_scheduled_blocks_type write_scheduled_blocks
Holds swappable_blocks, whose internal block has been taken away but the clean did not finish yet...
block_scheduler_type::internal_block_type internal_block_type
void return_free_internal_block(internal_block_type *iblock)
block_scheduler_algorithm< SwappableBlockType > * get_current_algorithm() const
Return the current algorithm.
block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
block_scheduler_type::prediction_sequence_element prediction_sequence_element_type
block_scheduler_algorithm_offline_lru_prefetching(block_scheduler_type &bs)
virtual swappable_block_identifier_type evictable_blocks_pop()
block_scheduler< SwappableBlockType > block_scheduler_type
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
bool make_dirty_if(const bool make_dirty)
Invalidate external data if true.
Interface of a block scheduling algorithm.
block_scheduler_algorithm< SwappableBlockType > block_scheduler_algorithm_type
external_block_type extract_external_block()
Extract the swappable_blocks data in an external_block. The block gets uninitialized.
block_scheduler_algorithm< SwappableBlockType > * switch_algorithm_to(block_scheduler_algorithm< SwappableBlockType > *new_algo)
Switch the used algorithm, e.g. to simulation etc..
Request object encapsulating basic properties like file and offset.
Definition: request.h:38
block_scheduler_algorithm_type::time_type time_type
std::stack< internal_block_type * > free_internal_blocks
holds free internal_blocks with attributes reset.
bool is_internal() const
If it has an internal_block. The internal_block implicitly holds valid data.
block_scheduler_type::external_block_type external_block_type
block_scheduler_algorithm< SwappableBlockType > block_scheduler_algorithm_type
void wait_on_write(const scheduled_blocks_iterator &schedule_meta)
Wait for the write of the giver to finish.
void return_free_internal_block(internal_block_type *iblock)
#define STXXL_END_NAMESPACE
Definition: namespace.h:17
internal_block_type & get_internal_block()
Get a reference to the data-block. Has to be acquired.
internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
Acquire the given block. Has to be in pairs with release. Pairs may be nested and interleaved...
external_block_type extract_external_block(const swappable_block_identifier_type sbid)
Deinitialize the swappable_block and return it&#39;s contents in an external_block.
bool is_evictable() const
If it holds an internal_block but does not need it.