MaintenanceScheduler.cc
/*
 * Copyright (C) 2007-2015 Hypertable, Inc.
 *
 * This file is part of Hypertable.
 *
 * Hypertable is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 3 of the
 * License, or any later version.
 *
 * Hypertable is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
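
/** @file
 * Maintenance scheduling for RangeServer.
 * This file contains the definition of MaintenanceScheduler, the class that
 * decides which ranges receive maintenance work (compactions, splits,
 * relinquishes, memory purges) and enqueues the corresponding tasks.
 */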

#include <Common/Compat.h>
#include "MaintenanceScheduler.h"

// RangeServer-local headers used by the code below (the exact include list
// is assumed from the classes this file references)
#include "Global.h"
#include "MaintenanceFlag.h"
#include "MaintenanceTaskCompaction.h"
#include "MaintenanceTaskDeferredInitialization.h"
#include "MaintenanceTaskMemoryPurge.h"
#include "MaintenanceTaskRelinquish.h"
#include "MaintenanceTaskSplit.h"
#include "MaintenanceTaskWorkQueue.h"

#include <Common/Config.h>
#include <Common/SystemInfo.h>
#include <Common/TimeWindow.h>
#include <Common/md5.h>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <functional>
#include <iostream>
#include <iterator>
#include <limits>

using namespace Hypertable;
using namespace std;
using namespace Hypertable::Config;

namespace {

  // Orders RangeData entries by ascending maintenance priority
  struct RangeDataAscending {
    bool operator()(const RangeData &x, const RangeData &y) const {
      return x.data->priority < y.data->priority;
    }
  };

}

MaintenanceScheduler::MaintenanceScheduler(MaintenanceQueuePtr &queue,
                                           TableInfoMapPtr &live_map)
  : m_queue(queue), m_live_map(live_map), m_start_offset(0),
    m_initialized(false), m_low_memory_mode(false) {
  m_prioritizer = &m_prioritizer_log_cleanup;
  m_maintenance_interval = get_i32("Hypertable.RangeServer.Maintenance.Interval");
  m_query_cache_memory = get_i64("Hypertable.RangeServer.QueryCache.MaxMemory");
  m_low_memory_prioritization = get_bool("Hypertable.RangeServer.Maintenance.LowMemoryPrioritization");

  // Setup to immediately schedule maintenance
  m_last_low_memory = chrono::steady_clock::now();

  m_low_memory_limit_percentage = get_i32("Hypertable.RangeServer.LowMemoryLimit.Percentage");
  m_merging_delay = get_i32("Hypertable.RangeServer.Maintenance.MergingCompaction.Delay");
  m_merges_per_interval = get_i32("Hypertable.RangeServer.Maintenance.MergesPerInterval",
                                  std::numeric_limits<int32_t>::max());
  m_move_compactions_per_interval = get_i32("Hypertable.RangeServer.Maintenance.MoveCompactionsPerInterval");
  m_initialization_per_interval = get_i32("Hypertable.RangeServer.Maintenance.InitializationPerInterval",
                                          std::numeric_limits<int32_t>::max());

  // (member name assumed)
  m_maintenance_queue_worker_count =
    (int32_t)Global::maintenance_queue->worker_count();
}
void MaintenanceScheduler::exclude(const TableIdentifier &table) {
  lock_guard<mutex> lock(m_mutex);
  if (m_table_blacklist.count(table.id) > 0)
    return;
  m_table_blacklist.insert(table.id);
  // Drop range maintenance tasks for table ID
  function<bool(Range *)> drop_predicate =
    [table](Range *r) -> bool {return r->get_table_id().compare(table.id)==0;};
  Global::maintenance_queue->drop_range_tasks(drop_predicate);
}
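
// Removes a table from the maintenance-scheduling blacklist so that its
// ranges are considered again on subsequent scheduling passes.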
void MaintenanceScheduler::include(const TableIdentifier &table) {
  lock_guard<mutex> lock(m_mutex);
  m_table_blacklist.erase(table.id);
}

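
// Schedules one round of maintenance: gathers maintenance data for all live
// ranges, purges commit-log fragments that are no longer needed, prioritizes
// the ranges, and enqueues split, relinquish, compaction, memory-purge and
// deferred-initialization tasks on the maintenance queue.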
void MaintenanceScheduler::schedule() {
  Ranges ranges;
  Ranges ranges_prioritized;
  String output;
  String ag_name;
  String trace_str;
  int64_t excess = 0;
  MaintenancePrioritizer::MemoryState memory_state;
  int32_t priority = 1;
  bool low_memory = low_memory_mode();
  bool do_scheduling = true;
  bool debug = false;
  function<bool(RangeData &)> in_blacklist =
    [this](RangeData &rd) -> bool {return this->m_table_blacklist.count(rd.data->table_id);};

  auto now = chrono::steady_clock::now();

  Global::load_statistics->recompute();

  debug = debug_signal_file_exists(now);

  memory_state.balance = Global::memory_tracker->balance();
  memory_state.limit = Global::memory_limit;

  // adjust limit if it makes sense (guard condition assumed)
  if (Global::memory_limit_ensure_unused_current) {
    excess = Global::memory_limit_ensure_unused_current
             - (int64_t)(System::mem_stat().free * Property::MiB);
    if (excess > 0)
      memory_state.limit = memory_state.balance - excess;
  }

  if (low_memory) {
    if (Global::maintenance_queue->full())
      do_scheduling = false;
    excess = (memory_state.balance > memory_state.limit) ? memory_state.balance - memory_state.limit : 0;
    memory_state.needed = ((memory_state.limit * m_low_memory_limit_percentage) / 100) + excess;
  }

  if (debug) {
    trace_str += String("low_memory\t") + (low_memory ? "true" : "false") + "\n";
    trace_str += format("Global::memory_tracker->balance()\t%lld\n", (Lld)Global::memory_tracker->balance());
    trace_str += format("Global::memory_limit\t%lld\n", (Lld)Global::memory_limit);
    trace_str += format("Global::memory_limit_ensure_unused_current\t%lld\n", (Lld)Global::memory_limit_ensure_unused_current);
    trace_str += format("m_query_cache_memory\t%lld\n", (Lld)m_query_cache_memory);
    trace_str += format("excess\t%lld\n", (Lld)excess);
    trace_str += String("Global::maintenance_queue->full()\t") + (Global::maintenance_queue->full() ? "true" : "false") + "\n";
    trace_str += format("m_low_memory_limit_percentage\t%lld\n", (Lld)m_low_memory_limit_percentage);
    trace_str += format("memory_state.balance\t%lld\n", (Lld)memory_state.balance);
    trace_str += format("memory_state.limit\t%lld\n", (Lld)memory_state.limit);
    trace_str += format("memory_state.needed\t%lld\n", (Lld)memory_state.needed);
  }

  {
    uint64_t max_memory = 0;
    uint64_t available_memory = 0;
    uint64_t accesses = 0;
    uint64_t hits = 0;
    if (Global::block_cache)
      Global::block_cache->get_stats(&max_memory, &available_memory, &accesses, &hits);
    if (debug) {
      trace_str += format("FileBlockCache-max_memory\t%llu\n", (Llu)max_memory);
      trace_str += format("FileBlockCache-available_memory\t%llu\n", (Llu)available_memory);
      trace_str += format("FileBlockCache-accesses\t%llu\n", (Llu)accesses);
      trace_str += format("FileBlockCache-hits\t%llu\n", (Llu)hits);
    }
  }

  if (!do_scheduling)
    return;

  // Drop all outstanding range tasks from maintenance queue
  Global::maintenance_queue->drop_range_tasks([](Range *) -> bool {return true;});

  StringSet remove_ok_logs, removed_logs;
  m_live_map->get_ranges(ranges, &remove_ok_logs);
  time_t current_time = time(0);
  int flags = 0;

  if (Global::low_activity_time.update_current_time()) {
    if (Global::low_activity_time.is_window_enabled())
      HT_INFOF("%s low activity window", Global::low_activity_time.within_window()
               ? "Entering" : "Exiting");
  }
  if (debug)
    trace_str +=
      format("Within low activity window = %s\n",
             Global::low_activity_time.within_window() ? "true" : "false");

  // Fetch maintenance data for ranges and their access groups
  for (auto &rd : ranges.array)
    rd.data = rd.range->get_maintenance_data(ranges.arena, current_time, flags);

  if (ranges.array.empty()) {
    return;
  }

  // Make a copy of the range statistics array for get_statistics()
  {
    RangesPtr ranges_copy = make_shared<Ranges>();
    ranges_copy->array = ranges.array;
    for (size_t i=0; i<ranges.array.size(); i++) {
      ranges_copy->array[i].data = (Range::MaintenanceData *)
        ranges_copy->arena.alloc(sizeof(Range::MaintenanceData));
      memcpy(ranges_copy->array[i].data, ranges.array[i].data, sizeof(Range::MaintenanceData));
      ranges_copy->array[i].data->agdata = 0;
    }
    Global::set_ranges(ranges_copy);
  }

  // Rotate the starting point to avoid compaction starvation during high write
  // activity with many ranges.
  if (!low_memory) {
    m_start_offset %= ranges.array.size();
    if (m_start_offset != 0) {
      std::vector<RangeData> rotated;
      rotated.reserve(ranges.array.size());
      std::vector<RangeData>::iterator iter = ranges.array.begin() + m_start_offset;
      rotated.insert(rotated.end(), iter, ranges.array.end());
      rotated.insert(rotated.end(), ranges.array.begin(), iter);
      ranges.array.swap(rotated);
    }
    // advance the rotation point for the next interval (statement assumed)
    m_start_offset += m_maintenance_queue_worker_count;
  }

  // Remove ranges in table blacklist
  {
    lock_guard<mutex> lock(m_mutex);
    if (!m_table_blacklist.empty())
      ranges.remove_if(in_blacklist);
  }

  int64_t block_index_memory = 0;
  int64_t bloom_filter_memory = 0;
  int64_t cell_cache_memory = 0;
  int64_t shadow_cache_memory = 0;
  int64_t not_acknowledged = 0;
  AccessGroup::MaintenanceData *ag_data {};
  AccessGroup::CellStoreMaintenanceData *cs_data {};

  // Flag move compactions, count unacknowledged range loads, gather memory
  // statistics, and compute the earliest cached revision for each log class
  {
    int64_t revision_user = TIMESTAMP_MAX;
    int64_t revision_metadata = TIMESTAMP_MAX;
    int64_t revision_system = TIMESTAMP_MAX;
    int64_t revision_root = TIMESTAMP_MAX;

    if (debug) {
      trace_str += format("before revision_root\t%llu\n", (Llu)revision_root);
      trace_str += format("before revision_metadata\t%llu\n", (Llu)revision_metadata);
      trace_str += format("before revision_system\t%llu\n", (Llu)revision_system);
      trace_str += format("before revision_user\t%llu\n", (Llu)revision_user);
    }

    for (auto &rd : ranges.array) {

      if (rd.data->needs_major_compaction && priority <= m_move_compactions_per_interval) {
        rd.data->priority = priority++;
        rd.data->maintenance_flags = MaintenanceFlag::COMPACT_MOVE;
      }

      if (!rd.data->load_acknowledged)
        not_acknowledged++;

      for (ag_data = rd.data->agdata; ag_data; ag_data = ag_data->next) {

        // compute memory stats
        cell_cache_memory += ag_data->mem_allocated;
        for (cs_data = ag_data->csdata; cs_data; cs_data = cs_data->next) {
          shadow_cache_memory += cs_data->shadow_cache_size;
          block_index_memory += cs_data->index_stats.block_index_memory;
          bloom_filter_memory += cs_data->index_stats.bloom_filter_memory;
        }

        if (ag_data->earliest_cached_revision != TIMESTAMP_MAX) {
          if (rd.range->is_root()) {
            if (ag_data->earliest_cached_revision < revision_root)
              revision_root = ag_data->earliest_cached_revision;
          }
          else if (rd.data->is_metadata) {
            if (ag_data->earliest_cached_revision < revision_metadata)
              revision_metadata = ag_data->earliest_cached_revision;
          }
          else if (rd.data->is_system) {
            if (ag_data->earliest_cached_revision < revision_system)
              revision_system = ag_data->earliest_cached_revision;
          }
          else {
            if (ag_data->earliest_cached_revision < revision_user)
              revision_user = ag_data->earliest_cached_revision;
          }
        }
      }
    }

    if (debug) {
      trace_str += format("after revision_root\t%llu\n", (Llu)revision_root);
      trace_str += format("after revision_metadata\t%llu\n", (Llu)revision_metadata);
      trace_str += format("after revision_system\t%llu\n", (Llu)revision_system);
      trace_str += format("after revision_user\t%llu\n", (Llu)revision_user);
    }

    if (Global::root_log)
      Global::root_log->purge(revision_root, remove_ok_logs, removed_logs, &trace_str);

    if (Global::metadata_log)
      Global::metadata_log->purge(revision_metadata, remove_ok_logs, removed_logs, &trace_str);

    if (Global::system_log)
      Global::system_log->purge(revision_system, remove_ok_logs, removed_logs, &trace_str);

    if (Global::user_log)
      Global::user_log->purge(revision_user, remove_ok_logs, removed_logs, &trace_str);

    // Remove commit logs purged above from the MetaLogEntityRemoveOkLogs entity
    if (!removed_logs.empty()) {
      Global::remove_ok_logs->remove(removed_logs);
      // persist the updated entity to the RSML (call assumed)
      Global::rsml_writer->record_state(Global::remove_ok_logs);
    }
  }

  {
    int64_t block_cache_memory = Global::block_cache ? Global::block_cache->memory_used() : 0;
    int64_t total_memory = block_cache_memory + block_index_memory + bloom_filter_memory
      + cell_cache_memory + shadow_cache_memory + m_query_cache_memory;
    double block_cache_pct = ((double)block_cache_memory / (double)total_memory) * 100.0;
    double block_index_pct = ((double)block_index_memory / (double)total_memory) * 100.0;
    double bloom_filter_pct = ((double)bloom_filter_memory / (double)total_memory) * 100.0;
    double cell_cache_pct = ((double)cell_cache_memory / (double)total_memory) * 100.0;
    double shadow_cache_pct = ((double)shadow_cache_memory / (double)total_memory) * 100.0;
    double query_cache_pct = ((double)m_query_cache_memory / (double)total_memory) * 100.0;

    HT_INFOF("Memory Statistics (MB): VM=%.2f, RSS=%.2f, tracked=%.2f, computed=%.2f limit=%.2f",
             System::proc_stat().vm_size, System::proc_stat().vm_resident,
             (double)memory_state.balance/(double)Property::MiB,
             (double)total_memory/(double)Property::MiB,
             (double)Global::memory_limit/(double)Property::MiB);
    HT_INFOF("Memory Allocation: BlockCache=%.2f%% BlockIndex=%.2f%% "
             "BloomFilter=%.2f%% CellCache=%.2f%% ShadowCache=%.2f%% "
             "QueryCache=%.2f%%",
             block_cache_pct, block_index_pct, bloom_filter_pct,
             cell_cache_pct, shadow_cache_pct, query_cache_pct);
  }

  if (debug)
    trace_str += "\nScheduling Decisions:\n";

  m_prioritizer->prioritize(ranges.array, memory_state, priority,
                            debug ? &trace_str : 0);

  if (debug)
    write_debug_output(now, ranges, trace_str);

  auto schedule_time = chrono::steady_clock::now();

  if (not_acknowledged) {
    HT_INFOF("Found load_acknowledged=false in %d ranges", (int)not_acknowledged);
  }

  bool uninitialized_range_seen {};

  // If this is the first time around, just enqueue work that
  // was in progress
  if (!m_initialized) {
    uint32_t level = 0, priority = 0;
    for (auto &rd : ranges.array) {
      if (!rd.data->initialized)
        uninitialized_range_seen = true;
      if (rd.data->state == RangeState::SPLIT_LOG_INSTALLED ||
          rd.data->state == RangeState::SPLIT_SHRUNK) {
        level = get_level(rd);
        Global::maintenance_queue->add(new MaintenanceTaskSplit(level, priority++, schedule_time, rd.range));
      }
      else if (rd.data->state == RangeState::RELINQUISH_LOG_INSTALLED) {
        level = get_level(rd);
        Global::maintenance_queue->add(new MaintenanceTaskRelinquish(level, priority++, schedule_time, rd.range));
      }
    }
    m_initialized = true;
  }
  else {

    lock_guard<mutex> lock(m_mutex);

    // Remove ranges for tables in blacklist
    if (!m_table_blacklist.empty())
      ranges.remove_if(in_blacklist);

    // Sort the ranges based on priority
    ranges_prioritized.array.reserve(ranges.array.size());
    for (auto &rd : ranges.array) {
      if (rd.data->priority > 0)
        ranges_prioritized.array.push_back(rd);
    }
    struct RangeDataAscending ordering;
    sort(ranges_prioritized.array.begin(), ranges_prioritized.array.end(), ordering);

    int32_t merges_created = 0;
    int32_t initialization_created = 0;
    uint32_t level = 0;

    for (auto &rd : ranges_prioritized.array) {
      if (!rd.data->initialized) {
        uninitialized_range_seen = true;
        if (!low_memory && initialization_created < m_initialization_per_interval) {
          level = get_level(rd);
          // (task class name assumed)
          Global::maintenance_queue->add(new MaintenanceTaskDeferredInitialization(
                     level, rd.data->priority,
                     schedule_time, rd.range));
          ++initialization_created;
        }
      }
      if (rd.data->maintenance_flags & MaintenanceFlag::SPLIT) {
        level = get_level(rd);
        Global::maintenance_queue->add(new MaintenanceTaskSplit(level, rd.data->priority,
                                                                schedule_time, rd.range));
      }
      else if (rd.data->maintenance_flags & MaintenanceFlag::RELINQUISH) {
        level = get_level(rd);
        Global::maintenance_queue->add(new MaintenanceTaskRelinquish(level, rd.data->priority,
                                                                     schedule_time, rd.range));
      }
      else if (rd.data->maintenance_flags & MaintenanceFlag::COMPACT) {
        MaintenanceTaskCompaction *task;
        level = get_level(rd);
        task = new MaintenanceTaskCompaction(level, rd.data->priority,
                                             schedule_time, rd.range);
        if (!rd.data->needs_major_compaction) {
          // Non-merging compaction subtasks are always added; merging
          // compactions are capped per interval (flag tests assumed)
          for (AccessGroup::MaintenanceData *ag_data=rd.data->agdata; ag_data; ag_data=ag_data->next) {
            if (MaintenanceFlag::minor_compaction(ag_data->maintenance_flags) ||
                MaintenanceFlag::major_compaction(ag_data->maintenance_flags) ||
                MaintenanceFlag::gc_compaction(ag_data->maintenance_flags))
              task->add_subtask(ag_data->ag, ag_data->maintenance_flags);
            else if (MaintenanceFlag::merging_compaction(ag_data->maintenance_flags)) {
              if (merges_created < m_merges_per_interval) {
                task->add_subtask(ag_data->ag, ag_data->maintenance_flags);
                merges_created++;
              }
            }
          }
        }
        Global::maintenance_queue->add(task);
      }
      else if (rd.data->maintenance_flags & MaintenanceFlag::MEMORY_PURGE) {
        MaintenanceTaskMemoryPurge *task;
        level = get_level(rd);
        task = new MaintenanceTaskMemoryPurge(level, rd.data->priority,
                                              schedule_time, rd.range);
        for (AccessGroup::MaintenanceData *ag_data=rd.data->agdata; ag_data; ag_data=ag_data->next) {
          if (ag_data->maintenance_flags & MaintenanceFlag::MEMORY_PURGE) {  // (guard assumed)
            task->add_subtask(ag_data->ag, ag_data->maintenance_flags);
            for (AccessGroup::CellStoreMaintenanceData *cs_data=ag_data->csdata; cs_data; cs_data=cs_data->next) {
              if (cs_data->maintenance_flags & MaintenanceFlag::MEMORY_PURGE)
                task->add_subtask(cs_data->cs, cs_data->maintenance_flags);
            }
          }
        }
        Global::maintenance_queue->add(task);
      }
    }
  }

  if (!Global::range_initialization_complete && !uninitialized_range_seen)
    Global::range_initialization_complete = true;

  MaintenanceTaskWorkQueue *task = 0;
  {
    lock_guard<mutex> lock(Global::mutex);
    if (!Global::work_queue.empty())
      task = new MaintenanceTaskWorkQueue(3, 0, Global::work_queue);  // (arguments assumed)
  }
  if (task)
    Global::maintenance_queue->add(task);

  //cout << flush << trace_str << flush;
}

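
// Maps a range to its maintenance-queue level: 0 for the root range, 1 for
// METADATA ranges, 2 for other system ranges, and 3 for user ranges.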
int MaintenanceScheduler::get_level(RangeData &rd) {
  if (rd.range->is_root())
    return 0;
  if (rd.data->is_metadata)
    return 1;
  else if (rd.data->is_system)
    return 2;
  return 3;
}

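
// Returns true if the debug signal file (run/debug-scheduler) exists,
// checking the filesystem at most once per minute.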
bool MaintenanceScheduler::debug_signal_file_exists(chrono::steady_clock::time_point now) {
  if (now - m_last_check >= chrono::milliseconds(60000)) {
    m_last_check = now;
    return FileUtils::exists(System::install_dir + "/run/debug-scheduler");
  }
  return false;
}

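
// Writes the scheduling trace plus per-range and per-access-group maintenance
// data to run/scheduler.output, then removes the debug signal file.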
void MaintenanceScheduler::write_debug_output(chrono::steady_clock::time_point now,
                                              Ranges &ranges,
                                              const String &header_str) {
  AccessGroup::MaintenanceData *ag_data;
  String output_fname = System::install_dir + "/run/scheduler.output";
  ofstream out;
  out.open(output_fname.c_str());
  out << header_str << "\n";
  for (auto &rd : ranges.array) {
    out << *rd.data << "\n";
    for (ag_data = rd.data->agdata; ag_data; ag_data = ag_data->next)
      out << *ag_data << "\n";
  }
  StringSet logs;
  Global::remove_ok_logs->get(logs);
  out << "RemoveOkLogs:\n";
  for (const auto &log : logs)
    out << log << "\n";
  out.close();
  FileUtils::unlink(System::install_dir + "/run/debug-scheduler");
}