/**
 * This file is part of the CernVM-File System.
 *
 * This module forks a watchdog process that listens on a pipe and prints a
 * stack trace into syslog when the cvmfs client crashes.
 */
#include "cvmfs_config.h"
#include "monitor.h"

#include <errno.h>
#include <execinfo.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/types.h>
#ifdef __APPLE__
  #include <sys/ucontext.h>
#else
  #include <ucontext.h>
#endif
#include <sys/uio.h>
#include <sys/wait.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <map>
#include <string>
#include <vector>

#if defined(CVMFS_FUSE_MODULE)
#include "cvmfs.h"
#endif
#include "util/exception.h"
#include "util/logging.h"
#include "util/platform.h"
#include "util/posix.h"
#include "util/smalloc.h"
#include "util/string.h"

// Used for address offset calculation
#if defined(CVMFS_FUSE_MODULE)
extern loader::CvmfsExports *g_cvmfs_exports;
#endif

using namespace std;  // NOLINT
Watchdog *Watchdog::instance_ = NULL;
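/**
 * Creates the singleton watchdog instance.  A typical client would create the
 * watchdog, register an optional cleanup callback, and then spawn it, roughly
 * like this (illustrative sketch, not taken from a particular caller; the
 * path and the callback name are hypothetical):
 *
 *   Watchdog *watchdog = Watchdog::Create("/path/to/crash.dump");
 *   watchdog->RegisterOnCrash(MyCleanupHandler);
 *   watchdog->Spawn();
 *
 * The callback needs to be registered before Spawn() because the watchdog
 * process inherits the pointer at fork time.
 */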
Watchdog *Watchdog::Create(const string &crash_dump_path) {
  assert(instance_ == NULL);
  instance_ = new Watchdog(crash_dump_path);
  return instance_;
}
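/**
 * Uses an external gdb (lldb on macOS) process to attach to the dying cvmfs
 * client and to produce a full stack trace, which is returned as a string.
 */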
string Watchdog::GenerateStackTrace(pid_t pid) {
  int retval;
  string result = "";

  // Re-gain root permissions to allow for ptrace of the dying cvmfs2 process
  const bool retrievable = true;
  if (!SwitchCredentials(0, getgid(), retrievable)) {
    result += "failed to re-gain root permissions... still give it a try\n";
  }

  // Run gdb and attach to the dying process
  int fd_stdin;
  int fd_stdout;
  int fd_stderr;
  vector<string> argv;
  argv.push_back("-p");
  argv.push_back(StringifyInt(pid));
  pid_t gdb_pid = 0;
  const bool double_fork = false;
  retval = ExecuteBinary(&fd_stdin,
                         &fd_stdout,
                         &fd_stderr,
#ifdef __APPLE__
                         "lldb",
#else
                         "gdb",
#endif
                         argv,
                         double_fork,
                         &gdb_pid);
  assert(retval);

  // Skip the gdb startup output
  ReadUntilGdbPrompt(fd_stdout);

  // Send the stack trace command to gdb
#ifdef __APPLE__
  const string gdb_cmd = "bt all\n" "quit\n";
#else
  const string gdb_cmd = "thread apply all bt\n" "quit\n";
#endif
  // The execve may have failed, which cannot be detected in ExecuteBinary.
  // Instead, writing to the pipe will fail.
  ssize_t nbytes = write(fd_stdin, gdb_cmd.data(), gdb_cmd.length());
  if ((nbytes < 0) || (static_cast<unsigned>(nbytes) != gdb_cmd.length())) {
    result += "failed to start gdb/lldb (" + StringifyInt(nbytes) + " bytes "
              "written, errno " + StringifyInt(errno) + ")\n";
    return result;
  }

  // Read the stack trace from the stdout of our gdb process
#ifdef __APPLE__
  // lldb has one more prompt
  result += ReadUntilGdbPrompt(fd_stdout);
#endif
  result += ReadUntilGdbPrompt(fd_stdout) + "\n\n";

  // Check for output on stderr
  string result_err;
  Block2Nonblock(fd_stderr);
  char cbuf;
  while (read(fd_stderr, &cbuf, 1) == 1)
    result_err.push_back(cbuf);
  if (!result_err.empty())
    result += "\nError output:\n" + result_err + "\n";

  // Close the connection to the terminated gdb process
  close(fd_stderr);
  close(fd_stdout);
  close(fd_stdin);

  // Make sure gdb has terminated (wait for it for a short while)
  unsigned int timeout = 15;
  int statloc;
  while (timeout > 0 && waitpid(gdb_pid, &statloc, WNOHANG) != gdb_pid) {
    --timeout;
    SafeSleepMs(1000);
  }

  // If the timeout expired, gdb probably hangs... we need to kill it
  if (timeout == 0) {
    result += "gdb did not exit as expected. sending SIGKILL... ";
    result += (kill(gdb_pid, SIGKILL) != 0) ? "failed\n" : "okay\n";
  }

  return result;
}
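/**
 * Returns the PID of the spawned watchdog process or, if the watchdog has not
 * been spawned (or does not exist), the PID of the calling process.
 */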
pid_t Watchdog::GetPid() {
  if (instance_ != NULL) {
    if (!instance_->spawned_)
      return getpid();
    else
      return instance_->watchdog_pid_;
  }
  return getpid();
}
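/**
 * Appends the given message to the crash dump file (if one is configured) and
 * writes it to syslog.
 */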
void Watchdog::LogEmergency(string msg) {
  char ctime_buffer[32];

  if (!crash_dump_path_.empty()) {
    FILE *fp = fopen(crash_dump_path_.c_str(), "a");
    if (fp) {
      time_t now = time(NULL);
      msg += "\nTimestamp: " + string(ctime_r(&now, ctime_buffer));
      if (fwrite(&msg[0], 1, msg.length(), fp) != msg.length()) {
        msg +=
          " (failed to report into crash dump file " + crash_dump_path_ + ")";
      } else {
        msg += "\n Crash also logged to file: " + crash_dump_path_ + "\n";
      }
      fclose(fp);
    } else {
      msg += " (failed to open crash dump file " + crash_dump_path_ + ")";
    }
  }
  LogCvmfs(kLogMonitor, kLogSyslogErr, "%s", msg.c_str());
}
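/**
 * Reads from the given pipe until the gdb/lldb prompt shows up and returns
 * everything read up to (and including) the prompt.
 */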
string Watchdog::ReadUntilGdbPrompt(int fd_pipe) {
#ifdef __APPLE__
  static const string gdb_prompt = "(lldb)";
#else
  static const string gdb_prompt = "\n(gdb) ";
#endif

  string result;
  char mini_buffer;
  int chars_io;
  unsigned int ring_buffer_pos = 0;

  // Read from the stdout of gdb until the gdb prompt occurs --> (gdb)
  while (1) {
    chars_io = read(fd_pipe, &mini_buffer, 1);

    // In case something goes wrong...
    if (chars_io <= 0) break;

    result += mini_buffer;

    // Find the gdb_prompt in the stdout data
    if (mini_buffer == gdb_prompt[ring_buffer_pos]) {
      ++ring_buffer_pos;
      if (ring_buffer_pos == gdb_prompt.size()) {
        break;
      }
    } else {
      ring_buffer_pos = 0;
    }
  }

  return result;
}


void Watchdog::RegisterOnCrash(void (*CleanupOnCrash)(void)) {
  on_crash_ = CleanupOnCrash;
}
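/**
 * Reads the crash information sent by the dying client, generates its stack
 * trace, and finally kills the client process.  Returns the assembled report.
 */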
string Watchdog::ReportStacktrace() {
  // Re-activate µSyslog, if necessary
  SetLogMicroSyslog(GetLogMicroSyslog());

  CrashData crash_data;
  if (!pipe_watchdog_->TryRead(&crash_data)) {
    return "failed to read crash data (" + StringifyInt(errno) + ")";
  }

  string debug = "--\n";
  debug += "Signal: " + StringifyInt(crash_data.signal);
  debug += ", errno: " + StringifyInt(crash_data.sys_errno);
  debug += ", version: " + string(VERSION);
  debug += ", PID: " + StringifyInt(crash_data.pid) + "\n";
  debug += "Executable path: " + exe_path_ + "\n";

  debug += GenerateStackTrace(crash_data.pid);

  // Give the dying process the finishing stroke
  if (kill(crash_data.pid, SIGKILL) != 0) {
    debug += "Failed to kill cvmfs client! (";
    switch (errno) {
      case EINVAL:
        debug += "invalid signal";
        break;
      case EPERM:
        debug += "permission denied";
        break;
      case ESRCH:
        debug += "no such process";
        break;
      default:
        debug += "unknown error " + StringifyInt(errno);
    }
    debug += ")\n\n";
  }

  return debug;
}
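/**
 * The watchdog is not supposed to receive signals; if it does, this handler
 * logs the event and terminates the watchdog process.
 */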
void Watchdog::ReportSignalAndTerminate(
    int sig, siginfo_t *siginfo, void * /* context */)
{
  LogCvmfs(kLogMonitor, kLogSyslogErr,
           "watchdog: received unexpected signal %d from PID %d / UID %d",
           sig, siginfo->si_pid, siginfo->si_uid);
  _exit(1);
}
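/**
 * Signal handler for the crash signals.  It notifies the watchdog process,
 * sends it the crash information, and then waits for the watchdog to finish
 * this process off with SIGQUIT.
 */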
void Watchdog::SendTrace(int sig, siginfo_t *siginfo, void *context) {
  int send_errno = errno;
  if (platform_spinlock_trylock(&Me()->lock_handler_) != 0) {
    // Concurrent call, wait for the first one to exit the process
    while (true) {}
  }

  // Re-install the original handler of the raised signal as the handler for
  // SIGQUIT (the watchdog process will raise SIGQUIT to finish this process)
  (void) sigaction(SIGQUIT, &(Me()->old_signal_handlers_[sig]), NULL);

  // Inform the watchdog that CernVM-FS crashed
  if (!Me()->pipe_watchdog_->Write(ControlFlow::kProduceStacktrace)) {
    _exit(1);
  }

  // Send crash information to the watchdog
  CrashData crash_data;
  crash_data.signal = sig;
  crash_data.sys_errno = send_errno;
  crash_data.pid = getpid();
  if (!Me()->pipe_watchdog_->Write(crash_data)) {
    _exit(1);
  }

  // Do not die before the stack trace has been generated;
  // kill -SIGQUIT <pid> will finish this
  int counter = 0;
  while (true) {
    SafeSleepMs(100);
    // Quit anyway after 30 seconds
    if (++counter == 300) {
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "stack trace generation failed");
      // Last attempt to log something useful
#if defined(CVMFS_FUSE_MODULE)
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "Signal %d, errno %d",
               sig, send_errno);
      void *addr[kMaxBacktrace];
      // Note: this doesn't work due to the signal stack on OS X (it works on
      // Linux).  Since lldb is supposed to produce the backtrace anyway, we
      // consider it more important to protect cvmfs against stack overflows.
      int num_addr = backtrace(addr, kMaxBacktrace);
      char **symbols = backtrace_symbols(addr, num_addr);
      string backtrace = "Backtrace (" + StringifyInt(num_addr) +
                         " symbols):\n";
      for (int i = 0; i < num_addr; ++i)
        backtrace += string(symbols[i]) + "\n";
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "%s", backtrace.c_str());
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "address of g_cvmfs_exports: %p",
               &g_cvmfs_exports);
#endif

      _exit(1);
    }
  }

  _exit(1);
}
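/**
 * Installs the given signal handlers and returns the handlers that were
 * previously installed, so that they can be restored later.
 */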
Watchdog::SigactionMap Watchdog::SetSignalHandlers(
    const SigactionMap &signal_handlers)
{
  SigactionMap old_signal_handlers;
  SigactionMap::const_iterator i = signal_handlers.begin();
  SigactionMap::const_iterator iend = signal_handlers.end();
  for (; i != iend; ++i) {
    struct sigaction old_signal_handler;
    if (sigaction(i->first, &i->second, &old_signal_handler) != 0) {
      PANIC(NULL);
    }
    old_signal_handlers[i->first] = old_signal_handler;
  }

  return old_signal_handlers;
}
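/**
 * Forks the watchdog process (double fork, daemonized), installs the crash
 * signal handlers in the client, and starts the listener thread that notices
 * if the watchdog disappears.
 */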
void Watchdog::Spawn() {
  Pipe pipe_pid;
  pipe_watchdog_ = new Pipe();
  pipe_listener_ = new Pipe();

  pid_t pid;
  int statloc;
  int max_fd = sysconf(_SC_OPEN_MAX);
  assert(max_fd >= 0);
  switch (pid = fork()) {
    case -1: PANIC(NULL);
    case 0:
      // Double fork to avoid zombie
      switch (fork()) {
        case -1: exit(1);
        case 0: {
          close(pipe_watchdog_->write_end);
          Daemonize();
          // Send the watchdog PID to cvmfs
          pid_t watchdog_pid = getpid();
          pipe_pid.Write(watchdog_pid);
          close(pipe_pid.write_end);
          // Close all unused file descriptors.
          // Also close usyslog, only get it back if necessary
          // string usyslog_save = GetLogMicroSyslog();
          string debuglog_save = GetLogDebugFile();
          // SetLogMicroSyslog("");
          SetLogDebugFile("");
          // Gracefully close the syslog before closing all fds. The next call
          // to syslog will reopen it.
          closelog();
          // Keep stdin, stdout, stderr open at /dev/null (daemonized)
          // in order to prevent accidental output from messing with another
          // file descriptor
          for (int fd = 3; fd < max_fd; fd++) {
            if (fd == pipe_watchdog_->read_end)
              continue;
            if (fd == pipe_listener_->write_end)
              continue;
            close(fd);
          }
          // SetLogMicroSyslog(usyslog_save);  // no-op if usyslog not used
          SetLogDebugFile(debuglog_save);  // no-op if debug log not used
          Supervise();
          exit(0);
        }
        default:
          exit(0);
      }
    default:
      close(pipe_watchdog_->read_end);
      close(pipe_listener_->write_end);
      if (waitpid(pid, &statloc, 0) != pid) PANIC(NULL);
      if (!WIFEXITED(statloc) || WEXITSTATUS(statloc)) PANIC(NULL);
  }

  // Retrieve the watchdog PID from the pipe
  close(pipe_pid.write_end);
  pipe_pid.Read(&watchdog_pid_);
  close(pipe_pid.read_end);

  // Lower restrictions for ptrace
  if (!platform_allow_ptrace(watchdog_pid_)) {
    LogCvmfs(kLogMonitor, kLogSyslogWarn,
             "failed to allow ptrace() for watchdog (PID: %d). "
             "Post crash stacktrace might not work",
             watchdog_pid_);
  }

  // Extra stack for signal handlers
  int stack_size = kSignalHandlerStacksize;  // 2 MB
  sighandler_stack_.ss_sp = smalloc(stack_size);
  sighandler_stack_.ss_size = stack_size;
  sighandler_stack_.ss_flags = 0;
  if (sigaltstack(&sighandler_stack_, NULL) != 0)
    PANIC(NULL);

  // Define our crash signal handler
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = SendTrace;
  sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigfillset(&sa.sa_mask);

  SigactionMap signal_handlers;
  signal_handlers[SIGQUIT] = sa;
  signal_handlers[SIGILL] = sa;
  signal_handlers[SIGABRT] = sa;
  signal_handlers[SIGFPE] = sa;
  signal_handlers[SIGSEGV] = sa;
  signal_handlers[SIGBUS] = sa;
  signal_handlers[SIGPIPE] = sa;
  signal_handlers[SIGXFSZ] = sa;
  old_signal_handlers_ = SetSignalHandlers(signal_handlers);

  pipe_terminate_ = new Pipe();
  int retval =
      pthread_create(&thread_listener_, NULL, MainWatchdogListener, this);
  assert(retval == 0);

  spawned_ = true;
}
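/**
 * Listener thread in the client: polls the pipe held open by the watchdog and
 * aborts the client if the watchdog process disappears.
 */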
void *Watchdog::MainWatchdogListener(void *data) {
  Watchdog *watchdog = static_cast<Watchdog *>(data);
  LogCvmfs(kLogMonitor, kLogDebug, "starting watchdog listener");

  struct pollfd watch_fds[2];
  watch_fds[0].fd = watchdog->pipe_listener_->read_end;
  watch_fds[0].events = 0;  // Only check for POLL[ERR,HUP,NVAL] in revents
  watch_fds[0].revents = 0;
  watch_fds[1].fd = watchdog->pipe_terminate_->read_end;
  watch_fds[1].events = POLLIN | POLLPRI;
  watch_fds[1].revents = 0;
  while (true) {
    int retval = poll(watch_fds, 2, -1);
    if (retval < 0) {
      continue;
    }

    // Terminate I/O thread
    if (watch_fds[1].revents)
      break;

    if (watch_fds[0].revents) {
      if ((watch_fds[0].revents & POLLERR) ||
          (watch_fds[0].revents & POLLHUP) ||
          (watch_fds[0].revents & POLLNVAL))
      {
        LogCvmfs(kLogMonitor, kLogDebug | kLogSyslogErr,
                 "watchdog disappeared, disabling stack trace reporting "
                 "(revents: %d / %d|%d|%d)",
                 watch_fds[0].revents, POLLERR, POLLHUP, POLLNVAL);
        watchdog->SetSignalHandlers(watchdog->old_signal_handlers_);
        PANIC(kLogDebug | kLogSyslogErr, "watchdog disappeared, aborting");
      }
      PANIC(NULL);
    }
  }
  close(watchdog->pipe_listener_->read_end);

  LogCvmfs(kLogMonitor, kLogDebug, "stopping watchdog listener");
  return NULL;
}
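/**
 * Body of the watchdog process: blocks on the pipe to the client and waits
 * for either a crash notification, in which case it reports the stack trace,
 * or a clean shutdown request.
 */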
void Watchdog::Supervise() {
  // We want reading from the pipe fd to fail if the pipe breaks,
  // instead of receiving a signal
  signal(SIGPIPE, SIG_IGN);

  // The watchdog is not supposed to receive signals. If it does, report it.
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = ReportSignalAndTerminate;
  sa.sa_flags = SA_SIGINFO;
  sigfillset(&sa.sa_mask);

  SigactionMap signal_handlers;
  signal_handlers[SIGHUP] = sa;
  signal_handlers[SIGINT] = sa;
  signal_handlers[SIGQUIT] = sa;
  signal_handlers[SIGILL] = sa;
  signal_handlers[SIGABRT] = sa;
  signal_handlers[SIGBUS] = sa;
  signal_handlers[SIGFPE] = sa;
  signal_handlers[SIGUSR1] = sa;
  signal_handlers[SIGSEGV] = sa;
  signal_handlers[SIGUSR2] = sa;
  signal_handlers[SIGTERM] = sa;
  signal_handlers[SIGXCPU] = sa;
  signal_handlers[SIGXFSZ] = sa;
  SetSignalHandlers(signal_handlers);

  ControlFlow::Flags control_flow = ControlFlow::kUnknown;

  if (!pipe_watchdog_->TryRead(&control_flow)) {
    // Re-activate µSyslog, if necessary
    SetLogMicroSyslog(GetLogMicroSyslog());
    LogEmergency("watchdog: unexpected termination (" +
                 StringifyInt(control_flow) + ")");
    if (on_crash_) on_crash_();
  } else {
    switch (control_flow) {
      case ControlFlow::kProduceStacktrace:
        LogEmergency(ReportStacktrace());
        if (on_crash_) on_crash_();
        break;

      case ControlFlow::kQuit:
        break;

      default:
        // Re-activate µSyslog, if necessary
        SetLogMicroSyslog(GetLogMicroSyslog());
        LogEmergency("watchdog: unexpected error");
        break;
    }
  }

  close(pipe_watchdog_->read_end);
  close(pipe_listener_->write_end);
}
Watchdog::Watchdog(const string &crash_dump_path)
  : spawned_(false)
  , crash_dump_path_(crash_dump_path)
  , exe_path_(string(platform_getexepath()))
  , watchdog_pid_(0)
  , pipe_watchdog_(NULL)
  , pipe_listener_(NULL)
  , pipe_terminate_(NULL)
  , on_crash_(NULL)
{
  int retval = platform_spinlock_init(&lock_handler_, 0);
  assert(retval == 0);
  memset(&sighandler_stack_, 0, sizeof(sighandler_stack_));
}
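/**
 * Restores the default signal handlers, stops the listener thread, and tells
 * the spawned watchdog process to quit before releasing the pipes.
 */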
Watchdog::~Watchdog() {
  if (spawned_) {
    // Reset signal handlers
    signal(SIGQUIT, SIG_DFL);
    signal(SIGILL, SIG_DFL);
    signal(SIGABRT, SIG_DFL);
    signal(SIGFPE, SIG_DFL);
    signal(SIGSEGV, SIG_DFL);
    signal(SIGBUS, SIG_DFL);
    signal(SIGPIPE, SIG_DFL);
    signal(SIGXFSZ, SIG_DFL);
    free(sighandler_stack_.ss_sp);
    sighandler_stack_.ss_size = 0;

    pipe_terminate_->Write(ControlFlow::kQuit);
    pthread_join(thread_listener_, NULL);
    pipe_terminate_->Close();

    pipe_watchdog_->Write(ControlFlow::kQuit);
    close(pipe_watchdog_->write_end);
  }

  delete pipe_watchdog_;
  delete pipe_listener_;
  delete pipe_terminate_;

  platform_spinlock_destroy(&lock_handler_);
  LogCvmfs(kLogMonitor, kLogDebug, "monitor stopped");
  instance_ = NULL;
}
namespace monitor {

const unsigned kMinOpenFiles = 8192;
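/**
 * Returns the soft limit for the number of open file descriptors and warns
 * via syslog if it is smaller than kMinOpenFiles.  The limit is determined
 * only once (lazy evaluation).
 */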
unsigned GetMaxOpenFiles() {
  static unsigned max_open_files;
  static bool already_done = false;

  /* check number of open files (lazy evaluation) */
  if (!already_done) {
    unsigned soft_limit = 0;
    unsigned hard_limit = 0;
    GetLimitNoFile(&soft_limit, &hard_limit);

    if (soft_limit < kMinOpenFiles) {
      LogCvmfs(kLogMonitor, kLogSyslogWarn | kLogDebug,
               "Warning: current limits for number of open files are "
               "(%u/%u)\n"
               "CernVM-FS is likely to run out of file descriptors, "
               "set ulimit -n to at least %u",
               soft_limit, hard_limit, kMinOpenFiles);
    }
    max_open_files = soft_limit;
    already_done = true;
  }

  return max_open_files;
}

}  // namespace monitor