CernVM-FS 2.11.0
monitor.cc
/**
 * This file is part of the CernVM File System.
 *
 * This module forks a watchdog process that listens on a pipe and prints
 * a stack trace into syslog when the cvmfs client fails.
 */

#include "cvmfs_config.h"
#include "monitor.h"

#include <errno.h>
#include <execinfo.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/types.h>
#ifdef __APPLE__
  #include <sys/ucontext.h>
#else
  #include <ucontext.h>
#endif
#include <sys/uio.h>
#include <sys/wait.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <map>
#include <set>
#include <string>
#include <vector>

#if defined(CVMFS_FUSE_MODULE)
#include "cvmfs.h"
#endif
#include "util/exception.h"
#include "util/logging.h"
#include "util/platform.h"
#include "util/posix.h"
#include "util/smalloc.h"
#include "util/string.h"

// Used for address offset calculation
#if defined(CVMFS_FUSE_MODULE)
extern loader::CvmfsExports *g_cvmfs_exports;
#endif

using namespace std;  // NOLINT

Watchdog *Watchdog::instance_ = NULL;


Watchdog *Watchdog::Create(FnOnCrash on_crash) {
  assert(instance_ == NULL);
  instance_ = new Watchdog(on_crash);
  instance_->Fork();
  return instance_;
}
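
// Usage sketch (illustrative only, not part of monitor.cc): a client of this
// class is expected to create the singleton once, before mounting, and to
// spawn the watchdog as soon as the crash dump location is known.  The
// callback name and path below are made-up placeholders.
//
//   Watchdog *watchdog = Watchdog::Create(&ShutdownMountpoint);
//   watchdog->Spawn("/var/log/cvmfs/crash_dump");
//   // ... normal operation ...
//   delete watchdog;  // stops the listener thread and the watchdog process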


string Watchdog::GenerateStackTrace(pid_t pid) {
  int retval;
  string result = "";

  // re-gain root permissions to allow for ptrace of died cvmfs2 process
  const bool retrievable = true;
  if (!SwitchCredentials(0, getgid(), retrievable)) {
    result += "failed to re-gain root permissions... still give it a try\n";
  }

  // run gdb and attach to the dying process
  int fd_stdin;
  int fd_stdout;
  int fd_stderr;
  vector<string> argv;
  argv.push_back("-p");
  argv.push_back(StringifyInt(pid));
  pid_t gdb_pid = 0;
  const bool double_fork = false;
  retval = ExecuteBinary(&fd_stdin,
                         &fd_stdout,
                         &fd_stderr,
#ifdef __APPLE__
                         "lldb",
#else
                         "gdb",
#endif
                         argv,
                         double_fork,
                         &gdb_pid);
  assert(retval);


  // Skip the gdb startup output
  ReadUntilGdbPrompt(fd_stdout);

  // Send stacktrace command to gdb
#ifdef __APPLE__
  const string gdb_cmd = "bt all\n" "quit\n";
#else
  const string gdb_cmd = "thread apply all bt\n" "quit\n";
#endif
  // The execve can have failed, which can't be detected in ExecuteBinary.
  // Instead, writing to the pipe will fail.
  ssize_t nbytes = write(fd_stdin, gdb_cmd.data(), gdb_cmd.length());
  if ((nbytes < 0) || (static_cast<unsigned>(nbytes) != gdb_cmd.length())) {
    result += "failed to start gdb/lldb (" + StringifyInt(nbytes) + " bytes "
              "written, errno " + StringifyInt(errno) + ")\n";
    return result;
  }

  // Read the stack trace from the stdout of our gdb process
#ifdef __APPLE__
  // lldb has one more prompt
  result += ReadUntilGdbPrompt(fd_stdout);
#endif
  result += ReadUntilGdbPrompt(fd_stdout) + "\n\n";

  // Check for output on stderr
  string result_err;
  Block2Nonblock(fd_stderr);
  char cbuf;
  while (read(fd_stderr, &cbuf, 1) == 1)
    result_err.push_back(cbuf);
  if (!result_err.empty())
    result += "\nError output:\n" + result_err + "\n";

  // Close the connection to the terminated gdb process
  close(fd_stderr);
  close(fd_stdout);
  close(fd_stdin);

  // Make sure gdb has terminated (wait for it for a short while)
  unsigned int timeout = 15;
  int statloc;
  while (timeout > 0 && waitpid(gdb_pid, &statloc, WNOHANG) != gdb_pid) {
    --timeout;
    SafeSleepMs(1000);
  }

  // when the timeout expired, gdb probably hangs... we need to kill it
  if (timeout == 0) {
    result += "gdb did not exit as expected. sending SIGKILL... ";
    result += (kill(gdb_pid, SIGKILL) != 0) ? "failed\n" : "okay\n";
  }

  return result;
}


pid_t Watchdog::GetPid() {
  if (instance_ != NULL) {
    return instance_->watchdog_pid_;
  }
  return getpid();
}


void Watchdog::LogEmergency(string msg) {
  char ctime_buffer[32];

  if (!crash_dump_path_.empty()) {
    FILE *fp = fopen(crash_dump_path_.c_str(), "a");
    if (fp) {
      time_t now = time(NULL);
      msg += "\nTimestamp: " + string(ctime_r(&now, ctime_buffer));
      if (fwrite(&msg[0], 1, msg.length(), fp) != msg.length()) {
        msg +=
          " (failed to report into crash dump file " + crash_dump_path_ + ")";
      } else {
        msg += "\n Crash logged also on file: " + crash_dump_path_ + "\n";
      }
      fclose(fp);
    } else {
      msg += " (failed to open crash dump file " + crash_dump_path_ + ")";
    }
  }
  LogCvmfs(kLogMonitor, kLogSyslogErr, "%s", msg.c_str());
}


string Watchdog::ReadUntilGdbPrompt(int fd_pipe) {
#ifdef __APPLE__
  static const string gdb_prompt = "(lldb)";
#else
  static const string gdb_prompt = "\n(gdb) ";
#endif

  string result;
  char mini_buffer;
  int chars_io;
  unsigned int ring_buffer_pos = 0;

  // read from stdout of gdb until gdb prompt occurs --> (gdb)
  while (1) {
    chars_io = read(fd_pipe, &mini_buffer, 1);

    // in case something goes wrong...
    if (chars_io <= 0) break;

    result += mini_buffer;

    // find the gdb_prompt in the stdout data
    if (mini_buffer == gdb_prompt[ring_buffer_pos]) {
      ++ring_buffer_pos;
      if (ring_buffer_pos == gdb_prompt.size()) {
        break;
      }
    } else {
      ring_buffer_pos = 0;
    }
  }

  return result;
}


string Watchdog::ReportStacktrace() {
  CrashData crash_data;
  if (!pipe_watchdog_->TryRead<CrashData>(&crash_data)) {
    return "failed to read crash data (" + StringifyInt(errno) + ")";
  }

  string debug = "--\n";
  debug += "Signal: " + StringifyInt(crash_data.signal);
  debug += ", errno: " + StringifyInt(crash_data.sys_errno);
  debug += ", version: " + string(VERSION);
  debug += ", PID: " + StringifyInt(crash_data.pid) + "\n";
  debug += "Executable path: " + exe_path_ + "\n";

  debug += GenerateStackTrace(crash_data.pid);

  // Give the dying process the finishing stroke
  if (kill(crash_data.pid, SIGKILL) != 0) {
    debug += "Failed to kill cvmfs client! (";
    switch (errno) {
      case EINVAL:
        debug += "invalid signal";
        break;
      case EPERM:
        debug += "permission denied";
        break;
      case ESRCH:
        debug += "no such process";
        break;
      default:
        debug += "unknown error " + StringifyInt(errno);
    }
    debug += ")\n\n";
  }

  return debug;
}


void Watchdog::ReportSignalAndTerminate(
  int sig, siginfo_t *siginfo, void * /* context */)
{
  LogCvmfs(kLogMonitor, kLogSyslogErr | kLogDebug,
           "watchdog: received unexpected signal %d from PID %d / UID %d",
           sig, siginfo->si_pid, siginfo->si_uid);
  _exit(1);
}


void Watchdog::SendTrace(int sig, siginfo_t *siginfo, void *context) {
  int send_errno = errno;
  if (platform_spinlock_trylock(&Me()->lock_handler_) != 0) {
    // Concurrent call, wait for the first one to exit the process
    while (true) {}
  }

  // Set the original signal handler for the raised signal in
  // SIGQUIT (watchdog process will raise SIGQUIT)
  (void) sigaction(SIGQUIT, &(Me()->old_signal_handlers_[sig]), NULL);

  // Inform the watchdog that CernVM-FS crashed
  if (!Me()->pipe_watchdog_->Write(ControlFlow::kProduceStacktrace)) {
    _exit(1);
  }

  // Send crash information to the watchdog
  CrashData crash_data;
  crash_data.signal = sig;
  crash_data.sys_errno = send_errno;
  crash_data.pid = getpid();
  if (!Me()->pipe_watchdog_->Write<CrashData>(crash_data)) {
    _exit(1);
  }

  // Do not die before the stack trace was generated
  // kill -SIGQUIT <pid> will finish this
  int counter = 0;
  while (true) {
    SafeSleepMs(100);
    // quit anyway after 30 seconds
    if (++counter == 300) {
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "stack trace generation failed");
      // Last attempt to log something useful
#if defined(CVMFS_FUSE_MODULE)
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "Signal %d, errno %d",
               sig, send_errno);
      void *addr[kMaxBacktrace];
      // Note: this doesn't work due to the signal stack on OS X (it works on
      // Linux). Since anyway lldb is supposed to produce the backtrace, we
      // consider it more important to protect cvmfs against stack overflows.
      int num_addr = backtrace(addr, kMaxBacktrace);
      char **symbols = backtrace_symbols(addr, num_addr);
      string backtrace = "Backtrace (" + StringifyInt(num_addr) +
                         " symbols):\n";
      for (int i = 0; i < num_addr; ++i)
        backtrace += string(symbols[i]) + "\n";
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "%s", backtrace.c_str());
      LogCvmfs(kLogCvmfs, kLogSyslogErr, "address of g_cvmfs_exports: %p",
               &g_cvmfs_exports);
#endif

      _exit(1);
    }
  }

  _exit(1);
}


Watchdog::SigactionMap Watchdog::SetSignalHandlers(
  const SigactionMap &signal_handlers)
{
  SigactionMap old_signal_handlers;
  SigactionMap::const_iterator i = signal_handlers.begin();
  SigactionMap::const_iterator iend = signal_handlers.end();
  for (; i != iend; ++i) {
    struct sigaction old_signal_handler;
    if (sigaction(i->first, &i->second, &old_signal_handler) != 0) {
      PANIC(NULL);
    }
    old_signal_handlers[i->first] = old_signal_handler;
  }

  return old_signal_handlers;
}


void Watchdog::Fork() {
  Pipe<kPipeWatchdogPid> pipe_pid;
  pipe_watchdog_ = new Pipe<kPipeWatchdog>();
  pipe_listener_ = new Pipe<kPipeWatchdogSupervisor>();

  pid_t pid;
  int statloc;
  switch (pid = fork()) {
    case -1: PANIC(NULL);
    case 0:
      // Double fork to avoid zombie
      switch (fork()) {
        case -1: _exit(1);
        case 0: {
          pipe_watchdog_->CloseWriteFd();
          Daemonize();
          // send the watchdog PID to the supervisee
          pid_t watchdog_pid = getpid();
          pipe_pid.Write(watchdog_pid);
          pipe_pid.CloseWriteFd();
          // Close all unused file descriptors
          // close also usyslog, only get it back if necessary
          // string usyslog_save = GetLogMicroSyslog();
          string debuglog_save = GetLogDebugFile();
          SetLogDebugFile("");
          string usyslog_save = GetLogMicroSyslog();
          SetLogMicroSyslog("");
          // Gracefully close the syslog before closing all fds. The next call
          // to syslog will reopen it.
          closelog();
          // Let's keep stdin, stdout, stderr open at /dev/null (daemonized)
          // in order to prevent accidental outputs from messing with another
          // file descriptor
          std::set<int> preserve_fds;
          preserve_fds.insert(0);
          preserve_fds.insert(1);
          preserve_fds.insert(2);
          preserve_fds.insert(pipe_watchdog_->GetReadFd());
          preserve_fds.insert(pipe_listener_->GetWriteFd());
          CloseAllFildes(preserve_fds);
          SetLogMicroSyslog(usyslog_save);  // no-op if usyslog not used
          SetLogDebugFile(debuglog_save);  // no-op if debug log not used

          if (WaitForSupervisee())
            Supervise();

          pipe_watchdog_->CloseReadFd();
          pipe_listener_->CloseWriteFd();
          exit(0);
        }
        default:
          _exit(0);
      }
    default:
      pipe_watchdog_->CloseReadFd();
      pipe_listener_->CloseWriteFd();
      pipe_pid.CloseWriteFd();
      if (waitpid(pid, &statloc, 0) != pid) PANIC(NULL);
      if (!WIFEXITED(statloc) || WEXITSTATUS(statloc)) PANIC(NULL);
  }

  // retrieve the watchdog PID from the pipe
  pipe_pid.Read(&watchdog_pid_);
  pipe_pid.CloseReadFd();
}
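
// Note on the control flow above: the watchdog process is created by a
// double fork so that it is reparented to init and cannot leave a zombie
// behind.  The grandchild closes the unused pipe ends, daemonizes, reports
// its PID through pipe_pid, drops all file descriptors except stdio and the
// two watchdog pipes, and then blocks in WaitForSupervisee()/Supervise().
// The parent only waits for the intermediate child and reads watchdog_pid_
// back from pipe_pid.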


bool Watchdog::WaitForSupervisee() {
  // We want broken pipes not to raise a signal but handle the error in the
  // read/write code
  platform_sighandler_t rv_sig = signal(SIGPIPE, SIG_IGN);
  assert(rv_sig != SIG_ERR);

  // The watchdog is not supposed to receive signals. If it does, report it.
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = ReportSignalAndTerminate;
  sa.sa_flags = SA_SIGINFO;
  sigfillset(&sa.sa_mask);

  SigactionMap signal_handlers;
  signal_handlers[SIGHUP] = sa;
  signal_handlers[SIGINT] = sa;
  signal_handlers[SIGQUIT] = sa;
  signal_handlers[SIGILL] = sa;
  signal_handlers[SIGABRT] = sa;
  signal_handlers[SIGBUS] = sa;
  signal_handlers[SIGFPE] = sa;
  signal_handlers[SIGUSR1] = sa;
  signal_handlers[SIGSEGV] = sa;
  signal_handlers[SIGUSR2] = sa;
  signal_handlers[SIGTERM] = sa;
  signal_handlers[SIGXCPU] = sa;
  signal_handlers[SIGXFSZ] = sa;
  SetSignalHandlers(signal_handlers);

  ControlFlow::Flags control_flow = ControlFlow::kUnknown;

  if (!pipe_watchdog_->TryRead(&control_flow)) {
    LogCvmfs(kLogMonitor, kLogDebug, "supervisee canceled watchdog");
    return false;
  }

  switch (control_flow) {
    case ControlFlow::kQuit:
      return false;
    case ControlFlow::kSupervise:
      break;
    default:
      LogEmergency("Internal error: invalid control flow");
      return false;
  }

  size_t size;
  pipe_watchdog_->Read(&size);
  crash_dump_path_.resize(size);
  if (size > 0) {
    pipe_watchdog_->Read(&crash_dump_path_[0], size);

    int retval = chdir(GetParentPath(crash_dump_path_).c_str());
    if (retval != 0) {
      LogEmergency(std::string("Cannot change to crash dump directory: ") +
                   crash_dump_path_);
      return false;
    }
    crash_dump_path_ = GetFileName(crash_dump_path_);
  }
  return true;
}


void Watchdog::Spawn(const std::string &crash_dump_path) {
  // lower restrictions for ptrace
  if (!platform_allow_ptrace(watchdog_pid_)) {
    LogCvmfs(kLogMonitor, kLogSyslogWarn,
             "failed to allow ptrace() for watchdog (PID: %d). "
             "Post crash stacktrace might not work",
             watchdog_pid_);
  }

  // Extra stack for signal handlers
  int stack_size = kSignalHandlerStacksize;  // 2 MB
  sighandler_stack_.ss_sp = smalloc(stack_size);
  sighandler_stack_.ss_size = stack_size;
  sighandler_stack_.ss_flags = 0;
  if (sigaltstack(&sighandler_stack_, NULL) != 0)
    PANIC(NULL);

  // define our crash signal handler
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = SendTrace;
  sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigfillset(&sa.sa_mask);

  SigactionMap signal_handlers;
  signal_handlers[SIGQUIT] = sa;
  signal_handlers[SIGILL] = sa;
  signal_handlers[SIGABRT] = sa;
  signal_handlers[SIGFPE] = sa;
  signal_handlers[SIGSEGV] = sa;
  signal_handlers[SIGBUS] = sa;
  signal_handlers[SIGPIPE] = sa;
  signal_handlers[SIGXFSZ] = sa;
  old_signal_handlers_ = SetSignalHandlers(signal_handlers);

  pipe_terminate_ = new Pipe<kPipeThreadTerminator>();
  int retval =
      pthread_create(&thread_listener_, NULL, MainWatchdogListener, this);
  assert(retval == 0);

  pipe_watchdog_->Write(ControlFlow::kSupervise);
  size_t path_size = crash_dump_path.size();
  pipe_watchdog_->Write(path_size);
  if (path_size > 0) {
    pipe_watchdog_->Write(crash_dump_path.data(), path_size);
  }

  spawned_ = true;
}
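
// Handshake between Spawn() (supervisee side) and WaitForSupervisee()
// (watchdog side), as implemented above: a ControlFlow::kSupervise token,
// followed by the crash dump path length as a size_t, followed by the raw
// path bytes if the length is non-zero.  A crash is later announced by
// SendTrace() as ControlFlow::kProduceStacktrace followed by a CrashData
// record, which ReportStacktrace() reads back in the watchdog.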


void *Watchdog::MainWatchdogListener(void *data) {
  Watchdog *watchdog = static_cast<Watchdog *>(data);
  LogCvmfs(kLogMonitor, kLogDebug, "starting watchdog listener");

  struct pollfd watch_fds[2];
  watch_fds[0].fd = watchdog->pipe_listener_->GetReadFd();
  watch_fds[0].events = 0;  // Only check for POLL[ERR,HUP,NVAL] in revents
  watch_fds[0].revents = 0;
  watch_fds[1].fd = watchdog->pipe_terminate_->GetReadFd();
  watch_fds[1].events = POLLIN | POLLPRI;
  watch_fds[1].revents = 0;
  while (true) {
    int retval = poll(watch_fds, 2, -1);
    if (retval < 0) {
      continue;
    }

    // Terminate I/O thread
    if (watch_fds[1].revents)
      break;

    if (watch_fds[0].revents) {
      if ((watch_fds[0].revents & POLLERR) ||
          (watch_fds[0].revents & POLLHUP) ||
          (watch_fds[0].revents & POLLNVAL))
      {
        LogCvmfs(kLogMonitor, kLogDebug | kLogSyslogErr,
                 "watchdog disappeared, disabling stack trace reporting "
                 "(revents: %d / %d|%d|%d)",
                 watch_fds[0].revents, POLLERR, POLLHUP, POLLNVAL);
        watchdog->SetSignalHandlers(watchdog->old_signal_handlers_);
        PANIC(kLogDebug | kLogSyslogErr, "watchdog disappeared, aborting");
      }
      PANIC(NULL);
    }
  }

  LogCvmfs(kLogMonitor, kLogDebug, "stopping watchdog listener");
  return NULL;
}


void Watchdog::Supervise() {
  ControlFlow::Flags control_flow = ControlFlow::kUnknown;

  if (!pipe_watchdog_->TryRead<ControlFlow::Flags>(&control_flow)) {
    LogEmergency("watchdog: unexpected termination (" +
                 StringifyInt(control_flow) + ")");
    if (on_crash_) on_crash_();
  } else {
    switch (control_flow) {
      case ControlFlow::kProduceStacktrace:
        LogEmergency(ReportStacktrace());
        if (on_crash_) on_crash_();
        break;

      case ControlFlow::kQuit:
        break;

      default:
        LogEmergency("watchdog: unexpected error");
        break;
    }
  }
}


Watchdog::Watchdog(FnOnCrash on_crash)
  : spawned_(false)
  , exe_path_(string(platform_getexepath()))
  , watchdog_pid_(0)
  , on_crash_(on_crash)
{
  int retval = platform_spinlock_init(&lock_handler_, 0);
  assert(retval == 0);
  memset(&sighandler_stack_, 0, sizeof(sighandler_stack_));
}


Watchdog::~Watchdog() {
  if (spawned_) {
    // Reset signal handlers
    signal(SIGQUIT, SIG_DFL);
    signal(SIGILL, SIG_DFL);
    signal(SIGABRT, SIG_DFL);
    signal(SIGFPE, SIG_DFL);
    signal(SIGSEGV, SIG_DFL);
    signal(SIGBUS, SIG_DFL);
    signal(SIGPIPE, SIG_DFL);
    signal(SIGXFSZ, SIG_DFL);
    free(sighandler_stack_.ss_sp);
    sighandler_stack_.ss_size = 0;

    pipe_terminate_->Write(ControlFlow::kQuit);
    pthread_join(thread_listener_, NULL);
    pipe_terminate_->Close();
  }

  pipe_watchdog_->Write(ControlFlow::kQuit);
  pipe_watchdog_->CloseWriteFd();
  pipe_listener_->CloseReadFd();

  platform_spinlock_destroy(&lock_handler_);
  LogCvmfs(kLogMonitor, kLogDebug, "monitor stopped");
  instance_ = NULL;
}