gdb/infrun.c — GDB

This listing covers, in order: the file's global variables, its data types,
its functions, its macros, and finally the source code itself.

  1. /* Target-struct-independent code to start (run) and stop an inferior
  2.    process.

  3.    Copyright (C) 1986-2015 Free Software Foundation, Inc.

  4.    This file is part of GDB.

  5.    This program is free software; you can redistribute it and/or modify
  6.    it under the terms of the GNU General Public License as published by
  7.    the Free Software Foundation; either version 3 of the License, or
  8.    (at your option) any later version.

  9.    This program is distributed in the hope that it will be useful,
  10.    but WITHOUT ANY WARRANTY; without even the implied warranty of
  11.    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12.    GNU General Public License for more details.

  13.    You should have received a copy of the GNU General Public License
  14.    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

  15. #include "defs.h"
  16. #include "infrun.h"
  17. #include <ctype.h>
  18. #include "symtab.h"
  19. #include "frame.h"
  20. #include "inferior.h"
  21. #include "breakpoint.h"
  22. #include "gdb_wait.h"
  23. #include "gdbcore.h"
  24. #include "gdbcmd.h"
  25. #include "cli/cli-script.h"
  26. #include "target.h"
  27. #include "gdbthread.h"
  28. #include "annotate.h"
  29. #include "symfile.h"
  30. #include "top.h"
  31. #include <signal.h>
  32. #include "inf-loop.h"
  33. #include "regcache.h"
  34. #include "value.h"
  35. #include "observer.h"
  36. #include "language.h"
  37. #include "solib.h"
  38. #include "main.h"
  39. #include "dictionary.h"
  40. #include "block.h"
  41. #include "mi/mi-common.h"
  42. #include "event-top.h"
  43. #include "record.h"
  44. #include "record-full.h"
  45. #include "inline-frame.h"
  46. #include "jit.h"
  47. #include "tracepoint.h"
  48. #include "continuations.h"
  49. #include "interps.h"
  50. #include "skip.h"
  51. #include "probe.h"
  52. #include "objfiles.h"
  53. #include "completer.h"
  54. #include "target-descriptions.h"
  55. #include "target-dcache.h"
  56. #include "terminal.h"

  57. /* Prototypes for local functions */

  58. static void signals_info (char *, int);

  59. static void handle_command (char *, int);

  60. static void sig_print_info (enum gdb_signal);

  61. static void sig_print_header (void);

  62. static void resume_cleanups (void *);

  63. static int hook_stop_stub (void *);

  64. static int restore_selected_frame (void *);

  65. static int follow_fork (void);

  66. static int follow_fork_inferior (int follow_child, int detach_fork);

  67. static void follow_inferior_reset_breakpoints (void);

  68. static void set_schedlock_func (char *args, int from_tty,
  69.                                 struct cmd_list_element *c);

  70. static int currently_stepping (struct thread_info *tp);

  71. static void xdb_handle_command (char *args, int from_tty);

  72. void _initialize_infrun (void);

  73. void nullify_last_target_wait_ptid (void);

  74. static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

  75. static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

  76. static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

  77. /* When set, stop the 'step' command if we enter a function which has
  78.    no line number information.  The normal behavior is that we step
  79.    over such function.  */
  80. int step_stop_if_no_debug = 0;
/* Callback for the "show" command associated with the
   step_stop_if_no_debug setting above; VALUE is the current setting
   already rendered as text by the command machinery.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

  87. /* In asynchronous mode, but simulating synchronous execution.  */

  88. int sync_execution = 0;

  89. /* proceed and normal_stop use this to notify the user when the
  90.    inferior stopped in a different thread than it had been running
  91.    in.  */

  92. static ptid_t previous_inferior_ptid;

  93. /* If set (default for legacy reasons), when following a fork, GDB
  94.    will detach from one of the fork branches, child or parent.
  95.    Exactly which branch is detached depends on 'set follow-fork-mode'
  96.    setting.  */

  97. static int detach_fork = 1;

  98. int debug_displaced = 0;
/* Callback for the "show" command associated with debug_displaced;
   VALUE is the setting rendered as text.  */
static void
show_debug_displaced (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
}

  105. unsigned int debug_infrun = 0;
/* Callback for the "show" command associated with debug_infrun;
   VALUE is the setting rendered as text.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}


  112. /* Support for disabling address space randomization.  */

  113. int disable_randomization = 1;

  114. static void
  115. show_disable_randomization (struct ui_file *file, int from_tty,
  116.                             struct cmd_list_element *c, const char *value)
  117. {
  118.   if (target_supports_disable_randomization ())
  119.     fprintf_filtered (file,
  120.                       _("Disabling randomization of debuggee's "
  121.                         "virtual address space is %s.\n"),
  122.                       value);
  123.   else
  124.     fputs_filtered (_("Disabling randomization of debuggee's "
  125.                       "virtual address space is unsupported on\n"
  126.                       "this platform.\n"), file);
  127. }

/* Callback for the "set" command associated with
   disable_randomization.  Rejects the change (error () throws) when
   the target has no way to control address space randomization.  */
static void
set_disable_randomization (char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}

  137. /* User interface for non-stop mode.  */

  138. int non_stop = 0;
  139. static int non_stop_1 = 0;

/* Callback for "set non-stop".  The command machinery has already
   staged the user's new value in NON_STOP_1; commit it to NON_STOP,
   unless the inferior is running, in which case revert the staged
   value and refuse.  */
static void
set_non_stop (char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Undo the staged change so "show non-stop" stays consistent;
	 error () throws, so the commit below is never reached.  */
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

/* Callback for the "show" command associated with non_stop; VALUE is
   the setting rendered as text.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}

  159. /* "Observer mode" is somewhat like a more extreme version of
  160.    non-stop, in which all GDB operations that might affect the
  161.    target's execution have been disabled.  */

  162. int observer_mode = 0;
  163. static int observer_mode_1 = 0;

/* Callback for "set observer".  Commits the staged OBSERVER_MODE_1
   value (refusing while the inferior runs), then derives all of the
   may-* target permissions from the new mode and pushes them to the
   target.  Entering observer mode also forces non-stop mode on.  */
static void
set_observer_mode (char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged value; error () throws, so nothing below
	 runs.  */
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* In observer mode, forbid every operation that could perturb the
     target's execution.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = 1;
  may_stop = !observer_mode;
  /* Push the permission set computed above down to the target.  */
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = 1;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
		     (observer_mode ? "on" : "off"));
}

/* Callback for the "show" command associated with observer_mode;
   VALUE is the setting rendered as text.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

  201. /* This updates the value of observer mode based on changes in
  202.    permissions.  Note that we are deliberately ignoring the values of
  203.    may-write-registers and may-write-memory, since the user may have
  204.    reason to enable these during a session, for instance to turn on a
  205.    debugging-related global.  */

  206. void
  207. update_observer_mode (void)
  208. {
  209.   int newval;

  210.   newval = (!may_insert_breakpoints
  211.             && !may_insert_tracepoints
  212.             && may_insert_fast_tracepoints
  213.             && !may_stop
  214.             && non_stop);

  215.   /* Let the user know if things change.  */
  216.   if (newval != observer_mode)
  217.     printf_filtered (_("Observer mode is now %s.\n"),
  218.                      (newval ? "on" : "off"));

  219.   observer_mode = observer_mode_1 = newval;
  220. }

  221. /* Tables of how to react to signals; the user sets them.  */

  222. static unsigned char *signal_stop;
  223. static unsigned char *signal_print;
  224. static unsigned char *signal_program;

  225. /* Table of signals that are registered with "catch signal".  A
  226.    non-zero entry indicates that the signal is caught by some "catch
  227.    signal" command.  This has size GDB_SIGNAL_LAST, to accommodate all
  228.    signals.  */
  229. static unsigned char *signal_catch;

  230. /* Table of signals that the target may silently handle.
  231.    This is automatically determined from the flags above,
  232.    and simply cached here.  */
  233. static unsigned char *signal_pass;

/* For each signal number below NSIGS whose entry in SIGS is non-zero,
   set the corresponding entry of FLAGS to 1.  Used to apply a parsed
   signal-number selection to one of the signal_* tables above.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* Inverse of SET_SIGS: clear the FLAGS entry for every signal
   selected by SIGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)

  248. /* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
  249.    this function is to avoid exporting `signal_program'.  */

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */
void
update_signals_program_target (void)
{
  target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
}

  255. /* Value to pass to target_resume() to cause all threads to resume.  */

  256. #define RESUME_ALL minus_one_ptid

  257. /* Command list pointer for the "stop" placeholder.  */

  258. static struct cmd_list_element *stop_command;

  259. /* Function inferior was in as of last step command.  */

  260. static struct symbol *step_start_function;

  261. /* Nonzero if we want to give control to the user when we're notified
  262.    of shared library events by the dynamic linker.  */
  263. int stop_on_solib_events;

  264. /* Enable or disable optional shared library event breakpoints
  265.    as appropriate when the above flag is changed.  */

/* Callback for "set stop-on-solib-events": re-sync the optional
   shared library event breakpoints with the new flag value.  */
static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

/* Callback for the "show" command associated with
   stop_on_solib_events; VALUE is the setting rendered as text.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}

  278. /* Nonzero means expecting a trace trap
  279.    and should stop the inferior and return silently when it happens.  */

  280. int stop_after_trap;

  281. /* Save register contents here when executing a "finish" command or are
  282.    about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
  283.    Thus this contains the return value from the called function (assuming
  284.    values are returned in a register).  */

  285. struct regcache *stop_registers;

  286. /* Nonzero after stop if current stack frame should be printed.  */

  287. static int stop_print_frame;

  288. /* This is a cached copy of the pid/waitstatus of the last event
  289.    returned by target_wait()/deprecated_target_wait_hook().  This
  290.    information is returned by get_last_target_status().  */
  291. static ptid_t target_last_wait_ptid;
  292. static struct target_waitstatus target_last_waitstatus;

  293. static void context_switch (ptid_t ptid);

  294. void init_thread_stepping_state (struct thread_info *tss);

  295. static const char follow_fork_mode_child[] = "child";
  296. static const char follow_fork_mode_parent[] = "parent";

  297. static const char *const follow_fork_mode_kind_names[] = {
  298.   follow_fork_mode_child,
  299.   follow_fork_mode_parent,
  300.   NULL
  301. };

  302. static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* Callback for the "show" command associated with
   follow_fork_mode_string ("parent" or "child"); VALUE is the setting
   rendered as text.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}


  312. /* Handle changes to the inferior list based on the type of fork,
  313.    which process is being followed, and whether the other process
  314.    should be detached.  On entry inferior_ptid must be the ptid of
  315.    the fork parent.  At return inferior_ptid is the ptid of the
  316.    followed inferior.  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  int parent_pid, child_pid;

  /* Distinguish vfork from fork: vfork keeps the parent blocked and
     shares its address space until the child execs or exits.  */
  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  /* Prefer the LWP id for the parent; fall back to the pid when the
     ptid carries no LWP.  */
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid
    = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);

  /* Refuse a combination that would hang the session: following the
     parent of a vfork in the foreground while keeping the child
     attached but stopped.  */
  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* NOTE(review): old_chain is declared but never used in
	     this branch — looks like leftover from an earlier
	     revision; confirm before removing.  */
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
	    }

	  if (info_verbose || debug_infrun)
	    {
	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after %s from "
				  "child process %d.\n"),
				has_vforked ? "vfork" : "fork",
				child_pid);
	    }
	}
      else
	{
	  /* Staying attached to both: register the child as a new
	     inferior, mirroring the parent's attributes.  */
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Temporarily switch inferior_ptid/program space to the
	     child; the cleanup restores both afterwards.  */
	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      /* Plain fork: give the child its own address and
		 program spaces, cloned from the parent's.  */
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (info_verbose || debug_infrun)
	{
	  target_terminal_ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("Attaching after process %d "
			      "%s to child process %d.\n"),
			    parent_pid,
			    has_vforked ? "vfork" : "fork",
			    child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      /* Saved before any detach, since detaching makes PARENT_INF
	 dangling (see note below).  */
      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse it's program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	{
	  if (info_verbose || debug_infrun)
	    {
	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after fork from "
				  "child process %d.\n"),
				child_pid);
	    }

	  target_detach (NULL, 0);
	}

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  /* Staying attached to both across a plain fork: the child
	     needs spaces of its own, cloned from the parent's.  */
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}
    }

  /* Let the target do its own follow/detach bookkeeping; its result
     becomes ours.  */
  return target_follow_fork (follow_child, detach_fork);
}

  523. /* Tell the target to follow the fork we're stopped at.  Returns true
  524.    if the inferior should be resumed; false, if the target for some
  525.    reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct interp *command_interp = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (!ptid_equal (wait_ptid, minus_one_ptid)
	  && !ptid_equal (inferior_ptid, wait_ptid))
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  switch_to_thread (wait_ptid);
	  should_resume = 0;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  */
	if (follow_child && should_resume)
	  {
	    /* Clone rather than move: the parent's breakpoints are
	       deleted below and re-installed on the child's thread.  */
	    step_resume_breakpoint = clone_momentary_breakpoint
					 (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    command_interp = tp->control.command_interp;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->control.command_interp = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		switch_to_thread (child);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->control.command_interp = command_interp;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child.\n"));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	    else
	      switch_to_thread (parent);
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

  670. static void
  671. follow_inferior_reset_breakpoints (void)
  672. {
  673.   struct thread_info *tp = inferior_thread ();

  674.   /* Was there a step_resume breakpoint?  (There was if the user
  675.      did a "next" at the fork() call.)  If so, explicitly reset its
  676.      thread number.  Cloned step_resume breakpoints are disabled on
  677.      creation, so enable it here now that it is associated with the
  678.      correct thread.

  679.      step_resumes are a form of bp that are made to be per-thread.
  680.      Since we created the step_resume bp when the parent process
  681.      was being debugged, and now are switching to the child process,
  682.      from the breakpoint package's viewpoint, that's a switch of
  683.      "threads".  We must update the bp's notion of which thread
  684.      it is for, or it'll be ignored when it triggers.  */

  685.   if (tp->control.step_resume_breakpoint)
  686.     {
  687.       breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
  688.       tp->control.step_resume_breakpoint->loc->enabled = 1;
  689.     }

  690.   /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  691.   if (tp->control.exception_resume_breakpoint)
  692.     {
  693.       breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
  694.       tp->control.exception_resume_breakpoint->loc->enabled = 1;
  695.     }

  696.   /* Reinsert all breakpoints in the child.  The user may have set
  697.      breakpoints after catching the fork, in which case those
  698.      were never set in the child, but only in the parent.  This makes
  699.      sure the inserted breakpoints match the breakpoint list.  */

  700.   breakpoint_re_set ();
  701.   insert_breakpoints ();
  702. }

  703. /* The child has exited or execed: resume threads of the parent the
  704.    user wanted to be executing.  */

  705. static int
  706. proceed_after_vfork_done (struct thread_info *thread,
  707.                           void *arg)
  708. {
  709.   int pid = * (int *) arg;

  710.   if (ptid_get_pid (thread->ptid) == pid
  711.       && is_running (thread->ptid)
  712.       && !is_executing (thread->ptid)
  713.       && !thread->stop_requested
  714.       && thread->suspend.stop_signal == GDB_SIGNAL_0)
  715.     {
  716.       if (debug_infrun)
  717.         fprintf_unfiltered (gdb_stdlog,
  718.                             "infrun: resuming vfork parent thread %s\n",
  719.                             target_pid_to_str (thread->ptid));

  720.       switch_to_thread (thread->ptid);
  721.       clear_proceed_status (0);
  722.       proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
  723.     }

  724.   return 0;
  725. }

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero if the
   vfork child execed, zero if it exited.  No-op unless the current
   inferior is a vfork child.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume at the end, or -1 to leave it
         alone.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  If the user wanted to
         detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct cleanup *old_chain;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          inf->vfork_parent->pending_detach = 0;

          if (!exec)
            {
              /* If we're handling a child exit, then inferior_ptid
                 points at the inferior's pid, not to a thread.  */
              old_chain = save_inferior_ptid ();
              save_current_program_space ();
              save_current_inferior ();
            }
          else
            old_chain = save_current_space_and_thread ();

          /* We're letting loose of the parent.  */
          tp = any_live_thread_of_process (inf->vfork_parent->pid);
          /* NOTE(review): TP is dereferenced unconditionally below;
             this assumes the vfork parent still has at least one live
             thread -- confirm.  */
          switch_to_thread (tp->ptid);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely chose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (debug_infrun || info_verbose)
            {
              target_terminal_ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("Detaching vfork parent process "
                                      "%d after child exec.\n"),
                                    inf->vfork_parent->pid);
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("Detaching vfork parent process "
                                      "%d after child exit.\n"),
                                    inf->vfork_parent->pid);
                }
            }

          target_detach (NULL, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;

          do_cleanups (old_chain);
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = add_program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = inf->vfork_parent->pid;

          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }
      else
        {
          struct cleanup *old_chain;
          struct program_space *pspace;

          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid, so that clone_program_space doesn't want
             to read the selected frame of a dead process.  */
          old_chain = save_inferior_ptid ();
          inferior_ptid = null_ptid;

          /* This inferior is dead, so avoid giving the breakpoints
             module the option to write through to it (cloning a
             program space resets breakpoints).  */
          inf->aspace = NULL;
          inf->pspace = NULL;
          pspace = add_program_space (maybe_new_address_space ());
          set_current_program_space (pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (pspace, inf->vfork_parent->pspace);
          inf->pspace = pspace;
          inf->aspace = pspace->aspace;

          /* Put back inferior_ptid.  We'll continue mourning this
             inferior.  */
          do_cleanups (old_chain);

          resume_parent = inf->vfork_parent->pid;
          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          struct cleanup *old_chain = make_cleanup_restore_current_thread ();

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);

          do_cleanups (old_chain);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current follow-exec mode.  Compared by pointer identity
   against the strings above (see follow_exec); defaults to following
   the exec in the same inferior.  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
  871. static void
  872. show_follow_exec_mode_string (struct ui_file *file, int from_tty,
  873.                               struct cmd_list_element *c, const char *value)
  874. {
  875.   fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"),  value);
  876. }

/* Follow an exec event through to the same (or a new) inferior, as
   directed by "follow-exec-mode".  EXECD_PATHNAME is assumed to be
   non-NULL.  NOTE(review): the PID parameter appears unused below --
   the code operates on inferior_ptid/current_inferior; confirm
   against callers.  */

static void
follow_exec (ptid_t pid, char *execd_pathname)
{
  struct thread_info *th = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  update_breakpoints_after_exec ();

  /* If there was one, it's gone now.  We cannot truly step-to-next
     statement through an exec().  */
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     already stopped --- if debugging in non-stop mode, it's possible
     the user had the main thread held stopped in the previous image
     --- release it now.  This is the same behavior as step-over-exec
     with scheduler-locking on in all-stop mode.  */
  th->stop_requested = 0;

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (inferior_ptid),
                     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  /* If a sysroot is in effect, look the new executable up under it
     rather than on the host filesystem.  */
  if (gdb_sysroot && *gdb_sysroot)
    {
      char *name = alloca (strlen (gdb_sysroot)
                            + strlen (execd_pathname)
                            + 1);

      strcpy (name, gdb_sysroot);
      strcat (name, execd_pathname);
      execd_pathname = name;
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  /* Pointer-identity comparison against the mode-name string.  */
  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
                   (inf->symfile_flags
                    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
                   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  /* Only meaningful while ASPACE is non-NULL.  */
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads that can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so
   this info could move to the address space object if/when GDB is
   extended).  The set of breakpoints being stepped over will normally
   be much smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

  1030. /* Record the address of the breakpoint/instruction we're currently
  1031.    stepping over.  */

  1032. static void
  1033. set_step_over_info (struct address_space *aspace, CORE_ADDR address,
  1034.                     int nonsteppable_watchpoint_p)
  1035. {
  1036.   step_over_info.aspace = aspace;
  1037.   step_over_info.address = address;
  1038.   step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  1039. }

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

  1042. static void
  1043. clear_step_over_info (void)
  1044. {
  1045.   step_over_info.aspace = NULL;
  1046.   step_over_info.address = 0;
  1047.   step_over_info.nonsteppable_watchpoint_p = 0;
  1048. }

  1049. /* See infrun.h.  */

  1050. int
  1051. stepping_past_instruction_at (struct address_space *aspace,
  1052.                               CORE_ADDR address)
  1053. {
  1054.   return (step_over_info.aspace != NULL
  1055.           && breakpoint_address_match (aspace, address,
  1056.                                        step_over_info.aspace,
  1057.                                        step_over_info.address));
  1058. }

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Nonzero while we're stepping past an instruction that triggers a
     non-steppable watchpoint (recorded via set_step_over_info).  */
  return step_over_info.nonsteppable_watchpoint_p;
}

  1065. /* Returns true if step-over info is valid.  */

  1066. static int
  1067. step_over_info_valid_p (void)
  1068. {
  1069.   return (step_over_info.aspace != NULL
  1070.           || stepping_past_nonsteppable_watchpoint ());
  1071. }


  1072. /* Displaced stepping.  */

  1073. /* In non-stop debugging mode, we must take special care to manage
  1074.    breakpoints properly; in particular, the traditional strategy for
  1075.    stepping a thread past a breakpoint it has hit is unsuitable.
  1076.    'Displaced stepping' is a tactic for stepping one thread past a
  1077.    breakpoint it has hit while ensuring that other threads running
  1078.    concurrently will hit the breakpoint as they should.

  1079.    The traditional way to step a thread T off a breakpoint in a
  1080.    multi-threaded program in all-stop mode is as follows:

  1081.    a0) Initially, all threads are stopped, and breakpoints are not
  1082.        inserted.
  1083.    a1) We single-step T, leaving breakpoints uninserted.
  1084.    a2) We insert breakpoints, and resume all threads.

  1085.    In non-stop debugging, however, this strategy is unsuitable: we
  1086.    don't want to have to stop all threads in the system in order to
  1087.    continue or step T past a breakpoint.  Instead, we use displaced
  1088.    stepping:

  1089.    n0) Initially, T is stopped, other threads are running, and
  1090.        breakpoints are inserted.
  1091.    n1) We copy the instruction "under" the breakpoint to a separate
  1092.        location, outside the main code stream, making any adjustments
  1093.        to the instruction, register, and memory state as directed by
  1094.        T's architecture.
  1095.    n2) We single-step T over the instruction at its new location.
  1096.    n3) We adjust the resulting register and memory state as directed
  1097.        by T's architecture.  This includes resetting T's PC to point
  1098.        back into the main instruction stream.
  1099.    n4) We resume T.

  1100.    This approach depends on the following gdbarch methods:

  1101.    - gdbarch_max_insn_length and gdbarch_displaced_step_location
  1102.      indicate where to copy the instruction, and how much space must
  1103.      be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

  1111.    - gdbarch_displaced_step_free_closure provides cleanup.

  1112.    The gdbarch_displaced_step_copy_insn and
  1113.    gdbarch_displaced_step_fixup functions must be written so that
  1114.    copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
  1118.    would have.  Exactly which responsibilities fall to the copy and
  1119.    which fall to the fixup is up to the author of those functions.

  1120.    See the comments in gdbarch.sh for details.

  1121.    Note that displaced stepping and software single-step cannot
  1122.    currently be used in combination, although with some care I think
  1123.    they could be made to.  Software single-step works by placing
  1124.    breakpoints on all possible subsequent instructions; if the
  1125.    displaced instruction is a PC-relative jump, those breakpoints
  1126.    could fall in very strange places --- on pages that aren't
  1127.    executable, or at addresses that are not proper instruction
  1128.    boundaries.  (We do generally let other threads run while we wait
  1129.    to hit the software single-step breakpoint, and they might
  1130.    encounter such a corrupted instruction.)  One way to work around
  1131.    this would be to have gdbarch_displaced_step_copy_insn fully
  1132.    simulate the effect of PC-relative instructions (and return NULL)
  1133.    on architectures that use software single-stepping.

  1134.    In non-stop mode, we can have independent and simultaneous step
  1135.    requests, so more than one thread may need to simultaneously step
  1136.    over a breakpoint.  The current implementation assumes there is
  1137.    only one scratch space per process.  In this case, we have to
  1138.    serialize access to the scratch space.  If thread A wants to step
  1139.    over a breakpoint, but we are currently waiting for some other
  1140.    thread to complete a displaced step, we leave thread A stopped and
  1141.    place it in the displaced_step_request_queue.  Whenever a displaced
  1142.    step finishes, we pick the next thread in the queue and start a new
  1143.    displaced step operation on it.  See displaced_step_prepare and
  1144.    displaced_step_fixup for details.  */

/* One entry in a per-inferior queue of threads waiting for their
   turn to do a displaced step.  */

struct displaced_step_request
{
  /* The thread waiting to step over a breakpoint.  */
  ptid_t ptid;
  /* Next request in the queue, or NULL.  */
  struct displaced_step_request *next;
};

/* Per-inferior displaced stepping state.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* A queue of pending displaced stepping requests.  One entry per
     thread that needs to do a displaced step.  */
  struct displaced_step_request *step_request_queue;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be
     used for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  (Presumably sized by the
     architecture's maximum instruction length -- confirm at the
     allocation site.)  */
  gdb_byte *step_saved_copy;
};

/* The list of displaced stepping states for all processes currently
   involved in displaced stepping.  */
static struct displaced_step_inferior_state *displaced_step_inferior_states;

  1178. /* Get the displaced stepping state of process PID.  */

  1179. static struct displaced_step_inferior_state *
  1180. get_displaced_stepping_state (int pid)
  1181. {
  1182.   struct displaced_step_inferior_state *state;

  1183.   for (state = displaced_step_inferior_states;
  1184.        state != NULL;
  1185.        state = state->next)
  1186.     if (state->pid == pid)
  1187.       return state;

  1188.   return NULL;
  1189. }

  1190. /* Add a new displaced stepping state for process PID to the displaced
  1191.    stepping state list, or return a pointer to an already existing
  1192.    entry, if it already exists.  Never returns NULL.  */

  1193. static struct displaced_step_inferior_state *
  1194. add_displaced_stepping_state (int pid)
  1195. {
  1196.   struct displaced_step_inferior_state *state;

  1197.   for (state = displaced_step_inferior_states;
  1198.        state != NULL;
  1199.        state = state->next)
  1200.     if (state->pid == pid)
  1201.       return state;

  1202.   state = xcalloc (1, sizeof (*state));
  1203.   state->pid = pid;
  1204.   state->next = displaced_step_inferior_states;
  1205.   displaced_step_inferior_states = state;

  1206.   return state;
  1207. }

  1208. /* If inferior is in displaced stepping, and ADDR equals to starting address
  1209.    of copy area, return corresponding displaced_step_closure.  Otherwise,
  1210.    return NULL.  */

  1211. struct displaced_step_closure*
  1212. get_displaced_step_closure_by_addr (CORE_ADDR addr)
  1213. {
  1214.   struct displaced_step_inferior_state *displaced
  1215.     = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));

  1216.   /* If checking the mode of displaced instruction in copy area.  */
  1217.   if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
  1218.      && (displaced->step_copy == addr))
  1219.     return displaced->step_closure;

  1220.   return NULL;
  1221. }

  1222. /* Remove the displaced stepping state of process PID.  */

  1223. static void
  1224. remove_displaced_stepping_state (int pid)
  1225. {
  1226.   struct displaced_step_inferior_state *it, **prev_next_p;

  1227.   gdb_assert (pid != 0);

  1228.   it = displaced_step_inferior_states;
  1229.   prev_next_p = &displaced_step_inferior_states;
  1230.   while (it)
  1231.     {
  1232.       if (it->pid == pid)
  1233.         {
  1234.           *prev_next_p = it->next;
  1235.           xfree (it);
  1236.           return;
  1237.         }

  1238.       prev_next_p = &it->next;
  1239.       it = *prev_next_p;
  1240.     }
  1241. }

/* Inferior-exit hook: discard any displaced stepping state still
   owned by the exiting inferior.  (Presumably registered as an
   observer elsewhere in this file -- confirm at the registration
   site.)  */

static void
infrun_inferior_exit (struct inferior *inf)
{
  remove_displaced_stepping_state (inf->pid);
}

/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  */

/* Interpreted by use_displaced_stepping; shown by
   show_can_use_displaced_stepping below.  */
static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;

  1255. static void
  1256. show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
  1257.                                  struct cmd_list_element *c,
  1258.                                  const char *value)
  1259. {
  1260.   if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
  1261.     fprintf_filtered (file,
  1262.                       _("Debugger's willingness to use displaced stepping "
  1263.                         "to step over breakpoints is %s (currently %s).\n"),
  1264.                       value, non_stop ? "on" : "off");
  1265.   else
  1266.     fprintf_filtered (file,
  1267.                       _("Debugger's willingness to use displaced stepping "
  1268.                         "to step over breakpoints is %s.\n"), value);
  1269. }

  1270. /* Return non-zero if displaced stepping can/should be used to step
  1271.    over breakpoints.  */

  1272. static int
  1273. use_displaced_stepping (struct gdbarch *gdbarch)
  1274. {
  1275.   return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
  1276.            || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
  1277.           && gdbarch_displaced_step_copy_insn_p (gdbarch)
  1278.           && find_record_target () == NULL);
  1279. }

/* Clean out any stray displaced stepping state.  Marks no step as
   pending and frees the architecture-specific closure, if one was
   allocated by a previous displaced step.  */
static void
displaced_step_clear (struct displaced_step_inferior_state *displaced)
{
  /* Indicate that there is no cleanup pending.  */
  displaced->step_ptid = null_ptid;

  /* Release the gdbarch closure, if any, and null the pointer so a
     repeated clear is harmless.  */
  if (displaced->step_closure)
    {
      gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
                                           displaced->step_closure);
      displaced->step_closure = NULL;
    }
}

/* Cleanup-callback wrapper around displaced_step_clear, suitable for
   make_cleanup.  ARG is the displaced_step_inferior_state to clear.  */
static void
displaced_step_clear_cleanup (void *arg)
{
  struct displaced_step_inferior_state *state = arg;

  displaced_step_clear (state);
}

  1299. /* Dump LEN bytes at BUF in hex to FILE, followed by a newline.  */
  1300. void
  1301. displaced_step_dump_bytes (struct ui_file *file,
  1302.                            const gdb_byte *buf,
  1303.                            size_t len)
  1304. {
  1305.   int i;

  1306.   for (i = 0; i < len; i++)
  1307.     fprintf_unfiltered (file, "%02x ", buf[i]);
  1308.   fputs_unfiltered ("\n", file);
  1309. }

  1310. /* Prepare to single-step, using displaced stepping.

  1311.    Note that we cannot use displaced stepping when we have a signal to
  1312.    deliver.  If we have a signal to deliver and an instruction to step
  1313.    over, then after the step, there will be no indication from the
  1314.    target whether the thread entered a signal handler or ignored the
  1315.    signal and stepped over the instruction successfully --- both cases
  1316.    result in a simple SIGTRAP.  In the first case we mustn't do a
  1317.    fixup, and in the second case we must --- but we can't tell which.
  1318.    Comments in the code for 'random signals' in handle_inferior_event
  1319.    explain how we handle this case instead.

  1320.    Returns 1 if preparing was successful -- this thread is going to be
  1321.    stepped now; or 0 if displaced stepping this thread got queued.  */
  1322. static int
  1323. displaced_step_prepare (ptid_t ptid)
  1324. {
  1325.   struct cleanup *old_cleanups, *ignore_cleanups;
  1326.   struct thread_info *tp = find_thread_ptid (ptid);
  1327.   struct regcache *regcache = get_thread_regcache (ptid);
  1328.   struct gdbarch *gdbarch = get_regcache_arch (regcache);
  1329.   CORE_ADDR original, copy;
  1330.   ULONGEST len;
  1331.   struct displaced_step_closure *closure;
  1332.   struct displaced_step_inferior_state *displaced;
  1333.   int status;

  1334.   /* We should never reach this function if the architecture does not
  1335.      support displaced stepping.  */
  1336.   gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  1337.   /* Disable range stepping while executing in the scratch pad.  We
  1338.      want a single-step even if executing the displaced instruction in
  1339.      the scratch buffer lands within the stepping range (e.g., a
  1340.      jump/branch).  */
  1341.   tp->control.may_range_step = 0;

  1342.   /* We have to displaced step one thread at a time, as we only have
  1343.      access to a single scratch space per inferior.  */

  1344.   displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  1345.   if (!ptid_equal (displaced->step_ptid, null_ptid))
  1346.     {
  1347.       /* Already waiting for a displaced step to finish.  Defer this
  1348.          request and place in queue.  */
  1349.       struct displaced_step_request *req, *new_req;

  1350.       if (debug_displaced)
  1351.         fprintf_unfiltered (gdb_stdlog,
  1352.                             "displaced: defering step of %s\n",
  1353.                             target_pid_to_str (ptid));

  1354.       new_req = xmalloc (sizeof (*new_req));
  1355.       new_req->ptid = ptid;
  1356.       new_req->next = NULL;

  1357.       if (displaced->step_request_queue)
  1358.         {
  1359.           for (req = displaced->step_request_queue;
  1360.                req && req->next;
  1361.                req = req->next)
  1362.             ;
  1363.           req->next = new_req;
  1364.         }
  1365.       else
  1366.         displaced->step_request_queue = new_req;

  1367.       return 0;
  1368.     }
  1369.   else
  1370.     {
  1371.       if (debug_displaced)
  1372.         fprintf_unfiltered (gdb_stdlog,
  1373.                             "displaced: stepping %s now\n",
  1374.                             target_pid_to_str (ptid));
  1375.     }

  1376.   displaced_step_clear (displaced);

  1377.   old_cleanups = save_inferior_ptid ();
  1378.   inferior_ptid = ptid;

  1379.   original = regcache_read_pc (regcache);

  1380.   copy = gdbarch_displaced_step_location (gdbarch);
  1381.   len = gdbarch_max_insn_length (gdbarch);

  1382.   /* Save the original contents of the copy area.  */
  1383.   displaced->step_saved_copy = xmalloc (len);
  1384.   ignore_cleanups = make_cleanup (free_current_contents,
  1385.                                   &displaced->step_saved_copy);
  1386.   status = target_read_memory (copy, displaced->step_saved_copy, len);
  1387.   if (status != 0)
  1388.     throw_error (MEMORY_ERROR,
  1389.                  _("Error accessing memory address %s (%s) for "
  1390.                    "displaced-stepping scratch space."),
  1391.                  paddress (gdbarch, copy), safe_strerror (status));
  1392.   if (debug_displaced)
  1393.     {
  1394.       fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
  1395.                           paddress (gdbarch, copy));
  1396.       displaced_step_dump_bytes (gdb_stdlog,
  1397.                                  displaced->step_saved_copy,
  1398.                                  len);
  1399.     };

  1400.   closure = gdbarch_displaced_step_copy_insn (gdbarch,
  1401.                                               original, copy, regcache);

  1402.   /* We don't support the fully-simulated case at present.  */
  1403.   gdb_assert (closure);

  1404.   /* Save the information we need to fix things up if the step
  1405.      succeeds.  */
  1406.   displaced->step_ptid = ptid;
  1407.   displaced->step_gdbarch = gdbarch;
  1408.   displaced->step_closure = closure;
  1409.   displaced->step_original = original;
  1410.   displaced->step_copy = copy;

  1411.   make_cleanup (displaced_step_clear_cleanup, displaced);

  1412.   /* Resume execution at the copy.  */
  1413.   regcache_write_pc (regcache, copy);

  1414.   discard_cleanups (ignore_cleanups);

  1415.   do_cleanups (old_cleanups);

  1416.   if (debug_displaced)
  1417.     fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
  1418.                         paddress (gdbarch, copy));

  1419.   return 1;
  1420. }

/* Write LEN bytes from MYADDR to target memory at MEMADDR, on behalf
   of thread PTID.  Temporarily switches the global inferior_ptid to
   PTID so write_memory targets the right inferior, restoring it via
   cleanup afterwards.  */
static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
                   const gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();

  inferior_ptid = ptid;
  write_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}

/* Restore the contents of the copy area for thread PTID.  Writes the
   bytes saved in DISPLACED->step_saved_copy back over the scratch-pad
   area at DISPLACED->step_copy, undoing the instruction copy done by
   displaced_step_prepare.  */

static void
displaced_step_restore (struct displaced_step_inferior_state *displaced,
                        ptid_t ptid)
{
  /* The saved area always spans the maximum instruction length for
     this architecture, matching what displaced_step_prepare saved.  */
  ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

  write_memory_ptid (ptid, displaced->step_copy,
                     displaced->step_saved_copy, len);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
                        target_pid_to_str (ptid),
                        paddress (displaced->step_gdbarch,
                                  displaced->step_copy));
}

/* Finish a displaced step for thread EVENT_PTID, which just stopped
   with SIGNAL.  If that thread was the one displaced-stepping in its
   process, restore the scratch area and fix up the thread's state
   (or, if the step didn't complete, just relocate its PC back from
   the scratch area).  Then start the next queued displaced stepping
   request for this process, if there is one.  No-op if no thread of
   EVENT_PTID's process was displaced-stepping.  */
static void
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Put the original instruction back in place of the copy.  */
  displaced_step_restore (displaced, displaced->step_ptid);

  /* Did the instruction complete successfully?  */
  if (signal == GDB_SIGNAL_TRAP)
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
                                    displaced->step_closure,
                                    displaced->step_original,
                                    displaced->step_copy,
                                    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
         relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      /* Map the scratch-area PC back to the equivalent offset from
         the original instruction's address.  */
      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  Leave the state object around, since we're likely to
     need it again soon.  */
  while (displaced->step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      struct regcache *regcache;
      struct gdbarch *gdbarch;
      CORE_ADDR actual_pc;
      struct address_space *aspace;

      /* Pop the head of the queue.  */
      head = displaced->step_request_queue;
      ptid = head->ptid;
      displaced->step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      regcache = get_thread_regcache (ptid);
      actual_pc = regcache_read_pc (regcache);
      aspace = get_regcache_aspace (regcache);

      if (breakpoint_here_p (aspace, actual_pc))
        {
          /* Still sitting on a breakpoint: prepare and start the
             deferred displaced step now.  */
          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: stepping queued %s now\n",
                                target_pid_to_str (ptid));

          displaced_step_prepare (ptid);

          gdbarch = get_regcache_arch (regcache);

          if (debug_displaced)
            {
              CORE_ADDR actual_pc = regcache_read_pc (regcache);
              gdb_byte buf[4];

              fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
                                  paddress (gdbarch, actual_pc));
              read_memory (actual_pc, buf, sizeof (buf));
              displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
            }

          /* Resume with a hardware single-step if the architecture
             says the copied instruction needs one.  */
          if (gdbarch_displaced_step_hw_singlestep (gdbarch,
                                                    displaced->step_closure))
            target_resume (ptid, 1, GDB_SIGNAL_0);
          else
            target_resume (ptid, 0, GDB_SIGNAL_0);

          /* Done, we're stepping a thread.  */
          break;
        }
      else
        {
          int step;
          struct thread_info *tp = inferior_thread ();

          /* The breakpoint we were sitting under has since been
             removed.  */
          tp->control.trap_expected = 0;

          /* Go back to what we were trying to do.  */
          step = currently_stepping (tp);

          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: breakpoint is gone: %s, step(%d)\n",
                                target_pid_to_str (tp->ptid), step);

          target_resume (ptid, step, GDB_SIGNAL_0);
          tp->suspend.stop_signal = GDB_SIGNAL_0;

          /* This request was discarded.  See if there's any other
             thread waiting for its turn.  */
        }
    }
}

  1544. /* Update global variables holding ptids to hold NEW_PTID if they were
  1545.    holding OLD_PTID.  */
  1546. static void
  1547. infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
  1548. {
  1549.   struct displaced_step_request *it;
  1550.   struct displaced_step_inferior_state *displaced;

  1551.   if (ptid_equal (inferior_ptid, old_ptid))
  1552.     inferior_ptid = new_ptid;

  1553.   for (displaced = displaced_step_inferior_states;
  1554.        displaced;
  1555.        displaced = displaced->next)
  1556.     {
  1557.       if (ptid_equal (displaced->step_ptid, old_ptid))
  1558.         displaced->step_ptid = new_ptid;

  1559.       for (it = displaced->step_request_queue; it; it = it->next)
  1560.         if (ptid_equal (it->ptid, old_ptid))
  1561.           it->ptid = new_ptid;
  1562.     }
  1563. }


  1564. /* Resuming.  */

  1565. /* Things to clean up if we QUIT out of resume ().  */
static void
resume_cleanups (void *ignore)
{
  /* If an inferior is still selected, remove any single-step
     breakpoints its current thread may have planted.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    delete_single_step_breakpoints (inferior_thread ());

  /* Bring run control back to a stopped state.  */
  normal_stop ();
}

/* Valid values of the "scheduler-locking" setting: never lock, always
   lock to the current thread, or lock only while stepping.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
/* NULL-terminated list of the setting names above, for the set/show
   enum command machinery.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
/* Currently selected scheduler-locking mode; compared by pointer
   identity against the schedlock_* constants above.  */
static const char *scheduler_mode = schedlock_off;
/* "show" callback for the scheduler-locking setting.  VALUE is the
   already-formatted setting string supplied by the command
   machinery.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for locking scheduler "
                      "during execution is \"%s\".\n"),
                    value);
}

/* "set" callback for the scheduler-locking setting.  Rejects the new
   mode if the target cannot lock the scheduler; note the mode is
   reverted to "off" before erroring out, so scheduler_mode never
   holds an unsupported value.  */
static void
set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
{
  if (!target_can_lock_scheduler)
    {
      scheduler_mode = schedlock_off;
      error (_("Target '%s' cannot support this command."), target_shortname);
    }
}

/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Read by user_visible_resume_ptid below.  */
int sched_multi = 0;

  1605. /* Try to setup for software single stepping over the specified location.
  1606.    Return 1 if target_resume() should use hardware single step.

  1607.    GDBARCH the current gdbarch.
  1608.    PC the location to step over.  */

  1609. static int
  1610. maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
  1611. {
  1612.   int hw_step = 1;

  1613.   if (execution_direction == EXEC_FORWARD
  1614.       && gdbarch_software_single_step_p (gdbarch)
  1615.       && gdbarch_software_single_step (gdbarch, get_current_frame ()))
  1616.     {
  1617.       hw_step = 0;
  1618.     }
  1619.   return hw_step;
  1620. }

  1621. ptid_t
  1622. user_visible_resume_ptid (int step)
  1623. {
  1624.   /* By default, resume all threads of all processes.  */
  1625.   ptid_t resume_ptid = RESUME_ALL;

  1626.   /* Maybe resume only all threads of the current process.  */
  1627.   if (!sched_multi && target_supports_multi_process ())
  1628.     {
  1629.       resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
  1630.     }

  1631.   /* Maybe resume a single thread after all.  */
  1632.   if (non_stop)
  1633.     {
  1634.       /* With non-stop mode on, threads are always handled
  1635.          individually.  */
  1636.       resume_ptid = inferior_ptid;
  1637.     }
  1638.   else if ((scheduler_mode == schedlock_on)
  1639.            || (scheduler_mode == schedlock_step && step))
  1640.     {
  1641.       /* User-settable 'scheduler' mode requires solo thread resume.  */
  1642.       resume_ptid = inferior_ptid;
  1643.     }

  1644.   /* We may actually resume fewer threads at first, e.g., if a thread
  1645.      is stopped at a breakpoint that needs stepping-off, but that
  1646.      should not be visible to the user/frontend, and neither should
  1647.      the frontend/user be allowed to proceed any of the threads that
  1648.      happen to be stopped for internal run control handling, if a
  1649.      previous command wanted them resumed.  */
  1650.   return resume_ptid;
  1651. }

  1652. /* Resume the inferior, but allow a QUIT.  This is useful if the user
  1653.    wants to interrupt some lengthy single-stepping operation
  1654.    (for child processes, the SIGINT goes to the inferior, and so
  1655.    we get a SIGINT random_signal, but for remote debugging and perhaps
  1656.    other targets, that's not true).

  1657.    STEP nonzero if we should step (zero to continue instead).
  1658.    SIG is the signal to give the inferior (zero for none).  */
  1659. void
  1660. resume (int step, enum gdb_signal sig)
  1661. {
  1662.   struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
  1663.   struct regcache *regcache = get_current_regcache ();
  1664.   struct gdbarch *gdbarch = get_regcache_arch (regcache);
  1665.   struct thread_info *tp = inferior_thread ();
  1666.   CORE_ADDR pc = regcache_read_pc (regcache);
  1667.   struct address_space *aspace = get_regcache_aspace (regcache);
  1668.   ptid_t resume_ptid;
  1669.   /* From here on, this represents the caller's step vs continue
  1670.      request, while STEP represents what we'll actually request the
  1671.      target to do.  STEP can decay from a step to a continue, if e.g.,
  1672.      we need to implement single-stepping with breakpoints (software
  1673.      single-step).  When deciding whether "set scheduler-locking step"
  1674.      applies, it's the callers intention that counts.  */
  1675.   const int entry_step = step;

  1676.   tp->stepped_breakpoint = 0;

  1677.   QUIT;

  1678.   if (current_inferior ()->waiting_for_vfork_done)
  1679.     {
  1680.       /* Don't try to single-step a vfork parent that is waiting for
  1681.          the child to get out of the shared memory region (by exec'ing
  1682.          or exiting).  This is particularly important on software
  1683.          single-step archs, as the child process would trip on the
  1684.          software single step breakpoint inserted for the parent
  1685.          process.  Since the parent will not actually execute any
  1686.          instruction until the child is out of the shared region (such
  1687.          are vfork's semantics), it is safe to simply continue it.
  1688.          Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
  1689.          the parent, and tell it to `keep_going', which automatically
  1690.          re-sets it stepping.  */
  1691.       if (debug_infrun)
  1692.         fprintf_unfiltered (gdb_stdlog,
  1693.                             "infrun: resume : clear step\n");
  1694.       step = 0;
  1695.     }

  1696.   if (debug_infrun)
  1697.     fprintf_unfiltered (gdb_stdlog,
  1698.                         "infrun: resume (step=%d, signal=%s), "
  1699.                         "trap_expected=%d, current thread [%s] at %s\n",
  1700.                         step, gdb_signal_to_symbol_string (sig),
  1701.                         tp->control.trap_expected,
  1702.                         target_pid_to_str (inferior_ptid),
  1703.                         paddress (gdbarch, pc));

  1704.   /* Normally, by the time we reach `resume', the breakpoints are either
  1705.      removed or inserted, as appropriate.  The exception is if we're sitting
  1706.      at a permanent breakpoint; we need to step over it, but permanent
  1707.      breakpoints can't be removed.  So we have to test for it here.  */
  1708.   if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
  1709.     {
  1710.       if (sig != GDB_SIGNAL_0)
  1711.         {
  1712.           /* We have a signal to pass to the inferior.  The resume
  1713.              may, or may not take us to the signal handler.  If this
  1714.              is a step, we'll need to stop in the signal handler, if
  1715.              there's one, (if the target supports stepping into
  1716.              handlers), or in the next mainline instruction, if
  1717.              there's no handler.  If this is a continue, we need to be
  1718.              sure to run the handler with all breakpoints inserted.
  1719.              In all cases, set a breakpoint at the current address
  1720.              (where the handler returns to), and once that breakpoint
  1721.              is hit, resume skipping the permanent breakpoint.  If
  1722.              that breakpoint isn't hit, then we've stepped into the
  1723.              signal handler (or hit some other event).  We'll delete
  1724.              the step-resume breakpoint then.  */

  1725.           if (debug_infrun)
  1726.             fprintf_unfiltered (gdb_stdlog,
  1727.                                 "infrun: resume: skipping permanent breakpoint, "
  1728.                                 "deliver signal first\n");

  1729.           clear_step_over_info ();
  1730.           tp->control.trap_expected = 0;

  1731.           if (tp->control.step_resume_breakpoint == NULL)
  1732.             {
  1733.               /* Set a "high-priority" step-resume, as we don't want
  1734.                  user breakpoints at PC to trigger (again) when this
  1735.                  hits.  */
  1736.               insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
  1737.               gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

  1738.               tp->step_after_step_resume_breakpoint = step;
  1739.             }

  1740.           insert_breakpoints ();
  1741.         }
  1742.       else
  1743.         {
  1744.           /* There's no signal to pass, we can go ahead and skip the
  1745.              permanent breakpoint manually.  */
  1746.           if (debug_infrun)
  1747.             fprintf_unfiltered (gdb_stdlog,
  1748.                                 "infrun: resume: skipping permanent breakpoint\n");
  1749.           gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
  1750.           /* Update pc to reflect the new address from which we will
  1751.              execute instructions.  */
  1752.           pc = regcache_read_pc (regcache);

  1753.           if (step)
  1754.             {
  1755.               /* We've already advanced the PC, so the stepping part
  1756.                  is done.  Now we need to arrange for a trap to be
  1757.                  reported to handle_inferior_event.  Set a breakpoint
  1758.                  at the current PC, and run to it.  Don't update
  1759.                  prev_pc, because if we end in
  1760.                  switch_back_to_stepping, we want the "expected thread
  1761.                  advanced also" branch to be taken.  IOW, we don't
  1762.                  want this thread to step further from PC
  1763.                  (overstep).  */
  1764.               insert_single_step_breakpoint (gdbarch, aspace, pc);
  1765.               insert_breakpoints ();

  1766.               tp->suspend.stop_signal = GDB_SIGNAL_0;
  1767.               /* We're continuing with all breakpoints inserted.  It's
  1768.                  safe to let the target bypass signals.  */
  1769.               target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
  1770.               /* ... and safe to let other threads run, according to
  1771.                  schedlock.  */
  1772.               resume_ptid = user_visible_resume_ptid (entry_step);
  1773.               target_resume (resume_ptid, 0, GDB_SIGNAL_0);
  1774.               discard_cleanups (old_cleanups);
  1775.               return;
  1776.             }
  1777.         }
  1778.     }

  1779.   /* If we have a breakpoint to step over, make sure to do a single
  1780.      step only.  Same if we have software watchpoints.  */
  1781.   if (tp->control.trap_expected || bpstat_should_step ())
  1782.     tp->control.may_range_step = 0;

  1783.   /* If enabled, step over breakpoints by executing a copy of the
  1784.      instruction at a different address.

  1785.      We can't use displaced stepping when we have a signal to deliver;
  1786.      the comments for displaced_step_prepare explain why.  The
  1787.      comments in the handle_inferior event for dealing with 'random
  1788.      signals' explain what we do instead.

  1789.      We can't use displaced stepping when we are waiting for vfork_done
  1790.      event, displaced stepping breaks the vfork child similarly as single
  1791.      step software breakpoint.  */
  1792.   if (use_displaced_stepping (gdbarch)
  1793.       && tp->control.trap_expected
  1794.       && sig == GDB_SIGNAL_0
  1795.       && !current_inferior ()->waiting_for_vfork_done)
  1796.     {
  1797.       struct displaced_step_inferior_state *displaced;

  1798.       if (!displaced_step_prepare (inferior_ptid))
  1799.         {
  1800.           /* Got placed in displaced stepping queue.  Will be resumed
  1801.              later when all the currently queued displaced stepping
  1802.              requests finish.  The thread is not executing at this
  1803.              point, and the call to set_executing will be made later.
  1804.              But we need to call set_running here, since from the
  1805.              user/frontend's point of view, threads were set running.
  1806.              Unless we're calling an inferior function, as in that
  1807.              case we pretend the inferior doesn't run at all.  */
  1808.           if (!tp->control.in_infcall)
  1809.             set_running (user_visible_resume_ptid (entry_step), 1);
  1810.           discard_cleanups (old_cleanups);
  1811.           return;
  1812.         }

  1813.       /* Update pc to reflect the new address from which we will execute
  1814.          instructions due to displaced stepping.  */
  1815.       pc = regcache_read_pc (get_thread_regcache (inferior_ptid));

  1816.       displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
  1817.       step = gdbarch_displaced_step_hw_singlestep (gdbarch,
  1818.                                                    displaced->step_closure);
  1819.     }

  1820.   /* Do we need to do it the hard way, w/temp breakpoints?  */
  1821.   else if (step)
  1822.     step = maybe_software_singlestep (gdbarch, pc);

  1823.   /* Currently, our software single-step implementation leads to different
  1824.      results than hardware single-stepping in one situation: when stepping
  1825.      into delivering a signal which has an associated signal handler,
  1826.      hardware single-step will stop at the first instruction of the handler,
  1827.      while software single-step will simply skip execution of the handler.

  1828.      For now, this difference in behavior is accepted since there is no
  1829.      easy way to actually implement single-stepping into a signal handler
  1830.      without kernel support.

  1831.      However, there is one scenario where this difference leads to follow-on
  1832.      problems: if we're stepping off a breakpoint by removing all breakpoints
  1833.      and then single-stepping.  In this case, the software single-step
  1834.      behavior means that even if there is a *breakpoint* in the signal
  1835.      handler, GDB still would not stop.

  1836.      Fortunately, we can at least fix this particular issue.  We detect
  1837.      here the case where we are about to deliver a signal while software
  1838.      single-stepping with breakpoints removed.  In this situation, we
  1839.      revert the decisions to remove all breakpoints and insert single-
  1840.      step breakpoints, and instead we install a step-resume breakpoint
  1841.      at the current address, deliver the signal without stepping, and
  1842.      once we arrive back at the step-resume breakpoint, actually step
  1843.      over the breakpoint we originally wanted to step over.  */
  1844.   if (thread_has_single_step_breakpoints_set (tp)
  1845.       && sig != GDB_SIGNAL_0
  1846.       && step_over_info_valid_p ())
  1847.     {
  1848.       /* If we have nested signals or a pending signal is delivered
  1849.          immediately after a handler returns, might might already have
  1850.          a step-resume breakpoint set on the earlier handler.  We cannot
  1851.          set another step-resume breakpoint; just continue on until the
  1852.          original breakpoint is hit.  */
  1853.       if (tp->control.step_resume_breakpoint == NULL)
  1854.         {
  1855.           insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
  1856.           tp->step_after_step_resume_breakpoint = 1;
  1857.         }

  1858.       delete_single_step_breakpoints (tp);

  1859.       clear_step_over_info ();
  1860.       tp->control.trap_expected = 0;

  1861.       insert_breakpoints ();
  1862.     }

  1863.   /* If STEP is set, it's a request to use hardware stepping
  1864.      facilities.  But in that case, we should never
  1865.      use singlestep breakpoint.  */
  1866.   gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  1867.   /* Decide the set of threads to ask the target to resume.  Start
  1868.      by assuming everything will be resumed, than narrow the set
  1869.      by applying increasingly restricting conditions.  */
  1870.   resume_ptid = user_visible_resume_ptid (entry_step);

  1871.   /* Even if RESUME_PTID is a wildcard, and we end up resuming less
  1872.      (e.g., we might need to step over a breakpoint), from the
  1873.      user/frontend's point of view, all threads in RESUME_PTID are now
  1874.      running.  Unless we're calling an inferior function, as in that
  1875.      case pretend we inferior doesn't run at all.  */
  1876.   if (!tp->control.in_infcall)
  1877.     set_running (resume_ptid, 1);

  1878.   /* Maybe resume a single thread after all.  */
  1879.   if ((step || thread_has_single_step_breakpoints_set (tp))
  1880.       && tp->control.trap_expected)
  1881.     {
  1882.       /* We're allowing a thread to run past a breakpoint it has
  1883.          hit, by single-stepping the thread with the breakpoint
  1884.          removed.  In which case, we need to single-step only this
  1885.          thread, and keep others stopped, as they can miss this
  1886.          breakpoint if allowed to run.  */
  1887.       resume_ptid = inferior_ptid;
  1888.     }

  1889.   if (execution_direction != EXEC_REVERSE
  1890.       && step && breakpoint_inserted_here_p (aspace, pc))
  1891.     {
  1892.       /* The only case we currently need to step a breakpoint
  1893.          instruction is when we have a signal to deliver.  See
  1894.          handle_signal_stop where we handle random signals that could
  1895.          take us out of the stepping range.  Normally, in that
  1896.          case we end up continuing (instead of stepping) over the
  1897.          signal handler with a breakpoint at PC, but there are cases
  1898.          where we should _always_ single-step, even if we have a
  1899.          step-resume breakpoint, like when a software watchpoint is
  1900.          set.  Assuming single-stepping and delivering a signal at the
  1901.          same time would take us to the signal handler, then we could
  1902.          have removed the breakpoint at PC to step over it.  However,
  1903.          some hardware step targets (like e.g., Mac OS) can't step
  1904.          into signal handlers, and for those, we need to leave the
  1905.          breakpoint at PC inserted, as otherwise if the handler
  1906.          recurses and executes PC again, it'll miss the breakpoint.
  1907.          So we leave the breakpoint inserted anyway, but we need to
  1908.          record that we tried to step a breakpoint instruction, so
  1909.          that adjust_pc_after_break doesn't end up confused.  */
  1910.       gdb_assert (sig != GDB_SIGNAL_0);

  1911.       tp->stepped_breakpoint = 1;

  1912.       /* Most targets can step a breakpoint instruction, thus
  1913.          executing it normally.  But if this one cannot, just
  1914.          continue and we will hit it anyway.  */
  1915.       if (gdbarch_cannot_step_breakpoint (gdbarch))
  1916.         step = 0;
  1917.     }

  1918.   if (debug_displaced
  1919.       && use_displaced_stepping (gdbarch)
  1920.       && tp->control.trap_expected)
  1921.     {
  1922.       struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
  1923.       struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
  1924.       CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
  1925.       gdb_byte buf[4];

  1926.       fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
  1927.                           paddress (resume_gdbarch, actual_pc));
  1928.       read_memory (actual_pc, buf, sizeof (buf));
  1929.       displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
  1930.     }

  1931.   if (tp->control.may_range_step)
  1932.     {
  1933.       /* If we're resuming a thread with the PC out of the step
  1934.          range, then we're doing some nested/finer run control
  1935.          operation, like stepping the thread out of the dynamic
  1936.          linker or the displaced stepping scratch pad.  We
  1937.          shouldn't have allowed a range step then.  */
  1938.       gdb_assert (pc_in_thread_step_range (pc, tp));
  1939.     }

  1940.   /* Install inferior's terminal modes.  */
  1941.   target_terminal_inferior ();

  1942.   /* Avoid confusing the next resume, if the next stop/resume
  1943.      happens to apply to another thread.  */
  1944.   tp->suspend.stop_signal = GDB_SIGNAL_0;

  1945.   /* Advise target which signals may be handled silently.  If we have
  1946.      removed breakpoints because we are stepping over one (in any
  1947.      thread), we need to receive all signals to avoid accidentally
  1948.      skipping a breakpoint during execution of a signal handler.  */
  1949.   if (step_over_info_valid_p ())
  1950.     target_pass_signals (0, NULL);
  1951.   else
  1952.     target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);

  1953.   target_resume (resume_ptid, step, sig);

  1954.   discard_cleanups (old_cleanups);
  1955. }

  1956. /* Proceeding.  */

  1957. /* Clear out all variables saying what to do when inferior is continued.
  1958.    First do this, then set the ones you want, then call `proceed'.  */

  1959. static void
  1960. clear_proceed_status_thread (struct thread_info *tp)
  1961. {
  1962.   if (debug_infrun)
  1963.     fprintf_unfiltered (gdb_stdlog,
  1964.                         "infrun: clear_proceed_status_thread (%s)\n",
  1965.                         target_pid_to_str (tp->ptid));

  1966.   /* If this signal should not be seen by program, give it zero.
  1967.      Used for debugging signals.  */
  1968.   if (!signal_pass_state (tp->suspend.stop_signal))
  1969.     tp->suspend.stop_signal = GDB_SIGNAL_0;

  1970.   tp->control.trap_expected = 0;
  1971.   tp->control.step_range_start = 0;
  1972.   tp->control.step_range_end = 0;
  1973.   tp->control.may_range_step = 0;
  1974.   tp->control.step_frame_id = null_frame_id;
  1975.   tp->control.step_stack_frame_id = null_frame_id;
  1976.   tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  1977.   tp->stop_requested = 0;

  1978.   tp->control.stop_step = 0;

  1979.   tp->control.proceed_to_finish = 0;

  1980.   tp->control.command_interp = NULL;

  1981.   /* Discard any remaining commands or status from previous stop.  */
  1982.   bpstat_clear (&tp->control.stop_bpstat);
  1983. }

  1984. void
  1985. clear_proceed_status (int step)
  1986. {
  1987.   if (!non_stop)
  1988.     {
  1989.       struct thread_info *tp;
  1990.       ptid_t resume_ptid;

  1991.       resume_ptid = user_visible_resume_ptid (step);

  1992.       /* In all-stop mode, delete the per-thread status of all threads
  1993.          we're about to resume, implicitly and explicitly.  */
  1994.       ALL_NON_EXITED_THREADS (tp)
  1995.         {
  1996.           if (!ptid_match (tp->ptid, resume_ptid))
  1997.             continue;
  1998.           clear_proceed_status_thread (tp);
  1999.         }
  2000.     }

  2001.   if (!ptid_equal (inferior_ptid, null_ptid))
  2002.     {
  2003.       struct inferior *inferior;

  2004.       if (non_stop)
  2005.         {
  2006.           /* If in non-stop mode, only delete the per-thread status of
  2007.              the current thread.  */
  2008.           clear_proceed_status_thread (inferior_thread ());
  2009.         }

  2010.       inferior = current_inferior ();
  2011.       inferior->control.stop_soon = NO_STOP_QUIETLY;
  2012.     }

  2013.   stop_after_trap = 0;

  2014.   clear_step_over_info ();

  2015.   observer_notify_about_to_proceed ();

  2016.   if (stop_registers)
  2017.     {
  2018.       regcache_xfree (stop_registers);
  2019.       stop_registers = NULL;
  2020.     }
  2021. }

  2022. /* Returns true if TP is still stopped at a breakpoint that needs
  2023.    stepping-over in order to make progress.  If the breakpoint is gone
  2024.    meanwhile, we can skip the whole step-over dance.  */

  2025. static int
  2026. thread_still_needs_step_over (struct thread_info *tp)
  2027. {
  2028.   if (tp->stepping_over_breakpoint)
  2029.     {
  2030.       struct regcache *regcache = get_thread_regcache (tp->ptid);

  2031.       if (breakpoint_here_p (get_regcache_aspace (regcache),
  2032.                              regcache_read_pc (regcache))
  2033.           == ordinary_breakpoint_here)
  2034.         return 1;

  2035.       tp->stepping_over_breakpoint = 0;
  2036.     }

  2037.   return 0;
  2038. }

  2039. /* Returns true if scheduler locking applies.  STEP indicates whether
  2040.    we're about to do a step/next-like command to a thread.  */

  2041. static int
  2042. schedlock_applies (int step)
  2043. {
  2044.   return (scheduler_mode == schedlock_on
  2045.           || (scheduler_mode == schedlock_step
  2046.               && step));
  2047. }

  2048. /* Look for a thread other than EXCEPT that has previously reported a
  2049.    breakpoint event, and thus needs a step-over in order to make
  2050.    progress.  Returns NULL if none is found.  STEP indicates whether
  2051.    we're about to step the current thread, in order to decide whether
  2052.    "set scheduler-locking step" applies.  */

  2053. static struct thread_info *
  2054. find_thread_needs_step_over (int step, struct thread_info *except)
  2055. {
  2056.   struct thread_info *tp, *current;

  2057.   /* With non-stop mode on, threads are always handled individually.  */
  2058.   gdb_assert (! non_stop);

  2059.   current = inferior_thread ();

  2060.   /* If scheduler locking applies, we can avoid iterating over all
  2061.      threads.  */
  2062.   if (schedlock_applies (step))
  2063.     {
  2064.       if (except != current
  2065.           && thread_still_needs_step_over (current))
  2066.         return current;

  2067.       return NULL;
  2068.     }

  2069.   ALL_NON_EXITED_THREADS (tp)
  2070.     {
  2071.       /* Ignore the EXCEPT thread.  */
  2072.       if (tp == except)
  2073.         continue;
  2074.       /* Ignore threads of processes we're not resuming.  */
  2075.       if (!sched_multi
  2076.           && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
  2077.         continue;

  2078.       if (thread_still_needs_step_over (tp))
  2079.         return tp;
  2080.     }

  2081.   return NULL;
  2082. }

  2083. /* Basic routine for continuing the program in various fashions.

  2084.    ADDR is the address to resume at, or -1 for resume where stopped.
  2085.    SIGGNAL is the signal to give it, or 0 for none,
  2086.    or -1 for act according to how it stopped.
  2087.    STEP is nonzero if should trap after one instruction.
  2088.    -1 means return after that and print nothing.
  2089.    You should probably set various step_... variables
  2090.    before calling here, if you are stepping.

  2091.    You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct thread_info *tp;
  CORE_ADDR pc;
  struct address_space *aspace;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = get_regcache_arch (regcache);
  aspace = get_regcache_aspace (regcache);
  pc = regcache_read_pc (regcache);
  tp = inferior_thread ();

  /* STEP > 0 requests a trap after one instruction; STEP < 0 means
     run to the next trap and return without printing.  */
  if (step > 0)
    step_start_function = find_pc_function (pc);
  if (step < 0)
    stop_after_trap = 1;

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (tp);

  /* ADDR == -1 means "resume where stopped".  */
  if (addr == (CORE_ADDR) -1)
    {
      if (pc == stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	tp->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	tp->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  /* GDB_SIGNAL_DEFAULT means "act according to how it stopped", i.e.
     keep the thread's previously recorded stop signal.  */
  if (siggnal != GDB_SIGNAL_DEFAULT)
    tp->suspend.stop_signal = siggnal;

  /* Record the interpreter that issued the execution command that
     caused this thread to resume.  If the top level interpreter is
     MI/async, and the execution command was a CLI command
     (next/step/etc.), we'll want to print stop event output to the MI
     console channel (the stepped-to line, etc.), as if the user
     entered the execution command on a real GDB console.  */
  inferior_thread ()->control.command_interp = command_interp ();

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s, step=%d)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal), step);

  if (non_stop)
    /* In non-stop, each thread is handled individually.  The context
       must already be set to the right thread here.  */
    ;
  else
    {
      struct thread_info *step_over;

      /* In a multi-threaded task we may select another thread and
	 then continue or step.

	 But if the old thread was stopped at a breakpoint, it will
	 immediately cause another breakpoint stop without any
	 execution (i.e. it will report a breakpoint hit incorrectly).
	 So we must step over it first.

	 Look for a thread other than the current (TP) that reported a
	 breakpoint hit and hasn't been resumed yet since.  */
      step_over = find_thread_needs_step_over (step, tp);
      if (step_over != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (step_over->ptid));

	  /* Store the prev_pc for the stepping thread too, needed by
	     switch_back_to_stepped_thread.  */
	  tp->prev_pc = regcache_read_pc (get_current_regcache ());
	  switch_to_thread (step_over->ptid);
	  tp = step_over;
	}
    }

  /* If we need to step over a breakpoint, and we're not using
     displaced stepping to do so, insert all breakpoints (watchpoints,
     etc.) but the one we're stepping over, step one instruction, and
     then re-insert the breakpoint when that step is finished.  */
  if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
    {
      struct regcache *regcache = get_current_regcache ();

      set_step_over_info (get_regcache_aspace (regcache),
			  regcache_read_pc (regcache), 0);
    }
  else
    clear_step_over_info ();

  insert_breakpoints ();

  tp->control.trap_expected = tp->stepping_over_breakpoint;

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Refresh prev_pc value just prior to resuming.  This used to be
     done in stop_waiting, however, setting prev_pc there did not handle
     scenarios such as inferior function calls or returning from
     a function via the return command.  In those cases, the prev_pc
     value was not set properly for subsequent commands.  The prev_pc value
     is used to initialize the starting line number in the ecs.  With an
     invalid value, the gdb next command ends up stopping at the position
     represented by the next line table entry past our start position.
     On platforms that generate one line table entry per line, this
     is not a problem.  However, on the ia64, the compiler generates
     extraneous line table entries that do not increase the line number.
     When we issue the gdb next command on the ia64 after an inferior call
     or a return command, we often end up a few instructions forward, still
     within the original line we started.

     An attempt was made to refresh the prev_pc at the same time the
     execution_control_state is initialized (for instance, just before
     waiting for an inferior event).  But this approach did not work
     because of platforms that use ptrace, where the pc register cannot
     be read unless the inferior is stopped.  At that point, we are not
     guaranteed the inferior is stopped and so the regcache_read_pc() call
     can fail.  Setting the prev_pc value here ensures the value is updated
     correctly when the inferior is stopped.  */
  tp->prev_pc = regcache_read_pc (get_current_regcache ());

  /* Resume inferior.  */
  resume (tp->control.trap_expected || step || bpstat_should_step (),
	  tp->suspend.stop_signal);

  /* Wait for it to stop (if not standalone)
     and in any case decode why it stopped, and act accordingly.  */
  /* Do this only if we are not using the event loop, or if the target
     does not support asynchronous execution.  */
  if (!target_can_async_p ())
    {
      wait_for_inferior ();
      normal_stop ();
    }
}


  2245. /* Start remote-debugging of a machine over a serial link.  */

void
start_remote (int from_tty)
{
  struct inferior *inferior;

  inferior = current_inferior ();
  /* Suppress the usual stop announcements for this initial stop; the
     remote target is only just coming up.  */
  inferior->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior ();

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (&current_target, from_tty);

  normal_stop ();
}

  2273. /* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset breakpoint bookkeeping for a freshly started inferior.  */
  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  target_last_wait_ptid = minus_one_ptid;

  previous_inferior_ptid = inferior_ptid;

  /* Discard any skipped inlined frames.  */
  clear_inline_frame_state (minus_one_ptid);
}


  2285. /* Data to be passed around while handling an event.  This data is
  2286.    discarded between events.  */
struct execution_control_state
{
  /* The ptid that target_wait reported the event for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status returned by target_wait.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in
     for the current event.  */
  int stop_func_filled_in;
  /* Bounds and name of the function the inferior stopped in.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if the event loop should keep waiting for further events
     instead of finishing the stop.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};

/* Forward declarations for the event-handling machinery defined
   further below.  */

static void handle_inferior_event (struct execution_control_state *ecs);

static void handle_step_into_function (struct gdbarch *gdbarch,
				       struct execution_control_state *ecs);
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
						struct execution_control_state *ecs);
static void handle_signal_stop (struct execution_control_state *ecs);
static void check_exception_resume (struct execution_control_state *,
				    struct frame_info *);

static void end_stepping_range (struct execution_control_state *ecs);
static void stop_waiting (struct execution_control_state *ecs);
static void prepare_to_wait (struct execution_control_state *ecs);
static void keep_going (struct execution_control_state *ecs);
static void process_event_stop_test (struct execution_control_state *ecs);
static int switch_back_to_stepped_thread (struct execution_control_state *ecs);

  2319. /* Callback for iterate over threads.  If the thread is stopped, but
  2320.    the user/frontend doesn't know about that yet, go through
  2321.    normal_stop, as if the thread had just stopped now.  ARG points at
  2322.    a ptid.  If PTID is MINUS_ONE_PTID, applies to all threads.  If
  2323.    ptid_is_pid(PTID) is true, applies to all threads of the process
  2324.    pointed at by PTID.  Otherwise, apply only to the thread pointed by
  2325.    PTID.  */

  2326. static int
  2327. infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
  2328. {
  2329.   ptid_t ptid = * (ptid_t *) arg;

  2330.   if ((ptid_equal (info->ptid, ptid)
  2331.        || ptid_equal (minus_one_ptid, ptid)
  2332.        || (ptid_is_pid (ptid)
  2333.            && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
  2334.       && is_running (info->ptid)
  2335.       && !is_executing (info->ptid))
  2336.     {
  2337.       struct cleanup *old_chain;
  2338.       struct execution_control_state ecss;
  2339.       struct execution_control_state *ecs = &ecss;

  2340.       memset (ecs, 0, sizeof (*ecs));

  2341.       old_chain = make_cleanup_restore_current_thread ();

  2342.       overlay_cache_invalid = 1;
  2343.       /* Flush target cache before starting to handle each event.
  2344.          Target was running and cache could be stale.  This is just a
  2345.          heuristic.  Running threads may modify target memory, but we
  2346.          don't get any event.  */
  2347.       target_dcache_invalidate ();

  2348.       /* Go through handle_inferior_event/normal_stop, so we always
  2349.          have consistent output as if the stop event had been
  2350.          reported.  */
  2351.       ecs->ptid = info->ptid;
  2352.       ecs->event_thread = find_thread_ptid (info->ptid);
  2353.       ecs->ws.kind = TARGET_WAITKIND_STOPPED;
  2354.       ecs->ws.value.sig = GDB_SIGNAL_0;

  2355.       handle_inferior_event (ecs);

  2356.       if (!ecs->wait_some_more)
  2357.         {
  2358.           struct thread_info *tp;

  2359.           normal_stop ();

  2360.           /* Finish off the continuations.  */
  2361.           tp = inferior_thread ();
  2362.           do_all_intermediate_continuations_thread (tp, 1);
  2363.           do_all_continuations_thread (tp, 1);
  2364.         }

  2365.       do_cleanups (old_chain);
  2366.     }

  2367.   return 0;
  2368. }

  2369. /* This function is attached as a "thread_stop_requested" observer.
  2370.    Cleanup local state that assumed the PTID was to be resumed, and
  2371.    report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  struct displaced_step_inferior_state *displaced;

  /* PTID was requested to stop.  Remove it from the displaced
     stepping queue, so we don't try to resume it automatically.  */

  for (displaced = displaced_step_inferior_states;
       displaced;
       displaced = displaced->next)
    {
      struct displaced_step_request *it, **prev_next_p;

      /* Walk the queue keeping a pointer to the previous "next" link,
	 so matching entries can be unlinked in place.  */
      it = displaced->step_request_queue;
      prev_next_p = &displaced->step_request_queue;
      while (it)
	{
	  if (ptid_match (it->ptid, ptid))
	    {
	      /* Unlink and free the matching request.  PREV_NEXT_P is
		 left in place so the successor is examined next.  */
	      *prev_next_p = it->next;
	      it->next = NULL;
	      xfree (it);
	    }
	  else
	    {
	      prev_next_p = &it->next;
	    }

	  it = *prev_next_p;
	}
    }

  /* Report the stop to the frontend for any matching thread that is
     already stopped but not yet known to be.  */
  iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
}

/* Observer for thread exit: if the exiting thread TP is the one the
   last target_wait returned, forget that ptid so stale state isn't
   reused.  SILENT is part of the observer signature and unused.  */

static void
infrun_thread_thread_exit (struct thread_info *tp, int silent)
{
  if (ptid_equal (target_last_wait_ptid, tp->ptid))
    nullify_last_target_wait_ptid ();
}

  2408. /* Delete the step resume, single-step and longjmp/exception resume
  2409.    breakpoints of TP.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  /* These are all internal, per-thread breakpoints that must not
     survive past the stop currently being handled.  */
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}

  2417. /* If the target still has execution, call FUNC for each thread that
  2418.    just stopped.  In all-stop, that's all the non-exited threads; in
  2419.    non-stop, that's the current thread, only.  */

  2420. typedef void (*for_each_just_stopped_thread_callback_func)
  2421.   (struct thread_info *tp);

  2422. static void
  2423. for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
  2424. {
  2425.   if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
  2426.     return;

  2427.   if (non_stop)
  2428.     {
  2429.       /* If in non-stop mode, only the current thread stopped.  */
  2430.       func (inferior_thread ());
  2431.     }
  2432.   else
  2433.     {
  2434.       struct thread_info *tp;

  2435.       /* In all-stop mode, all threads have stopped.  */
  2436.       ALL_NON_EXITED_THREADS (tp)
  2437.         {
  2438.           func (tp);
  2439.         }
  2440.     }
  2441. }

  2442. /* Delete the step resume and longjmp/exception resume breakpoints of
  2443.    the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  /* Per-thread infrun breakpoints of every just-stopped thread; see
     for_each_just_stopped_thread for which threads those are.  */
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}

  2449. /* Delete the single-step breakpoints of the threads that just
  2450.    stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  /* Only the single-step breakpoints, leaving step-resume and
     exception-resume breakpoints in place.  */
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}

  2456. /* A cleanup wrapper.  */

static void
delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
{
  /* ARG is unused; this only adapts the function above to the
     make_cleanup callback signature.  */
  delete_just_stopped_threads_infrun_breakpoints ();
}

  2462. /* Pretty print the results of target_wait, for debugging purposes.  */

  2463. static void
  2464. print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
  2465.                            const struct target_waitstatus *ws)
  2466. {
  2467.   char *status_string = target_waitstatus_to_string (ws);
  2468.   struct ui_file *tmp_stream = mem_fileopen ();
  2469.   char *text;

  2470.   /* The text is split over several lines because it was getting too long.
  2471.      Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
  2472.      output as a unit; we want only one timestamp printed if debug_timestamp
  2473.      is set.  */

  2474.   fprintf_unfiltered (tmp_stream,
  2475.                       "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
  2476.   if (ptid_get_pid (waiton_ptid) != -1)
  2477.     fprintf_unfiltered (tmp_stream,
  2478.                         " [%s]", target_pid_to_str (waiton_ptid));
  2479.   fprintf_unfiltered (tmp_stream, ", status) =\n");
  2480.   fprintf_unfiltered (tmp_stream,
  2481.                       "infrun:   %d [%s],\n",
  2482.                       ptid_get_pid (result_ptid),
  2483.                       target_pid_to_str (result_ptid));
  2484.   fprintf_unfiltered (tmp_stream,
  2485.                       "infrun:   %s\n",
  2486.                       status_string);

  2487.   text = ui_file_xstrdup (tmp_stream, NULL);

  2488.   /* This uses %s in part to handle %'s in the text, but also to avoid
  2489.      a gcc error: the format attribute requires a string literal.  */
  2490.   fprintf_unfiltered (gdb_stdlog, "%s", text);

  2491.   xfree (status_string);
  2492.   xfree (text);
  2493.   ui_file_delete (tmp_stream);
  2494. }

  2495. /* Prepare and stabilize the inferior for detaching it.  E.g.,
  2496.    detaching while a thread is displaced stepping is a recipe for
  2497.    crashing it, as nothing would readjust the PC out of the scratch
  2498.    pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
  struct cleanup *old_chain_1;
  struct displaced_step_inferior_state *displaced;

  displaced = get_displaced_stepping_state (inf->pid);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Let the rest of infrun know we're detaching; restored by the
     cleanup on every exit path.  */
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
  inf->detaching = 1;

  /* Keep consuming events until the in-flight displaced step of this
     process has drained.  */
  while (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      struct cleanup *old_chain_2;
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain_2 = make_cleanup (finish_thread_state_cleanup,
				  &minus_one_ptid);

      /* Now figure out what to do with the result of the wait.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain_2);

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  discard_cleanups (old_chain_1);
	  error (_("Program exited while detaching"));
	}
    }

  discard_cleanups (old_chain_1);
}

  2555. /* Wait for control to return from inferior to debugger.

  2556.    If inferior gets a signal, we may decide to start it up again
  2557.    instead of returning.  That is why there is a loop in this function.
  2558.    When this function actually returns it means the inferior
  2559.    should be left stopped and GDB should read more commands.  */

  2560. void
  2561. wait_for_inferior (void)
  2562. {
  2563.   struct cleanup *old_cleanups;

  2564.   if (debug_infrun)
  2565.     fprintf_unfiltered
  2566.       (gdb_stdlog, "infrun: wait_for_inferior ()\n");

  2567.   old_cleanups
  2568.     = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
  2569.                     NULL);

  2570.   while (1)
  2571.     {
  2572.       struct execution_control_state ecss;
  2573.       struct execution_control_state *ecs = &ecss;
  2574.       struct cleanup *old_chain;
  2575.       ptid_t waiton_ptid = minus_one_ptid;

  2576.       memset (ecs, 0, sizeof (*ecs));

  2577.       overlay_cache_invalid = 1;

  2578.       /* Flush target cache before starting to handle each event.
  2579.          Target was running and cache could be stale.  This is just a
  2580.          heuristic.  Running threads may modify target memory, but we
  2581.          don't get any event.  */
  2582.       target_dcache_invalidate ();

  2583.       if (deprecated_target_wait_hook)
  2584.         ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
  2585.       else
  2586.         ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

  2587.       if (debug_infrun)
  2588.         print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  2589.       /* If an error happens while handling the event, propagate GDB's
  2590.          knowledge of the executing state to the frontend/user running
  2591.          state.  */
  2592.       old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);

  2593.       /* Now figure out what to do with the result of the result.  */
  2594.       handle_inferior_event (ecs);

  2595.       /* No error, don't finish the state yet.  */
  2596.       discard_cleanups (old_chain);

  2597.       if (!ecs->wait_some_more)
  2598.         break;
  2599.     }

  2600.   do_cleanups (old_cleanups);
  2601. }

  2602. /* Cleanup that reinstalls the readline callback handler, if the
  2603.    target is running in the background.  If while handling the target
  2604.    event something triggered a secondary prompt, like e.g., a
  2605.    pagination prompt, we'll have removed the callback handler (see
  2606.    gdb_readline_wrapper_line).  Need to do this as we go back to the
  2607.    event loop, ready to process further input.  Note this has no
  2608.    effect if the handler hasn't actually been removed, because calling
  2609.    rl_callback_handler_install resets the line buffer, thus losing
  2610.    input.  */

  2611. static void
  2612. reinstall_readline_callback_handler_cleanup (void *arg)
  2613. {
  2614.   if (async_command_editing_p && !sync_execution)
  2615.     gdb_rl_callback_handler_reinstall ();
  2616. }

  2617. /* Asynchronous version of wait_for_inferior.  It is called by the
  2618.    event loop whenever a change of state is detected on the file
  2619.    descriptor corresponding to the target.  It can be called more than
  2620.    once to complete a single execution command.  In such cases we need
  2621.    to keep the state in a global variable ECSS.  If it is the last time
  2622.    that this function is called for a single execution command, then
  2623.    report to the user that the inferior has stopped, and do the
  2624.    necessary cleanups.  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  /* Anchor for all the cleanups pushed below; run at the end to
     revert thread/frame/traceframe state.  */
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  /* Cleanup that finishes thread states if the handler throws.  */
  struct cleanup *ts_old_chain;
  /* Remember whether we started in sync execution; compared against
     sync_execution again at the end to detect completion.  */
  int was_sync = sync_execution;
  int cmd_done = 0;
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* End up with readline processing input, if necessary.  */
  make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);

  /* We're handling a live event, so make sure we're doing live
     debugging.  If we're looking at traceframes while the target is
     running, we're going to need to get back to that mode after
     handling the event.  */
  if (non_stop)
    {
      make_cleanup_restore_current_traceframe ();
      set_current_traceframe (-1);
    }

  if (non_stop)
    /* In non-stop mode, the user/frontend should not notice a thread
       switch due to internal events.  Make sure we reverse to the
       user selected thread and frame after handling the event and
       running any breakpoint commands.  */
    make_cleanup_restore_current_thread ();

  overlay_cache_invalid = 1;
  /* Flush target cache before starting to handle each event.  Target
     was running and cache could be stale.  This is just a heuristic.
     Running threads may modify target memory, but we don't get any
     event.  */
  target_dcache_invalidate ();

  /* Restore the execution direction afterwards; it is set here so the
     event is handled in the direction the target is moving in.  */
  make_cleanup_restore_integer (&execution_direction);
  execution_direction = target_execution_direction ();

  /* Non-blocking wait (TARGET_WNOHANG): we're called from the event
     loop, so an event should already be pending.  */
  if (deprecated_target_wait_hook)
    ecs->ptid =
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
  else
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  In all-stop every thread stopped; in non-stop only the
     event thread did.  */
  if (!non_stop)
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);

  /* Get executed before make_cleanup_restore_current_thread above to apply
     still for the thread which has thrown the exception.  */
  make_bpstat_clear_actions_cleanup ();

  make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);

  /* Now figure out what to do with the result of the wait.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    {
      struct inferior *inf = find_inferior_ptid (ecs->ptid);

      delete_just_stopped_threads_infrun_breakpoints ();

      /* We may not find an inferior if this was a process exit.  */
      if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
        normal_stop ();

      /* If the thread is mid-way through a multi-step command (e.g.
         "step N") and just finished one step, continue the command;
         otherwise report the command as complete.  */
      if (target_has_execution
          && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
          && ecs->ws.kind != TARGET_WAITKIND_EXITED
          && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
          && ecs->event_thread->step_multi
          && ecs->event_thread->control.stop_step)
        inferior_event_handler (INF_EXEC_CONTINUE, NULL);
      else
        {
          inferior_event_handler (INF_EXEC_COMPLETE, NULL);
          cmd_done = 1;
        }
    }

  /* No error, don't finish the thread states yet.  */
  discard_cleanups (ts_old_chain);

  /* Revert thread and frame.  */
  do_cleanups (old_chain);

  /* If the inferior was in sync execution mode, and now isn't,
     restore the prompt (a synchronous execution command has finished,
     and we're ready for input).  */
  if (interpreter_async && was_sync && !sync_execution)
    observer_notify_sync_execution_done ();

  /* With "set exec-done-display on", tell the user the asynchronous
     command has finished.  */
  if (cmd_done
      && !was_sync
      && exec_done_display_p
      && (ptid_equal (inferior_ptid, null_ptid)
          || !is_running (inferior_ptid)))
    printf_unfiltered (_("completed.\n"));
}

  2717. /* Record the frame and location we're currently stepping through.  */
  2718. void
  2719. set_step_info (struct frame_info *frame, struct symtab_and_line sal)
  2720. {
  2721.   struct thread_info *tp = inferior_thread ();

  2722.   tp->control.step_frame_id = get_frame_id (frame);
  2723.   tp->control.step_stack_frame_id = get_stack_frame_id (frame);

  2724.   tp->current_symtab = sal.symtab;
  2725.   tp->current_line = sal.line;
  2726. }

  2727. /* Clear context switchable stepping state.  */

  2728. void
  2729. init_thread_stepping_state (struct thread_info *tss)
  2730. {
  2731.   tss->stepped_breakpoint = 0;
  2732.   tss->stepping_over_breakpoint = 0;
  2733.   tss->stepping_over_watchpoint = 0;
  2734.   tss->step_after_step_resume_breakpoint = 0;
  2735. }

  2736. /* Set the cached copy of the last ptid/waitstatus.  */

  2737. static void
  2738. set_last_target_status (ptid_t ptid, struct target_waitstatus status)
  2739. {
  2740.   target_last_wait_ptid = ptid;
  2741.   target_last_waitstatus = status;
  2742. }

  2743. /* Return the cached copy of the last pid/waitstatus returned by
  2744.    target_wait()/deprecated_target_wait_hook().  The data is actually
  2745.    cached by handle_inferior_event(), which gets called immediately
  2746.    after target_wait()/deprecated_target_wait_hook().  */

  2747. void
  2748. get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
  2749. {
  2750.   *ptidp = target_last_wait_ptid;
  2751.   *status = target_last_waitstatus;
  2752. }

  2753. void
  2754. nullify_last_target_wait_ptid (void)
  2755. {
  2756.   target_last_wait_ptid = minus_one_ptid;
  2757. }

  2758. /* Switch thread contexts.  */

  2759. static void
  2760. context_switch (ptid_t ptid)
  2761. {
  2762.   if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
  2763.     {
  2764.       fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
  2765.                           target_pid_to_str (inferior_ptid));
  2766.       fprintf_unfiltered (gdb_stdlog, "to %s\n",
  2767.                           target_pid_to_str (ptid));
  2768.     }

  2769.   switch_to_thread (ptid);
  2770. }

static void
adjust_pc_after_break (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct address_space *aspace;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
                  0x08000002 :   INSN3
            PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
                  0x08000002 :   INSN3
                  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (ecs->ptid);
  gdbarch = get_regcache_arch (regcache);

  decr_pc = target_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  aspace = get_regcache_aspace (regcache);

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);

      /* Don't let the PC write below be recorded by the full record
         target as an inferior-originated change.  */
      if (record_full_is_used ())
        record_full_gdb_operation_disable_set ();

      /* When using hardware single-step, a SIGTRAP is reported for both
         a completed single-step and a software breakpoint.  Need to
         differentiate between the two, as the latter needs adjusting
         but the former does not.

         The SIGTRAP can be due to a completed hardware single-step only if
          - we didn't insert software single-step breakpoints
          - the thread to be examined is still the current thread
          - this thread is currently being stepped

         If any of these events did not occur, we must have stopped due
         to hitting a software breakpoint, and have to back up to the
         breakpoint address.

         As a special case, we could have hardware single-stepped a
         software breakpoint.  In this case (prev_pc == breakpoint_pc),
         we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (ecs->event_thread)
          || !ptid_equal (ecs->ptid, inferior_ptid)
          || !currently_stepping (ecs->event_thread)
          || (ecs->event_thread->stepped_breakpoint
              && ecs->event_thread->prev_pc == breakpoint_pc))
        regcache_write_pc (regcache, breakpoint_pc);

      do_cleanups (old_cleanups);
    }
}

  2870. static int
  2871. stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
  2872. {
  2873.   for (frame = get_prev_frame (frame);
  2874.        frame != NULL;
  2875.        frame = get_prev_frame (frame))
  2876.     {
  2877.       if (frame_id_eq (get_frame_id (frame), step_frame_id))
  2878.         return 1;
  2879.       if (get_frame_type (frame) != INLINE_FRAME)
  2880.         break;
  2881.     }

  2882.   return 0;
  2883. }

  2884. /* Auxiliary function that handles syscall entry/return events.
  2885.    It returns 1 if the inferior should keep going (and GDB
  2886.    should ignore the event), or 0 if the event deserves to be
  2887.    processed.  */

  2888. static int
  2889. handle_syscall_event (struct execution_control_state *ecs)
  2890. {
  2891.   struct regcache *regcache;
  2892.   int syscall_number;

  2893.   if (!ptid_equal (ecs->ptid, inferior_ptid))
  2894.     context_switch (ecs->ptid);

  2895.   regcache = get_thread_regcache (ecs->ptid);
  2896.   syscall_number = ecs->ws.value.syscall_number;
  2897.   stop_pc = regcache_read_pc (regcache);

  2898.   if (catch_syscall_enabled () > 0
  2899.       && catching_syscall_number (syscall_number) > 0)
  2900.     {
  2901.       if (debug_infrun)
  2902.         fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
  2903.                             syscall_number);

  2904.       ecs->event_thread->control.stop_bpstat
  2905.         = bpstat_stop_status (get_regcache_aspace (regcache),
  2906.                               stop_pc, ecs->ptid, &ecs->ws);

  2907.       if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
  2908.         {
  2909.           /* Catchpoint hit.  */
  2910.           return 0;
  2911.         }
  2912.     }

  2913.   /* If no catchpoint triggered for this, then keep going.  */
  2914.   keep_going (ecs);
  2915.   return 1;
  2916. }

  2917. /* Lazily fill in the execution_control_state's stop_func_* fields.  */

  2918. static void
  2919. fill_in_stop_func (struct gdbarch *gdbarch,
  2920.                    struct execution_control_state *ecs)
  2921. {
  2922.   if (!ecs->stop_func_filled_in)
  2923.     {
  2924.       /* Don't care about return value; stop_func_start and stop_func_name
  2925.          will both be 0 if it doesn't work.  */
  2926.       find_pc_partial_function (stop_pc, &ecs->stop_func_name,
  2927.                                 &ecs->stop_func_start, &ecs->stop_func_end);
  2928.       ecs->stop_func_start
  2929.         += gdbarch_deprecated_function_start_offset (gdbarch);

  2930.       if (gdbarch_skip_entrypoint_p (gdbarch))
  2931.         ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
  2932.                                                         ecs->stop_func_start);

  2933.       ecs->stop_func_filled_in = 1;
  2934.     }
  2935. }


  2936. /* Return the STOP_SOON field of the inferior pointed at by PTID.  */

  2937. static enum stop_kind
  2938. get_inferior_stop_soon (ptid_t ptid)
  2939. {
  2940.   struct inferior *inf = find_inferior_ptid (ptid);

  2941.   gdb_assert (inf != NULL);
  2942.   return inf->control.stop_soon;
  2943. }

  2944. /* Given an execution control state that has been freshly filled in by
  2945.    an event from the inferior, figure out what it means and take
  2946.    appropriate action.

  2947.    The alternatives are:

  2948.    1) stop_waiting and return; to really stop and return to the
  2949.    debugger.

  2950.    2) keep_going and return; to wait for the next event (set
  2951.    ecs->event_thread->stepping_over_breakpoint to 1 to single step
  2952.    once).  */

  2953. static void
  2954. handle_inferior_event (struct execution_control_state *ecs)
  2955. {
  2956.   enum stop_kind stop_soon;

  2957.   if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
  2958.     {
  2959.       /* We had an event in the inferior, but we are not interested in
  2960.          handling it at this level.  The lower layers have already
  2961.          done what needs to be done, if anything.

  2962.          One of the possible circumstances for this is when the
  2963.          inferior produces output for the console.  The inferior has
  2964.          not stopped, and we are ignoring the event.  Another possible
  2965.          circumstance is any event which the lower level knows will be
  2966.          reported multiple times without an intervening resume.  */
  2967.       if (debug_infrun)
  2968.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
  2969.       prepare_to_wait (ecs);
  2970.       return;
  2971.     }

  2972.   if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
  2973.       && target_can_async_p () && !sync_execution)
  2974.     {
  2975.       /* There were no unwaited-for children left in the target, but,
  2976.          we're not synchronously waiting for events either.  Just
  2977.          ignore.  Otherwise, if we were running a synchronous
  2978.          execution command, we need to cancel it and give the user
  2979.          back the terminal.  */
  2980.       if (debug_infrun)
  2981.         fprintf_unfiltered (gdb_stdlog,
  2982.                             "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
  2983.       prepare_to_wait (ecs);
  2984.       return;
  2985.     }

  2986.   /* Cache the last pid/waitstatus.  */
  2987.   set_last_target_status (ecs->ptid, ecs->ws);

  2988.   /* Always clear state belonging to the previous time we stopped.  */
  2989.   stop_stack_dummy = STOP_NONE;

  2990.   if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
  2991.     {
  2992.       /* No unwaited-for children left.  IOW, all resumed children
  2993.          have exited.  */
  2994.       if (debug_infrun)
  2995.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");

  2996.       stop_print_frame = 0;
  2997.       stop_waiting (ecs);
  2998.       return;
  2999.     }

  3000.   if (ecs->ws.kind != TARGET_WAITKIND_EXITED
  3001.       && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
  3002.     {
  3003.       ecs->event_thread = find_thread_ptid (ecs->ptid);
  3004.       /* If it's a new thread, add it to the thread database.  */
  3005.       if (ecs->event_thread == NULL)
  3006.         ecs->event_thread = add_thread (ecs->ptid);

  3007.       /* Disable range stepping.  If the next step request could use a
  3008.          range, this will be end up re-enabled then.  */
  3009.       ecs->event_thread->control.may_range_step = 0;
  3010.     }

  3011.   /* Dependent on valid ECS->EVENT_THREAD.  */
  3012.   adjust_pc_after_break (ecs);

  3013.   /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  3014.   reinit_frame_cache ();

  3015.   breakpoint_retire_moribund ();

  3016.   /* First, distinguish signals caused by the debugger from signals
  3017.      that have to do with the program's own actions.  Note that
  3018.      breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
  3019.      on the operating system version.  Here we detect when a SIGILL or
  3020.      SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
  3021.      something similar for SIGSEGV, since a SIGSEGV will be generated
  3022.      when we're trying to execute a breakpoint instruction on a
  3023.      non-executable stack.  This happens for call dummy breakpoints
  3024.      for architectures like SPARC that place call dummies on the
  3025.      stack.  */
  3026.   if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
  3027.       && (ecs->ws.value.sig == GDB_SIGNAL_ILL
  3028.           || ecs->ws.value.sig == GDB_SIGNAL_SEGV
  3029.           || ecs->ws.value.sig == GDB_SIGNAL_EMT))
  3030.     {
  3031.       struct regcache *regcache = get_thread_regcache (ecs->ptid);

  3032.       if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
  3033.                                       regcache_read_pc (regcache)))
  3034.         {
  3035.           if (debug_infrun)
  3036.             fprintf_unfiltered (gdb_stdlog,
  3037.                                 "infrun: Treating signal as SIGTRAP\n");
  3038.           ecs->ws.value.sig = GDB_SIGNAL_TRAP;
  3039.         }
  3040.     }

  3041.   /* Mark the non-executing threads accordingly.  In all-stop, all
  3042.      threads of all processes are stopped when we get any event
  3043.      reported.  In non-stop mode, only the event thread stops.  If
  3044.      we're handling a process exit in non-stop mode, there's nothing
  3045.      to do, as threads of the dead process are gone, and threads of
  3046.      any other process were left running.  */
  3047.   if (!non_stop)
  3048.     set_executing (minus_one_ptid, 0);
  3049.   else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
  3050.            && ecs->ws.kind != TARGET_WAITKIND_EXITED)
  3051.     set_executing (ecs->ptid, 0);

  3052.   switch (ecs->ws.kind)
  3053.     {
  3054.     case TARGET_WAITKIND_LOADED:
  3055.       if (debug_infrun)
  3056.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
  3057.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3058.         context_switch (ecs->ptid);
  3059.       /* Ignore gracefully during startup of the inferior, as it might
  3060.          be the shell which has just loaded some objects, otherwise
  3061.          add the symbols for the newly loaded objects.  Also ignore at
  3062.          the beginning of an attach or remote session; we will query
  3063.          the full list of libraries once the connection is
  3064.          established.  */

  3065.       stop_soon = get_inferior_stop_soon (ecs->ptid);
  3066.       if (stop_soon == NO_STOP_QUIETLY)
  3067.         {
  3068.           struct regcache *regcache;

  3069.           regcache = get_thread_regcache (ecs->ptid);

  3070.           handle_solib_event ();

  3071.           ecs->event_thread->control.stop_bpstat
  3072.             = bpstat_stop_status (get_regcache_aspace (regcache),
  3073.                                   stop_pc, ecs->ptid, &ecs->ws);

  3074.           if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
  3075.             {
  3076.               /* A catchpoint triggered.  */
  3077.               process_event_stop_test (ecs);
  3078.               return;
  3079.             }

  3080.           /* If requested, stop when the dynamic linker notifies
  3081.              gdb of events.  This allows the user to get control
  3082.              and place breakpoints in initializer routines for
  3083.              dynamically loaded objects (among other things).  */
  3084.           ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
  3085.           if (stop_on_solib_events)
  3086.             {
  3087.               /* Make sure we print "Stopped due to solib-event" in
  3088.                  normal_stop.  */
  3089.               stop_print_frame = 1;

  3090.               stop_waiting (ecs);
  3091.               return;
  3092.             }
  3093.         }

  3094.       /* If we are skipping through a shell, or through shared library
  3095.          loading that we aren't interested in, resume the program.  If
  3096.          we're running the program normally, also resume.  */
  3097.       if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
  3098.         {
  3099.           /* Loading of shared libraries might have changed breakpoint
  3100.              addresses.  Make sure new breakpoints are inserted.  */
  3101.           if (stop_soon == NO_STOP_QUIETLY)
  3102.             insert_breakpoints ();
  3103.           resume (0, GDB_SIGNAL_0);
  3104.           prepare_to_wait (ecs);
  3105.           return;
  3106.         }

  3107.       /* But stop if we're attaching or setting up a remote
  3108.          connection.  */
  3109.       if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
  3110.           || stop_soon == STOP_QUIETLY_REMOTE)
  3111.         {
  3112.           if (debug_infrun)
  3113.             fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
  3114.           stop_waiting (ecs);
  3115.           return;
  3116.         }

  3117.       internal_error (__FILE__, __LINE__,
  3118.                       _("unhandled stop_soon: %d"), (int) stop_soon);

  3119.     case TARGET_WAITKIND_SPURIOUS:
  3120.       if (debug_infrun)
  3121.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
  3122.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3123.         context_switch (ecs->ptid);
  3124.       resume (0, GDB_SIGNAL_0);
  3125.       prepare_to_wait (ecs);
  3126.       return;

  3127.     case TARGET_WAITKIND_EXITED:
  3128.     case TARGET_WAITKIND_SIGNALLED:
  3129.       if (debug_infrun)
  3130.         {
  3131.           if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
  3132.             fprintf_unfiltered (gdb_stdlog,
  3133.                                 "infrun: TARGET_WAITKIND_EXITED\n");
  3134.           else
  3135.             fprintf_unfiltered (gdb_stdlog,
  3136.                                 "infrun: TARGET_WAITKIND_SIGNALLED\n");
  3137.         }

  3138.       inferior_ptid = ecs->ptid;
  3139.       set_current_inferior (find_inferior_ptid (ecs->ptid));
  3140.       set_current_program_space (current_inferior ()->pspace);
  3141.       handle_vfork_child_exec_or_exit (0);
  3142.       target_terminal_ours ();        /* Must do this before mourn anyway.  */

  3143.       /* Clearing any previous state of convenience variables.  */
  3144.       clear_exit_convenience_vars ();

  3145.       if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
  3146.         {
  3147.           /* Record the exit code in the convenience variable $_exitcode, so
  3148.              that the user can inspect this again later.  */
  3149.           set_internalvar_integer (lookup_internalvar ("_exitcode"),
  3150.                                    (LONGEST) ecs->ws.value.integer);

  3151.           /* Also record this in the inferior itself.  */
  3152.           current_inferior ()->has_exit_code = 1;
  3153.           current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

  3154.           /* Support the --return-child-result option.  */
  3155.           return_child_result_value = ecs->ws.value.integer;

  3156.           observer_notify_exited (ecs->ws.value.integer);
  3157.         }
  3158.       else
  3159.         {
  3160.           struct regcache *regcache = get_thread_regcache (ecs->ptid);
  3161.           struct gdbarch *gdbarch = get_regcache_arch (regcache);

  3162.           if (gdbarch_gdb_signal_to_target_p (gdbarch))
  3163.             {
  3164.               /* Set the value of the internal variable $_exitsignal,
  3165.                  which holds the signal uncaught by the inferior.  */
  3166.               set_internalvar_integer (lookup_internalvar ("_exitsignal"),
  3167.                                        gdbarch_gdb_signal_to_target (gdbarch,
  3168.                                                           ecs->ws.value.sig));
  3169.             }
  3170.           else
  3171.             {
  3172.               /* We don't have access to the target's method used for
  3173.                  converting between signal numbers (GDB's internal
  3174.                  representation <-> target's representation).
  3175.                  Therefore, we cannot do a good job at displaying this
  3176.                  information to the user.  It's better to just warn
  3177.                  her about it (if infrun debugging is enabled), and
  3178.                  give up.  */
  3179.               if (debug_infrun)
  3180.                 fprintf_filtered (gdb_stdlog, _("\
  3181. Cannot fill $_exitsignal with the correct signal number.\n"));
  3182.             }

  3183.           observer_notify_signal_exited (ecs->ws.value.sig);
  3184.         }

  3185.       gdb_flush (gdb_stdout);
  3186.       target_mourn_inferior ();
  3187.       stop_print_frame = 0;
  3188.       stop_waiting (ecs);
  3189.       return;

  3190.       /* The following are the only cases in which we keep going;
  3191.          the above cases end in a continue or goto.  */
  3192.     case TARGET_WAITKIND_FORKED:
  3193.     case TARGET_WAITKIND_VFORKED:
  3194.       if (debug_infrun)
  3195.         {
  3196.           if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
  3197.             fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
  3198.           else
  3199.             fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
  3200.         }

  3201.       /* Check whether the inferior is displaced stepping.  */
  3202.       {
  3203.         struct regcache *regcache = get_thread_regcache (ecs->ptid);
  3204.         struct gdbarch *gdbarch = get_regcache_arch (regcache);
  3205.         struct displaced_step_inferior_state *displaced
  3206.           = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));

  3207.         /* If checking displaced stepping is supported, and thread
  3208.            ecs->ptid is displaced stepping.  */
  3209.         if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
  3210.           {
  3211.             struct inferior *parent_inf
  3212.               = find_inferior_ptid (ecs->ptid);
  3213.             struct regcache *child_regcache;
  3214.             CORE_ADDR parent_pc;

  3215.             /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
  3216.                indicating that the displaced stepping of syscall instruction
  3217.                has been done.  Perform cleanup for parent process here.  Note
  3218.                that this operation also cleans up the child process for vfork,
  3219.                because their pages are shared.  */
  3220.             displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);

  3221.             if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
  3222.               {
  3223.                 /* Restore scratch pad for child process.  */
  3224.                 displaced_step_restore (displaced, ecs->ws.value.related_pid);
  3225.               }

  3226.             /* Since the vfork/fork syscall instruction was executed in the scratchpad,
  3227.                the child's PC is also within the scratchpad.  Set the child's PC
  3228.                to the parent's PC value, which has already been fixed up.
  3229.                FIXME: we use the parent's aspace here, although we're touching
  3230.                the child, because the child hasn't been added to the inferior
  3231.                list yet at this point.  */

  3232.             child_regcache
  3233.               = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
  3234.                                                  gdbarch,
  3235.                                                  parent_inf->aspace);
  3236.             /* Read PC value of parent process.  */
  3237.             parent_pc = regcache_read_pc (regcache);

  3238.             if (debug_displaced)
  3239.               fprintf_unfiltered (gdb_stdlog,
  3240.                                   "displaced: write child pc from %s to %s\n",
  3241.                                   paddress (gdbarch,
  3242.                                             regcache_read_pc (child_regcache)),
  3243.                                   paddress (gdbarch, parent_pc));

  3244.             regcache_write_pc (child_regcache, parent_pc);
  3245.           }
  3246.       }

  3247.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3248.         context_switch (ecs->ptid);

  3249.       /* Immediately detach breakpoints from the child before there's
  3250.          any chance of letting the user delete breakpoints from the
  3251.          breakpoint lists.  If we don't do this early, it's easy to
  3252.          leave left over traps in the child, vis: "break foo; catch
  3253.          fork; c; <fork>; del; c; <child calls foo>".  We only follow
  3254.          the fork on the last `continue', and by that time the
  3255.          breakpoint at "foo" is long gone from the breakpoint table.
  3256.          If we vforked, then we don't need to unpatch here, since both
  3257.          parent and child are sharing the same memory pages; we'll
  3258.          need to unpatch at follow/detach time instead to be certain
  3259.          that new breakpoints added between catchpoint hit time and
  3260.          vfork follow are detached.  */
  3261.       if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
  3262.         {
  3263.           /* This won't actually modify the breakpoint list, but will
  3264.              physically remove the breakpoints from the child.  */
  3265.           detach_breakpoints (ecs->ws.value.related_pid);
  3266.         }

  3267.       delete_just_stopped_threads_single_step_breakpoints ();

  3268.       /* In case the event is caught by a catchpoint, remember that
  3269.          the event is to be followed at the next resume of the thread,
  3270.          and not immediately.  */
  3271.       ecs->event_thread->pending_follow = ecs->ws;

  3272.       stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

  3273.       ecs->event_thread->control.stop_bpstat
  3274.         = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
  3275.                               stop_pc, ecs->ptid, &ecs->ws);

  3276.       /* If no catchpoint triggered for this, then keep going.  Note
  3277.          that we're interested in knowing the bpstat actually causes a
  3278.          stop, not just if it may explain the signal.  Software
  3279.          watchpoints, for example, always appear in the bpstat.  */
  3280.       if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
  3281.         {
  3282.           ptid_t parent;
  3283.           ptid_t child;
  3284.           int should_resume;
  3285.           int follow_child
  3286.             = (follow_fork_mode_string == follow_fork_mode_child);

  3287.           ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  3288.           should_resume = follow_fork ();

  3289.           parent = ecs->ptid;
  3290.           child = ecs->ws.value.related_pid;

  3291.           /* In non-stop mode, also resume the other branch.  */
  3292.           if (non_stop && !detach_fork)
  3293.             {
  3294.               if (follow_child)
  3295.                 switch_to_thread (parent);
  3296.               else
  3297.                 switch_to_thread (child);

  3298.               ecs->event_thread = inferior_thread ();
  3299.               ecs->ptid = inferior_ptid;
  3300.               keep_going (ecs);
  3301.             }

  3302.           if (follow_child)
  3303.             switch_to_thread (child);
  3304.           else
  3305.             switch_to_thread (parent);

  3306.           ecs->event_thread = inferior_thread ();
  3307.           ecs->ptid = inferior_ptid;

  3308.           if (should_resume)
  3309.             keep_going (ecs);
  3310.           else
  3311.             stop_waiting (ecs);
  3312.           return;
  3313.         }
  3314.       process_event_stop_test (ecs);
  3315.       return;

  3316.     case TARGET_WAITKIND_VFORK_DONE:
  3317.       /* Done with the shared memory region.  Re-insert breakpoints in
  3318.          the parent, and keep going.  */

  3319.       if (debug_infrun)
  3320.         fprintf_unfiltered (gdb_stdlog,
  3321.                             "infrun: TARGET_WAITKIND_VFORK_DONE\n");

  3322.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3323.         context_switch (ecs->ptid);

  3324.       current_inferior ()->waiting_for_vfork_done = 0;
  3325.       current_inferior ()->pspace->breakpoints_not_allowed = 0;
  3326.       /* This also takes care of reinserting breakpoints in the
  3327.          previously locked inferior.  */
  3328.       keep_going (ecs);
  3329.       return;

  3330.     case TARGET_WAITKIND_EXECD:
  3331.       if (debug_infrun)
  3332.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");

  3333.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3334.         context_switch (ecs->ptid);

  3335.       stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

  3336.       /* Do whatever is necessary to the parent branch of the vfork.  */
  3337.       handle_vfork_child_exec_or_exit (1);

  3338.       /* This causes the eventpoints and symbol table to be reset.
  3339.          Must do this now, before trying to determine whether to
  3340.          stop.  */
  3341.       follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

  3342.       ecs->event_thread->control.stop_bpstat
  3343.         = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
  3344.                               stop_pc, ecs->ptid, &ecs->ws);

  3345.       /* Note that this may be referenced from inside
  3346.          bpstat_stop_status above, through inferior_has_execd.  */
  3347.       xfree (ecs->ws.value.execd_pathname);
  3348.       ecs->ws.value.execd_pathname = NULL;

  3349.       /* If no catchpoint triggered for this, then keep going.  */
  3350.       if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
  3351.         {
  3352.           ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
  3353.           keep_going (ecs);
  3354.           return;
  3355.         }
  3356.       process_event_stop_test (ecs);
  3357.       return;

  3358.       /* Be careful not to try to gather much state about a thread
  3359.          that's in a syscall.  It's frequently a losing proposition.  */
  3360.     case TARGET_WAITKIND_SYSCALL_ENTRY:
  3361.       if (debug_infrun)
  3362.         fprintf_unfiltered (gdb_stdlog,
  3363.                             "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
  3364.       /* Getting the current syscall number.  */
  3365.       if (handle_syscall_event (ecs) == 0)
  3366.         process_event_stop_test (ecs);
  3367.       return;

  3368.       /* Before examining the threads further, step this thread to
  3369.          get it entirely out of the syscall.  (We get notice of the
  3370.          event when the thread is just on the verge of exiting a
  3371.          syscall.  Stepping one instruction seems to get it back
  3372.          into user code.)  */
  3373.     case TARGET_WAITKIND_SYSCALL_RETURN:
  3374.       if (debug_infrun)
  3375.         fprintf_unfiltered (gdb_stdlog,
  3376.                             "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
  3377.       if (handle_syscall_event (ecs) == 0)
  3378.         process_event_stop_test (ecs);
  3379.       return;

  3380.     case TARGET_WAITKIND_STOPPED:
  3381.       if (debug_infrun)
  3382.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
  3383.       ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
  3384.       handle_signal_stop (ecs);
  3385.       return;

  3386.     case TARGET_WAITKIND_NO_HISTORY:
  3387.       if (debug_infrun)
  3388.         fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
  3389.       /* Reverse execution: target ran out of history info.  */

  3390.       delete_just_stopped_threads_single_step_breakpoints ();
  3391.       stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
  3392.       observer_notify_no_history ();
  3393.       stop_waiting (ecs);
  3394.       return;
  3395.     }
  3396. }

  3397. /* Come here when the program has stopped with a signal.  */

  3398. static void
  3399. handle_signal_stop (struct execution_control_state *ecs)
  3400. {
  3401.   struct frame_info *frame;
  3402.   struct gdbarch *gdbarch;
  3403.   int stopped_by_watchpoint;
  3404.   enum stop_kind stop_soon;
  3405.   int random_signal;

  3406.   gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);

  3407.   /* Do we need to clean up the state of a thread that has
  3408.      completed a displaced single-step?  (Doing so usually affects
  3409.      the PC, so do it here, before we set stop_pc.)  */
  3410.   displaced_step_fixup (ecs->ptid,
  3411.                         ecs->event_thread->suspend.stop_signal);

  3412.   /* If we either finished a single-step or hit a breakpoint, but
  3413.      the user wanted this thread to be stopped, pretend we got a
  3414.      SIG0 (generic unsignaled stop).  */
  3415.   if (ecs->event_thread->stop_requested
  3416.       && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
  3417.     ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  3418.   stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

  3419.   if (debug_infrun)
  3420.     {
  3421.       struct regcache *regcache = get_thread_regcache (ecs->ptid);
  3422.       struct gdbarch *gdbarch = get_regcache_arch (regcache);
  3423.       struct cleanup *old_chain = save_inferior_ptid ();

  3424.       inferior_ptid = ecs->ptid;

  3425.       fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
  3426.                           paddress (gdbarch, stop_pc));
  3427.       if (target_stopped_by_watchpoint ())
  3428.         {
  3429.           CORE_ADDR addr;

  3430.           fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");

  3431.           if (target_stopped_data_address (&current_target, &addr))
  3432.             fprintf_unfiltered (gdb_stdlog,
  3433.                                 "infrun: stopped data address = %s\n",
  3434.                                 paddress (gdbarch, addr));
  3435.           else
  3436.             fprintf_unfiltered (gdb_stdlog,
  3437.                                 "infrun: (no data address available)\n");
  3438.         }

  3439.       do_cleanups (old_chain);
  3440.     }

  3441.   /* This is originated from start_remote(), start_inferior() and
  3442.      shared libraries hook functions.  */
  3443.   stop_soon = get_inferior_stop_soon (ecs->ptid);
  3444.   if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
  3445.     {
  3446.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3447.         context_switch (ecs->ptid);
  3448.       if (debug_infrun)
  3449.         fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
  3450.       stop_print_frame = 1;
  3451.       stop_waiting (ecs);
  3452.       return;
  3453.     }

  3454.   if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3455.       && stop_after_trap)
  3456.     {
  3457.       if (!ptid_equal (ecs->ptid, inferior_ptid))
  3458.         context_switch (ecs->ptid);
  3459.       if (debug_infrun)
  3460.         fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
  3461.       stop_print_frame = 0;
  3462.       stop_waiting (ecs);
  3463.       return;
  3464.     }

  3465.   /* This originates from attach_command().  We need to overwrite
  3466.      the stop_signal here, because some kernels don't ignore a
  3467.      SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
  3468.      See more comments in inferior.h.  On the other hand, if we
  3469.      get a non-SIGSTOP, report it to the user - assume the backend
  3470.      will handle the SIGSTOP if it should show up later.

  3471.      Also consider that the attach is complete when we see a
  3472.      SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
  3473.      target extended-remote report it instead of a SIGSTOP
  3474.      (e.g. gdbserver).  We already rely on SIGTRAP being our
  3475.      signal, so this is no exception.

  3476.      Also consider that the attach is complete when we see a
  3477.      GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
  3478.      the target to stop all threads of the inferior, in case the
  3479.      low level attach operation doesn't stop them implicitly.  If
  3480.      they weren't stopped implicitly, then the stub will report a
  3481.      GDB_SIGNAL_0, meaning: stopped for no particular reason
  3482.      other than GDB's request.  */
  3483.   if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
  3484.       && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
  3485.           || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3486.           || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
  3487.     {
  3488.       stop_print_frame = 1;
  3489.       stop_waiting (ecs);
  3490.       ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
  3491.       return;
  3492.     }

  3493.   /* See if something interesting happened to the non-current thread.  If
  3494.      so, then switch to that thread.  */
  3495.   if (!ptid_equal (ecs->ptid, inferior_ptid))
  3496.     {
  3497.       if (debug_infrun)
  3498.         fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");

  3499.       context_switch (ecs->ptid);

  3500.       if (deprecated_context_hook)
  3501.         deprecated_context_hook (pid_to_thread_id (ecs->ptid));
  3502.     }

  3503.   /* At this point, get hold of the now-current thread's frame.  */
  3504.   frame = get_current_frame ();
  3505.   gdbarch = get_frame_arch (frame);

  3506.   /* Pull the single step breakpoints out of the target.  */
  3507.   if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
  3508.     {
  3509.       struct regcache *regcache;
  3510.       struct address_space *aspace;
  3511.       CORE_ADDR pc;

  3512.       regcache = get_thread_regcache (ecs->ptid);
  3513.       aspace = get_regcache_aspace (regcache);
  3514.       pc = regcache_read_pc (regcache);

  3515.       /* However, before doing so, if this single-step breakpoint was
  3516.          actually for another thread, set this thread up for moving
  3517.          past it.  */
  3518.       if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
  3519.                                                    aspace, pc))
  3520.         {
  3521.           if (single_step_breakpoint_inserted_here_p (aspace, pc))
  3522.             {
  3523.               if (debug_infrun)
  3524.                 {
  3525.                   fprintf_unfiltered (gdb_stdlog,
  3526.                                       "infrun: [%s] hit another thread's "
  3527.                                       "single-step breakpoint\n",
  3528.                                       target_pid_to_str (ecs->ptid));
  3529.                 }
  3530.               ecs->hit_singlestep_breakpoint = 1;
  3531.             }
  3532.         }
  3533.       else
  3534.         {
  3535.           if (debug_infrun)
  3536.             {
  3537.               fprintf_unfiltered (gdb_stdlog,
  3538.                                   "infrun: [%s] hit its "
  3539.                                   "single-step breakpoint\n",
  3540.                                   target_pid_to_str (ecs->ptid));
  3541.             }
  3542.         }
  3543.     }
  3544.   delete_just_stopped_threads_single_step_breakpoints ();

  3545.   if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3546.       && ecs->event_thread->control.trap_expected
  3547.       && ecs->event_thread->stepping_over_watchpoint)
  3548.     stopped_by_watchpoint = 0;
  3549.   else
  3550.     stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  3551.   /* If necessary, step over this watchpoint.  We'll be back to display
  3552.      it in a moment.  */
  3553.   if (stopped_by_watchpoint
  3554.       && (target_have_steppable_watchpoint
  3555.           || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
  3556.     {
  3557.       /* At this point, we are stopped at an instruction which has
  3558.          attempted to write to a piece of memory under control of
  3559.          a watchpoint.  The instruction hasn't actually executed
  3560.          yet.  If we were to evaluate the watchpoint expression
  3561.          now, we would get the old value, and therefore no change
  3562.          would seem to have occurred.

  3563.          In order to make watchpoints work `right', we really need
  3564.          to complete the memory write, and then evaluate the
  3565.          watchpoint expression.  We do this by single-stepping the
  3566.          target.

  3567.          It may not be necessary to disable the watchpoint to step over
  3568.          it.  For example, the PA can (with some kernel cooperation)
  3569.          single step over a watchpoint without disabling the watchpoint.

  3570.          It is far more common to need to disable a watchpoint to step
  3571.          the inferior over it.  If we have non-steppable watchpoints,
  3572.          we must disable the current watchpoint; it's simplest to
  3573.          disable all watchpoints.

  3574.          Any breakpoint at PC must also be stepped over -- if there's
  3575.          one, it will have already triggered before the watchpoint
  3576.          triggered, and we either already reported it to the user, or
  3577.          it didn't cause a stop and we called keep_going.  In either
  3578.          case, if there was a breakpoint at PC, we must be trying to
  3579.          step past it.  */
  3580.       ecs->event_thread->stepping_over_watchpoint = 1;
  3581.       keep_going (ecs);
  3582.       return;
  3583.     }

  3584.   ecs->event_thread->stepping_over_breakpoint = 0;
  3585.   ecs->event_thread->stepping_over_watchpoint = 0;
  3586.   bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  3587.   ecs->event_thread->control.stop_step = 0;
  3588.   stop_print_frame = 1;
  3589.   stopped_by_random_signal = 0;

  3590.   /* Hide inlined functions starting here, unless we just performed stepi or
  3591.      nexti.  After stepi and nexti, always show the innermost frame (not any
  3592.      inline function call sites).  */
  3593.   if (ecs->event_thread->control.step_range_end != 1)
  3594.     {
  3595.       struct address_space *aspace =
  3596.         get_regcache_aspace (get_thread_regcache (ecs->ptid));

  3597.       /* skip_inline_frames is expensive, so we avoid it if we can
  3598.          determine that the address is one where functions cannot have
  3599.          been inlined.  This improves performance with inferiors that
  3600.          load a lot of shared libraries, because the solib event
  3601.          breakpoint is defined as the address of a function (i.e. not
  3602.          inline).  Note that we have to check the previous PC as well
  3603.          as the current one to catch cases when we have just
  3604.          single-stepped off a breakpoint prior to reinstating it.
  3605.          Note that we're assuming that the code we single-step to is
  3606.          not inline, but that's not definitive: there's nothing
  3607.          preventing the event breakpoint function from containing
  3608.          inlined code, and the single-step ending up there.  If the
  3609.          user had set a breakpoint on that inlined code, the missing
  3610.          skip_inline_frames call would break things.  Fortunately
  3611.          that's an extremely unlikely scenario.  */
  3612.       if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
  3613.           && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3614.                && ecs->event_thread->control.trap_expected
  3615.                && pc_at_non_inline_function (aspace,
  3616.                                              ecs->event_thread->prev_pc,
  3617.                                              &ecs->ws)))
  3618.         {
  3619.           skip_inline_frames (ecs->ptid);

  3620.           /* Re-fetch current thread's frame in case that invalidated
  3621.              the frame cache.  */
  3622.           frame = get_current_frame ();
  3623.           gdbarch = get_frame_arch (frame);
  3624.         }
  3625.     }

  3626.   if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3627.       && ecs->event_thread->control.trap_expected
  3628.       && gdbarch_single_step_through_delay_p (gdbarch)
  3629.       && currently_stepping (ecs->event_thread))
  3630.     {
  3631.       /* We're trying to step off a breakpoint.  Turns out that we're
  3632.          also on an instruction that needs to be stepped multiple
  3633.          times before it's been fully executing.  E.g., architectures
  3634.          with a delay slot.  It needs to be stepped twice, once for
  3635.          the instruction and once for the delay slot.  */
  3636.       int step_through_delay
  3637.         = gdbarch_single_step_through_delay (gdbarch, frame);

  3638.       if (debug_infrun && step_through_delay)
  3639.         fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
  3640.       if (ecs->event_thread->control.step_range_end == 0
  3641.           && step_through_delay)
  3642.         {
  3643.           /* The user issued a continue when stopped at a breakpoint.
  3644.              Set up for another trap and get out of here.  */
  3645.          ecs->event_thread->stepping_over_breakpoint = 1;
  3646.          keep_going (ecs);
  3647.          return;
  3648.         }
  3649.       else if (step_through_delay)
  3650.         {
  3651.           /* The user issued a step when stopped at a breakpoint.
  3652.              Maybe we should stop, maybe we should not - the delay
  3653.              slot *might* correspond to a line of source.  In any
  3654.              case, don't decide that here, just set
  3655.              ecs->stepping_over_breakpoint, making sure we
  3656.              single-step again before breakpoints are re-inserted.  */
  3657.           ecs->event_thread->stepping_over_breakpoint = 1;
  3658.         }
  3659.     }

  3660.   /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
  3661.      handles this event.  */
  3662.   ecs->event_thread->control.stop_bpstat
  3663.     = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
  3664.                           stop_pc, ecs->ptid, &ecs->ws);

  3665.   /* Following in case break condition called a
  3666.      function.  */
  3667.   stop_print_frame = 1;

  3668.   /* This is where we handle "moribund" watchpoints.  Unlike
  3669.      software breakpoints traps, hardware watchpoint traps are
  3670.      always distinguishable from random traps.  If no high-level
  3671.      watchpoint is associated with the reported stop data address
  3672.      anymore, then the bpstat does not explain the signal ---
  3673.      simply make sure to ignore it if `stopped_by_watchpoint' is
  3674.      set.  */

  3675.   if (debug_infrun
  3676.       && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3677.       && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
  3678.                                   GDB_SIGNAL_TRAP)
  3679.       && stopped_by_watchpoint)
  3680.     fprintf_unfiltered (gdb_stdlog,
  3681.                         "infrun: no user watchpoint explains "
  3682.                         "watchpoint SIGTRAP, ignoring\n");

  3683.   /* NOTE: cagney/2003-03-29: These checks for a random signal
  3684.      at one stage in the past included checks for an inferior
  3685.      function call's call dummy's return breakpoint.  The original
  3686.      comment, that went with the test, read:

  3687.      ``End of a stack dummy.  Some systems (e.g. Sony news) give
  3688.      another signal besides SIGTRAP, so check here as well as
  3689.      above.''

  3690.      If someone ever tries to get call dummys on a
  3691.      non-executable stack to work (where the target would stop
  3692.      with something like a SIGSEGV), then those tests might need
  3693.      to be re-instated.  Given, however, that the tests were only
  3694.      enabled when momentary breakpoints were not being used, I
  3695.      suspect that it won't be the case.

  3696.      NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
  3697.      be necessary for call dummies on a non-executable stack on
  3698.      SPARC.  */

  3699.   /* See if the breakpoints module can explain the signal.  */
  3700.   random_signal
  3701.     = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
  3702.                                ecs->event_thread->suspend.stop_signal);

  3703.   /* If not, perhaps stepping/nexting can.  */
  3704.   if (random_signal)
  3705.     random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
  3706.                       && currently_stepping (ecs->event_thread));

  3707.   /* Perhaps the thread hit a single-step breakpoint of _another_
  3708.      thread.  Single-step breakpoints are transparent to the
  3709.      breakpoints module.  */
  3710.   if (random_signal)
  3711.     random_signal = !ecs->hit_singlestep_breakpoint;

  3712.   /* No?  Perhaps we got a moribund watchpoint.  */
  3713.   if (random_signal)
  3714.     random_signal = !stopped_by_watchpoint;

  3715.   /* For the program's own signals, act according to
  3716.      the signal handling tables.  */

  3717.   if (random_signal)
  3718.     {
  3719.       /* Signal not for debugging purposes.  */
  3720.       struct inferior *inf = find_inferior_ptid (ecs->ptid);
  3721.       enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;

  3722.       if (debug_infrun)
  3723.          fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
  3724.                              gdb_signal_to_symbol_string (stop_signal));

  3725.       stopped_by_random_signal = 1;

  3726.       /* Always stop on signals if we're either just gaining control
  3727.          of the program, or the user explicitly requested this thread
  3728.          to remain stopped.  */
  3729.       if (stop_soon != NO_STOP_QUIETLY
  3730.           || ecs->event_thread->stop_requested
  3731.           || (!inf->detaching
  3732.               && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
  3733.         {
  3734.           stop_waiting (ecs);
  3735.           return;
  3736.         }

  3737.       /* Notify observers the signal has "handle print" set.  Note we
  3738.          returned early above if stopping; normal_stop handles the
  3739.          printing in that case.  */
  3740.       if (signal_print[ecs->event_thread->suspend.stop_signal])
  3741.         {
  3742.           /* The signal table tells us to print about this signal.  */
  3743.           target_terminal_ours_for_output ();
  3744.           observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
  3745.           target_terminal_inferior ();
  3746.         }

  3747.       /* Clear the signal if it should not be passed.  */
  3748.       if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
  3749.         ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  3750.       if (ecs->event_thread->prev_pc == stop_pc
  3751.           && ecs->event_thread->control.trap_expected
  3752.           && ecs->event_thread->control.step_resume_breakpoint == NULL)
  3753.         {
  3754.           /* We were just starting a new sequence, attempting to
  3755.              single-step off of a breakpoint and expecting a SIGTRAP.
  3756.              Instead this signal arrives.  This signal will take us out
  3757.              of the stepping range so GDB needs to remember to, when
  3758.              the signal handler returns, resume stepping off that
  3759.              breakpoint.  */
  3760.           /* To simplify things, "continue" is forced to use the same
  3761.              code paths as single-step - set a breakpoint at the
  3762.              signal return address and then, once hit, step off that
  3763.              breakpoint.  */
  3764.           if (debug_infrun)
  3765.             fprintf_unfiltered (gdb_stdlog,
  3766.                                 "infrun: signal arrived while stepping over "
  3767.                                 "breakpoint\n");

  3768.           insert_hp_step_resume_breakpoint_at_frame (frame);
  3769.           ecs->event_thread->step_after_step_resume_breakpoint = 1;
  3770.           /* Reset trap_expected to ensure breakpoints are re-inserted.  */
  3771.           ecs->event_thread->control.trap_expected = 0;

  3772.           /* If we were nexting/stepping some other thread, switch to
  3773.              it, so that we don't continue it, losing control.  */
  3774.           if (!switch_back_to_stepped_thread (ecs))
  3775.             keep_going (ecs);
  3776.           return;
  3777.         }

  3778.       if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
  3779.           && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
  3780.               || ecs->event_thread->control.step_range_end == 1)
  3781.           && frame_id_eq (get_stack_frame_id (frame),
  3782.                           ecs->event_thread->control.step_stack_frame_id)
  3783.           && ecs->event_thread->control.step_resume_breakpoint == NULL)
  3784.         {
  3785.           /* The inferior is about to take a signal that will take it
  3786.              out of the single step range.  Set a breakpoint at the
  3787.              current PC (which is presumably where the signal handler
  3788.              will eventually return) and then allow the inferior to
  3789.              run free.

  3790.              Note that this is only needed for a signal delivered
  3791.              while in the single-step range.  Nested signals aren't a
  3792.              problem as they eventually all return.  */
  3793.           if (debug_infrun)
  3794.             fprintf_unfiltered (gdb_stdlog,
  3795.                                 "infrun: signal may take us out of "
  3796.                                 "single-step range\n");

  3797.           insert_hp_step_resume_breakpoint_at_frame (frame);
  3798.           ecs->event_thread->step_after_step_resume_breakpoint = 1;
  3799.           /* Reset trap_expected to ensure breakpoints are re-inserted.  */
  3800.           ecs->event_thread->control.trap_expected = 0;
  3801.           keep_going (ecs);
  3802.           return;
  3803.         }

  3804.       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
  3805.          when either there's a nested signal, or when there's a
  3806.          pending signal enabled just as the signal handler returns
  3807.          (leaving the inferior at the step-resume-breakpoint without
  3808.          actually executing it).  Either way continue until the
  3809.          breakpoint is really hit.  */

  3810.       if (!switch_back_to_stepped_thread (ecs))
  3811.         {
  3812.           if (debug_infrun)
  3813.             fprintf_unfiltered (gdb_stdlog,
  3814.                                 "infrun: random signal, keep going\n");

  3815.           keep_going (ecs);
  3816.         }
  3817.       return;
  3818.     }

  3819.   process_event_stop_test (ecs);
  3820. }

  3821. /* Come here when we've got some debug event / signal we can explain
  3822.    (IOW, not a random signal), and test whether it should cause a
  3823.    stop, or whether we should resume the inferior (transparently).
  3824.    E.g., could be a breakpoint whose condition evaluates false; we
  3825.    could be still stepping within the line; etc.  */

  3826. static void
  3827. process_event_stop_test (struct execution_control_state *ecs)
  3828. {
  3829.   struct symtab_and_line stop_pc_sal;
  3830.   struct frame_info *frame;
  3831.   struct gdbarch *gdbarch;
  3832.   CORE_ADDR jmp_buf_pc;
  3833.   struct bpstat_what what;

  3834.   /* Handle cases caused by hitting a breakpoint.  */

  3835.   frame = get_current_frame ();
  3836.   gdbarch = get_frame_arch (frame);

  3837.   what = bpstat_what (ecs->event_thread->control.stop_bpstat);

  3838.   if (what.call_dummy)
  3839.     {
  3840.       stop_stack_dummy = what.call_dummy;
  3841.     }

  3842.   /* If we hit an internal event that triggers symbol changes, the
  3843.      current frame will be invalidated within bpstat_what (e.g., if we
  3844.      hit an internal solib event).  Re-fetch it.  */
  3845.   frame = get_current_frame ();
  3846.   gdbarch = get_frame_arch (frame);

  3847.   switch (what.main_action)
  3848.     {
  3849.     case BPSTAT_WHAT_SET_LONGJMP_RESUME:
  3850.       /* If we hit the breakpoint at longjmp while stepping, we
  3851.          install a momentary breakpoint at the target of the
  3852.          jmp_buf.  */

  3853.       if (debug_infrun)
  3854.         fprintf_unfiltered (gdb_stdlog,
  3855.                             "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");

  3856.       ecs->event_thread->stepping_over_breakpoint = 1;

  3857.       if (what.is_longjmp)
  3858.         {
  3859.           struct value *arg_value;

  3860.           /* If we set the longjmp breakpoint via a SystemTap probe,
  3861.              then use it to extract the arguments.  The destination PC
  3862.              is the third argument to the probe.  */
  3863.           arg_value = probe_safe_evaluate_at_pc (frame, 2);
  3864.           if (arg_value)
  3865.             {
  3866.               jmp_buf_pc = value_as_address (arg_value);
  3867.               jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
  3868.             }
  3869.           else if (!gdbarch_get_longjmp_target_p (gdbarch)
  3870.                    || !gdbarch_get_longjmp_target (gdbarch,
  3871.                                                    frame, &jmp_buf_pc))
  3872.             {
  3873.               if (debug_infrun)
  3874.                 fprintf_unfiltered (gdb_stdlog,
  3875.                                     "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
  3876.                                     "(!gdbarch_get_longjmp_target)\n");
  3877.               keep_going (ecs);
  3878.               return;
  3879.             }

  3880.           /* Insert a breakpoint at resume address.  */
  3881.           insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
  3882.         }
  3883.       else
  3884.         check_exception_resume (ecs, frame);
  3885.       keep_going (ecs);
  3886.       return;

  3887.     case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
  3888.       {
  3889.         struct frame_info *init_frame;

  3890.         /* There are several cases to consider.

  3891.            1. The initiating frame no longer exists.  In this case we
  3892.            must stop, because the exception or longjmp has gone too
  3893.            far.

  3894.            2. The initiating frame exists, and is the same as the
  3895.            current frame.  We stop, because the exception or longjmp
  3896.            has been caught.

  3897.            3. The initiating frame exists and is different from the
  3898.            current frame.  This means the exception or longjmp has
  3899.            been caught beneath the initiating frame, so keep going.

  3900.            4. longjmp breakpoint has been placed just to protect
  3901.            against stale dummy frames and user is not interested in
  3902.            stopping around longjmps.  */

  3903.         if (debug_infrun)
  3904.           fprintf_unfiltered (gdb_stdlog,
  3905.                               "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");

  3906.         gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
  3907.                     != NULL);
  3908.         delete_exception_resume_breakpoint (ecs->event_thread);

  3909.         if (what.is_longjmp)
  3910.           {
  3911.             check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

  3912.             if (!frame_id_p (ecs->event_thread->initiating_frame))
  3913.               {
  3914.                 /* Case 4.  */
  3915.                 keep_going (ecs);
  3916.                 return;
  3917.               }
  3918.           }

  3919.         init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

  3920.         if (init_frame)
  3921.           {
  3922.             struct frame_id current_id
  3923.               = get_frame_id (get_current_frame ());
  3924.             if (frame_id_eq (current_id,
  3925.                              ecs->event_thread->initiating_frame))
  3926.               {
  3927.                 /* Case 2.  Fall through.  */
  3928.               }
  3929.             else
  3930.               {
  3931.                 /* Case 3.  */
  3932.                 keep_going (ecs);
  3933.                 return;
  3934.               }
  3935.           }

  3936.         /* For Cases 1 and 2, remove the step-resume breakpoint, if it
  3937.            exists.  */
  3938.         delete_step_resume_breakpoint (ecs->event_thread);

  3939.         end_stepping_range (ecs);
  3940.       }
  3941.       return;

  3942.     case BPSTAT_WHAT_SINGLE:
  3943.       if (debug_infrun)
  3944.         fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
  3945.       ecs->event_thread->stepping_over_breakpoint = 1;
  3946.       /* Still need to check other stuff, at least the case where we
  3947.          are stepping and step out of the right range.  */
  3948.       break;

  3949.     case BPSTAT_WHAT_STEP_RESUME:
  3950.       if (debug_infrun)
  3951.         fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");

  3952.       delete_step_resume_breakpoint (ecs->event_thread);
  3953.       if (ecs->event_thread->control.proceed_to_finish
  3954.           && execution_direction == EXEC_REVERSE)
  3955.         {
  3956.           struct thread_info *tp = ecs->event_thread;

  3957.           /* We are finishing a function in reverse, and just hit the
  3958.              step-resume breakpoint at the start address of the
  3959.              function, and we're almost there -- just need to back up
  3960.              by one more single-step, which should take us back to the
  3961.              function call.  */
  3962.           tp->control.step_range_start = tp->control.step_range_end = 1;
  3963.           keep_going (ecs);
  3964.           return;
  3965.         }
  3966.       fill_in_stop_func (gdbarch, ecs);
  3967.       if (stop_pc == ecs->stop_func_start
  3968.           && execution_direction == EXEC_REVERSE)
  3969.         {
  3970.           /* We are stepping over a function call in reverse, and just
  3971.              hit the step-resume breakpoint at the start address of
  3972.              the function.  Go back to single-stepping, which should
  3973.              take us back to the function call.  */
  3974.           ecs->event_thread->stepping_over_breakpoint = 1;
  3975.           keep_going (ecs);
  3976.           return;
  3977.         }
  3978.       break;

  3979.     case BPSTAT_WHAT_STOP_NOISY:
  3980.       if (debug_infrun)
  3981.         fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
  3982.       stop_print_frame = 1;

  3983.       /* Assume the thread stopped for a breakpoint.  We'll still check
  3984.          whether a/the breakpoint is there when the thread is next
  3985.          resumed.  */
  3986.       ecs->event_thread->stepping_over_breakpoint = 1;

  3987.       stop_waiting (ecs);
  3988.       return;

  3989.     case BPSTAT_WHAT_STOP_SILENT:
  3990.       if (debug_infrun)
  3991.         fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
  3992.       stop_print_frame = 0;

  3993.       /* Assume the thread stopped for a breakpoint.  We'll still check
  3994.          whether a/the breakpoint is there when the thread is next
  3995.          resumed.  */
  3996.       ecs->event_thread->stepping_over_breakpoint = 1;
  3997.       stop_waiting (ecs);
  3998.       return;

  3999.     case BPSTAT_WHAT_HP_STEP_RESUME:
  4000.       if (debug_infrun)
  4001.         fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");

  4002.       delete_step_resume_breakpoint (ecs->event_thread);
  4003.       if (ecs->event_thread->step_after_step_resume_breakpoint)
  4004.         {
  4005.           /* Back when the step-resume breakpoint was inserted, we
  4006.              were trying to single-step off a breakpoint.  Go back to
  4007.              doing that.  */
  4008.           ecs->event_thread->step_after_step_resume_breakpoint = 0;
  4009.           ecs->event_thread->stepping_over_breakpoint = 1;
  4010.           keep_going (ecs);
  4011.           return;
  4012.         }
  4013.       break;

  4014.     case BPSTAT_WHAT_KEEP_CHECKING:
  4015.       break;
  4016.     }

  4017.   /* If we stepped a permanent breakpoint and we had a high priority
  4018.      step-resume breakpoint for the address we stepped, but we didn't
  4019.      hit it, then we must have stepped into the signal handler.  The
  4020.      step-resume was only necessary to catch the case of _not_
  4021.      stepping into the handler, so delete it, and fall through to
  4022.      checking whether the step finished.  */
  4023.   if (ecs->event_thread->stepped_breakpoint)
  4024.     {
  4025.       struct breakpoint *sr_bp
  4026.         = ecs->event_thread->control.step_resume_breakpoint;

  4027.       if (sr_bp->loc->permanent
  4028.           && sr_bp->type == bp_hp_step_resume
  4029.           && sr_bp->loc->address == ecs->event_thread->prev_pc)
  4030.         {
  4031.           if (debug_infrun)
  4032.             fprintf_unfiltered (gdb_stdlog,
  4033.                                 "infrun: stepped permanent breakpoint, stopped in "
  4034.                                 "handler\n");
  4035.           delete_step_resume_breakpoint (ecs->event_thread);
  4036.           ecs->event_thread->step_after_step_resume_breakpoint = 0;
  4037.         }
  4038.     }

  4039.   /* We come here if we hit a breakpoint but should not stop for it.
  4040.      Possibly we also were stepping and should stop for that.  So fall
  4041.      through and test for stepping.  But, if not stepping, do not
  4042.      stop.  */

  4043.   /* In all-stop mode, if we're currently stepping but have stopped in
  4044.      some other thread, we need to switch back to the stepped thread.  */
  4045.   if (switch_back_to_stepped_thread (ecs))
  4046.     return;

  4047.   if (ecs->event_thread->control.step_resume_breakpoint)
  4048.     {
  4049.       if (debug_infrun)
  4050.          fprintf_unfiltered (gdb_stdlog,
  4051.                              "infrun: step-resume breakpoint is inserted\n");

  4052.       /* Having a step-resume breakpoint overrides anything
  4053.          else having to do with stepping commands until
  4054.          that breakpoint is reached.  */
  4055.       keep_going (ecs);
  4056.       return;
  4057.     }

  4058.   if (ecs->event_thread->control.step_range_end == 0)
  4059.     {
  4060.       if (debug_infrun)
  4061.          fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
  4062.       /* Likewise if we aren't even stepping.  */
  4063.       keep_going (ecs);
  4064.       return;
  4065.     }

  4066.   /* Re-fetch current thread's frame in case the code above caused
  4067.      the frame cache to be re-initialized, making our FRAME variable
  4068.      a dangling pointer.  */
  4069.   frame = get_current_frame ();
  4070.   gdbarch = get_frame_arch (frame);
  4071.   fill_in_stop_func (gdbarch, ecs);

  4072.   /* If stepping through a line, keep going if still within it.

  4073.      Note that step_range_end is the address of the first instruction
  4074.      beyond the step range, and NOT the address of the last instruction
  4075.      within it!

  4076.      Note also that during reverse execution, we may be stepping
  4077.      through a function epilogue and therefore must detect when
  4078.      the current-frame changes in the middle of a line.  */

  4079.   if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
  4080.       && (execution_direction != EXEC_REVERSE
  4081.           || frame_id_eq (get_frame_id (frame),
  4082.                           ecs->event_thread->control.step_frame_id)))
  4083.     {
  4084.       if (debug_infrun)
  4085.         fprintf_unfiltered
  4086.           (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
  4087.            paddress (gdbarch, ecs->event_thread->control.step_range_start),
  4088.            paddress (gdbarch, ecs->event_thread->control.step_range_end));

  4089.       /* Tentatively re-enable range stepping; `resume' disables it if
  4090.          necessary (e.g., if we're stepping over a breakpoint or we
  4091.          have software watchpoints).  */
  4092.       ecs->event_thread->control.may_range_step = 1;

  4093.       /* When stepping backward, stop at beginning of line range
  4094.          (unless it's the function entry point, in which case
  4095.          keep going back to the call point).  */
  4096.       if (stop_pc == ecs->event_thread->control.step_range_start
  4097.           && stop_pc != ecs->stop_func_start
  4098.           && execution_direction == EXEC_REVERSE)
  4099.         end_stepping_range (ecs);
  4100.       else
  4101.         keep_going (ecs);

  4102.       return;
  4103.     }

  4104.   /* We stepped out of the stepping range.  */

  4105.   /* If we are stepping at the source level and entered the runtime
  4106.      loader dynamic symbol resolution code...

  4107.      EXEC_FORWARD: we keep on single stepping until we exit the run
  4108.      time loader code and reach the callee's address.

  4109.      EXEC_REVERSE: we've already executed the callee (backward), and
  4110.      the runtime loader code is handled just like any other
  4111.      undebuggable function call.  Now we need only keep stepping
  4112.      backward through the trampoline code, and that's handled further
  4113.      down, so there is nothing for us to do here.  */

  4114.   if (execution_direction != EXEC_REVERSE
  4115.       && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
  4116.       && in_solib_dynsym_resolve_code (stop_pc))
  4117.     {
  4118.       CORE_ADDR pc_after_resolver =
  4119.         gdbarch_skip_solib_resolver (gdbarch, stop_pc);

  4120.       if (debug_infrun)
  4121.          fprintf_unfiltered (gdb_stdlog,
  4122.                              "infrun: stepped into dynsym resolve code\n");

  4123.       if (pc_after_resolver)
  4124.         {
  4125.           /* Set up a step-resume breakpoint at the address
  4126.              indicated by SKIP_SOLIB_RESOLVER.  */
  4127.           struct symtab_and_line sr_sal;

  4128.           init_sal (&sr_sal);
  4129.           sr_sal.pc = pc_after_resolver;
  4130.           sr_sal.pspace = get_frame_program_space (frame);

  4131.           insert_step_resume_breakpoint_at_sal (gdbarch,
  4132.                                                 sr_sal, null_frame_id);
  4133.         }

  4134.       keep_going (ecs);
  4135.       return;
  4136.     }

  4137.   if (ecs->event_thread->control.step_range_end != 1
  4138.       && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
  4139.           || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
  4140.       && get_frame_type (frame) == SIGTRAMP_FRAME)
  4141.     {
  4142.       if (debug_infrun)
  4143.          fprintf_unfiltered (gdb_stdlog,
  4144.                              "infrun: stepped into signal trampoline\n");
  4145.       /* The inferior, while doing a "step" or "next", has ended up in
  4146.          a signal trampoline (either by a signal being delivered or by
  4147.          the signal handler returning).  Just single-step until the
  4148.          inferior leaves the trampoline (either by calling the handler
  4149.          or returning).  */
  4150.       keep_going (ecs);
  4151.       return;
  4152.     }

  4153.   /* If we're in the return path from a shared library trampoline,
  4154.      we want to proceed through the trampoline when stepping.  */
  4155.   /* macro/2012-04-25: This needs to come before the subroutine
  4156.      call check below as on some targets return trampolines look
  4157.      like subroutine calls (MIPS16 return thunks).  */
  4158.   if (gdbarch_in_solib_return_trampoline (gdbarch,
  4159.                                           stop_pc, ecs->stop_func_name)
  4160.       && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
  4161.     {
  4162.       /* Determine where this trampoline returns.  */
  4163.       CORE_ADDR real_stop_pc;

  4164.       real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

  4165.       if (debug_infrun)
  4166.          fprintf_unfiltered (gdb_stdlog,
  4167.                              "infrun: stepped into solib return tramp\n");

  4168.       /* Only proceed through if we know where it's going.  */
  4169.       if (real_stop_pc)
  4170.         {
  4171.           /* And put the step-breakpoint there and go until there.  */
  4172.           struct symtab_and_line sr_sal;

  4173.           init_sal (&sr_sal);        /* initialize to zeroes */
  4174.           sr_sal.pc = real_stop_pc;
  4175.           sr_sal.section = find_pc_overlay (sr_sal.pc);
  4176.           sr_sal.pspace = get_frame_program_space (frame);

  4177.           /* Do not specify what the fp should be when we stop since
  4178.              on some machines the prologue is where the new fp value
  4179.              is established.  */
  4180.           insert_step_resume_breakpoint_at_sal (gdbarch,
  4181.                                                 sr_sal, null_frame_id);

  4182.           /* Restart without fiddling with the step ranges or
  4183.              other state.  */
  4184.           keep_going (ecs);
  4185.           return;
  4186.         }
  4187.     }

  4188.   /* Check for subroutine calls.  The check for the current frame
  4189.      equalling the step ID is not necessary - the check of the
  4190.      previous frame's ID is sufficient - but it is a common case and
  4191.      cheaper than checking the previous frame's ID.

  4192.      NOTE: frame_id_eq will never report two invalid frame IDs as
  4193.      being equal, so to get into this block, both the current and
  4194.      previous frame must have valid frame IDs.  */
  4195.   /* The outer_frame_id check is a heuristic to detect stepping
  4196.      through startup code.  If we step over an instruction which
  4197.      sets the stack pointer from an invalid value to a valid value,
  4198.      we may detect that as a subroutine call from the mythical
  4199.      "outermost" function.  This could be fixed by marking
  4200.      outermost frames as !stack_p,code_p,special_p.  Then the
  4201.      initial outermost frame, before sp was valid, would
  4202.      have code_addr == &_start.  See the comment in frame_id_eq
  4203.      for more.  */
  4204.   if (!frame_id_eq (get_stack_frame_id (frame),
  4205.                     ecs->event_thread->control.step_stack_frame_id)
  4206.       && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
  4207.                        ecs->event_thread->control.step_stack_frame_id)
  4208.           && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
  4209.                             outer_frame_id)
  4210.               || step_start_function != find_pc_function (stop_pc))))
  4211.     {
  4212.       CORE_ADDR real_stop_pc;

  4213.       if (debug_infrun)
  4214.          fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");

  4215.       if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
  4216.         {
  4217.           /* I presume that step_over_calls is only 0 when we're
  4218.              supposed to be stepping at the assembly language level
  4219.              ("stepi").  Just stop.  */
  4220.           /* And this works the same backward as frontward.  MVS */
  4221.           end_stepping_range (ecs);
  4222.           return;
  4223.         }

  4224.       /* Reverse stepping through solib trampolines.  */

  4225.       if (execution_direction == EXEC_REVERSE
  4226.           && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
  4227.           && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
  4228.               || (ecs->stop_func_start == 0
  4229.                   && in_solib_dynsym_resolve_code (stop_pc))))
  4230.         {
  4231.           /* Any solib trampoline code can be handled in reverse
  4232.              by simply continuing to single-step.  We have already
  4233.              executed the solib function (backwards), and a few
  4234.              steps will take us back through the trampoline to the
  4235.              caller.  */
  4236.           keep_going (ecs);
  4237.           return;
  4238.         }

  4239.       if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
  4240.         {
  4241.           /* We're doing a "next".

  4242.              Normal (forward) execution: set a breakpoint at the
  4243.              callee's return address (the address at which the caller
  4244.              will resume).

  4245.              Reverse (backward) execution.  set the step-resume
  4246.              breakpoint at the start of the function that we just
  4247.              stepped into (backwards), and continue to there.  When we
  4248.              get there, we'll need to single-step back to the caller.  */

  4249.           if (execution_direction == EXEC_REVERSE)
  4250.             {
  4251.               /* If we're already at the start of the function, we've either
  4252.                  just stepped backward into a single instruction function,
  4253.                  or stepped back out of a signal handler to the first instruction
  4254.                  of the function.  Just keep going, which will single-step back
  4255.                  to the caller.  */
  4256.               if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
  4257.                 {
  4258.                   struct symtab_and_line sr_sal;

  4259.                   /* Normal function call return (static or dynamic).  */
  4260.                   init_sal (&sr_sal);
  4261.                   sr_sal.pc = ecs->stop_func_start;
  4262.                   sr_sal.pspace = get_frame_program_space (frame);
  4263.                   insert_step_resume_breakpoint_at_sal (gdbarch,
  4264.                                                         sr_sal, null_frame_id);
  4265.                 }
  4266.             }
  4267.           else
  4268.             insert_step_resume_breakpoint_at_caller (frame);

  4269.           keep_going (ecs);
  4270.           return;
  4271.         }

  4272.       /* If we are in a function call trampoline (a stub between the
  4273.          calling routine and the real function), locate the real
  4274.          function.  That's what tells us (a) whether we want to step
  4275.          into it at all, and (b) what prologue we want to run to the
  4276.          end of, if we do step into it.  */
  4277.       real_stop_pc = skip_language_trampoline (frame, stop_pc);
  4278.       if (real_stop_pc == 0)
  4279.         real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
  4280.       if (real_stop_pc != 0)
  4281.         ecs->stop_func_start = real_stop_pc;

  4282.       if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
  4283.         {
  4284.           struct symtab_and_line sr_sal;

  4285.           init_sal (&sr_sal);
  4286.           sr_sal.pc = ecs->stop_func_start;
  4287.           sr_sal.pspace = get_frame_program_space (frame);

  4288.           insert_step_resume_breakpoint_at_sal (gdbarch,
  4289.                                                 sr_sal, null_frame_id);
  4290.           keep_going (ecs);
  4291.           return;
  4292.         }

  4293.       /* If we have line number information for the function we are
  4294.          thinking of stepping into and the function isn't on the skip
  4295.          list, step into it.

  4296.          If there are several symtabs at that PC (e.g. with include
  4297.          files), just want to know whether *any* of them have line
  4298.          numbers.  find_pc_line handles this.  */
  4299.       {
  4300.         struct symtab_and_line tmp_sal;

  4301.         tmp_sal = find_pc_line (ecs->stop_func_start, 0);
  4302.         if (tmp_sal.line != 0
  4303.             && !function_name_is_marked_for_skip (ecs->stop_func_name,
  4304.                                                   &tmp_sal))
  4305.           {
  4306.             if (execution_direction == EXEC_REVERSE)
  4307.               handle_step_into_function_backward (gdbarch, ecs);
  4308.             else
  4309.               handle_step_into_function (gdbarch, ecs);
  4310.             return;
  4311.           }
  4312.       }

  4313.       /* If we have no line number and the step-stop-if-no-debug is
  4314.          set, we stop the step so that the user has a chance to switch
  4315.          in assembly mode.  */
  4316.       if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
  4317.           && step_stop_if_no_debug)
  4318.         {
  4319.           end_stepping_range (ecs);
  4320.           return;
  4321.         }

  4322.       if (execution_direction == EXEC_REVERSE)
  4323.         {
  4324.           /* If we're already at the start of the function, we've either just
  4325.              stepped backward into a single instruction function without line
  4326.              number info, or stepped back out of a signal handler to the first
  4327.              instruction of the function without line number info.  Just keep
  4328.              going, which will single-step back to the caller.  */
  4329.           if (ecs->stop_func_start != stop_pc)
  4330.             {
  4331.               /* Set a breakpoint at callee's start address.
  4332.                  From there we can step once and be back in the caller.  */
  4333.               struct symtab_and_line sr_sal;

  4334.               init_sal (&sr_sal);
  4335.               sr_sal.pc = ecs->stop_func_start;
  4336.               sr_sal.pspace = get_frame_program_space (frame);
  4337.               insert_step_resume_breakpoint_at_sal (gdbarch,
  4338.                                                     sr_sal, null_frame_id);
  4339.             }
  4340.         }
  4341.       else
  4342.         /* Set a breakpoint at callee's return address (the address
  4343.            at which the caller will resume).  */
  4344.         insert_step_resume_breakpoint_at_caller (frame);

  4345.       keep_going (ecs);
  4346.       return;
  4347.     }

  4348.   /* Reverse stepping through solib trampolines.  */

  4349.   if (execution_direction == EXEC_REVERSE
  4350.       && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
  4351.     {
  4352.       if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
  4353.           || (ecs->stop_func_start == 0
  4354.               && in_solib_dynsym_resolve_code (stop_pc)))
  4355.         {
  4356.           /* Any solib trampoline code can be handled in reverse
  4357.              by simply continuing to single-step.  We have already
  4358.              executed the solib function (backwards), and a few
  4359.              steps will take us back through the trampoline to the
  4360.              caller.  */
  4361.           keep_going (ecs);
  4362.           return;
  4363.         }
  4364.       else if (in_solib_dynsym_resolve_code (stop_pc))
  4365.         {
  4366.           /* Stepped backward into the solib dynsym resolver.
  4367.              Set a breakpoint at its start and continue, then
  4368.              one more step will take us out.  */
  4369.           struct symtab_and_line sr_sal;

  4370.           init_sal (&sr_sal);
  4371.           sr_sal.pc = ecs->stop_func_start;
  4372.           sr_sal.pspace = get_frame_program_space (frame);
  4373.           insert_step_resume_breakpoint_at_sal (gdbarch,
  4374.                                                 sr_sal, null_frame_id);
  4375.           keep_going (ecs);
  4376.           return;
  4377.         }
  4378.     }

  4379.   stop_pc_sal = find_pc_line (stop_pc, 0);

  4380.   /* NOTE: tausq/2004-05-24: This if block used to be done before all
  4381.      the trampoline processing logic, however, there are some trampolines
  4382.      that have no names, so we should do trampoline handling first.  */
  4383.   if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
  4384.       && ecs->stop_func_name == NULL
  4385.       && stop_pc_sal.line == 0)
  4386.     {
  4387.       if (debug_infrun)
  4388.          fprintf_unfiltered (gdb_stdlog,
  4389.                              "infrun: stepped into undebuggable function\n");

  4390.       /* The inferior just stepped into, or returned to, an
  4391.          undebuggable function (where there is no debugging information
  4392.          and no line number corresponding to the address where the
  4393.          inferior stopped).  Since we want to skip this kind of code,
  4394.          we keep going until the inferior returns from this
  4395.          function - unless the user has asked us not to (via
  4396.          set step-mode) or we no longer know how to get back
  4397.          to the call site.  */
  4398.       if (step_stop_if_no_debug
  4399.           || !frame_id_p (frame_unwind_caller_id (frame)))
  4400.         {
  4401.           /* If we have no line number and the step-stop-if-no-debug
  4402.              is set, we stop the step so that the user has a chance to
  4403.              switch in assembly mode.  */
  4404.           end_stepping_range (ecs);
  4405.           return;
  4406.         }
  4407.       else
  4408.         {
  4409.           /* Set a breakpoint at callee's return address (the address
  4410.              at which the caller will resume).  */
  4411.           insert_step_resume_breakpoint_at_caller (frame);
  4412.           keep_going (ecs);
  4413.           return;
  4414.         }
  4415.     }

  4416.   if (ecs->event_thread->control.step_range_end == 1)
  4417.     {
  4418.       /* It is stepi or nexti.  We always want to stop stepping after
  4419.          one instruction.  */
  4420.       if (debug_infrun)
  4421.          fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
  4422.       end_stepping_range (ecs);
  4423.       return;
  4424.     }

  4425.   if (stop_pc_sal.line == 0)
  4426.     {
  4427.       /* We have no line number information.  That means to stop
  4428.          stepping (does this always happen right after one instruction,
  4429.          when we do "s" in a function with no line numbers,
  4430.          or can this happen as a result of a return or longjmp?).  */
  4431.       if (debug_infrun)
  4432.          fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
  4433.       end_stepping_range (ecs);
  4434.       return;
  4435.     }

  4436.   /* Look for "calls" to inlined functions, part one.  If the inline
  4437.      frame machinery detected some skipped call sites, we have entered
  4438.      a new inline function.  */

  4439.   if (frame_id_eq (get_frame_id (get_current_frame ()),
  4440.                    ecs->event_thread->control.step_frame_id)
  4441.       && inline_skipped_frames (ecs->ptid))
  4442.     {
  4443.       struct symtab_and_line call_sal;

  4444.       if (debug_infrun)
  4445.         fprintf_unfiltered (gdb_stdlog,
  4446.                             "infrun: stepped into inlined function\n");

  4447.       find_frame_sal (get_current_frame (), &call_sal);

  4448.       if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
  4449.         {
  4450.           /* For "step", we're going to stop.  But if the call site
  4451.              for this inlined function is on the same source line as
  4452.              we were previously stepping, go down into the function
  4453.              first.  Otherwise stop at the call site.  */

  4454.           if (call_sal.line == ecs->event_thread->current_line
  4455.               && call_sal.symtab == ecs->event_thread->current_symtab)
  4456.             step_into_inline_frame (ecs->ptid);

  4457.           end_stepping_range (ecs);
  4458.           return;
  4459.         }
  4460.       else
  4461.         {
  4462.           /* For "next", we should stop at the call site if it is on a
  4463.              different source line.  Otherwise continue through the
  4464.              inlined function.  */
  4465.           if (call_sal.line == ecs->event_thread->current_line
  4466.               && call_sal.symtab == ecs->event_thread->current_symtab)
  4467.             keep_going (ecs);
  4468.           else
  4469.             end_stepping_range (ecs);
  4470.           return;
  4471.         }
  4472.     }

  4473.   /* Look for "calls" to inlined functions, part two.  If we are still
  4474.      in the same real function we were stepping through, but we have
  4475.      to go further up to find the exact frame ID, we are stepping
  4476.      through a more inlined call beyond its call site.  */

  4477.   if (get_frame_type (get_current_frame ()) == INLINE_FRAME
  4478.       && !frame_id_eq (get_frame_id (get_current_frame ()),
  4479.                        ecs->event_thread->control.step_frame_id)
  4480.       && stepped_in_from (get_current_frame (),
  4481.                           ecs->event_thread->control.step_frame_id))
  4482.     {
  4483.       if (debug_infrun)
  4484.         fprintf_unfiltered (gdb_stdlog,
  4485.                             "infrun: stepping through inlined function\n");

  4486.       if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
  4487.         keep_going (ecs);
  4488.       else
  4489.         end_stepping_range (ecs);
  4490.       return;
  4491.     }

  4492.   if ((stop_pc == stop_pc_sal.pc)
  4493.       && (ecs->event_thread->current_line != stop_pc_sal.line
  4494.            || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
  4495.     {
  4496.       /* We are at the start of a different line.  So stop.  Note that
  4497.          we don't stop if we step into the middle of a different line.
  4498.          That is said to make things like for (;;) statements work
  4499.          better.  */
  4500.       if (debug_infrun)
  4501.          fprintf_unfiltered (gdb_stdlog,
  4502.                              "infrun: stepped to a different line\n");
  4503.       end_stepping_range (ecs);
  4504.       return;
  4505.     }

  4506.   /* We aren't done stepping.

  4507.      Optimize by setting the stepping range to the line.
  4508.      (We might not be in the original line, but if we entered a
  4509.      new line in mid-statement, we continue stepping.  This makes
  4510.      things like for(;;) statements work better.)  */

  4511.   ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  4512.   ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  4513.   ecs->event_thread->control.may_range_step = 1;
  4514.   set_step_info (frame, stop_pc_sal);

  4515.   if (debug_infrun)
  4516.      fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
  4517.   keep_going (ecs);
  4518. }

/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */

static int
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!non_stop)
    {
      struct thread_info *tp;
      struct thread_info *stepping_thread;
      struct thread_info *step_over;

      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return 0;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
	{
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to finish step-over of [%s]\n",
				  target_pid_to_str (ecs->event_thread->ptid));
	    }
	  keep_going (ecs);
	  return 1;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
       {
	 if (debug_infrun)
	   {
	     fprintf_unfiltered (gdb_stdlog,
				 "infrun: need to step [%s] over single-step "
				 "breakpoint\n",
				 target_pid_to_str (ecs->ptid));
	   }
	 keep_going (ecs);
	 return 1;
       }

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (0))
	return 0;

      /* Look for the stepping/nexting thread, and check if any other
	 thread other than the stepping thread needs to start a
	 step-over.  Do all step-overs before actually proceeding with
	 step/next/etc.  */
      stepping_thread = NULL;
      step_over = NULL;
      ALL_NON_EXITED_THREADS (tp)
        {
	  /* Ignore threads of processes we're not resuming.  */
	  if (!sched_multi
	      && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
	    continue;

	  /* When stepping over a breakpoint, we lock all threads
	     except the one that needs to move past the breakpoint.
	     If a non-event thread has this set, the "incomplete
	     step-over" check above should have caught it earlier.  */
	  gdb_assert (!tp->control.trap_expected);

	  /* Did we find the stepping thread?  */
	  if (tp->control.step_range_end)
	    {
	      /* Yep.  There should only one though.  */
	      gdb_assert (stepping_thread == NULL);

	      /* The event thread is handled at the top, before we
		 enter this loop.  */
	      gdb_assert (tp != ecs->event_thread);

	      /* If some thread other than the event thread is
		 stepping, then scheduler locking can't be in effect,
		 otherwise we wouldn't have resumed the current event
		 thread in the first place.  */
	      gdb_assert (!schedlock_applies (currently_stepping (tp)));

	      stepping_thread = tp;
	    }
	  else if (thread_still_needs_step_over (tp))
	    {
	      step_over = tp;

	      /* At the top we've returned early if the event thread
		 is stepping.  If some other thread not the event
		 thread is stepping, then scheduler locking can't be
		 in effect, and we can resume this thread.  No need to
		 keep looking for the stepping thread then.  */
	      break;
	    }
	}

      /* A pending step-over takes priority: get that thread past its
	 breakpoint before resuming whatever thread was stepping.  */
      if (step_over != NULL)
	{
	  tp = step_over;
	  if (debug_infrun)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "infrun: need to step-over [%s]\n",
				  target_pid_to_str (tp->ptid));
	    }

	  /* Only the stepping thread should have this set.  */
	  gdb_assert (tp->control.step_range_end == 0);

	  /* Make the step-over thread the event thread and resume it.  */
	  ecs->ptid = tp->ptid;
	  ecs->event_thread = tp;
	  switch_to_thread (ecs->ptid);
	  keep_going (ecs);
	  return 1;
	}

      if (stepping_thread != NULL)
	{
	  struct frame_info *frame;
	  struct gdbarch *gdbarch;

	  tp = stepping_thread;

	  /* If the stepping thread exited, then don't try to switch
	     back and resume it, which could fail in several different
	     ways depending on the target.  Instead, just keep going.

	     We can find a stepping dead thread in the thread list in
	     two cases:

	     - The target supports thread exit events, and when the
	     target tries to delete the thread from the thread list,
	     inferior_ptid pointed at the exiting thread.  In such
	     case, calling delete_thread does not really remove the
	     thread from the list; instead, the thread is left listed,
	     with 'exited' state.

	     - The target's debug interface does not support thread
	     exit events, and so we have no idea whatsoever if the
	     previously stepping thread is still alive.  For that
	     reason, we need to synchronously query the target
	     now.  */
	  if (is_exited (tp->ptid)
	      || !target_thread_alive (tp->ptid))
	    {
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: not switching back to "
				    "stepped thread, it has vanished\n");

	      delete_thread (tp->ptid);
	      keep_going (ecs);
	      return 1;
	    }

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: switching back to stepped thread\n");

	  /* Switch GDB's notion of the current thread to the stepping
	     thread, and refresh the file-scope stop_pc for it.  */
	  ecs->event_thread = tp;
	  ecs->ptid = tp->ptid;
	  context_switch (ecs->ptid);

	  stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);

	  /* If the PC of the thread we were trying to single-step has
	     changed, then that thread has trapped or been signaled,
	     but the event has not been reported to GDB yet.  Re-poll
	     the target looking for this particular thread's event
	     (i.e. temporarily enable schedlock) by:

	       - setting a break at the current PC
	       - resuming that particular thread, only (by setting
		 trap expected)

	     This prevents us continuously moving the single-step
	     breakpoint forward, one instruction at a time,
	     overstepping.  */

	  if (stop_pc != tp->prev_pc)
	    {
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: expected thread advanced also\n");

	      /* Clear the info of the previous step-over, as it's no
		 longer valid.  It's what keep_going would do too, if
		 we called it.  Must do this before trying to insert
		 the sss breakpoint, otherwise if we were previously
		 trying to step over this exact address in another
		 thread, the breakpoint ends up not installed.  */
	      clear_step_over_info ();

	      insert_single_step_breakpoint (get_frame_arch (frame),
					     get_frame_address_space (frame),
					     stop_pc);
	      ecs->event_thread->control.trap_expected = 1;

	      /* Resume this thread only, with no signal.  */
	      resume (0, GDB_SIGNAL_0);
	      prepare_to_wait (ecs);
	    }
	  else
	    {
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: expected thread still "
				    "hasn't advanced\n");
	      keep_going (ecs);
	    }

	  return 1;
	}
    }
  /* Nothing to do: either non-stop mode, or no thread needed
     switching to.  The caller processes the event further.  */
  return 0;
}

  4726. /* Is thread TP in the middle of single-stepping?  */

  4727. static int
  4728. currently_stepping (struct thread_info *tp)
  4729. {
  4730.   return ((tp->control.step_range_end
  4731.            && tp->control.step_resume_breakpoint == NULL)
  4732.           || tp->control.trap_expected
  4733.           || tp->stepped_breakpoint
  4734.           || bpstat_should_step ());
  4735. }

  4736. /* Inferior has stepped into a subroutine call with source code that
  4737.    we should not step over.  Do step to the first line of code in
  4738.    it.  */

  4739. static void
  4740. handle_step_into_function (struct gdbarch *gdbarch,
  4741.                            struct execution_control_state *ecs)
  4742. {
  4743.   struct compunit_symtab *cust;
  4744.   struct symtab_and_line stop_func_sal, sr_sal;

  4745.   fill_in_stop_func (gdbarch, ecs);

  4746.   cust = find_pc_compunit_symtab (stop_pc);
  4747.   if (cust != NULL && compunit_language (cust) != language_asm)
  4748.     ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
  4749.                                                   ecs->stop_func_start);

  4750.   stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  4751.   /* Use the step_resume_break to step until the end of the prologue,
  4752.      even if that involves jumps (as it seems to on the vax under
  4753.      4.2).  */
  4754.   /* If the prologue ends in the middle of a source line, continue to
  4755.      the end of that source line (if it is still within the function).
  4756.      Otherwise, just go to end of prologue.  */
  4757.   if (stop_func_sal.end
  4758.       && stop_func_sal.pc != ecs->stop_func_start
  4759.       && stop_func_sal.end < ecs->stop_func_end)
  4760.     ecs->stop_func_start = stop_func_sal.end;

  4761.   /* Architectures which require breakpoint adjustment might not be able
  4762.      to place a breakpoint at the computed address.  If so, the test
  4763.      ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
  4764.      ecs->stop_func_start to an address at which a breakpoint may be
  4765.      legitimately placed.

  4766.      Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
  4767.      made, GDB will enter an infinite loop when stepping through
  4768.      optimized code consisting of VLIW instructions which contain
  4769.      subinstructions corresponding to different source lines.  On
  4770.      FR-V, it's not permitted to place a breakpoint on any but the
  4771.      first subinstruction of a VLIW instruction.  When a breakpoint is
  4772.      set, GDB will adjust the breakpoint address to the beginning of
  4773.      the VLIW instruction.  Thus, we need to make the corresponding
  4774.      adjustment here when computing the stop address.  */

  4775.   if (gdbarch_adjust_breakpoint_address_p (gdbarch))
  4776.     {
  4777.       ecs->stop_func_start
  4778.         = gdbarch_adjust_breakpoint_address (gdbarch,
  4779.                                              ecs->stop_func_start);
  4780.     }

  4781.   if (ecs->stop_func_start == stop_pc)
  4782.     {
  4783.       /* We are already there: stop now.  */
  4784.       end_stepping_range (ecs);
  4785.       return;
  4786.     }
  4787.   else
  4788.     {
  4789.       /* Put the step-breakpoint there and go until there.  */
  4790.       init_sal (&sr_sal);        /* initialize to zeroes */
  4791.       sr_sal.pc = ecs->stop_func_start;
  4792.       sr_sal.section = find_pc_overlay (ecs->stop_func_start);
  4793.       sr_sal.pspace = get_frame_program_space (get_current_frame ());

  4794.       /* Do not specify what the fp should be when we stop since on
  4795.          some machines the prologue is where the new fp value is
  4796.          established.  */
  4797.       insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

  4798.       /* And make sure stepping stops right away then.  */
  4799.       ecs->event_thread->control.step_range_end
  4800.         = ecs->event_thread->control.step_range_start;
  4801.     }
  4802.   keep_going (ecs);
  4803. }

  4804. /* Inferior has stepped backward into a subroutine call with source
  4805.    code that we should not step over.  Do step to the beginning of the
  4806.    last line of code in it.  */

  4807. static void
  4808. handle_step_into_function_backward (struct gdbarch *gdbarch,
  4809.                                     struct execution_control_state *ecs)
  4810. {
  4811.   struct compunit_symtab *cust;
  4812.   struct symtab_and_line stop_func_sal;

  4813.   fill_in_stop_func (gdbarch, ecs);

  4814.   cust = find_pc_compunit_symtab (stop_pc);
  4815.   if (cust != NULL && compunit_language (cust) != language_asm)
  4816.     ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
  4817.                                                   ecs->stop_func_start);

  4818.   stop_func_sal = find_pc_line (stop_pc, 0);

  4819.   /* OK, we're just going to keep stepping here.  */
  4820.   if (stop_func_sal.pc == stop_pc)
  4821.     {
  4822.       /* We're there already.  Just stop stepping now.  */
  4823.       end_stepping_range (ecs);
  4824.     }
  4825.   else
  4826.     {
  4827.       /* Else just reset the step range and keep going.
  4828.          No step-resume breakpoint, they don't work for
  4829.          epilogues, which can have multiple entry paths.  */
  4830.       ecs->event_thread->control.step_range_start = stop_func_sal.pc;
  4831.       ecs->event_thread->control.step_range_end = stop_func_sal.end;
  4832.       keep_going (ecs);
  4833.     }
  4834.   return;
  4835. }

/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to step over functions and to skip over code.  */

  4838. static void
  4839. insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
  4840.                                         struct symtab_and_line sr_sal,
  4841.                                         struct frame_id sr_id,
  4842.                                         enum bptype sr_type)
  4843. {
  4844.   /* There should never be more than one step-resume or longjmp-resume
  4845.      breakpoint per thread, so we should never be setting a new
  4846.      step_resume_breakpoint when one is already active.  */
  4847.   gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
  4848.   gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);

  4849.   if (debug_infrun)
  4850.     fprintf_unfiltered (gdb_stdlog,
  4851.                         "infrun: inserting step-resume breakpoint at %s\n",
  4852.                         paddress (gdbarch, sr_sal.pc));

  4853.   inferior_thread ()->control.step_resume_breakpoint
  4854.     = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
  4855. }

/* Insert an ordinary (bp_step_resume) step-resume breakpoint at
   SR_SAL with frame ID SR_ID.  Exported convenience wrapper around
   insert_step_resume_breakpoint_at_sal_1.  */

void
insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
				      struct symtab_and_line sr_sal,
				      struct frame_id sr_id)
{
  insert_step_resume_breakpoint_at_sal_1 (gdbarch,
					  sr_sal, sr_id,
					  bp_step_resume);
}

  4865. /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
  4866.    This is used to skip a potential signal handler.

  4867.    This is called with the interrupted function's frame.  The signal
  4868.    handler, when it returns, will resume the interrupted function at
  4869.    RETURN_FRAME.pc.  */

  4870. static void
  4871. insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
  4872. {
  4873.   struct symtab_and_line sr_sal;
  4874.   struct gdbarch *gdbarch;

  4875.   gdb_assert (return_frame != NULL);
  4876.   init_sal (&sr_sal);                /* initialize to zeros */

  4877.   gdbarch = get_frame_arch (return_frame);
  4878.   sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
  4879.   sr_sal.section = find_pc_overlay (sr_sal.pc);
  4880.   sr_sal.pspace = get_frame_program_space (return_frame);

  4881.   insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
  4882.                                           get_stack_frame_id (return_frame),
  4883.                                           bp_hp_step_resume);
  4884. }

  4885. /* Insert a "step-resume breakpoint" at the previous frame's PC.  This
  4886.    is used to skip a function after stepping into it (for "next" or if
  4887.    the called function has no debugging information).

  4888.    The current function has almost always been reached by single
  4889.    stepping a call or return instruction.  NEXT_FRAME belongs to the
  4890.    current function, and the breakpoint will be set at the caller's
  4891.    resume address.

  4892.    This is a separate function rather than reusing
  4893.    insert_hp_step_resume_breakpoint_at_frame in order to avoid
  4894.    get_prev_frame, which may stop prematurely (see the implementation
  4895.    of frame_unwind_caller_id for an example).  */

  4896. static void
  4897. insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
  4898. {
  4899.   struct symtab_and_line sr_sal;
  4900.   struct gdbarch *gdbarch;

  4901.   /* We shouldn't have gotten here if we don't know where the call site
  4902.      is.  */
  4903.   gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));

  4904.   init_sal (&sr_sal);                /* initialize to zeros */

  4905.   gdbarch = frame_unwind_caller_arch (next_frame);
  4906.   sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
  4907.                                         frame_unwind_caller_pc (next_frame));
  4908.   sr_sal.section = find_pc_overlay (sr_sal.pc);
  4909.   sr_sal.pspace = frame_unwind_program_space (next_frame);

  4910.   insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
  4911.                                         frame_unwind_caller_id (next_frame));
  4912. }

  4913. /* Insert a "longjmp-resume" breakpoint at PC.  This is used to set a
  4914.    new breakpoint at the target of a jmp_buf.  The handling of
  4915.    longjmp-resume uses the same mechanisms used for handling
  4916.    "step-resume" breakpoints.  */

  4917. static void
  4918. insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
  4919. {
  4920.   /* There should never be more than one longjmp-resume breakpoint per
  4921.      thread, so we should never be setting a new
  4922.      longjmp_resume_breakpoint when one is already active.  */
  4923.   gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);

  4924.   if (debug_infrun)
  4925.     fprintf_unfiltered (gdb_stdlog,
  4926.                         "infrun: inserting longjmp-resume breakpoint at %s\n",
  4927.                         paddress (gdbarch, pc));

  4928.   inferior_thread ()->control.exception_resume_breakpoint =
  4929.     set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
  4930. }

  4931. /* Insert an exception resume breakpoint.  TP is the thread throwing
  4932.    the exception.  The block B is the block of the unwinder debug hook
  4933.    function.  FRAME is the frame corresponding to the call to this
  4934.    function.  SYM is the symbol of the function argument holding the
  4935.    target PC of the exception.  */

  4936. static void
  4937. insert_exception_resume_breakpoint (struct thread_info *tp,
  4938.                                     const struct block *b,
  4939.                                     struct frame_info *frame,
  4940.                                     struct symbol *sym)
  4941. {
  4942.   volatile struct gdb_exception e;

  4943.   /* We want to ignore errors here.  */
  4944.   TRY_CATCH (e, RETURN_MASK_ERROR)
  4945.     {
  4946.       struct symbol *vsym;
  4947.       struct value *value;
  4948.       CORE_ADDR handler;
  4949.       struct breakpoint *bp;

  4950.       vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
  4951.       value = read_var_value (vsym, frame);
  4952.       /* If the value was optimized out, revert to the old behavior.  */
  4953.       if (! value_optimized_out (value))
  4954.         {
  4955.           handler = value_as_address (value);

  4956.           if (debug_infrun)
  4957.             fprintf_unfiltered (gdb_stdlog,
  4958.                                 "infrun: exception resume at %lx\n",
  4959.                                 (unsigned long) handler);

  4960.           bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
  4961.                                                handler, bp_exception_resume);

  4962.           /* set_momentary_breakpoint_at_pc invalidates FRAME.  */
  4963.           frame = NULL;

  4964.           bp->thread = tp->num;
  4965.           inferior_thread ()->control.exception_resume_breakpoint = bp;
  4966.         }
  4967.     }
  4968. }

  4969. /* A helper for check_exception_resume that sets an
  4970.    exception-breakpoint based on a SystemTap probe.  */

  4971. static void
  4972. insert_exception_resume_from_probe (struct thread_info *tp,
  4973.                                     const struct bound_probe *probe,
  4974.                                     struct frame_info *frame)
  4975. {
  4976.   struct value *arg_value;
  4977.   CORE_ADDR handler;
  4978.   struct breakpoint *bp;

  4979.   arg_value = probe_safe_evaluate_at_pc (frame, 1);
  4980.   if (!arg_value)
  4981.     return;

  4982.   handler = value_as_address (arg_value);

  4983.   if (debug_infrun)
  4984.     fprintf_unfiltered (gdb_stdlog,
  4985.                         "infrun: exception resume at %s\n",
  4986.                         paddress (get_objfile_arch (probe->objfile),
  4987.                                   handler));

  4988.   bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
  4989.                                        handler, bp_exception_resume);
  4990.   bp->thread = tp->num;
  4991.   inferior_thread ()->control.exception_resume_breakpoint = bp;
  4992. }

/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
                        struct frame_info *frame)
{
  volatile struct gdb_exception e;
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.probe)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to inspecting the arguments of the unwinder's
     debug hook function in this frame.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  /* Errors while examining the hook's arguments are deliberately
     swallowed; in that case we simply set no resume breakpoint.  */
  TRY_CATCH (e, RETURN_MASK_ERROR)
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
         the unwinder's debug hook, declared as:

         void _Unwind_DebugHook (void *cfa, void *handler);

         The CFA argument indicates the frame to which control is
         about to be transferred.  HANDLER is the destination PC.

         We ignore the CFA and set a temporary breakpoint at HANDLER.
         This is not extremely efficient but it avoids issues in gdb
         with computing the DWARF CFA, and it also works even in weird
         cases such as throwing an exception from inside a signal
         handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
        {
          if (!SYMBOL_IS_ARGUMENT (sym))
            continue;

          /* Skip the first argument (the CFA); the second argument is
             HANDLER, which is the one we want.  */
          if (argno == 0)
            ++argno;
          else
            {
              insert_exception_resume_breakpoint (ecs->event_thread,
                                                  b, frame, sym);
              break;
            }
        }
    }
}

/* Stop waiting for inferior events: clear any pending step-over state
   and tell the callers of handle_inferior_event that a user-visible
   stop is being presented.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");

  clear_step_over_info ();

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}

/* Called when we should continue running the inferior, because the
   current event doesn't cause a user visible stop.  This does the
   resuming part; waiting for the next event is done elsewhere.  */

static void
keep_going (struct execution_control_state *ecs)
{
  /* Make sure normal_stop is called if we get a QUIT handled before
     reaching resume.  */
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (ecs->event_thread->control.trap_expected
      && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
    {
      /* We haven't yet gotten our trap, and either: intercepted a
         non-signal event (e.g., a fork); or took a signal which we
         are supposed to pass through to the inferior.  Simply
         continue.  */
      discard_cleanups (old_cleanups);
      resume (currently_stepping (ecs->event_thread),
              ecs->event_thread->suspend.stop_signal);
    }
  else
    {
      volatile struct gdb_exception e;
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;

      /* Either the trap was not expected, but we are continuing
         anyway (if we got a signal, the user asked it be passed to
         the child)
         -- or --
         We got our expected trap, but decided we should resume from
         it.

         We're going to run this baby now!

         Note that insert_breakpoints won't try to re-insert
         already inserted breakpoints.  Therefore, we don't
         care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
         displaced stepping to do so, insert all breakpoints
         (watchpoints, etc.) but the one we're stepping over, step one
         instruction, and then re-insert the breakpoint when that step
         is finished.  */

      remove_bp = (ecs->hit_singlestep_breakpoint
                   || thread_still_needs_step_over (ecs->event_thread));
      remove_wps = (ecs->event_thread->stepping_over_watchpoint
                    && !target_have_steppable_watchpoint);

      /* Record which breakpoint/watchpoint (if any) must stay removed
         while the step-over executes, so other code can tell a
         step-over is in progress.  */
      if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
        {
          set_step_over_info (get_regcache_aspace (regcache),
                              regcache_read_pc (regcache), remove_wps);
        }
      else if (remove_wps)
        set_step_over_info (NULL, 0, remove_wps);
      else
        clear_step_over_info ();

      /* Stop stepping if inserting breakpoints fails.  */
      TRY_CATCH (e, RETURN_MASK_ERROR)
        {
          insert_breakpoints ();
        }
      if (e.reason < 0)
        {
          exception_print (gdb_stderr, e);
          stop_waiting (ecs);
          return;
        }

      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      /* Do not deliver GDB_SIGNAL_TRAP (except when the user
         explicitly specifies that such a signal should be delivered
         to the target program).  Typically, that would occur when a
         user is debugging a target monitor on a simulator: the target
         monitor sets a breakpoint; the simulator encounters this
         breakpoint and halts the simulation handing control to GDB;
         GDB, noting that the stop address doesn't map to any known
         breakpoint, returns control back to the simulator; the
         simulator then delivers the hardware equivalent of a
         GDB_SIGNAL_TRAP to the program being debugged.         */
      if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
          && !signal_program[ecs->event_thread->suspend.stop_signal])
        ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      discard_cleanups (old_cleanups);
      resume (currently_stepping (ecs->event_thread),
              ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}

/* This function normally comes after a resume, before
   handle_inferior_event exits.  It takes care of any last bits of
   housekeeping, and sets the all-important wait_some_more flag.
   (Counterpart of stop_waiting, which clears that flag.)  */

static void
prepare_to_wait (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");

  /* This is the old end of the while loop.  Let everybody know we
     want to wait for the inferior some more and get called again
     soon.  */
  ecs->wait_some_more = 1;
}

/* We are done with the step range of a step/next/si/ni command.
   Called once for each n of a "step n" operation.  */

static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that the range step finished, then present the stop.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}

  5166. /* Several print_*_reason functions to print why the inferior has stopped.
  5167.    We always print something when the inferior exits, or receives a signal.
  5168.    The rest of the cases are dealt with later on in normal_stop and
  5169.    print_it_typical.  Ideally there should be a call to one of these
  5170.    print_*_reason functions functions from handle_inferior_event each time
  5171.    stop_waiting is called.

  5172.    Note that we don't call these directly, instead we delegate that to
  5173.    the interpreters, through observers.  Interpreters then call these
  5174.    with whatever uiout is right.  */

  5175. void
  5176. print_end_stepping_range_reason (struct ui_out *uiout)
  5177. {
  5178.   /* For CLI-like interpreters, print nothing.  */

  5179.   if (ui_out_is_mi_like_p (uiout))
  5180.     {
  5181.       ui_out_field_string (uiout, "reason",
  5182.                            async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
  5183.     }
  5184. }

/* The inferior was terminated by signal SIGGNAL; report that to the
   user via UIOUT, interleaving the annotation markers the annotation
   protocol expects around the signal name and meaning.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* MI-like interpreters also get a machine-readable "reason".  */
  if (ui_out_is_mi_like_p (uiout))
    ui_out_field_string
      (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  ui_out_text (uiout, "\nProgram terminated with signal ");
  annotate_signal_name ();
  ui_out_field_string (uiout, "signal-name",
                       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  ui_out_text (uiout, ", ");
  annotate_signal_string ();
  ui_out_field_string (uiout, "signal-meaning",
                       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  ui_out_text (uiout, ".\n");
  ui_out_text (uiout, "The program no longer exists.\n");
}

  5205. void
  5206. print_exited_reason (struct ui_out *uiout, int exitstatus)
  5207. {
  5208.   struct inferior *inf = current_inferior ();
  5209.   const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));

  5210.   annotate_exited (exitstatus);
  5211.   if (exitstatus)
  5212.     {
  5213.       if (ui_out_is_mi_like_p (uiout))
  5214.         ui_out_field_string (uiout, "reason",
  5215.                              async_reason_lookup (EXEC_ASYNC_EXITED));
  5216.       ui_out_text (uiout, "[Inferior ");
  5217.       ui_out_text (uiout, plongest (inf->num));
  5218.       ui_out_text (uiout, " (");
  5219.       ui_out_text (uiout, pidstr);
  5220.       ui_out_text (uiout, ") exited with code ");
  5221.       ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
  5222.       ui_out_text (uiout, "]\n");
  5223.     }
  5224.   else
  5225.     {
  5226.       if (ui_out_is_mi_like_p (uiout))
  5227.         ui_out_field_string
  5228.           (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
  5229.       ui_out_text (uiout, "[Inferior ");
  5230.       ui_out_text (uiout, plongest (inf->num));
  5231.       ui_out_text (uiout, " (");
  5232.       ui_out_text (uiout, pidstr);
  5233.       ui_out_text (uiout, ") exited normally]\n");
  5234.     }
  5235. }

/* The inferior received signal SIGGNAL (or, for GDB_SIGNAL_0, simply
   stopped); report that to the user via UIOUT.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signal ();

  /* GDB_SIGNAL_0 on a CLI-like interpreter means the thread simply
     stopped; say which thread it was rather than naming a signal.  */
  if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
    {
      struct thread_info *t = inferior_thread ();

      ui_out_text (uiout, "\n[");
      ui_out_field_string (uiout, "thread-name",
                           target_pid_to_str (t->ptid));
      ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
      ui_out_text (uiout, " stopped");
    }
  else
    {
      ui_out_text (uiout, "\nProgram received signal ");
      annotate_signal_name ();
      /* MI-like interpreters also get a machine-readable "reason".  */
      if (ui_out_is_mi_like_p (uiout))
        ui_out_field_string
          (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      ui_out_field_string (uiout, "signal-name",
                           gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      ui_out_text (uiout, ", ");
      annotate_signal_string ();
      ui_out_field_string (uiout, "signal-meaning",
                           gdb_signal_to_string (siggnal));
      annotate_signal_string_end ();
    }
  ui_out_text (uiout, ".\n");
}

/* Reverse execution reached the beginning of the recorded history:
   tell the user there is nothing further to run backwards to.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  ui_out_text (uiout, "\nNo more reverse-execution history.\n");
}

/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

void
print_stop_event (struct target_waitstatus *ws)
{
  int bpstat_ret;
  int source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery print what it knows about the stop
     first; its return value tells us what remains for us to print.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
         should) carry around the function and does (or should) use
         that when doing a frame comparison.  */
      if (tp->control.stop_step
          && frame_id_eq (tp->control.step_frame_id,
                          get_frame_id (get_current_frame ()))
          && step_start_function == find_pc_function (stop_pc))
        {
          /* Finished step, just print source line.  */
          source_flag = SRC_LINE;
        }
      else
        {
          /* Print location and source line.  */
          source_flag = SRC_AND_LOC;
        }
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);

  /* Display the auto-display expressions.  */
  do_displays ();
}

/* Here to return control to GDB when the inferior stops for real.
   Print appropriate messages, remove breakpoints, give terminal our modes.

   STOP_PRINT_FRAME nonzero means print the executing frame
   (pc, function, args, file, line number and line text).
   BREAKPOINTS_FAILED nonzero means stop was due to error
   attempting to insert breakpoints.  */

void
normal_stop (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);

  /* Fetch the event that caused this stop; most of what follows
     branches on its kind.  */
  get_last_target_status (&last_ptid, &last);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */
  if (!non_stop)
    make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else if (last.kind != TARGET_WAITKIND_SIGNALLED
           && last.kind != TARGET_WAITKIND_EXITED
           && last.kind != TARGET_WAITKIND_NO_RESUMED)
    make_cleanup (finish_thread_state_cleanup, &inferior_ptid);

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && !ptid_equal (previous_inferior_ptid, inferior_ptid)
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      target_terminal_ours_for_output ();
      printf_filtered (_("[Switching to %s]\n"),
                       target_pid_to_str (inferior_ptid));
      annotate_thread_changed ();
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      gdb_assert (sync_execution || !target_can_async_p ());

      target_terminal_ours_for_output ();
      printf_filtered (_("No unwaited-for children left.\n"));
    }

  /* Note: this depends on the update_thread_list call above.  */
  if (!breakpoints_should_be_inserted_now () && target_has_execution)
    {
      if (remove_breakpoints ())
        {
          target_terminal_ours_for_output ();
          printf_filtered (_("Cannot remove breakpoints because "
                             "program is no longer writable.\nFurther "
                             "execution is probably impossible.\n"));
        }
    }

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Notify observers if we finished a "step"-like command, etc.  */
  if (target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && inferior_thread ()->control.stop_step)
    {
      /* But not if in the middle of doing a "step n" operation for
         n > 1 */
      if (inferior_thread ()->step_multi)
        goto done;

      observer_notify_end_stepping_range ();
    }

  target_terminal_ours ();
  async_enable_stdin ();

  /* Set the current source location.  This will also happen if we
     display the frame below, but the current SAL will be incorrect
     during a user hook-stop function.  */
  if (has_stack_frames () && !stop_stack_dummy)
    set_current_sal_from_frame (get_current_frame ());

  /* Let the user/frontend see the threads as stopped, but do nothing
     if the thread was running an infcall.  We may be e.g., evaluating
     a breakpoint condition.  In that case, the thread had state
     THREAD_RUNNING before the infcall, and shall remain set to
     running, all without informing the user/frontend about state
     transition changes.  If this is actually a call command, then the
     thread was originally already stopped, so there's no state to
     finish either.  */
  if (target_has_execution && inferior_thread ()->control.in_infcall)
    discard_cleanups (old_chain);
  else
    do_cleanups (old_chain);

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command)
    catch_errors (hook_stop_stub, stop_command,
                  "Error while running hook_stop:\n", RETURN_MASK_ALL);

  if (!has_stack_frames ())
    goto done;

  if (last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED)
    goto done;

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.
     Don't do this on return from a stack dummy routine,
     or if the program has exited.  */

  if (!stop_stack_dummy)
    {
      select_frame (get_current_frame ());

      /* If --batch-silent is enabled then there's no need to print the current
         source location, and to try risks causing an error message about
         missing source files.  */
      if (stop_print_frame && !batch_silent)
        print_stop_event (&last);
    }

  /* Save the function value return registers, if we care.
     We might be about to restore their previous contents.  */
  if (inferior_thread ()->control.proceed_to_finish
      && execution_direction != EXEC_REVERSE)
    {
      /* This should not be necessary.  */
      if (stop_registers)
        regcache_xfree (stop_registers);

      /* NB: The copy goes through to the target picking up the value of
         all the registers.  */
      stop_registers = regcache_dup (get_current_regcache ());
    }

  if (stop_stack_dummy == STOP_STACK_DUMMY)
    {
      /* Pop the empty frame that contains the stack dummy.
         This also restores inferior state prior to the call
         (struct infcall_suspend_state).  */
      struct frame_info *frame = get_current_frame ();

      gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
      frame_pop (frame);
      /* frame_pop() calls reinit_frame_cache as the last thing it
         does which means there's currently no selected frame.  We
         don't need to re-establish a selected frame if the dummy call
         returns normally, that will be done by
         restore_infcall_control_state.  However, we do have to handle
         the case where the dummy call is returning after being
         stopped (e.g. the dummy call previously hit a breakpoint).
         We can't know which case we have so just always re-establish
         a selected frame here.  */
      select_frame (get_current_frame ());
    }

done:
  annotate_stopped ();

  /* Suppress the stop observer if we're in the middle of:

     - a step n (n > 1), as there still more steps to be done.

     - a "finish" command, as the observer will be called in
       finish_command_continuation, so it can include the inferior
       function's return value.

     - calling an inferior function, as we pretend we inferior didn't
       run at all.  The return value of the call is handled by the
       expression evaluator, through call_function_by_hand.  */

  if (!target_has_execution
      || last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED
      || last.kind == TARGET_WAITKIND_NO_RESUMED
      || (!(inferior_thread ()->step_multi
            && inferior_thread ()->control.stop_step)
          && !(inferior_thread ()->control.stop_bpstat
               && inferior_thread ()->control.proceed_to_finish)
          && !inferior_thread ()->control.in_infcall))
    {
      if (!ptid_equal (inferior_ptid, null_ptid))
        observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
                                     stop_print_frame);
      else
        observer_notify_normal_stop (NULL, stop_print_frame);
    }

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
          && last.kind != TARGET_WAITKIND_EXITED)
        /* Delete the breakpoint we stopped at, if it wants to be deleted.
           Delete any breakpoint that is to be deleted at the next stop.  */
        breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();
}

  5535. static int
  5536. hook_stop_stub (void *cmd)
  5537. {
  5538.   execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
  5539.   return (0);
  5540. }

/* Return nonzero if GDB is configured to stop when the inferior
   receives signal SIGNO.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}

/* Return nonzero if GDB is configured to print a message when the
   inferior receives signal SIGNO.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}

/* Return nonzero if signal SIGNO is configured to be passed through
   to the inferior.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}

  5556. static void
  5557. signal_cache_update (int signo)
  5558. {
  5559.   if (signo == -1)
  5560.     {
  5561.       for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
  5562.         signal_cache_update (signo);

  5563.       return;
  5564.     }

  5565.   signal_pass[signo] = (signal_stop[signo] == 0
  5566.                         && signal_print[signo] == 0
  5567.                         && signal_program[signo] == 1
  5568.                         && signal_catch[signo] == 0);
  5569. }

  5570. int
  5571. signal_stop_update (int signo, int state)
  5572. {
  5573.   int ret = signal_stop[signo];

  5574.   signal_stop[signo] = state;
  5575.   signal_cache_update (signo);
  5576.   return ret;
  5577. }

  5578. int
  5579. signal_print_update (int signo, int state)
  5580. {
  5581.   int ret = signal_print[signo];

  5582.   signal_print[signo] = state;
  5583.   signal_cache_update (signo);
  5584.   return ret;
  5585. }

  5586. int
  5587. signal_pass_update (int signo, int state)
  5588. {
  5589.   int ret = signal_program[signo];

  5590.   signal_program[signo] = state;
  5591.   signal_cache_update (signo);
  5592.   return ret;
  5593. }

  5594. /* Update the global 'signal_catch' from INFO and notify the
  5595.    target.  */

  5596. void
  5597. signal_catch_update (const unsigned int *info)
  5598. {
  5599.   int i;

  5600.   for (i = 0; i < GDB_SIGNAL_LAST; ++i)
  5601.     signal_catch[i] = info[i] > 0;
  5602.   signal_cache_update (-1);
  5603.   target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
  5604. }

/* Print the column headers for the signal-disposition table shown by
   "info signals" and by "handle" when reporting its results.  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal        Stop\tPrint\tPass "
                     "to program\tDescription\n"));
}

  5611. static void
  5612. sig_print_info (enum gdb_signal oursig)
  5613. {
  5614.   const char *name = gdb_signal_to_name (oursig);
  5615.   int name_padding = 13 - strlen (name);

  5616.   if (name_padding <= 0)
  5617.     name_padding = 0;

  5618.   printf_filtered ("%s", name);
  5619.   printf_filtered ("%*.*s ", name_padding, name_padding, "                 ");
  5620.   printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
  5621.   printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
  5622.   printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
  5623.   printf_filtered ("%s\n", gdb_signal_to_string (oursig));
  5624. }

  5625. /* Specify how various signals in the inferior should be handled.  */

/* Implement the "handle" command.  ARGS is a whitespace-separated mix
   of signal specifications (symbolic names, numbers 1-15, numeric
   ranges LOW-HIGH, or the word "all") and actions (stop/nostop,
   print/noprint, pass/nopass, plus the synonyms ignore/noignore).
   Actions accumulate and apply to every signal named so far; after
   parsing, the new dispositions are pushed to the target and, when
   FROM_TTY, the changed entries are displayed.  */

static void
handle_command (char *args, int from_tty)
{
  char **argv;
  int digits, wordlen;
  int sigfirst, signum, siglast;
  enum gdb_signal oursig;
  int allsigs;
  int nsigs;
  unsigned char *sigs;
  struct cleanup *old_chain;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  nsigs = (int) GDB_SIGNAL_LAST;
  sigs = (unsigned char *) alloca (nsigs);
  memset (sigs, 0, nsigs);

  /* Break the command line up into args.  */

  argv = gdb_buildargv (args);
  old_chain = make_cleanup_freeargv (argv);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  while (*argv != NULL)
    {
      wordlen = strlen (*argv);
      /* Count leading digits so a numeric signal (or LOW-HIGH range)
         can be recognized below.  NOTE(review): isdigit is fed a plain
         char; presumably args are ASCII so this is harmless — confirm
         if non-ASCII input is possible.  */
      for (digits = 0; isdigit ((*argv)[digits]); digits++)
        {;
        }
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keyword matching: strncmp limited to wordlen accepts unique
         abbreviations; the minimum-length checks keep the shorter
         prefixes unambiguous (e.g. "p" could be print or pass).  */
      if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
        {
          /* Apply action to all signals except those used by the
             debugger.  Silently skip those.  */
          allsigs = 1;
          sigfirst = 0;
          siglast = nsigs - 1;
        }
      else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_stop);
          SET_SIGS (nsigs, sigs, signal_print);
        }
      else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_print);
        }
      else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_stop);
        }
      else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_print);
          UNSET_SIGS (nsigs, sigs, signal_stop);
        }
      else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_program);
        }
      else if (digits > 0)
        {
          /* It is numeric.  The numeric signal refers to our own
             internal signal numbering from target.h, not to host/target
             signal  number.  This is a feature; users really should be
             using symbolic names anyway, and the common ones like
             SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

          sigfirst = siglast = (int)
            gdb_signal_from_command (atoi (*argv));
          if ((*argv)[digits] == '-')
            {
              siglast = (int)
                gdb_signal_from_command (atoi ((*argv) + digits + 1));
            }
          if (sigfirst > siglast)
            {
              /* Bet he didn't figure we'd think of this case...  */
              signum = sigfirst;
              sigfirst = siglast;
              siglast = signum;
            }
        }
      else
        {
          oursig = gdb_signal_from_name (*argv);
          if (oursig != GDB_SIGNAL_UNKNOWN)
            {
              sigfirst = siglast = (int) oursig;
            }
          else
            {
              /* Not a number and not a recognized flag word => complain.  */
              error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
            }
        }

      /* If any signal numbers or symbol names were found, set flags for
         which signals to apply actions to.  */

      for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
        {
          switch ((enum gdb_signal) signum)
            {
            case GDB_SIGNAL_TRAP:
            case GDB_SIGNAL_INT:
              /* SIGTRAP and SIGINT are used by the debugger itself;
                 require explicit confirmation unless "all" was given
                 (then they are silently skipped).  */
              if (!allsigs && !sigs[signum])
                {
                  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
                             gdb_signal_to_name ((enum gdb_signal) signum)))
                    {
                      sigs[signum] = 1;
                    }
                  else
                    {
                      printf_unfiltered (_("Not confirmed, unchanged.\n"));
                      gdb_flush (gdb_stdout);
                    }
                }
              break;
            case GDB_SIGNAL_0:
            case GDB_SIGNAL_DEFAULT:
            case GDB_SIGNAL_UNKNOWN:
              /* Make sure that "all" doesn't print these.  */
              break;
            default:
              sigs[signum] = 1;
              break;
            }
        }

      argv++;
    }

  /* If any signal changed, recompute the pass cache once, notify the
     target, and (interactively) print every changed entry.  */
  for (signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
        signal_cache_update (-1);
        target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
        target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);

        if (from_tty)
          {
            /* Show the results.  */
            sig_print_header ();
            for (; signum < nsigs; signum++)
              if (sigs[signum])
                sig_print_info (signum);
          }

        break;
      }

  do_cleanups (old_chain);
}

  5790. /* Complete the "handle" command.  */

  5791. static VEC (char_ptr) *
  5792. handle_completer (struct cmd_list_element *ignore,
  5793.                   const char *text, const char *word)
  5794. {
  5795.   VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
  5796.   static const char * const keywords[] =
  5797.     {
  5798.       "all",
  5799.       "stop",
  5800.       "ignore",
  5801.       "print",
  5802.       "pass",
  5803.       "nostop",
  5804.       "noignore",
  5805.       "noprint",
  5806.       "nopass",
  5807.       NULL,
  5808.     };

  5809.   vec_signals = signal_completer (ignore, text, word);
  5810.   vec_keywords = complete_on_enum (keywords, word, word);

  5811.   return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
  5812.   VEC_free (char_ptr, vec_signals);
  5813.   VEC_free (char_ptr, vec_keywords);
  5814.   return return_val;
  5815. }

  5816. static void
  5817. xdb_handle_command (char *args, int from_tty)
  5818. {
  5819.   char **argv;
  5820.   struct cleanup *old_chain;

  5821.   if (args == NULL)
  5822.     error_no_arg (_("xdb command"));

  5823.   /* Break the command line up into args.  */

  5824.   argv = gdb_buildargv (args);
  5825.   old_chain = make_cleanup_freeargv (argv);
  5826.   if (argv[1] != (char *) NULL)
  5827.     {
  5828.       char *argBuf;
  5829.       int bufLen;

  5830.       bufLen = strlen (argv[0]) + 20;
  5831.       argBuf = (char *) xmalloc (bufLen);
  5832.       if (argBuf)
  5833.         {
  5834.           int validFlag = 1;
  5835.           enum gdb_signal oursig;

  5836.           oursig = gdb_signal_from_name (argv[0]);
  5837.           memset (argBuf, 0, bufLen);
  5838.           if (strcmp (argv[1], "Q") == 0)
  5839.             sprintf (argBuf, "%s %s", argv[0], "noprint");
  5840.           else
  5841.             {
  5842.               if (strcmp (argv[1], "s") == 0)
  5843.                 {
  5844.                   if (!signal_stop[oursig])
  5845.                     sprintf (argBuf, "%s %s", argv[0], "stop");
  5846.                   else
  5847.                     sprintf (argBuf, "%s %s", argv[0], "nostop");
  5848.                 }
  5849.               else if (strcmp (argv[1], "i") == 0)
  5850.                 {
  5851.                   if (!signal_program[oursig])
  5852.                     sprintf (argBuf, "%s %s", argv[0], "pass");
  5853.                   else
  5854.                     sprintf (argBuf, "%s %s", argv[0], "nopass");
  5855.                 }
  5856.               else if (strcmp (argv[1], "r") == 0)
  5857.                 {
  5858.                   if (!signal_print[oursig])
  5859.                     sprintf (argBuf, "%s %s", argv[0], "print");
  5860.                   else
  5861.                     sprintf (argBuf, "%s %s", argv[0], "noprint");
  5862.                 }
  5863.               else
  5864.                 validFlag = 0;
  5865.             }
  5866.           if (validFlag)
  5867.             handle_command (argBuf, from_tty);
  5868.           else
  5869.             printf_filtered (_("Invalid signal handling flag.\n"));
  5870.           if (argBuf)
  5871.             xfree (argBuf);
  5872.         }
  5873.     }
  5874.   do_cleanups (old_chain);
  5875. }

  5876. enum gdb_signal
  5877. gdb_signal_from_command (int num)
  5878. {
  5879.   if (num >= 1 && num <= 15)
  5880.     return (enum gdb_signal) num;
  5881.   error (_("Only signals 1-15 are valid as numeric signals.\n\
  5882. Use \"info signals\" for a list of symbolic signals."));
  5883. }

  5884. /* Print current contents of the tables set by the handle command.
  5885.    It is possible we should just be printing signals actually used
  5886.    by the current target (but for things to work right when switching
  5887.    targets, all signals should be in the signal tables).  */

  5888. static void
  5889. signals_info (char *signum_exp, int from_tty)
  5890. {
  5891.   enum gdb_signal oursig;

  5892.   sig_print_header ();

  5893.   if (signum_exp)
  5894.     {
  5895.       /* First see if this is a symbol name.  */
  5896.       oursig = gdb_signal_from_name (signum_exp);
  5897.       if (oursig == GDB_SIGNAL_UNKNOWN)
  5898.         {
  5899.           /* No, try numeric.  */
  5900.           oursig =
  5901.             gdb_signal_from_command (parse_and_eval_long (signum_exp));
  5902.         }
  5903.       sig_print_info (oursig);
  5904.       return;
  5905.     }

  5906.   printf_filtered ("\n");
  5907.   /* These ugly casts brought to you by the native VAX compiler.  */
  5908.   for (oursig = GDB_SIGNAL_FIRST;
  5909.        (int) oursig < (int) GDB_SIGNAL_LAST;
  5910.        oursig = (enum gdb_signal) ((int) oursig + 1))
  5911.     {
  5912.       QUIT;

  5913.       if (oursig != GDB_SIGNAL_UNKNOWN
  5914.           && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
  5915.         sig_print_info (oursig);
  5916.     }

  5917.   printf_filtered (_("\nUse the \"handle\" command "
  5918.                      "to change these tables.\n"));
  5919. }

  5920. /* Check if it makes sense to read $_siginfo from the current thread
  5921.    at this point.  If not, throw an error.  */

/* Throw an error (via error, which does not return) unless reading or
   writing $_siginfo is meaningful right now.  The checks run in order
   of severity: no thread selected, thread exited, thread running.  */

static void
validate_siginfo_access (void)
{
  /* No current inferior, no siginfo.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    error (_("No thread selected."));

  /* Don't try to read from a dead thread.  */
  if (is_exited (inferior_ptid))
    error (_("The current thread has terminated"));

  /* ... or from a spinning thread.  */
  if (is_running (inferior_ptid))
    error (_("Selected thread is running."));
}

/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data.  The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is handled
   by:

     1. making $_siginfo be an internalvar that creates a new value on
     access.

     2. making the value of $_siginfo be an lval_computed value.  */

  5942. /* This function implements the lval_computed support for reading a
  5943.    $_siginfo value.  */

  5944. static void
  5945. siginfo_value_read (struct value *v)
  5946. {
  5947.   LONGEST transferred;

  5948.   validate_siginfo_access ();

  5949.   transferred =
  5950.     target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
  5951.                  NULL,
  5952.                  value_contents_all_raw (v),
  5953.                  value_offset (v),
  5954.                  TYPE_LENGTH (value_type (v)));

  5955.   if (transferred != TYPE_LENGTH (value_type (v)))
  5956.     error (_("Unable to read siginfo"));
  5957. }

  5958. /* This function implements the lval_computed support for writing a
  5959.    $_siginfo value.  */

  5960. static void
  5961. siginfo_value_write (struct value *v, struct value *fromval)
  5962. {
  5963.   LONGEST transferred;

  5964.   validate_siginfo_access ();

  5965.   transferred = target_write (&current_target,
  5966.                               TARGET_OBJECT_SIGNAL_INFO,
  5967.                               NULL,
  5968.                               value_contents_all_raw (fromval),
  5969.                               value_offset (v),
  5970.                               TYPE_LENGTH (value_type (fromval)));

  5971.   if (transferred != TYPE_LENGTH (value_type (fromval)))
  5972.     error (_("Unable to write siginfo"));
  5973. }

/* The lval_computed hooks for $_siginfo values: reads go through
   siginfo_value_read, writes through siginfo_value_write; the
   remaining lval_funcs slots are left zero-initialized.  */
static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };

  5979. /* Return a new value with the correct type for the siginfo object of
  5980.    the current thread using architecture GDBARCH.  Return a void value
  5981.    if there's no object available.  */

  5982. static struct value *
  5983. siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
  5984.                     void *ignore)
  5985. {
  5986.   if (target_has_stack
  5987.       && !ptid_equal (inferior_ptid, null_ptid)
  5988.       && gdbarch_get_siginfo_type_p (gdbarch))
  5989.     {
  5990.       struct type *type = gdbarch_get_siginfo_type (gdbarch);

  5991.       return allocate_computed_value (type, &siginfo_value_funcs, NULL);
  5992.     }

  5993.   return allocate_value (builtin_type (gdbarch)->builtin_void);
  5994. }


  5995. /* infcall_suspend_state contains state about the program itself like its
  5996.    registers and any signal it received when it last stopped.
  5997.    This state must be restored regardless of how the inferior function call
  5998.    ends (either successfully, or after it hits a breakpoint or signal)
  5999.    if the program is to properly continue where it left off.  */

struct infcall_suspend_state
{
  /* Per-thread suspend-related state (stop signal etc.) saved from
     the thread that made the inferior call.  */
  struct thread_suspend_state thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  struct inferior_suspend_state inferior_suspend;
#endif

  /* Other fields:  */
  /* PC at which the inferior was stopped when the state was saved.  */
  CORE_ADDR stop_pc;
  /* Private copy of the thread's registers (owned by this struct).  */
  struct regcache *registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *siginfo_gdbarch;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb_byte *siginfo_data;
};

/* Snapshot the current thread's suspend state, stop PC, registers,
   and (if the architecture supports it) siginfo bytes, so an inferior
   function call can later be undone with
   restore_infcall_suspend_state.  Returns a heap-allocated state the
   caller must restore or discard.  Also clears the thread's pending
   stop signal so the upcoming call does not redeliver it.  */

struct infcall_suspend_state *
save_infcall_suspend_state (void)
{
  struct infcall_suspend_state *inf_state;
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  gdb_byte *siginfo_data = NULL;

  if (gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
      size_t len = TYPE_LENGTH (type);
      struct cleanup *back_to;

      siginfo_data = xmalloc (len);
      back_to = make_cleanup (xfree, siginfo_data);

      /* Only keep the buffer if the full read succeeded; a partial or
         failed read discards it and records no siginfo.  */
      if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
                       siginfo_data, 0, len) == len)
        discard_cleanups (back_to);
      else
        {
          /* Errors ignored.  */
          do_cleanups (back_to);
          siginfo_data = NULL;
        }
    }

  inf_state = XCNEW (struct infcall_suspend_state);

  if (siginfo_data)
    {
      inf_state->siginfo_gdbarch = gdbarch;
      inf_state->siginfo_data = siginfo_data;
    }

  inf_state->thread_suspend = tp->suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf_state->inferior_suspend = inf->suspend;
#endif

  /* run_inferior_call will not use the signal due to its `proceed' call with
     GDB_SIGNAL_0 anyway.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  inf_state->stop_pc = stop_pc;

  inf_state->registers = regcache_dup (regcache);

  return inf_state;
}

  6061. /* Restore inferior session state to INF_STATE.  */

/* Restore inferior session state to INF_STATE.  Puts back the thread
   suspend state, stop PC, siginfo bytes (only when the current
   gdbarch matches the one the data was saved under), and registers,
   then frees INF_STATE — it must not be used again afterwards.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  tp->suspend = inf_state->thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf->suspend = inf_state->inferior_suspend;
#endif

  stop_pc = inf_state->stop_pc;

  /* The saved siginfo layout is gdbarch-specific; only write it back
     if the architecture is still the same.  */
  if (inf_state->siginfo_gdbarch == gdbarch)
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Errors ignored.  */
      target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
                    inf_state->siginfo_data, 0, TYPE_LENGTH (type));
    }

  /* The inferior can be gone if the user types "print exit(0)"
     (and perhaps other times).  */
  if (target_has_execution)
    /* NB: The register write goes through to the target.  */
    regcache_cpy (regcache, inf_state->registers);

  discard_infcall_suspend_state (inf_state);
}

  6090. static void
  6091. do_restore_infcall_suspend_state_cleanup (void *state)
  6092. {
  6093.   restore_infcall_suspend_state (state);
  6094. }

/* Register a cleanup that restores (and thereby frees) INF_STATE when
   run.  Returns the cleanup so the caller can discard it if the
   restore turns out to be unnecessary.  */

struct cleanup *
make_cleanup_restore_infcall_suspend_state
  (struct infcall_suspend_state *inf_state)
{
  return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
}

/* Release INF_STATE without restoring anything: free the duplicated
   register cache, the saved siginfo bytes (may be NULL; xfree copes),
   and the state object itself.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  regcache_xfree (inf_state->registers);
  xfree (inf_state->siginfo_data);
  xfree (inf_state);
}

/* Return the register cache saved inside INF_STATE.  The cache
   remains owned by INF_STATE; the caller must not free it.  */

struct regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers;
}

  6113. /* infcall_control_state contains state regarding gdb's control of the
  6114.    inferior itself like stepping control.  It also contains session state like
  6115.    the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread control state (stepping, resume breakpoints, ...).  */
  struct thread_control_state thread_control;
  /* Per-inferior control state.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy;
  int stopped_by_random_signal;
  int stop_after_trap;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id;
};

  6127. /* Save all of the information associated with the inferior<==>gdb
  6128.    connection.  */

  6129. struct infcall_control_state *
  6130. save_infcall_control_state (void)
  6131. {
  6132.   struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
  6133.   struct thread_info *tp = inferior_thread ();
  6134.   struct inferior *inf = current_inferior ();

  6135.   inf_status->thread_control = tp->control;
  6136.   inf_status->inferior_control = inf->control;

  6137.   tp->control.step_resume_breakpoint = NULL;
  6138.   tp->control.exception_resume_breakpoint = NULL;

  6139.   /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
  6140.      chain.  If caller's caller is walking the chain, they'll be happier if we
  6141.      hand them back the original chain when restore_infcall_control_state is
  6142.      called.  */
  6143.   tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  6144.   /* Other fields:  */
  6145.   inf_status->stop_stack_dummy = stop_stack_dummy;
  6146.   inf_status->stopped_by_random_signal = stopped_by_random_signal;
  6147.   inf_status->stop_after_trap = stop_after_trap;

  6148.   inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  6149.   return inf_status;
  6150. }

  6151. static int
  6152. restore_selected_frame (void *args)
  6153. {
  6154.   struct frame_id *fid = (struct frame_id *) args;
  6155.   struct frame_info *frame;

  6156.   frame = frame_find_by_id (*fid);

  6157.   /* If inf_status->selected_frame_id is NULL, there was no previously
  6158.      selected frame.  */
  6159.   if (frame == NULL)
  6160.     {
  6161.       warning (_("Unable to restore previously selected frame."));
  6162.       return 0;
  6163.     }

  6164.   select_frame (frame);

  6165.   return (1);
  6166. }

  6167. /* Restore inferior session state to INF_STATUS.  */

/* Restore inferior session state to INF_STATUS.  Any resume
   breakpoints the thread acquired meanwhile are marked for deletion,
   the copied stop bpstat is dropped in favor of the saved original,
   the control blocks and stop globals are put back, and the
   previously selected frame is re-selected if possible.  Frees
   INF_STATUS — it must not be used again afterwards.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
  stop_after_trap = inf_status->stop_after_trap;

  if (target_has_stack)
    {
      /* The point of catch_errors is that if the stack is clobbered,
         walking the stack might encounter a garbage pointer and
         error() trying to dereference it.  */
      if (catch_errors
          (restore_selected_frame, &inf_status->selected_frame_id,
           "Unable to restore previously selected frame:\n",
           RETURN_MASK_ERROR) == 0)
        /* Error in restoring the selected frame.  Select the innermost
           frame.  */
        select_frame (get_current_frame ());
    }

  xfree (inf_status);
}

  6201. static void
  6202. do_restore_infcall_control_state_cleanup (void *sts)
  6203. {
  6204.   restore_infcall_control_state (sts);
  6205. }

/* Register a cleanup that restores (and thereby frees) INF_STATUS
   when run.  Returns the cleanup so the caller can discard it if the
   restore turns out to be unnecessary.  */

struct cleanup *
make_cleanup_restore_infcall_control_state
  (struct infcall_control_state *inf_status)
{
  return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
}

/* Release INF_STATUS without restoring it.  The resume breakpoints it
   holds are marked for deletion at the next stop (they cannot be
   deleted outright here), its saved bpstat chain is cleared, and the
   state object itself is freed.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  xfree (inf_status);
}

  6225. /* restore_inferior_ptid() will be used by the cleanup machinery
  6226.    to restore the inferior_ptid value saved in a call to
  6227.    save_inferior_ptid().  */

  6228. static void
  6229. restore_inferior_ptid (void *arg)
  6230. {
  6231.   ptid_t *saved_ptid_ptr = arg;

  6232.   inferior_ptid = *saved_ptid_ptr;
  6233.   xfree (arg);
  6234. }

  6235. /* Save the value of inferior_ptid so that it may be restored by a
  6236.    later call to do_cleanups().  Returns the struct cleanup pointer
  6237.    needed for later doing the cleanup.  */

  6238. struct cleanup *
  6239. save_inferior_ptid (void)
  6240. {
  6241.   ptid_t *saved_ptid_ptr;

  6242.   saved_ptid_ptr = xmalloc (sizeof (ptid_t));
  6243.   *saved_ptid_ptr = inferior_ptid;
  6244.   return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
  6245. }

  6246. /* See infrun.h.  */

  6247. void
  6248. clear_exit_convenience_vars (void)
  6249. {
  6250.   clear_internalvar (lookup_internalvar ("_exitsignal"));
  6251.   clear_internalvar (lookup_internalvar ("_exitcode"));
  6252. }


  6253. /* User interface for reverse debugging:
  6254.    Set exec-direction / show exec-direction commands
  6255.    (returns error unless target implements to_set_exec_direction method).  */

/* The effective execution direction used by the run control code.  */
int execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* Backing variable of the "set exec-direction" enum command; always
   points at one of the entries of exec_direction_names.  */
static const char *exec_direction = exec_forward;
/* The values "set exec-direction" accepts.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};

  6265. static void
  6266. set_exec_direction_func (char *args, int from_tty,
  6267.                          struct cmd_list_element *cmd)
  6268. {
  6269.   if (target_can_execute_reverse)
  6270.     {
  6271.       if (!strcmp (exec_direction, exec_forward))
  6272.         execution_direction = EXEC_FORWARD;
  6273.       else if (!strcmp (exec_direction, exec_reverse))
  6274.         execution_direction = EXEC_REVERSE;
  6275.     }
  6276.   else
  6277.     {
  6278.       exec_direction = exec_forward;
  6279.       error (_("Target does not support this operation."));
  6280.     }
  6281. }

  6282. static void
  6283. show_exec_direction_func (struct ui_file *out, int from_tty,
  6284.                           struct cmd_list_element *cmd, const char *value)
  6285. {
  6286.   switch (execution_direction) {
  6287.   case EXEC_FORWARD:
  6288.     fprintf_filtered (out, _("Forward.\n"));
  6289.     break;
  6290.   case EXEC_REVERSE:
  6291.     fprintf_filtered (out, _("Reverse.\n"));
  6292.     break;
  6293.   default:
  6294.     internal_error (__FILE__, __LINE__,
  6295.                     _("bogus execution_direction value: %d"),
  6296.                     (int) execution_direction);
  6297.   }
  6298. }

/* "show schedule-multiple" callback: VALUE is the pre-rendered
   setting string ("on"/"off") supplied by the command machinery.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
                        struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
                            "of all processes is %s.\n"), value);
}

  6306. /* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  /* Remaining internalvar hooks are unused for $_siginfo.  */
  NULL,
  NULL
};

/* Module initializer, run once at GDB startup.  Registers infrun's
   user commands ("handle", "info signals", the XDB compatibility
   commands), its "set"/"show" variables, allocates and seeds the
   per-signal disposition tables, installs thread/inferior observers,
   and creates the `$_siginfo' convenience variable.  */

void
_initialize_infrun (void)
{
  int i;
  int numsigs;
  struct cmd_list_element *c;

  /* "info signals" (aliased as "info handle") displays the current
     per-signal dispositions.  */
  add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  /* The "handle" command proper; gets a dedicated completer for
     signal names and action keywords.  */
  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* XDB compatibility mode: "lz" mirrors "info signals" and "z" is an
     XDB-flavored variant of "handle" with toggle-style actions.  */
  if (xdb_commands)
    {
      add_com ("lz", class_info, signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
      add_com ("z", class_run, xdb_handle_command, _("\
Specify how to handle a signal.\n\
Args are signals and actions to apply to those signals.\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
Recognized actions include \"s\" (toggles between stop and nostop),\n\
\"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
nopass), \"Q\" (noprint)\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined."));
    }

  /* Register a placeholder "stop" command whose only purpose is to let
     users define a `hook-stop' on it; DBX mode defines its own "stop"
     elsewhere, so skip it there.  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
                            not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* "set/show debug infrun" -- debug logging for this module.  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
                             NULL,
                             show_debug_infrun,
                             &setdebuglist, &showdebuglist);

  /* "set/show debug displaced" -- displaced-stepping debug logging.  */
  add_setshow_boolean_cmd ("displaced", class_maintenance,
                           &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
                            NULL,
                            show_debug_displaced,
                            &setdebuglist, &showdebuglist);

  /* "set/show non-stop".  Note the setting is staged through non_stop_1
     and applied by the set_non_stop callback (presumably so a change
     can be rejected while the target is running -- confirm against the
     callback's definition earlier in this file).  */
  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);

  /* Allocate the per-signal disposition tables, one byte per signal in
     GDB's portable signal numbering (GDB_SIGNAL_LAST entries).  xmalloc
     is GDB's abort-on-failure allocator, so no NULL checks are needed.
     signal_pass is not seeded in the loop below; it is derived from the
     other tables -- presumably by signal_cache_update further down.  */
  numsigs = (int) GDB_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  signal_catch = (unsigned char *)
    xmalloc (sizeof (signal_catch[0]) * numsigs);
  signal_pass = (unsigned char *)
    xmalloc (sizeof (signal_pass[0]) * numsigs);
  /* Default: stop, print, and pass every signal; catch none.  Specific
     signals are overridden below.  */
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;

  /* Update cached state (-1 meaning all signals).  */
  signal_cache_update (-1);

  /* "set/show stop-on-solib-events".  */
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  /* "set/show follow-fork-mode" (parent/child).  */
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  /* "set/show follow-exec-mode" (new/same inferior).  */
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  /* "set/show scheduler-locking" (off/on/step).  */
  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,        /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  /* "set/show schedule-multiple".  */
  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  /* "set/show step-mode" -- whether "step" stops in functions without
     line info instead of skipping them.  */
  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  /* "set/show displaced-stepping" (on/off/auto).  */
  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                NULL,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);

  /* "set/show exec-direction" (forward/reverse) for reverse debugging.  */
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);

  /* ptid initializations: no current inferior, and no recorded last
     wait target yet.  */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's per-thread state in sync with thread lifecycle
     events.  */
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  /* "set/show observer" -- read-only inspection mode.  Like non-stop,
     the value is staged through observer_mode_1 and applied by the
     set_observer_mode callback.  */
  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
}