gdb/record-btrace.c - gdb

Global variables defined

Data types defined

Functions defined

Macros defined

Source code

  1. /* Branch trace support for GDB, the GNU debugger.

  2.    Copyright (C) 2013-2015 Free Software Foundation, Inc.

  3.    Contributed by Intel Corp. <markus.t.metzger@intel.com>

  4.    This file is part of GDB.

  5.    This program is free software; you can redistribute it and/or modify
  6.    it under the terms of the GNU General Public License as published by
  7.    the Free Software Foundation; either version 3 of the License, or
  8.    (at your option) any later version.

  9.    This program is distributed in the hope that it will be useful,
  10.    but WITHOUT ANY WARRANTY; without even the implied warranty of
  11.    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12.    GNU General Public License for more details.

  13.    You should have received a copy of the GNU General Public License
  14.    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

  15. #include "defs.h"
  16. #include "record.h"
  17. #include "gdbthread.h"
  18. #include "target.h"
  19. #include "gdbcmd.h"
  20. #include "disasm.h"
  21. #include "observer.h"
  22. #include "cli/cli-utils.h"
  23. #include "source.h"
  24. #include "ui-out.h"
  25. #include "symtab.h"
  26. #include "filenames.h"
  27. #include "regcache.h"
  28. #include "frame-unwind.h"
  29. #include "hashtab.h"
  30. #include "infrun.h"
  31. #include "event-loop.h"
  32. #include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Always points at one
   of the string constants above; comparisons elsewhere in this file are
   done by pointer identity, not strcmp.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, replay filtering of memory/register accesses is bypassed.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
                            "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)


  67. /* Update the branch trace for the current thread and return a pointer to its
  68.    thread_info.

  69.    Throws an error if there is no thread or no trace.  This function never
  70.    returns NULL.  */

  71. static struct thread_info *
  72. require_btrace_thread (void)
  73. {
  74.   struct thread_info *tp;

  75.   DEBUG ("require");

  76.   tp = find_thread_ptid (inferior_ptid);
  77.   if (tp == NULL)
  78.     error (_("No thread."));

  79.   btrace_fetch (tp);

  80.   if (btrace_is_empty (tp))
  81.     error (_("No trace."));

  82.   return tp;
  83. }

  84. /* Update the branch trace for the current thread and return a pointer to its
  85.    branch trace information struct.

  86.    Throws an error if there is no thread or no trace.  This function never
  87.    returns NULL.  */

  88. static struct btrace_thread_info *
  89. require_btrace (void)
  90. {
  91.   struct thread_info *tp;

  92.   tp = require_btrace_thread ();

  93.   return &tp->btrace;
  94. }

/* Enable branch tracing for one thread.  Warn on errors.

   Installed as the new-thread observer callback by
   record_btrace_auto_enable, so a failure to trace one new thread must
   not propagate as an error out of the observer.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  /* Catch errors; downgrade them to a warning below.  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

  105. /* Callback function to disable branch tracing for one thread.  */

  106. static void
  107. record_btrace_disable_callback (void *arg)
  108. {
  109.   struct thread_info *tp;

  110.   tp = arg;

  111.   btrace_disable (tp);
  112. }

  113. /* Enable automatic tracing of new threads.  */

  114. static void
  115. record_btrace_auto_enable (void)
  116. {
  117.   DEBUG ("attach thread observer");

  118.   record_btrace_thread_observer
  119.     = observer_attach_new_thread (record_btrace_enable_warn);
  120. }

  121. /* Disable automatic tracing of new threads.  */

  122. static void
  123. record_btrace_auto_disable (void)
  124. {
  125.   /* The observer may have been detached, already.  */
  126.   if (record_btrace_thread_observer == NULL)
  127.     return;

  128.   DEBUG ("detach thread observer");

  129.   observer_detach_new_thread (record_btrace_thread_observer);
  130.   record_btrace_thread_observer = NULL;
  131. }

/* The record-btrace async event handler function.

   DATA is unused; the event is simply forwarded to the standard inferior
   event handler.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.

   ARGS, if non-empty, is a thread number list restricting which threads
   to trace; otherwise all non-exited threads are traced.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing thread by thread.  If btrace_enable throws part-way
     through, the cleanup chain disables the threads enabled so far.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  /* Trace threads created from now on, too.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* NOTE(review): the second argument presumably means "recording
     started" — confirm against observer_notify_record_changed's
     declaration.  */
  observer_notify_record_changed (current_inferior (),  1);

  /* All threads enabled successfully; keep tracing on.  */
  discard_cleanups (disable_chain);
}

  169. /* The to_stop_recording method of target record-btrace.  */

  170. static void
  171. record_btrace_stop_recording (struct target_ops *self)
  172. {
  173.   struct thread_info *tp;

  174.   DEBUG ("stop recording");

  175.   record_btrace_auto_disable ();

  176.   ALL_NON_EXITED_THREADS (tp)
  177.     if (tp->btrace.target != NULL)
  178.       btrace_disable (tp);
  179. }

  180. /* The to_close method of target record-btrace.  */

  181. static void
  182. record_btrace_close (struct target_ops *self)
  183. {
  184.   struct thread_info *tp;

  185.   if (record_btrace_async_inferior_event_handler != NULL)
  186.     delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  187.   /* Make sure automatic recording gets disabled even if we did not stop
  188.      recording before closing the record-btrace target.  */
  189.   record_btrace_auto_disable ();

  190.   /* We should have already stopped recording.
  191.      Tear down btrace in case we have not.  */
  192.   ALL_NON_EXITED_THREADS (tp)
  193.     btrace_teardown (tp);
  194. }

/* The to_info_record method of target record-btrace.

   Prints instruction/function-call counts for the current thread and,
   if replaying, the current replay position.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Step one back from the end iterator to reach the last element;
	 its number gives the total count (numbering appears to be
	 1-based — confirm in btrace.h).  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print the unsigned int VAL under the field name FLD in UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.

   Prints each instruction in the half-open range [BEGIN; END) to UIOUT,
   prefixed with its instruction number.  FLAGS are disassembly flags
   forwarded to gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassemble exactly one instruction starting at insn->pc.
	 Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.

   Prints abs (SIZE) instructions around the current browsing position
   (or the replay position on the first call) and remembers the printed
   range so a subsequent call continues from its edge.  A negative SIZE
   moves backwards.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* A range was printed before; continue from its edge in the
	 requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      /* Nothing left to print; tell the user which end of the trace we
	 ran into.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next call.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

  332. /* The to_insn_history_range method of target record-btrace.  */

  333. static void
  334. record_btrace_insn_history_range (struct target_ops *self,
  335.                                   ULONGEST from, ULONGEST to, int flags)
  336. {
  337.   struct btrace_thread_info *btinfo;
  338.   struct btrace_insn_history *history;
  339.   struct btrace_insn_iterator begin, end;
  340.   struct cleanup *uiout_cleanup;
  341.   struct ui_out *uiout;
  342.   unsigned int low, high;
  343.   int found;

  344.   uiout = current_uiout;
  345.   uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  346.                                                        "insn history");
  347.   low = from;
  348.   high = to;

  349.   DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  350.   /* Check for wrap-arounds.  */
  351.   if (low != from || high != to)
  352.     error (_("Bad range."));

  353.   if (high < low)
  354.     error (_("Bad range."));

  355.   btinfo = require_btrace ();

  356.   found = btrace_find_insn_by_number (&begin, btinfo, low);
  357.   if (found == 0)
  358.     error (_("Range out of bounds."));

  359.   found = btrace_find_insn_by_number (&end, btinfo, high);
  360.   if (found == 0)
  361.     {
  362.       /* Silently truncate the range.  */
  363.       btrace_insn_end (&end, btinfo);
  364.     }
  365.   else
  366.     {
  367.       /* We want both begin and end to be inclusive.  */
  368.       btrace_insn_next (&end, 1);
  369.     }

  370.   btrace_insn_history (uiout, &begin, &end, flags);
  371.   btrace_set_insn_history (btinfo, &begin, &end);

  372.   do_cleanups (uiout_cleanup);
  373. }

  374. /* The to_insn_history_from method of target record-btrace.  */

  375. static void
  376. record_btrace_insn_history_from (struct target_ops *self,
  377.                                  ULONGEST from, int size, int flags)
  378. {
  379.   ULONGEST begin, end, context;

  380.   context = abs (size);
  381.   if (context == 0)
  382.     error (_("Bad record instruction-history-size."));

  383.   if (size < 0)
  384.     {
  385.       end = from;

  386.       if (from < context)
  387.         begin = 0;
  388.       else
  389.         begin = from - context + 1;
  390.     }
  391.   else
  392.     {
  393.       begin = from;
  394.       end = from + context - 1;

  395.       /* Check for wrap-around.  */
  396.       if (end < begin)
  397.         end = ULONGEST_MAX;
  398.     }

  399.   record_btrace_insn_history_range (self, begin, end, flags);
  400. }

  401. /* Print the instruction number range for a function call history line.  */

  402. static void
  403. btrace_call_history_insn_range (struct ui_out *uiout,
  404.                                 const struct btrace_function *bfun)
  405. {
  406.   unsigned int begin, end, size;

  407.   size = VEC_length (btrace_insn_s, bfun->insn);
  408.   gdb_assert (size > 0);

  409.   begin = bfun->insn_offset;
  410.   end = begin + size - 1;

  411.   ui_out_field_uint (uiout, "insn begin", begin);
  412.   ui_out_text (uiout, ",");
  413.   ui_out_field_uint (uiout, "insn end", end);
  414. }

  415. /* Print the source line information for a function call history line.  */

  416. static void
  417. btrace_call_history_src_line (struct ui_out *uiout,
  418.                               const struct btrace_function *bfun)
  419. {
  420.   struct symbol *sym;
  421.   int begin, end;

  422.   sym = bfun->sym;
  423.   if (sym == NULL)
  424.     return;

  425.   ui_out_field_string (uiout, "file",
  426.                        symtab_to_filename_for_display (symbol_symtab (sym)));

  427.   begin = bfun->lbegin;
  428.   end = bfun->lend;

  429.   if (end < begin)
  430.     return;

  431.   ui_out_text (uiout, ":");
  432.   ui_out_field_int (uiout, "min line", begin);

  433.   if (end == begin)
  434.     return;

  435.   ui_out_text (uiout, ",");
  436.   ui_out_field_int (uiout, "max line", end);
  437. }

  438. /* Get the name of a branch trace function.  */

  439. static const char *
  440. btrace_get_bfun_name (const struct btrace_function *bfun)
  441. {
  442.   struct minimal_symbol *msym;
  443.   struct symbol *sym;

  444.   if (bfun == NULL)
  445.     return "??";

  446.   msym = bfun->msym;
  447.   sym = bfun->sym;

  448.   if (sym != NULL)
  449.     return SYMBOL_PRINT_NAME (sym);
  450.   else if (msym != NULL)
  451.     return MSYMBOL_PRINT_NAME (msym);
  452.   else
  453.     return "??";
  454. }

/* Disassemble a section of the recorded function trace.

   Prints one line per call in the half-open range [BEGIN; END) to UIOUT.
   FLAGS select optional columns: call-depth indentation, instruction
   number ranges, and source line ranges.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent two spaces per call level.  BTINFO->LEVEL presumably
	     offsets the segment's relative level into a non-negative
	     indentation — confirm in btrace.h.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Prefer the debug symbol name over the minimal symbol name; MI
	 consumers get no "??" placeholder for unknown functions.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.

   Prints abs (SIZE) function calls around the current browsing position
   (or the replay position on the first call) and remembers the printed
   range so a subsequent call continues from its edge.  A negative SIZE
   moves backwards.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id "insn history" looks copy-pasted from the
     instruction-history code; record_btrace_call_history_range uses
     "func history".  Confirm against MI consumers before changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  /* Build a call iterator from the replay instruction iterator's
	     function segment.  */
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* A range was printed before; continue from its edge in the
	 requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to print; tell the user which end of the trace we
	 ran into.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next call.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

  580. /* The to_call_history_range method of target record-btrace.  */

  581. static void
  582. record_btrace_call_history_range (struct target_ops *self,
  583.                                   ULONGEST from, ULONGEST to, int flags)
  584. {
  585.   struct btrace_thread_info *btinfo;
  586.   struct btrace_call_history *history;
  587.   struct btrace_call_iterator begin, end;
  588.   struct cleanup *uiout_cleanup;
  589.   struct ui_out *uiout;
  590.   unsigned int low, high;
  591.   int found;

  592.   uiout = current_uiout;
  593.   uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  594.                                                        "func history");
  595.   low = from;
  596.   high = to;

  597.   DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  598.   /* Check for wrap-arounds.  */
  599.   if (low != from || high != to)
  600.     error (_("Bad range."));

  601.   if (high < low)
  602.     error (_("Bad range."));

  603.   btinfo = require_btrace ();

  604.   found = btrace_find_call_by_number (&begin, btinfo, low);
  605.   if (found == 0)
  606.     error (_("Range out of bounds."));

  607.   found = btrace_find_call_by_number (&end, btinfo, high);
  608.   if (found == 0)
  609.     {
  610.       /* Silently truncate the range.  */
  611.       btrace_call_end (&end, btinfo);
  612.     }
  613.   else
  614.     {
  615.       /* We want both begin and end to be inclusive.  */
  616.       btrace_call_next (&end, 1);
  617.     }

  618.   btrace_call_history (uiout, btinfo, &begin, &end, flags);
  619.   btrace_set_call_history (btinfo, &begin, &end);

  620.   do_cleanups (uiout_cleanup);
  621. }

  622. /* The to_call_history_from method of target record-btrace.  */

  623. static void
  624. record_btrace_call_history_from (struct target_ops *self,
  625.                                  ULONGEST from, int size, int flags)
  626. {
  627.   ULONGEST begin, end, context;

  628.   context = abs (size);
  629.   if (context == 0)
  630.     error (_("Bad record function-call-history-size."));

  631.   if (size < 0)
  632.     {
  633.       end = from;

  634.       if (from < context)
  635.         begin = 0;
  636.       else
  637.         begin = from - context + 1;
  638.     }
  639.   else
  640.     {
  641.       begin = from;
  642.       end = from + context - 1;

  643.       /* Check for wrap-around.  */
  644.       if (end < begin)
  645.         end = ULONGEST_MAX;
  646.     }

  647.   record_btrace_call_history_range (self, begin, end, flags);
  648. }

  649. /* The to_record_is_replaying method of target record-btrace.  */

  650. static int
  651. record_btrace_is_replaying (struct target_ops *self)
  652. {
  653.   struct thread_info *tp;

  654.   ALL_NON_EXITED_THREADS (tp)
  655.     if (btrace_is_replaying (tp))
  656.       return 1;

  657.   return 0;
  658. }

  659. /* The to_xfer_partial method of target record-btrace.  */

  660. static enum target_xfer_status
  661. record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
  662.                             const char *annex, gdb_byte *readbuf,
  663.                             const gdb_byte *writebuf, ULONGEST offset,
  664.                             ULONGEST len, ULONGEST *xfered_len)
  665. {
  666.   struct target_ops *t;

  667.   /* Filter out requests that don't make sense during replay.  */
  668.   if (replay_memory_access == replay_memory_access_read_only
  669.       && !record_btrace_generating_corefile
  670.       && record_btrace_is_replaying (ops))
  671.     {
  672.       switch (object)
  673.         {
  674.         case TARGET_OBJECT_MEMORY:
  675.           {
  676.             struct target_section *section;

  677.             /* We do not allow writing memory in general.  */
  678.             if (writebuf != NULL)
  679.               {
  680.                 *xfered_len = len;
  681.                 return TARGET_XFER_UNAVAILABLE;
  682.               }

  683.             /* We allow reading readonly memory.  */
  684.             section = target_section_by_addr (ops, offset);
  685.             if (section != NULL)
  686.               {
  687.                 /* Check if the section we found is readonly.  */
  688.                 if ((bfd_get_section_flags (section->the_bfd_section->owner,
  689.                                             section->the_bfd_section)
  690.                      & SEC_READONLY) != 0)
  691.                   {
  692.                     /* Truncate the request to fit into this section.  */
  693.                     len = min (len, section->endaddr - offset);
  694.                     break;
  695.                   }
  696.               }

  697.             *xfered_len = len;
  698.             return TARGET_XFER_UNAVAILABLE;
  699.           }
  700.         }
  701.     }

  702.   /* Forward the request.  */
  703.   ops = ops->beneath;
  704.   return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
  705.                                offset, len, xfered_len);
  706. }

/* The to_insert_breakpoint method of target record-btrace.

   Delegates to the target beneath with replay memory access temporarily
   switched to read-write; any exception is rethrown after the original
   access mode has been restored.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before rethrowing so it cannot be left at
     read-write.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.

   Delegates to the target beneath with replay memory access temporarily
   switched to read-write; any exception is rethrown after the original
   access mode has been restored.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before rethrowing so it cannot be left at
     read-write.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

  749. /* The to_fetch_registers method of target record-btrace.  */

  750. static void
  751. record_btrace_fetch_registers (struct target_ops *ops,
  752.                                struct regcache *regcache, int regno)
  753. {
  754.   struct btrace_insn_iterator *replay;
  755.   struct thread_info *tp;

  756.   tp = find_thread_ptid (inferior_ptid);
  757.   gdb_assert (tp != NULL);

  758.   replay = tp->btrace.replay;
  759.   if (replay != NULL && !record_btrace_generating_corefile)
  760.     {
  761.       const struct btrace_insn *insn;
  762.       struct gdbarch *gdbarch;
  763.       int pcreg;

  764.       gdbarch = get_regcache_arch (regcache);
  765.       pcreg = gdbarch_pc_regnum (gdbarch);
  766.       if (pcreg < 0)
  767.         return;

  768.       /* We can only provide the PC register.  */
  769.       if (regno >= 0 && regno != pcreg)
  770.         return;

  771.       insn = btrace_insn_get (replay);
  772.       gdb_assert (insn != NULL);

  773.       regcache_raw_supply (regcache, regno, &insn->pc);
  774.     }
  775.   else
  776.     {
  777.       struct target_ops *t = ops->beneath;

  778.       t->to_fetch_registers (t, regcache, regno);
  779.     }
  780. }

  781. /* The to_store_registers method of target record-btrace.  */

  782. static void
  783. record_btrace_store_registers (struct target_ops *ops,
  784.                                struct regcache *regcache, int regno)
  785. {
  786.   struct target_ops *t;

  787.   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
  788.     error (_("This record target does not allow writing registers."));

  789.   gdb_assert (may_write_registers != 0);

  790.   t = ops->beneath;
  791.   t->to_store_registers (t, regcache, regno);
  792. }

  793. /* The to_prepare_to_store method of target record-btrace.  */

  794. static void
  795. record_btrace_prepare_to_store (struct target_ops *ops,
  796.                                 struct regcache *regcache)
  797. {
  798.   struct target_ops *t;

  799.   if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
  800.     return;

  801.   t = ops->beneath;
  802.   t->to_prepare_to_store (t, regcache);
  803. }

/* The branch trace frame cache.

   Associates a frame with its thread and branch trace function segment.
   Entries are stored in the BFCACHE hash table and are hashed and
   compared by their FRAME pointer (see bfcache_hash and bfcache_eq).  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; also serves as the hash key.  */
  struct frame_info *frame;

  /* The branch trace function segment corresponding to this frame.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are hashed and compared on their FRAME pointer; see
   bfcache_hash and bfcache_eq.  */

static htab_t bfcache;

  816. /* hash_f for htab_create_alloc of bfcache.  */

  817. static hashval_t
  818. bfcache_hash (const void *arg)
  819. {
  820.   const struct btrace_frame_cache *cache = arg;

  821.   return htab_hash_pointer (cache->frame);
  822. }

  823. /* eq_f for htab_create_alloc of bfcache.  */

  824. static int
  825. bfcache_eq (const void *arg1, const void *arg2)
  826. {
  827.   const struct btrace_frame_cache *cache1 = arg1;
  828.   const struct btrace_frame_cache *cache2 = arg2;

  829.   return cache1->frame == cache2->frame;
  830. }

  831. /* Create a new btrace frame cache.  */

  832. static struct btrace_frame_cache *
  833. bfcache_new (struct frame_info *frame)
  834. {
  835.   struct btrace_frame_cache *cache;
  836.   void **slot;

  837.   cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  838.   cache->frame = frame;

  839.   slot = htab_find_slot (bfcache, cache, INSERT);
  840.   gdb_assert (*slot == NULL);
  841.   *slot = cache;

  842.   return cache;
  843. }

  844. /* Extract the branch trace function from a branch trace frame.  */

  845. static const struct btrace_function *
  846. btrace_get_frame_function (struct frame_info *frame)
  847. {
  848.   const struct btrace_frame_cache *cache;
  849.   const struct btrace_function *bfun;
  850.   struct btrace_frame_cache pattern;
  851.   void **slot;

  852.   pattern.frame = frame;

  853.   slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  854.   if (slot == NULL)
  855.     return NULL;

  856.   cache = *slot;
  857.   return cache->bfun;
  858. }

  859. /* Implement stop_reason method for record_btrace_frame_unwind.  */

  860. static enum unwind_stop_reason
  861. record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  862.                                         void **this_cache)
  863. {
  864.   const struct btrace_frame_cache *cache;
  865.   const struct btrace_function *bfun;

  866.   cache = *this_cache;
  867.   bfun = cache->bfun;
  868.   gdb_assert (bfun != NULL);

  869.   if (bfun->up == NULL)
  870.     return UNWIND_UNAVAILABLE;

  871.   return UNWIND_NO_REASON;
  872. }

  873. /* Implement this_id method for record_btrace_frame_unwind.  */

  874. static void
  875. record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
  876.                              struct frame_id *this_id)
  877. {
  878.   const struct btrace_frame_cache *cache;
  879.   const struct btrace_function *bfun;
  880.   CORE_ADDR code, special;

  881.   cache = *this_cache;

  882.   bfun = cache->bfun;
  883.   gdb_assert (bfun != NULL);

  884.   while (bfun->segment.prev != NULL)
  885.     bfun = bfun->segment.prev;

  886.   code = get_frame_func (this_frame);
  887.   special = bfun->number;

  888.   *this_id = frame_id_build_unavailable_stack_special (code, special);

  889.   DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
  890.          btrace_get_bfun_name (cache->bfun),
  891.          core_addr_to_string_nz (this_id->code_addr),
  892.          core_addr_to_string_nz (this_id->special_addr));
  893. }

/* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC can be reconstructed from branch trace; all other
   registers are reported as unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* Presumably the caller segment starts at the return address
         when the up link is a return link, so its first instruction
         is the unwound PC -- confirm against btrace.h.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise the caller's last recorded instruction is the call;
         the unwound PC is the instruction following it.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

  934. /* Implement sniffer method for record_btrace_frame_unwind.  */

  935. static int
  936. record_btrace_frame_sniffer (const struct frame_unwind *self,
  937.                              struct frame_info *this_frame,
  938.                              void **this_cache)
  939. {
  940.   const struct btrace_function *bfun;
  941.   struct btrace_frame_cache *cache;
  942.   struct thread_info *tp;
  943.   struct frame_info *next;

  944.   /* THIS_FRAME does not contain a reference to its thread.  */
  945.   tp = find_thread_ptid (inferior_ptid);
  946.   gdb_assert (tp != NULL);

  947.   bfun = NULL;
  948.   next = get_next_frame (this_frame);
  949.   if (next == NULL)
  950.     {
  951.       const struct btrace_insn_iterator *replay;

  952.       replay = tp->btrace.replay;
  953.       if (replay != NULL)
  954.         bfun = replay->function;
  955.     }
  956.   else
  957.     {
  958.       const struct btrace_function *callee;

  959.       callee = btrace_get_frame_function (next);
  960.       if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
  961.         bfun = callee->up;
  962.     }

  963.   if (bfun == NULL)
  964.     return 0;

  965.   DEBUG ("[frame] sniffed frame for %s on level %d",
  966.          btrace_get_bfun_name (bfun), bfun->level);

  967.   /* This is our frame.  Initialize the frame cache.  */
  968.   cache = bfcache_new (this_frame);
  969.   cache->tp = tp;
  970.   cache->bfun = bfun;

  971.   *this_cache = cache;
  972.   return 1;
  973. }

  974. /* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

  975. static int
  976. record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
  977.                                       struct frame_info *this_frame,
  978.                                       void **this_cache)
  979. {
  980.   const struct btrace_function *bfun, *callee;
  981.   struct btrace_frame_cache *cache;
  982.   struct frame_info *next;

  983.   next = get_next_frame (this_frame);
  984.   if (next == NULL)
  985.     return 0;

  986.   callee = btrace_get_frame_function (next);
  987.   if (callee == NULL)
  988.     return 0;

  989.   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
  990.     return 0;

  991.   bfun = callee->up;
  992.   if (bfun == NULL)
  993.     return 0;

  994.   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
  995.          btrace_get_bfun_name (bfun), bfun->level);

  996.   /* This is our frame.  Initialize the frame cache.  */
  997.   cache = bfcache_new (this_frame);
  998.   cache->tp = find_thread_ptid (inferior_ptid);
  999.   cache->bfun = bfun;

  1000.   *this_cache = cache;
  1001.   return 1;
  1002. }

  1003. static void
  1004. record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  1005. {
  1006.   struct btrace_frame_cache *cache;
  1007.   void **slot;

  1008.   cache = this_cache;

  1009.   slot = htab_find_slot (bfcache, cache, NO_INSERT);
  1010.   gdb_assert (slot != NULL);

  1011.   htab_remove_elt (bfcache, cache);
  1012. }

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

/* The btrace frame unwinder for normal frames; see also
   record_btrace_tailcall_frame_unwind below.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* The btrace frame unwinder for tail call frames.  Differs from
   record_btrace_frame_unwind only in the frame type and the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  /* Always unwind via the btrace frame unwinder.  */
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  /* Always unwind tail call frames via the btrace tailcall unwinder.  */
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  The actual
   move happens later, in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread can only have one pending move request at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* Record the requested move; consumed by record_btrace_step_thread.  */
  btinfo->flags |= flag;
}

  1064. /* Find the thread to resume given a PTID.  */

  1065. static struct thread_info *
  1066. record_btrace_find_resume_thread (ptid_t ptid)
  1067. {
  1068.   struct thread_info *tp;

  1069.   /* When asked to resume everything, we pick the current thread.  */
  1070.   if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
  1071.     ptid = inferior_ptid;

  1072.   return find_thread_ptid (ptid);
  1073. }

/* Start replaying a thread.  Returns the new replay iterator (also
   stored in TP->btrace.replay), or NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the partial replay setup before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

  1143. /* Stop replaying a thread.  */

  1144. static void
  1145. record_btrace_stop_replaying (struct thread_info *tp)
  1146. {
  1147.   struct btrace_thread_info *btinfo;

  1148.   btinfo = &tp->btrace;

  1149.   xfree (btinfo->replay);
  1150.   btinfo->replay = NULL;

  1151.   /* Make sure we're not leaving any stale registers.  */
  1152.   registers_changed_ptid (tp->ptid);
  1153. }

  1154. /* The to_resume method of target record-btrace.  */

  1155. static void
  1156. record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
  1157.                       enum gdb_signal signal)
  1158. {
  1159.   struct thread_info *tp, *other;
  1160.   enum btrace_thread_flag flag;

  1161.   DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  1162.   /* Store the execution direction of the last resume.  */
  1163.   record_btrace_resume_exec_dir = execution_direction;

  1164.   tp = record_btrace_find_resume_thread (ptid);
  1165.   if (tp == NULL)
  1166.     error (_("Cannot find thread to resume."));

  1167.   /* Stop replaying other threads if the thread to resume is not replaying.  */
  1168.   if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
  1169.     ALL_NON_EXITED_THREADS (other)
  1170.       record_btrace_stop_replaying (other);

  1171.   /* As long as we're not replaying, just forward the request.  */
  1172.   if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
  1173.     {
  1174.       ops = ops->beneath;
  1175.       return ops->to_resume (ops, ptid, step, signal);
  1176.     }

  1177.   /* Compute the btrace thread flag for the requested move.  */
  1178.   if (step == 0)
  1179.     flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  1180.   else
  1181.     flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  1182.   /* At the moment, we only move a single thread.  We could also move
  1183.      all threads in parallel by single-stepping each resumed thread
  1184.      until the first runs into an event.
  1185.      When we do that, we would want to continue all other threads.
  1186.      For now, just resume one thread to not confuse to_wait.  */
  1187.   record_btrace_resume_thread (tp, flag);

  1188.   /* We just indicate the resume intent here.  The actual stepping happens in
  1189.      record_btrace_wait below.  */

  1190.   /* Async support.  */
  1191.   if (target_can_async_p ())
  1192.     {
  1193.       target_async (inferior_event_handler, 0);
  1194.       mark_async_event_handler (record_btrace_async_inferior_event_handler);
  1195.     }
  1196. }

/* Find a thread to move, i.e. one with a pending BTHR_MOVE request
   (set by record_btrace_resume_thread).  Returns NULL if none.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  /* NOTE(review): STATUS.value is left uninitialized; presumably it is
     unused for this kind -- confirm against target_waitstatus consumers.  */
  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  /* Report a SIGTRAP stop, mirroring what a real single-step reports.  */
  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  Discards any cached instruction and call
   history so it is rebuilt from the current replay position.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  Consumes TP's pending BTHR_MOVE flags and
   returns the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the pending move request so it is only acted upon once.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* Single-step forward in the recorded history.  */
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
        record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Single-step backward in the recorded history.  */
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continue forward until a breakpoint or the end of the trace.  */
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
        {
          const struct btrace_insn *insn;

          /* We are always able to step at least once.  */
          steps = btrace_insn_next (replay, 1);
          gdb_assert (steps == 1);

          /* We stop replaying if we reached the end of the trace.  */
          if (btrace_insn_cmp (replay, &end) == 0)
            {
              record_btrace_stop_replaying (tp);
              return btrace_step_no_history ();
            }

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          /* Stop at breakpoints as a live continue would.  */
          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }

    case BTHR_RCONT:
      /* Continue backward until a breakpoint or the start of the trace.  */
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      for (;;)
        {
          const struct btrace_insn *insn;

          /* If we can't step any further, we're done.  */
          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            return btrace_step_no_history ();

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          /* Stop at breakpoints as a live reverse-continue would.  */
          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }
    }
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      /* No thread has a pending move; tell the caller to ignore this
         event.  */
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads. */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Recorded history can always be replayed backwards.  */
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
                                   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  /* Otherwise forward the request to the target beneath.  */
  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}

  1383. /* The to_update_thread_list method of target record-btrace.  */

  1384. static void
  1385. record_btrace_update_thread_list (struct target_ops *ops)
  1386. {
  1387.   /* We don't add or remove threads during replay.  */
  1388.   if (record_btrace_is_replaying (ops))
  1389.     return;

  1390.   /* Forward the request.  */
  1391.   ops = ops->beneath;
  1392.   ops->to_update_thread_list (ops);
  1393. }

  1394. /* The to_thread_alive method of target record-btrace.  */

  1395. static int
  1396. record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
  1397. {
  1398.   /* We don't add or remove threads during replay.  */
  1399.   if (record_btrace_is_replaying (ops))
  1400.     return find_thread_ptid (ptid) != NULL;

  1401.   /* Forward the request.  */
  1402.   ops = ops->beneath;
  1403.   return ops->to_thread_alive (ops, ptid);
  1404. }

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      /* NOTE(review): assumes record_btrace_start_replaying left
         BTINFO->REPLAY non-NULL; it returns NULL when there is no
         trace -- presumably IT always refers into existing trace
         here, but verify.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  /* Move the replay position to the first recorded instruction.  */
  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  /* Show the user where we are now.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator stops replaying, i.e. returns to the end of the
     recorded history (see record_btrace_set_replay).  */
  record_btrace_set_replay (tp, NULL);

  /* Show the user where we are now.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

  1447. /* The to_goto_record method of target record-btrace.  */

  1448. static void
  1449. record_btrace_goto (struct target_ops *self, ULONGEST insn)
  1450. {
  1451.   struct thread_info *tp;
  1452.   struct btrace_insn_iterator it;
  1453.   unsigned int number;
  1454.   int found;

  1455.   number = insn;

  1456.   /* Check for wrap-arounds.  */
  1457.   if (number != insn)
  1458.     error (_("Instruction number out of range."));

  1459.   tp = require_btrace_thread ();

  1460.   found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  1461.   if (found == 0)
  1462.     error (_("No such instruction."));

  1463.   record_btrace_set_replay (tp, &it);

  1464.   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
  1465. }

/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction of the last resume request; see
     record_btrace_resume.  */
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* While set, register and memory accesses bypass the replay logic
     (see e.g. record_btrace_fetch_registers).  */
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Re-enable the replay-aware access paths.  */
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Target lifetime.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory, breakpoints, and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

  1531. /* Alias for "target record".  */

  1532. static void
  1533. cmd_record_btrace_start (char *args, int from_tty)
  1534. {
  1535.   if (args != NULL && *args != 0)
  1536.     error (_("Invalid argument."));

  1537.   execute_command ("target record-btrace", from_tty);
  1538. }

/* The "set record btrace" command.

   Invoked when "set record btrace" is run without a sub-command; lists
   the sub-commands' current values.  NOTE(review): this calls
   cmd_show_list on the *set* command list — apparently intentional so a
   bare "set record btrace" displays the current settings, but confirm
   against other GDB set-prefix handlers.  ARGS is unused.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.

   Invoked when "show record btrace" is run without a sub-command;
   lists the values of all "record btrace" settings.  ARGS is unused.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

  1551. /* The "show record btrace replay-memory-access" command.  */

  1552. static void
  1553. cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
  1554.                                struct cmd_list_element *c, const char *value)
  1555. {
  1556.   fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
  1557.                     replay_memory_access);
  1558. }

/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Module initializer, called once at GDB startup: registers the
   "record btrace" command (and its "record b" alias), the
   "set/show record btrace" prefix commands, the replay-memory-access
   setting, the record-btrace target itself, and the branch-trace
   function-symbol cache.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; "record b" is a shorthand.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
           _("Start branch trace recording."),
           &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Prefix commands under which the btrace settings live.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options"), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options"), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access": controls which
     memory the live target may be asked for while replaying.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                           _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                           NULL, cmd_show_replay_memory_access,
                           &set_record_btrace_cmdlist,
                           &show_record_btrace_cmdlist);

  /* Register the record-btrace target on the target list.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace function symbols; sized at 50 buckets initially.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);
}
  1592. }