gdb/btrace.c

/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
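
/* For illustration (added sketch; CHECK and fallback are hypothetical
   names, not from this file): the do ... while (0) wrapper matters when
   a multi-statement macro is used as the body of an if/else:

     #define CHECK() do { a (); b (); } while (0)

     if (cond)
       CHECK ();     // expands to a single statement; the trailing ';'
     else            // completes it, so the 'else' still binds to the
       fallback ();  // 'if' as intended.

   With a bare { a (); b (); } expansion, the ';' after CHECK () would
   end the 'if' statement and leave the 'else' without a matching 'if'.  */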

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int lbegin, lend, level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  lbegin = bfun->lbegin;
  lend = bfun->lend;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
                "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
                ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Return non-zero if we should skip this file when generating the function
   call history, zero otherwise.
   We would want to do that if, say, a macro that is defined in another file
   is expanded in this function.  */

static int
ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
{
  struct symbol *sym;
  const char *bfile;

  sym = bfun->sym;
  if (sym == NULL)
    return 1;

  bfile = symtab_to_fullname (symbol_symtab (sym));

  return (filename_cmp (bfile, fullname) != 0);
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;
      CORE_ADDR pc;

      /* We do not allow empty function segments.  */
      gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));

      last = VEC_last (btrace_insn_s, bfun->insn);
      pc = last->pc;

      if (gdbarch_insn_is_call (gdbarch, pc))
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
                   struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  /* We keep the function level.  */
  bfun->level = prev->level;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
                        struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
        return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
        {
          int size;

          size = gdb_insn_length (gdbarch, lpc);

          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (lpc + size != pc)
            return ftrace_new_call (bfun, mfun, fun);
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      if (last != NULL)
        {
          CORE_ADDR start, lpc;

          start = get_pc_function_start (pc);

          /* If we can't determine the function for PC, we treat a jump at
             the end of the block as tail call.  */
          if (start == 0)
            start = pc;

          lpc = last->pc;

          /* Jumps indicate optimized tail calls.  */
          if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
            return ftrace_new_tailcall (bfun, mfun, fun);
        }

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Update BFUN's source range with respect to the instruction at PC.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
                    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct btrace_insn *insn;

  insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
  insn->pc = pc;

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
                       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      for (;;)
        {
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              warning (_("Recorded trace may be corrupted around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          end = ftrace_update_function (gdbarch, end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          ftrace_update_insns (end, pc);
          ftrace_update_lines (end, pc);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          size = gdb_insn_length (gdbarch, pc);

          /* Make sure we terminate if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
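
/* A worked example of the normalization above (values hypothetical): if
   the trace produced function segments at levels -2, -1, and 0, then
   LEVEL ends up as -2 and btinfo->level becomes 2, so the segments are
   later presented at the normalized levels 0, 1, and 2.  */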

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace = NULL;
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  block = VEC_safe_push (btrace_block_s, btrace, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (&tp->btrace, btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   BTINFO is the old branch trace until the last stop.
   May modify BTRACE as well as the existing trace in BTINFO.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (VEC (btrace_block_s) **btrace,
                     const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  /* If we don't have trace, there's nothing to do.  */
  if (VEC_empty (btrace_block_s, *btrace))
    return 0;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, *btrace);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, *btrace) == 1)
    {
      VEC_pop (btrace_block_s, *btrace);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
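
/* A worked example of the stitch (addresses hypothetical): if the old
   trace ends with an instruction at pc 0x4005f0 and the delta trace's
   chronologically first block is [begin = 0, end = 0x400610], the code
   above rewrites that block to [0x4005f0, 0x400610] and pops the
   0x4005f0 instruction so that btrace_compute_ftrace re-adds it while
   processing the adjusted block.  */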

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  VEC (btrace_block_s) *btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btrace = NULL;
  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, btinfo);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!VEC_empty (btrace_block_s, btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;

  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  VEC (btrace_block_s) **btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;
  block = VEC_safe_push (btrace_block_s, *btrace, NULL);

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block->begin = *begin;
  block->end = *end;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
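
/* An example document accepted by the grammar above (addresses are
   hypothetical; blocks are listed with the most recent one first, per
   the ordering comment in btrace_stitch_trace):

     <btrace version="1.0">
       <block begin="0x400530" end="0x400547"/>
       <block begin="0x400500" end="0x40052b"/>
     </btrace>  */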

/* See btrace.h.  */

VEC (btrace_block_s) *
parse_xml_btrace (const char *buffer)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, &btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */

  return btrace;
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;
  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  /* The last instruction in the last function is the current instruction.
     We point to it - it is one past the end of the execution trace.  */
  length = VEC_length (btrace_insn_s, bfun->insn);

  it->function = bfun;
  it->index = length - 1;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* There is at least one instruction in this function segment.  */
          gdb_assert (index > 0);
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}
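
/* A minimal usage sketch (example_dump_insns is a hypothetical helper,
   not part of the original file): walk the instruction history front to
   back with the iterator functions above.  Assumes the thread's trace
   has already been fetched, e.g. via btrace_fetch.  */

static void ATTRIBUTE_UNUSED
example_dump_insns (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it, end;

  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  while (btrace_insn_cmp (&it, &end) < 0)
    {
      const struct btrace_insn *insn;

      /* Print the global instruction number and its address.  */
      insn = btrace_insn_get (&it);
      printf_unfiltered ("%u\t%s\n", btrace_insn_number (&it),
                         core_addr_to_string_nz (insn->pc));

      btrace_insn_next (&it, 1);
    }
}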

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    if (bfun->insn_offset <= number)
      break;

  if (bfun == NULL)
    return 0;

  end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
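
/* A matching sketch for the call history (example_dump_calls is a
   hypothetical helper, not part of the original file): print each
   function segment's number and name using the iterators above.  */

static void ATTRIBUTE_UNUSED
example_dump_calls (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it, end;

  btrace_call_begin (&it, btinfo);
  btrace_call_end (&end, btinfo);

  while (btrace_call_cmp (&it, &end) < 0)
    {
      const struct btrace_function *bfun;

      /* The iterator is not at the end, so the segment is non-NULL.  */
      bfun = btrace_call_get (&it);
      printf_unfiltered ("%u\t%s\n", btrace_call_number (&it),
                         ftrace_print_function_name (bfun));

      btrace_call_next (&it, 1);
    }
}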

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}