gdb/amd64-windows-tdep.c - gdb

Source code

  1. /* Copyright (C) 2009-2015 Free Software Foundation, Inc.

  2.    This file is part of GDB.

  3.    This program is free software; you can redistribute it and/or modify
  4.    it under the terms of the GNU General Public License as published by
  5.    the Free Software Foundation; either version 3 of the License, or
  6.    (at your option) any later version.

  7.    This program is distributed in the hope that it will be useful,
  8.    but WITHOUT ANY WARRANTY; without even the implied warranty of
  9.    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10.    GNU General Public License for more details.

  11.    You should have received a copy of the GNU General Public License
  12.    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

  13. #include "defs.h"
  14. #include "osabi.h"
  15. #include "amd64-tdep.h"
  16. #include "gdbtypes.h"
  17. #include "gdbcore.h"
  18. #include "regcache.h"
  19. #include "windows-tdep.h"
  20. #include "frame.h"
  21. #include "objfiles.h"
  22. #include "frame-unwind.h"
  23. #include "coff/internal.h"
  24. #include "coff/i386.h"
  25. #include "coff/pe.h"
  26. #include "libcoff.h"
  27. #include "value.h"

  28. /* The registers used to pass integer arguments during a function call.  */
  29. static int amd64_windows_dummy_call_integer_regs[] =
  30. {
  31.   AMD64_RCX_REGNUM,          /* %rcx */
  32.   AMD64_RDX_REGNUM,          /* %rdx */
  33.   AMD64_R8_REGNUM,           /* %r8 */
  34.   AMD64_R9_REGNUM            /* %r9 */
  35. };
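
For context, the Microsoft x64 calling convention assigns the first four
integer or pointer arguments to RCX, RDX, R8 and R9, in that order; later
arguments go on the stack.  A small standalone sketch of that mapping (the
helper name and the program are made up for illustration; nothing here is a
GDB API):

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>

static const char *
ms_x64_int_arg_reg (int slot)
{
  static const char *const regs[] = { "rcx", "rdx", "r8", "r9" };

  return (slot >= 0 && slot < 4) ? regs[slot] : "stack";
}

int
main (void)
{
  int i;

  for (i = 0; i < 6; i++)
    printf ("arg %d -> %s\n", i, ms_x64_int_arg_reg (i));
  return 0;
}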

  36. /* Return nonzero if an argument of type TYPE should be passed
  37.    via one of the integer registers.  */

  38. static int
  39. amd64_windows_passed_by_integer_register (struct type *type)
  40. {
  41.   switch (TYPE_CODE (type))
  42.     {
  43.       case TYPE_CODE_INT:
  44.       case TYPE_CODE_ENUM:
  45.       case TYPE_CODE_BOOL:
  46.       case TYPE_CODE_RANGE:
  47.       case TYPE_CODE_CHAR:
  48.       case TYPE_CODE_PTR:
  49.       case TYPE_CODE_REF:
  50.       case TYPE_CODE_STRUCT:
  51.       case TYPE_CODE_UNION:
  52.         return (TYPE_LENGTH (type) == 1
  53.                 || TYPE_LENGTH (type) == 2
  54.                 || TYPE_LENGTH (type) == 4
  55.                 || TYPE_LENGTH (type) == 8);

  56.       default:
  57.         return 0;
  58.     }
  59. }

  60. /* Return nonzero if an argument of type TYPE should be passed
  61.    via one of the XMM registers.  */

  62. static int
  63. amd64_windows_passed_by_xmm_register (struct type *type)
  64. {
  65.   return ((TYPE_CODE (type) == TYPE_CODE_FLT
  66.            || TYPE_CODE (type) == TYPE_CODE_DECFLOAT)
  67.           && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
  68. }

  69. /* Return non-zero iff an argument of the given TYPE should be passed
  70.    by pointer.  */

  71. static int
  72. amd64_windows_passed_by_pointer (struct type *type)
  73. {
  74.   if (amd64_windows_passed_by_integer_register (type))
  75.     return 0;

  76.   if (amd64_windows_passed_by_xmm_register (type))
  77.     return 0;

  78.   return 1;
  79. }
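
Taken together, the three predicates above implement the Microsoft x64 rule:
values of 1, 2, 4 or 8 bytes, including small structs, travel in integer
registers; 4- and 8-byte floating-point values travel in XMM registers;
everything else is passed by reference to a caller-made copy.  A minimal
standalone sketch of that rule, using made-up names and plain sizes instead
of GDB types:

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>

enum arg_class { ARG_INTEGER_REG, ARG_XMM_REG, ARG_BY_POINTER };

static enum arg_class
classify_argument (int is_float, unsigned int len)
{
  if (is_float && (len == 4 || len == 8))
    return ARG_XMM_REG;
  if (!is_float && (len == 1 || len == 2 || len == 4 || len == 8))
    return ARG_INTEGER_REG;
  return ARG_BY_POINTER;
}

int
main (void)
{
  printf ("int (4 bytes)     -> %d\n", classify_argument (0, 4));
  printf ("double (8 bytes)  -> %d\n", classify_argument (1, 8));
  printf ("struct (12 bytes) -> %d\n", classify_argument (0, 12));
  return 0;
}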

  80. /* For each argument that should be passed by pointer, reserve some
  81.    stack space, store a copy of the argument on the stack, and replace
  82.    the argument by its address.  Return the new Stack Pointer value.

  83.    NARGS is the number of arguments. ARGS is the array containing
  84.    the value of each argument.  SP is the value of the Stack Pointer.  */

  85. static CORE_ADDR
  86. amd64_windows_adjust_args_passed_by_pointer (struct value **args,
  87.                                              int nargs, CORE_ADDR sp)
  88. {
  89.   int i;

  90.   for (i = 0; i < nargs; i++)
  91.     if (amd64_windows_passed_by_pointer (value_type (args[i])))
  92.       {
  93.         struct type *type = value_type (args[i]);
  94.         const gdb_byte *valbuf = value_contents (args[i]);
  95.         const int len = TYPE_LENGTH (type);

  96.         /* Store a copy of that argument on the stack, aligned to
  97.            a 16-byte boundary, and then use the copy's address as
  98.            the argument.  */

  99.         sp -= len;
  100.         sp &= ~0xf;
  101.         write_memory (sp, valbuf, len);

  102.         args[i]
  103.           = value_addr (value_from_contents_and_address (type, valbuf, sp));
  104.       }

  105.   return sp;
  106. }
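
The two-step adjustment above (subtract the length, then clear the low four
bits of SP) always rounds the copy's address down to a 16-byte boundary.  A
tiny worked example with a hypothetical stack pointer value:

/* Worked example, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t sp = 0x7ffffffe0000ULL;   /* hypothetical stack pointer */
  unsigned int len = 24;             /* e.g. a 24-byte struct copy */

  sp -= len;                         /* 0x7ffffffdffe8 */
  sp &= ~(uint64_t) 0xf;             /* 0x7ffffffdffe0, 16-byte aligned */
  printf ("copy stored at 0x%llx\n", (unsigned long long) sp);
  return 0;
}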

  107. /* Store the value of ARG in register REGNO (right-justified).
  108.    REGCACHE is the register cache.  */

  109. static void
  110. amd64_windows_store_arg_in_reg (struct regcache *regcache,
  111.                                 struct value *arg, int regno)
  112. {
  113.   struct type *type = value_type (arg);
  114.   const gdb_byte *valbuf = value_contents (arg);
  115.   gdb_byte buf[8];

  116.   gdb_assert (TYPE_LENGTH (type) <= 8);
  117.   memset (buf, 0, sizeof buf);
  118.   memcpy (buf, valbuf, min (TYPE_LENGTH (type), 8));
  119.   regcache_cooked_write (regcache, regno, buf);
  120. }

  121. /* Push the arguments for an inferior function call, and return
  122.    the updated value of the SP (Stack Pointer).

  123.    All arguments are identical to the arguments used in
  124.    amd64_windows_push_dummy_call.  */

  125. static CORE_ADDR
  126. amd64_windows_push_arguments (struct regcache *regcache, int nargs,
  127.                               struct value **args, CORE_ADDR sp,
  128.                               int struct_return)
  129. {
  130.   int reg_idx = 0;
  131.   int i;
  132.   struct value **stack_args = alloca (nargs * sizeof (struct value *));
  133.   int num_stack_args = 0;
  134.   int num_elements = 0;
  135.   int element = 0;

  136.   /* First, handle the arguments passed by pointer.

  137.      These arguments are replaced by pointers to a copy we are making
  138.      in inferior memory.  So use a copy of the ARGS table, to avoid
  139.      modifying the original one.  */
  140.   {
  141.     struct value **args1 = alloca (nargs * sizeof (struct value *));

  142.     memcpy (args1, args, nargs * sizeof (struct value *));
  143.     sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
  144.     args = args1;
  145.   }

  146.   /* Reserve a register for the "hidden" argument.  */
  147.   if (struct_return)
  148.     reg_idx++;

  149.   for (i = 0; i < nargs; i++)
  150.     {
  151.       struct type *type = value_type (args[i]);
  152.       int len = TYPE_LENGTH (type);
  153.       int on_stack_p = 1;

  154.       if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
  155.         {
  156.           if (amd64_windows_passed_by_integer_register (type))
  157.             {
  158.               amd64_windows_store_arg_in_reg
  159.                 (regcache, args[i],
  160.                  amd64_windows_dummy_call_integer_regs[reg_idx]);
  161.               on_stack_p = 0;
  162.               reg_idx++;
  163.             }
  164.           else if (amd64_windows_passed_by_xmm_register (type))
  165.             {
  166.               amd64_windows_store_arg_in_reg
  167.                 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
  168.               /* In case of varargs, these parameters must also be
  169.                  passed via the integer registers.  */
  170.               amd64_windows_store_arg_in_reg
  171.                 (regcache, args[i],
  172.                  amd64_windows_dummy_call_integer_regs[reg_idx]);
  173.               on_stack_p = 0;
  174.               reg_idx++;
  175.             }
  176.         }

  177.       if (on_stack_p)
  178.         {
  179.           num_elements += ((len + 7) / 8);
  180.           stack_args[num_stack_args++] = args[i];
  181.         }
  182.     }

  183.   /* Allocate space for the arguments on the stack, keeping it
  184.      aligned on a 16-byte boundary.  */
  185.   sp -= num_elements * 8;
  186.   sp &= ~0xf;

  187.   /* Write out the arguments to the stack.  */
  188.   for (i = 0; i < num_stack_args; i++)
  189.     {
  190.       struct type *type = value_type (stack_args[i]);
  191.       const gdb_byte *valbuf = value_contents (stack_args[i]);

  192.       write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
  193.       element += ((TYPE_LENGTH (type) + 7) / 8);
  194.     }

  195.   return sp;
  196. }

  197. /* Implement the "push_dummy_call" gdbarch method.  */

  198. static CORE_ADDR
  199. amd64_windows_push_dummy_call
  200.   (struct gdbarch *gdbarch, struct value *function,
  201.    struct regcache *regcache, CORE_ADDR bp_addr,
  202.    int nargs, struct value **args,
  203.    CORE_ADDR sp, int struct_return, CORE_ADDR struct_addr)
  204. {
  205.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  206.   gdb_byte buf[8];

  207.   /* Pass arguments.  */
  208.   sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
  209.                                      struct_return);

  210.   /* Pass the "hidden" argument.  */
  211.   if (struct_return)
  212.     {
  213.       /* The "hidden" argument is passed through the first argument
  214.          register.  */
  215.       const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];

  216.       store_unsigned_integer (buf, 8, byte_order, struct_addr);
  217.       regcache_cooked_write (regcache, arg_regnum, buf);
  218.     }

  219.   /* Reserve some memory on the stack for the integer-parameter
  220.      registers, as required by the ABI.  */
  221.   sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;

  222.   /* Store return address.  */
  223.   sp -= 8;
  224.   store_unsigned_integer (buf, 8, byte_order, bp_addr);
  225.   write_memory (sp, buf, 8);

  226.   /* Update the stack pointer...  */
  227.   store_unsigned_integer (buf, 8, byte_order, sp);
  228.   regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  229.   /* ...and fake a frame pointer.  */
  230.   regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  231.   return sp + 16;
  232. }
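
For a call with no on-stack arguments, the net effect of the last few steps
above is a stack image holding the 32-byte register "shadow" area the ABI
requires the caller to reserve, with the return address pushed just below
it.  A small worked example of those offsets, starting from a hypothetical
SP (plain arithmetic, no GDB calls):

/* Worked example, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t sp = 0x7ffffffe0000ULL;   /* hypothetical SP after arguments */

  sp -= 4 * 8;                       /* shadow space for rcx/rdx/r8/r9 */
  sp -= 8;                           /* slot for the return address */

  printf ("final RSP           : 0x%llx\n", (unsigned long long) sp);
  printf ("return address slot : 0x%llx\n", (unsigned long long) sp);
  printf ("shadow space        : 0x%llx .. 0x%llx\n",
          (unsigned long long) (sp + 8),
          (unsigned long long) (sp + 8 + 31));
  return 0;
}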

  233. /* Implement the "return_value" gdbarch method for amd64-windows.  */

  234. static enum return_value_convention
  235. amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
  236.                             struct type *type, struct regcache *regcache,
  237.                             gdb_byte *readbuf, const gdb_byte *writebuf)
  238. {
  239.   int len = TYPE_LENGTH (type);
  240.   int regnum = -1;

  241.   /* See if our value is returned through a register.  If it is, then
  242.      store the associated register number in REGNUM.  */
  243.   switch (TYPE_CODE (type))
  244.     {
  245.       case TYPE_CODE_FLT:
  246.       case TYPE_CODE_DECFLOAT:
  247.         /* __m128, __m128i, __m128d, floats, and doubles are returned
  248.            via XMM0.  */
  249.         if (len == 4 || len == 8 || len == 16)
  250.           regnum = AMD64_XMM0_REGNUM;
  251.         break;
  252.       default:
  253.         /* All other values that are 1, 2, 4 or 8 bytes long are returned
  254.            via RAX.  */
  255.         if (len == 1 || len == 2 || len == 4 || len == 8)
  256.           regnum = AMD64_RAX_REGNUM;
  257.         break;
  258.     }

  259.   if (regnum < 0)
  260.     {
  261.       /* RAX contains the address where the return value has been stored.  */
  262.       if (readbuf)
  263.         {
  264.           ULONGEST addr;

  265.           regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
  266.           read_memory (addr, readbuf, TYPE_LENGTH (type));
  267.         }
  268.       return RETURN_VALUE_ABI_RETURNS_ADDRESS;
  269.     }
  270.   else
  271.     {
  272.       /* Extract the return value from the register where it was stored.  */
  273.       if (readbuf)
  274.         regcache_raw_read_part (regcache, regnum, 0, len, readbuf);
  275.       if (writebuf)
  276.         regcache_raw_write_part (regcache, regnum, 0, len, writebuf);
  277.       return RETURN_VALUE_REGISTER_CONVENTION;
  278.     }
  279. }
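
In short: floats, doubles and 16-byte vector types come back in XMM0, other
values of 1, 2, 4 or 8 bytes come back in RAX, and anything else is written
to caller-provided storage whose address is left in RAX.  A standalone
sketch of that decision (the helper name is made up):

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>

static const char *
ms_x64_return_location (int is_float, unsigned int len)
{
  if (is_float && (len == 4 || len == 8 || len == 16))
    return "xmm0";
  if (!is_float && (len == 1 || len == 2 || len == 4 || len == 8))
    return "rax";
  return "memory, address left in rax";
}

int
main (void)
{
  printf ("double         -> %s\n", ms_x64_return_location (1, 8));
  printf ("int            -> %s\n", ms_x64_return_location (0, 4));
  printf ("13-byte struct -> %s\n", ms_x64_return_location (0, 13));
  return 0;
}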

  280. /* Check that the code pointed to by PC corresponds to a call to
  281.    __main, skip it if so.  Return PC otherwise.  */

  282. static CORE_ADDR
  283. amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  284. {
  285.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  286.   gdb_byte op;

  287.   target_read_memory (pc, &op, 1);
  288.   if (op == 0xe8)
  289.     {
  290.       gdb_byte buf[4];

  291.       if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
  292.         {
  293.           struct bound_minimal_symbol s;
  294.           CORE_ADDR call_dest;

  295.           call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
  296.           s = lookup_minimal_symbol_by_pc (call_dest);
  297.           if (s.minsym != NULL
  298.               && MSYMBOL_LINKAGE_NAME (s.minsym) != NULL
  299.               && strcmp (MSYMBOL_LINKAGE_NAME (s.minsym), "__main") == 0)
  300.             pc += 5;
  301.         }
  302.     }

  303.   return pc;
  304. }
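
The check above relies on the encoding of the near call: an 0xE8 opcode
followed by a signed 32-bit displacement, with the destination computed
relative to the end of the 5-byte instruction.  A worked example on made-up
bytes and a made-up address:

/* Worked example, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t pc = 0x140001000ULL;
  /* e8 fb 00 00 00   ->   call <pc + 5 + 0xfb>  */
  unsigned char insn[5] = { 0xe8, 0xfb, 0x00, 0x00, 0x00 };
  int32_t rel32 = (int32_t) ((uint32_t) insn[1]
                             | (uint32_t) insn[2] << 8
                             | (uint32_t) insn[3] << 16
                             | (uint32_t) insn[4] << 24);

  /* Prints 0x140001100.  */
  printf ("call destination: 0x%llx\n",
          (unsigned long long) (pc + 5 + rel32));
  return 0;
}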

  305. struct amd64_windows_frame_cache
  306. {
  307.   /* ImageBase for the module.  */
  308.   CORE_ADDR image_base;

  309.   /* Function start and end rva.  */
  310.   CORE_ADDR start_rva;
  311.   CORE_ADDR end_rva;

  312.   /* Next instruction to be executed.  */
  313.   CORE_ADDR pc;

  314.   /* Current sp.  */
  315.   CORE_ADDR sp;

  316.   /* Address of saved integer and xmm registers.  */
  317.   CORE_ADDR prev_reg_addr[16];
  318.   CORE_ADDR prev_xmm_addr[16];

  319.   /* These next two fields are set only for machine info frames.  */

  320.   /* Likewise for RIP.  */
  321.   CORE_ADDR prev_rip_addr;

  322.   /* Likewise for RSP.  */
  323.   CORE_ADDR prev_rsp_addr;

  324.   /* Address of the previous frame.  */
  325.   CORE_ADDR prev_sp;
  326. };

  327. /* Convert a Windows register number to gdb.  */
  328. static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
  329. {
  330.   AMD64_RAX_REGNUM,
  331.   AMD64_RCX_REGNUM,
  332.   AMD64_RDX_REGNUM,
  333.   AMD64_RBX_REGNUM,
  334.   AMD64_RSP_REGNUM,
  335.   AMD64_RBP_REGNUM,
  336.   AMD64_RSI_REGNUM,
  337.   AMD64_RDI_REGNUM,
  338.   AMD64_R8_REGNUM,
  339.   AMD64_R9_REGNUM,
  340.   AMD64_R10_REGNUM,
  341.   AMD64_R11_REGNUM,
  342.   AMD64_R12_REGNUM,
  343.   AMD64_R13_REGNUM,
  344.   AMD64_R14_REGNUM,
  345.   AMD64_R15_REGNUM
  346. };

  347. /* Return TRUE iff PC is in the range of the function corresponding to
  348.    CACHE.  */

  349. static int
  350. pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
  351. {
  352.   return (pc >= cache->image_base + cache->start_rva
  353.           && pc < cache->image_base + cache->end_rva);
  354. }

  355. /* Try to recognize and decode an epilogue sequence.

  356.    Return -1 if we fail to read the instructions for any reason.
  357.    Return 1 if an epilogue sequence was recognized, 0 otherwise.  */

  358. static int
  359. amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
  360.                                      struct amd64_windows_frame_cache *cache)
  361. {
  362.   /* According to MSDN an epilogue "must consist of either an add RSP,constant
  363.      or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
  364.      register pops and a return or a jmp".

  365.      Furthermore, according to RtlVirtualUnwind, the complete list of
  366.      epilog markers is:
  367.      - ret                      [c3]
  368.      - ret n                    [c2 imm16]
  369.      - rep ret                  [f3 c3]
  370.      - jmp imm8 | imm32         [eb rel8] or [e9 rel32]
  371.      - jmp qword ptr imm32                 - not handled
  372.      - rex.w jmp reg            [4X ff eY]
  373.   */

  374.   CORE_ADDR pc = cache->pc;
  375.   CORE_ADDR cur_sp = cache->sp;
  376.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  377.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  378.   gdb_byte op;
  379.   gdb_byte rex;

  380.   /* We don't care about the instruction deallocating the frame:
  381.      if it hasn't been executed, the pc is still in the body,
  382.      if it has been executed, the following epilog decoding will work.  */

  383.   /* First decode:
  384.      -  pop reg                 [41 58-5f] or [58-5f].  */

  385.   while (1)
  386.     {
  387.       /* Read opcode. */
  388.       if (target_read_memory (pc, &op, 1) != 0)
  389.         return -1;

  390.       if (op >= 0x40 && op <= 0x4f)
  391.         {
  392.           /* REX prefix.  */
  393.           rex = op;

  394.           /* Read opcode. */
  395.           if (target_read_memory (pc + 1, &op, 1) != 0)
  396.             return -1;
  397.         }
  398.       else
  399.         rex = 0;

  400.       if (op >= 0x58 && op <= 0x5f)
  401.         {
  402.           /* pop reg: record its save slot, then step past the insn.  */
  403.           gdb_byte reg = (op & 0x07) | ((rex & 1) << 3);

  404.           cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
  405.           cur_sp += 8;
                 pc += rex ? 2 : 1;
  406.         }
  407.       else
  408.         break;

  409.       /* Allow the user to break this loop.  This shouldn't happen as the
  410.          number of consecutive pops should be small.  */
  411.       QUIT;
  412.     }

  413.   /* Then decode the marker.  */

  414.   /* Read opcode.  */
  415.   if (target_read_memory (pc, &op, 1) != 0)
  416.     return -1;

  417.   switch (op)
  418.     {
  419.     case 0xc3:
  420.       /* Ret.  */
  421.       cache->prev_rip_addr = cur_sp;
  422.       cache->prev_sp = cur_sp + 8;
  423.       return 1;

  424.     case 0xeb:
  425.       {
  426.         /* jmp rel8  */
  427.         gdb_byte rel8;
  428.         CORE_ADDR npc;

  429.         if (target_read_memory (pc + 1, &rel8, 1) != 0)
  430.           return -1;
  431.         npc = pc + 2 + (signed char) rel8;

  432.         /* If the jump is within the function, then this is not a marker,
  433.            otherwise this is a tail-call.  */
  434.         return !pc_in_range (npc, cache);
  435.       }

  436.     case 0xe9:
  437.       {
  438.         /* jmp rel32  */
  439.         gdb_byte rel32[4];
  440.         CORE_ADDR npc;

  441.         if (target_read_memory (pc + 1, rel32, 4) != 0)
  442.           return -1;
  443.         npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);

  444.         /* If the jump is within the function, then this is not a marker,
  445.            otherwise this is a tail-call.  */
  446.         return !pc_in_range (npc, cache);
  447.       }

  448.     case 0xc2:
  449.       {
  450.         /* ret n  */
  451.         gdb_byte imm16[2];

  452.         if (target_read_memory (pc + 1, imm16, 2) != 0)
  453.           return -1;
  454.         cache->prev_rip_addr = cur_sp;
  455.         cache->prev_sp = cur_sp + 8
  456.           + extract_unsigned_integer (imm16, 2, byte_order);
  457.         return 1;
  458.       }

  459.     case 0xf3:
  460.       {
  461.         /* rep; ret  */
  462.         gdb_byte op1;

  463.         if (target_read_memory (pc + 1, &op1, 1) != 0)
  464.           return -1;
  465.         if (op1 != 0xc3)
  466.           return 0;

  467.         cache->prev_rip_addr = cur_sp;
  468.         cache->prev_sp = cur_sp + 8;
  469.         return 1;
  470.       }

  471.     case 0x40:
  472.     case 0x41:
  473.     case 0x42:
  474.     case 0x43:
  475.     case 0x44:
  476.     case 0x45:
  477.     case 0x46:
  478.     case 0x47:
  479.     case 0x48:
  480.     case 0x49:
  481.     case 0x4a:
  482.     case 0x4b:
  483.     case 0x4c:
  484.     case 0x4d:
  485.     case 0x4e:
  486.     case 0x4f:
  487.       /* Got a REX prefix, read next byte.  */
  488.       rex = op;
  489.       if (target_read_memory (pc + 1, &op, 1) != 0)
  490.         return -1;

  491.       if (op == 0xff)
  492.         {
  493.           /* rex jmp reg  */
  494.           gdb_byte op1;
  495.           unsigned int reg;
  496.           gdb_byte buf[8];

  497.           if (target_read_memory (pc + 2, &op1, 1) != 0)
  498.             return -1;
  499.           return (op1 & 0xf8) == 0xe0;
  500.         }
  501.       else
  502.         return 0;

  503.     default:
  504.       /* Not REX, so unknown.  */
  505.       return 0;
  506.     }
  507. }
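
To see the decoding above in action, here is a standalone sketch run over a
made-up byte buffer rather than target memory.  The bytes 41 5f, 41 5e, 5d,
c3 decode as pop %r15; pop %r14; pop %rbp; ret: each pop raises the notional
SP by 8, and the final ret exposes the saved return address.

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  unsigned char code[] = { 0x41, 0x5f, 0x41, 0x5e, 0x5d, 0xc3 };
  uint64_t sp = 0x1000;
  size_t i = 0;

  while (i < sizeof (code))
    {
      unsigned char rex = 0;
      unsigned char op = code[i];

      if (op >= 0x40 && op <= 0x4f)
        {
          rex = op;                  /* optional REX prefix */
          op = code[++i];
        }
      if (op < 0x58 || op > 0x5f)
        break;

      /* pop reg: REX.B selects r8-r15.  */
      printf ("pop reg #%d, restored from 0x%llx\n",
              (op & 0x07) | ((rex & 1) << 3), (unsigned long long) sp);
      sp += 8;
      i++;
    }
  if (i < sizeof (code) && code[i] == 0xc3)
    printf ("ret: saved RIP at 0x%llx, caller SP 0x%llx\n",
            (unsigned long long) sp, (unsigned long long) (sp + 8));
  return 0;
}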

  508. /* Decode and execute unwind insns at UNWIND_INFO.  */

  509. static void
  510. amd64_windows_frame_decode_insns (struct frame_info *this_frame,
  511.                                   struct amd64_windows_frame_cache *cache,
  512.                                   CORE_ADDR unwind_info)
  513. {
  514.   CORE_ADDR save_addr = 0;
  515.   CORE_ADDR cur_sp = cache->sp;
  516.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  517.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  518.   int j;

  519.   for (j = 0; ; j++)
  520.     {
  521.       struct external_pex64_unwind_info ex_ui;
  522.       /* There are at most 256 16-bit unwind insns.  */
  523.       gdb_byte insns[2 * 256];
  524.       gdb_byte *p;
  525.       gdb_byte *end_insns;
  526.       unsigned char codes_count;
  527.       unsigned char frame_reg;
  528.       unsigned char frame_off;

  529.       /* Read and decode header.  */
  530.       if (target_read_memory (cache->image_base + unwind_info,
  531.                               (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
  532.         return;

  533.       if (frame_debug)
  534.         fprintf_unfiltered
  535.           (gdb_stdlog,
  536.            "amd64_windows_frame_decode_insns: "
  537.            "%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x\n",
  538.            paddress (gdbarch, unwind_info),
  539.            ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
  540.            ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);

  541.       /* Check version.  */
  542.       if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
  543.           && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
  544.         return;

  545.       if (j == 0
  546.           && (cache->pc >=
  547.               cache->image_base + cache->start_rva + ex_ui.SizeOfPrologue))
  548.         {
  549.           /* Not in the prologue.  We want to detect if the PC points to an
  550.              epilogue. If so, the epilogue detection+decoding function is
  551.              sufficient.  Otherwise, the unwinder will consider that the PC
  552.              is in the body of the function and will need to decode unwind
  553.              info.  */
  554.           if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
  555.             return;

  556.           /* Not in an epilog.  Clear possible side effects.  */
  557.           memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
  558.         }

  559.       codes_count = ex_ui.CountOfCodes;
  560.       frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);

  561.       if (frame_reg != 0)
  562.         {
  563.           /* According to msdn:
  564.              If an FP reg is used, then any unwind code taking an offset must
  565.              only be used after the FP reg is established in the prolog.  */
  566.           gdb_byte buf[8];
  567.           int frreg = amd64_windows_w2gdb_regnum[frame_reg];

  568.           get_frame_register (this_frame, frreg, buf);
  569.           save_addr = extract_unsigned_integer (buf, 8, byte_order);

  570.           if (frame_debug)
  571.             fprintf_unfiltered (gdb_stdlog, "   frame_reg=%s, val=%s\n",
  572.                                 gdbarch_register_name (gdbarch, frreg),
  573.                                 paddress (gdbarch, save_addr));
  574.         }

  575.       /* Read opcodes.  */
  576.       if (codes_count != 0
  577.           && target_read_memory (cache->image_base + unwind_info
  578.                                  + sizeof (ex_ui),
  579.                                  insns, codes_count * 2) != 0)
  580.         return;

  581.       end_insns = &insns[codes_count * 2];
  582.       p = insns;

  583.       /* Skip opcode 6 of version 2.  This opcode is not documented.  */
  584.       if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
  585.         {
  586.           for (; p < end_insns; p += 2)
  587.             if (PEX64_UNWCODE_CODE (p[1]) != 6)
  588.               break;
  589.         }

  590.       for (; p < end_insns; p += 2)
  591.         {
  592.           int reg;

  593.           if (frame_debug)
  594.             fprintf_unfiltered
  595.               (gdb_stdlog, "   op #%u: off=0x%02x, insn=0x%02x\n",
  596.                (unsigned) (p - insns), p[0], p[1]);

  597.           /* Virtually execute the operation.  */
  598.           if (cache->pc >= cache->image_base + cache->start_rva + p[0])
  599.             {
  600.               /* If no frame register is defined, the current value of
  601.                  rsp is used instead.  */
  602.               if (frame_reg == 0)
  603.                 save_addr = cur_sp;

  604.               switch (PEX64_UNWCODE_CODE (p[1]))
  605.                 {
  606.                 case UWOP_PUSH_NONVOL:
  607.                   /* Push pre-decrements RSP.  */
  608.                   reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  609.                   cache->prev_reg_addr[reg] = cur_sp;
  610.                   cur_sp += 8;
  611.                   break;
  612.                 case UWOP_ALLOC_LARGE:
  613.                   if (PEX64_UNWCODE_INFO (p[1]) == 0)
  614.                     cur_sp +=
  615.                       8 * extract_unsigned_integer (p + 2, 2, byte_order);
  616.                   else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  617.                     cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
  618.                   else
  619.                     return;
  620.                   break;
  621.                 case UWOP_ALLOC_SMALL:
  622.                   cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
  623.                   break;
  624.                 case UWOP_SET_FPREG:
  625.                   cur_sp = save_addr
  626.                     - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
  627.                   break;
  628.                 case UWOP_SAVE_NONVOL:
  629.                   reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  630.                   cache->prev_reg_addr[reg] = save_addr
  631.                     - 8 * extract_unsigned_integer (p + 2, 2, byte_order);
  632.                   break;
  633.                 case UWOP_SAVE_NONVOL_FAR:
  634.                   reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  635.                   cache->prev_reg_addr[reg] = save_addr
  636.                     - 8 * extract_unsigned_integer (p + 2, 4, byte_order);
  637.                   break;
  638.                 case UWOP_SAVE_XMM128:
  639.                   cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
  640.                     save_addr
  641.                     - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
  642.                   break;
  643.                 case UWOP_SAVE_XMM128_FAR:
  644.                   cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
  645.                     save_addr
  646.                     - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
  647.                   break;
  648.                 case UWOP_PUSH_MACHFRAME:
  649.                   if (PEX64_UNWCODE_INFO (p[1]) == 0)
  650.                     {
  651.                       cache->prev_rip_addr = cur_sp + 0;
  652.                       cache->prev_rsp_addr = cur_sp + 24;
  653.                       cur_sp += 40;
  654.                     }
  655.                   else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  656.                     {
  657.                       cache->prev_rip_addr = cur_sp + 8;
  658.                       cache->prev_rsp_addr = cur_sp + 32;
  659.                       cur_sp += 48;
  660.                     }
  661.                   else
  662.                     return;
  663.                   break;
  664.                 default:
  665.                   return;
  666.                 }
  667.             }

  668.           /* Adjust with the length of the opcode.  */
  669.           switch (PEX64_UNWCODE_CODE (p[1]))
  670.             {
  671.             case UWOP_PUSH_NONVOL:
  672.             case UWOP_ALLOC_SMALL:
  673.             case UWOP_SET_FPREG:
  674.             case UWOP_PUSH_MACHFRAME:
  675.               break;
  676.             case UWOP_ALLOC_LARGE:
  677.               if (PEX64_UNWCODE_INFO (p[1]) == 0)
  678.                 p += 2;
  679.               else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  680.                 p += 4;
  681.               else
  682.                 return;
  683.               break;
  684.             case UWOP_SAVE_NONVOL:
  685.             case UWOP_SAVE_XMM128:
  686.               p += 2;
  687.               break;
  688.             case UWOP_SAVE_NONVOL_FAR:
  689.             case UWOP_SAVE_XMM128_FAR:
  690.               p += 4;
  691.               break;
  692.             default:
  693.               return;
  694.             }
  695.         }
  696.       if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
  697.         break;
  698.       else
  699.         {
  700.           /* Read the chained unwind info.  */
  701.           struct external_pex64_runtime_function d;
  702.           CORE_ADDR chain_vma;

  703.           chain_vma = cache->image_base + unwind_info
  704.             + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;

  705.           if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
  706.             return;

  707.           cache->start_rva =
  708.             extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  709.           cache->end_rva =
  710.             extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  711.           unwind_info =
  712.             extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);

  713.           if (frame_debug)
  714.             fprintf_unfiltered
  715.               (gdb_stdlog,
  716.                "amd64_windows_frame_decode_insns (next in chain):"
  717.                " unwind_data=%s, start_rva=%s, end_rva=%s\n",
  718.                paddress (gdbarch, unwind_info),
  719.                paddress (gdbarch, cache->start_rva),
  720.                paddress (gdbarch, cache->end_rva));
  721.         }

  722.       /* Allow the user to break this loop.  */
  723.       QUIT;
  724.     }
  725.   /* PC is saved by the call.  */
  726.   if (cache->prev_rip_addr == 0)
  727.     cache->prev_rip_addr = cur_sp;
  728.   cache->prev_sp = cur_sp + 8;

  729.   if (frame_debug)
  730.     fprintf_unfiltered (gdb_stdlog, "   prev_sp: %s, prev_pc @%s\n",
  731.                         paddress (gdbarch, cache->prev_sp),
  732.                         paddress (gdbarch, cache->prev_rip_addr));
  733. }
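
As a standalone illustration of the opcode format consumed above (the data
below is made up, not read from any target): each unwind code is two bytes,
a prologue offset followed by a byte whose low nibble is the operation and
whose high nibble is its info field, and the codes are stored with the last
prologue instruction first.  The array below encodes a prologue of
push %rbp; sub $0x40,%rsp and replays it backwards:

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

#define UWOP_PUSH_NONVOL 0
#define UWOP_ALLOC_SMALL 2

int
main (void)
{
  /* Pairs of { prologue offset, (info << 4) | opcode }.  */
  unsigned char codes[] =
    { 0x05, (7 << 4) | UWOP_ALLOC_SMALL,    /* sub $0x40,%rsp */
      0x01, (5 << 4) | UWOP_PUSH_NONVOL };  /* push %rbp */
  uint64_t cur_sp = 0x1000;
  size_t i;

  for (i = 0; i < sizeof (codes); i += 2)
    {
      unsigned char op = codes[i + 1] & 0x0f;
      unsigned char info = codes[i + 1] >> 4;

      if (op == UWOP_ALLOC_SMALL)
        cur_sp += 8 + 8 * info;       /* undo an 8 * (info + 1) byte alloc */
      else if (op == UWOP_PUSH_NONVOL)
        {
          printf ("reg #%d saved at 0x%llx\n", info,
                  (unsigned long long) cur_sp);
          cur_sp += 8;
        }
    }
  printf ("SP at function entry: 0x%llx\n", (unsigned long long) cur_sp);
  return 0;
}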

  734. /* Find SEH unwind info for PC, returning 0 on success.

  735.    UNWIND_INFO is set to the rva of the unwind info, IMAGE_BASE
  736.    to the base address of the corresponding image, and START_RVA
  737.    to the rva of the function containing PC.  */

  738. static int
  739. amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
  740.                                 CORE_ADDR *unwind_info,
  741.                                 CORE_ADDR *image_base,
  742.                                 CORE_ADDR *start_rva,
  743.                                 CORE_ADDR *end_rva)
  744. {
  745.   struct obj_section *sec;
  746.   pe_data_type *pe;
  747.   IMAGE_DATA_DIRECTORY *dir;
  748.   struct objfile *objfile;
  749.   unsigned long lo, hi;
  750.   CORE_ADDR base;
  751.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  752.   /* Get the corresponding exception directory.  */
  753.   sec = find_pc_section (pc);
  754.   if (sec == NULL)
  755.     return -1;
  756.   objfile = sec->objfile;
  757.   pe = pe_data (sec->objfile->obfd);
  758.   dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];

  759.   base = pe->pe_opthdr.ImageBase
  760.     + ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
  761.   *image_base = base;

  762.   /* Find the entry.

  763.      Note: This does not handle dynamically added entries (for JIT
  764.      engines).  For this, we would need to ask the kernel directly,
  765.      which means getting some info from the native layer.  For the
  766.      rest of the code, however, it's probably faster to search
  767.      the entry ourselves.  */
  768.   lo = 0;
  769.   hi = dir->Size / sizeof (struct external_pex64_runtime_function);
  770.   *unwind_info = 0;
  771.   while (lo <= hi)
  772.     {
  773.       unsigned long mid = lo + (hi - lo) / 2;
  774.       struct external_pex64_runtime_function d;
  775.       CORE_ADDR sa, ea;

  776.       if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
  777.                               (gdb_byte *) &d, sizeof (d)) != 0)
  778.         return -1;

  779.       sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  780.       ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  781.       if (pc < base + sa)
  782.         hi = mid - 1;
  783.       else if (pc >= base + ea)
  784.         lo = mid + 1;
  785.       else if (pc >= base + sa && pc < base + ea)
  786.         {
  787.           /* Got it.  */
  788.           *start_rva = sa;
  789.           *end_rva = ea;
  790.           *unwind_info =
  791.             extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
  792.           break;
  793.         }
  794.       else
  795.         break;
  796.     }

  797.   if (frame_debug)
  798.     fprintf_unfiltered
  799.       (gdb_stdlog,
  800.        "amd64_windows_find_unwind_info:  image_base=%s, unwind_data=%s\n",
  801.        paddress (gdbarch, base), paddress (gdbarch, *unwind_info));

  802.   if (*unwind_info & 1)
  803.     {
  804.       /* Unofficially documented unwind info redirection, when UNWIND_INFO
  805.          address is odd (http://www.codemachine.com/article_x64deepdive.html).
  806.       */
  807.       struct external_pex64_runtime_function d;
  808.       CORE_ADDR sa, ea;

  809.       if (target_read_memory (base + (*unwind_info & ~1),
  810.                               (gdb_byte *) &d, sizeof (d)) != 0)
  811.         return -1;

  812.       *start_rva =
  813.         extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  814.       *end_rva = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  815.       *unwind_info =
  816.         extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);

  817.     }
  818.   return 0;
  819. }
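
The loop above is a binary search of the .pdata exception directory, whose
entries each record a begin rva, an end rva and the rva of the unwind data.
A standalone sketch of the same search over an in-memory table (the struct
and the numbers are made up):

/* Illustrative sketch, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

struct func_range { uint32_t begin_rva, end_rva; };

static int
find_range (const struct func_range *table, int count, uint32_t rva)
{
  int lo = 0, hi = count - 1;

  while (lo <= hi)
    {
      int mid = lo + (hi - lo) / 2;

      if (rva < table[mid].begin_rva)
        hi = mid - 1;
      else if (rva >= table[mid].end_rva)
        lo = mid + 1;
      else
        return mid;                  /* begin_rva <= rva < end_rva */
    }
  return -1;
}

int
main (void)
{
  struct func_range table[] = { { 0x1000, 0x1050 },
                                { 0x1050, 0x1200 },
                                { 0x1400, 0x1480 } };

  printf ("%d\n", find_range (table, 3, 0x1100));   /* prints 1 */
  printf ("%d\n", find_range (table, 3, 0x1300));   /* prints -1 */
  return 0;
}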

  820. /* Fill THIS_CACHE using the native amd64-windows unwinding data
  821.    for THIS_FRAME.  */

  822. static struct amd64_windows_frame_cache *
  823. amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
  824. {
  825.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  826.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  827.   struct amd64_windows_frame_cache *cache;
  828.   gdb_byte buf[8];
  829.   struct obj_section *sec;
  830.   pe_data_type *pe;
  831.   IMAGE_DATA_DIRECTORY *dir;
  832.   CORE_ADDR image_base;
  833.   CORE_ADDR pc;
  834.   struct objfile *objfile;
  835.   unsigned long lo, hi;
  836.   CORE_ADDR unwind_info = 0;

  837.   if (*this_cache)
  838.     return *this_cache;

  839.   cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
  840.   *this_cache = cache;

  841.   /* Get current PC and SP.  */
  842.   pc = get_frame_pc (this_frame);
  843.   get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  844.   cache->sp = extract_unsigned_integer (buf, 8, byte_order);
  845.   cache->pc = pc;

  846.   if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
  847.                                       &cache->image_base,
  848.                                       &cache->start_rva,
  849.                                       &cache->end_rva))
  850.     return cache;

  851.   if (unwind_info == 0)
  852.     {
  853.       /* Assume a leaf function.  */
  854.       cache->prev_sp = cache->sp + 8;
  855.       cache->prev_rip_addr = cache->sp;
  856.     }
  857.   else
  858.     {
  859.       /* Decode unwind insns to compute saved addresses.  */
  860.       amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
  861.     }
  862.   return cache;
  863. }
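
The leaf-function fallback above is the simplest rule on this ABI: with no
unwind info, the function has not moved RSP, so the return address sits
right at the current SP and the caller's SP is 8 bytes above it.  A tiny
worked example with a hypothetical RSP value:

/* Worked example, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t sp = 0x7ffffffe0000ULL;   /* hypothetical RSP in the leaf */

  printf ("saved RIP slot: 0x%llx\n", (unsigned long long) sp);
  printf ("caller's RSP  : 0x%llx\n", (unsigned long long) (sp + 8));
  return 0;
}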

  864. /* Implement the "prev_register" method of struct frame_unwind
  865.    using the standard Windows x64 SEH info.  */

  866. static struct value *
  867. amd64_windows_frame_prev_register (struct frame_info *this_frame,
  868.                                    void **this_cache, int regnum)
  869. {
  870.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  871.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  872.   struct amd64_windows_frame_cache *cache =
  873.     amd64_windows_frame_cache (this_frame, this_cache);
  874.   struct value *val;
  875.   CORE_ADDR prev;

  876.   if (frame_debug)
  877.     fprintf_unfiltered (gdb_stdlog,
  878.                         "amd64_windows_frame_prev_register %s for sp=%s\n",
  879.                         gdbarch_register_name (gdbarch, regnum),
  880.                         paddress (gdbarch, cache->prev_sp));

  881.   if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
  882.     prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
  883.   else if (regnum == AMD64_RSP_REGNUM)
  884.     {
  885.       prev = cache->prev_rsp_addr;
  886.       if (prev == 0)
  887.         return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
  888.     }
  889.   else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
  890.     prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
  891.   else if (regnum == AMD64_RIP_REGNUM)
  892.     prev = cache->prev_rip_addr;
  893.   else
  894.     prev = 0;

  895.   if (prev && frame_debug)
  896.     fprintf_unfiltered (gdb_stdlog, "  -> at %s\n", paddress (gdbarch, prev));

  897.   if (prev)
  898.     {
  899.       /* Register was saved.  */
  900.       return frame_unwind_got_memory (this_frame, regnum, prev);
  901.     }
  902.   else
  903.     {
  904.       /* Register is either volatile or not modified.  */
  905.       return frame_unwind_got_register (this_frame, regnum, regnum);
  906.     }
  907. }

  908. /* Implement the "this_id" method of struct frame_unwind using
  909.    the standard Windows x64 SEH info.  */

  910. static void
  911. amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
  912.                              struct frame_id *this_id)
  913. {
  914.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  915.   struct amd64_windows_frame_cache *cache =
  916.     amd64_windows_frame_cache (this_frame, this_cache);

  917.   *this_id = frame_id_build (cache->prev_sp,
  918.                              cache->image_base + cache->start_rva);
  919. }

  920. /* Windows x64 SEH unwinder.  */

  921. static const struct frame_unwind amd64_windows_frame_unwind =
  922. {
  923.   NORMAL_FRAME,
  924.   default_frame_unwind_stop_reason,
  925.   &amd64_windows_frame_this_id,
  926.   &amd64_windows_frame_prev_register,
  927.   NULL,
  928.   default_frame_sniffer
  929. };

  930. /* Implement the "skip_prologue" gdbarch method.  */

  931. static CORE_ADDR
  932. amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  933. {
  934.   CORE_ADDR func_addr;
  935.   CORE_ADDR unwind_info = 0;
  936.   CORE_ADDR image_base, start_rva, end_rva;
  937.   struct external_pex64_unwind_info ex_ui;

  938.   /* Use prologue size from unwind info.  */
  939.   if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
  940.                                       &image_base, &start_rva, &end_rva) == 0)
  941.     {
  942.       if (unwind_info == 0)
  943.         {
  944.           /* Leaf function.  */
  945.           return pc;
  946.         }
  947.       else if (target_read_memory (image_base + unwind_info,
  948.                                    (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
  949.                && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
  950.         return max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
  951.     }

  952.   /* See if we can determine the end of the prologue via the symbol
  953.      table.  If so, then return either the PC, or the PC after
  954.      the prologue, whichever is greater.  */
  955.   if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
  956.     {
  957.       CORE_ADDR post_prologue_pc
  958.         = skip_prologue_using_sal (gdbarch, func_addr);

  959.       if (post_prologue_pc != 0)
  960.         return max (pc, post_prologue_pc);
  961.     }

  962.   return pc;
  963. }

  964. /* Check Win64 DLL jmp trampolines and find jump destination.  */

  965. static CORE_ADDR
  966. amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
  967. {
  968.   CORE_ADDR destination = 0;
  969.   struct gdbarch *gdbarch = get_frame_arch (frame);
  970.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  971.   /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)).  */
  972.   if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
  973.     {
  974.       /* Get opcode offset and see if we can find a reference in our data.  */
  975.       ULONGEST offset
  976.         = read_memory_unsigned_integer (pc + 2, 4, byte_order);

  977.       /* Address of the function pointer, relative to the end of the insn.  */
  978.       CORE_ADDR indirect_addr = pc + offset + 6;

  979.       struct minimal_symbol *indsym
  980.         = (indirect_addr
  981.            ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
  982.            : NULL);
  983.       const char *symname = indsym ? MSYMBOL_LINKAGE_NAME (indsym) : NULL;

  984.       if (symname)
  985.         {
  986.           if (strncmp (symname, "__imp_", 6) == 0
  987.               || strncmp (symname, "_imp_", 5) == 0)
  988.             destination
  989.               = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
  990.         }
  991.     }

  992.   return destination;
  993. }
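
The stub matched above is the usual import thunk: ff 25 disp32, i.e.
jmp *disp32(%rip), where the pointer holding the real destination lives
disp32 bytes past the end of the 6-byte instruction.  A worked example on
made-up bytes and a made-up address:

/* Worked example, not part of amd64-windows-tdep.c.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t pc = 0x140002000ULL;
  /* ff 25 fa 1f 00 00   ->   jmp *0x1ffa(%rip)  */
  unsigned char insn[6] = { 0xff, 0x25, 0xfa, 0x1f, 0x00, 0x00 };
  uint32_t disp = (uint32_t) insn[2]
                  | (uint32_t) insn[3] << 8
                  | (uint32_t) insn[4] << 16
                  | (uint32_t) insn[5] << 24;

  /* Prints 0x140004000; the 8-byte pointer stored there is the real
     jump destination.  */
  printf ("function pointer slot: 0x%llx\n",
          (unsigned long long) (pc + 6 + disp));
  return 0;
}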

  994. /* Implement the "auto_wide_charset" gdbarch method.  */

  995. static const char *
  996. amd64_windows_auto_wide_charset (void)
  997. {
  998.   return "UTF-16";
  999. }

  1000. static void
  1001. amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
  1002. {
  1003.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1004.   /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
  1005.      preferred over the SEH one.  The reasons are:
  1006.      - binaries without SEH but with dwarf2 debug info are correctly handled
  1007.        (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
  1008.        info).
  1009.      - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
  1010.        handled if the dwarf2 unwinder is used).

  1011.      The call to amd64_init_abi appends default unwinders that aren't
  1012.      compatible with the SEH one.
  1013.   */
  1014.   frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);

  1015.   amd64_init_abi (info, gdbarch);

  1016.   windows_init_abi (info, gdbarch);

  1017.   /* On Windows, "long"s are only 32-bit.  */
  1018.   set_gdbarch_long_bit (gdbarch, 32);

  1019.   /* Function calls.  */
  1020.   set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
  1021.   set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
  1022.   set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
  1023.   set_gdbarch_skip_trampoline_code (gdbarch,
  1024.                                     amd64_windows_skip_trampoline_code);

  1025.   set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);

  1026.   set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
  1027. }

  1028. /* -Wmissing-prototypes */
  1029. extern initialize_file_ftype _initialize_amd64_windows_tdep;

  1030. void
  1031. _initialize_amd64_windows_tdep (void)
  1032. {
  1033.   gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
  1034.                           amd64_windows_init_abi);
  1035. }