gdb/gdbserver/linux-x86-low.c - gdb

Source code

/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

/* Fallback target descriptions, used when GDB lacks XML support (see
   the xmltarget_* strings below).  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* `jmp rel32': opcode 0xe9 followed by a 32-bit displacement relative
   to the end of the five-byte instruction.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
/* `jmp rel16': 0x66 operand-size prefix, 0xe9, 16-bit displacement.  */
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
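
/* A minimal illustrative sketch (hypothetical helper, not used in
   this file) of how such a jump is patched: the rel32 field is
   relative to the address of the *next* instruction, i.e. the jump's
   own address plus 5.  */
#if 0
static void
example_patch_jump (unsigned char *insn, CORE_ADDR from, CORE_ADDR to)
{
  int disp = (int) (to - (from + 5));  /* Displacement from insn end.  */

  memcpy (insn + 1, &disp, 4);         /* Overwrite the 4 rel32 bytes.  */
}
#endif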

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET	0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET	0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
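
/* A minimal sketch (hypothetical helper, not part of this file) of
   how the map is consumed: GDB register REGNO is transferred from
   byte offset i386_regmap[REGNO] of the raw `struct user' register
   block, using only the low 4 bytes of the 64-bit slot.  */
#if 0
static unsigned int
example_fetch_i386_reg (const unsigned char *buf, int regno)
{
  unsigned int val;

  memcpy (&val, buf + i386_regmap[regno], 4);
  return val;
}
#endif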

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif


/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64 bits for x86-64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    /* The low 3 bits of a segment selector hold the RPL and table
       indicator; shifting them off yields the descriptor index.  */
    const int reg_thread_area = 3;
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
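
/* For example (illustrative value): a %gs selector of 0x63 is
   0b1100011; bits 0-1 hold RPL 3, bit 2 is the table indicator, and
   0x63 >> 3 == 12 is the descriptor index that gets passed to
   PTRACE_GET_THREAD_AREA above.  */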



static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

/* 0xCC is the `int3' breakpoint instruction.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later just before resuming the lwp;
	 here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_thread);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_thread);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

static unsigned long
x86_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_thread);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->private->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct x86_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded; otherwise, x86_dr_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      if (state->dr_control_mirror != 0)
	x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* The types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64-bit, but aligned to 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun;
   in their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* The format of the XSAVE extended state is:

	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

   The same memory layout is used for the core dump NT_X86_XSTATE
   note, which represents the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes[464..471] are the OS-enabled
   extended state mask, which is the same as the extended control
   register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use
   this mask together with the mask saved in the xstate_hdr_bytes to
   determine what states the processor/OS supports and what state,
   used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
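
/* For illustration only (a hypothetical helper, not used below): with
   the layout above, XCR0 is the uint64_t at byte offset 464 of the
   XSAVE buffer, i.e. element 464 / 8 == 58 when the buffer is viewed
   as an array of uint64_t, which is exactly how
   x86_linux_read_description reads it.  */
#if 0
static uint64_t
example_read_xcr0 (const uint64_t *xsave_buf)
{
  return xsave_buf[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif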

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case X86_XSTATE_AVX512_MASK:
	      return tdesc_i386_avx512_linux;

	    case X86_XSTATE_MPX_MASK:
	      return tdesc_i386_mpx_linux;

	    case X86_XSTATE_AVX_MASK:
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
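
/* For example (illustrative value): on an AVX-capable machine, XCR0
   typically reads as 0x7 (x87 | SSE | AVX), so with XML support
   enabled the 64-bit ELF branch above matches X86_XSTATE_AVX_MASK and
   returns tdesc_amd64_avx_linux.  */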

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process the qSupported query, "xmlRegisters=".  Update the buffer
   size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Default to no XML support.  If gdb sends "xmlRegisters=" with
     "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
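
/* For example (illustrative query): a GDB that understands x86 XML
   target descriptions might send "xmlRegisters=i386,arm,mips"; the
   loop above finds the "i386" token and sets use_xml, while any
   other value leaves it at 0.  */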

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

/* Write LEN bytes from BUF into the inferior at *TO, and advance *TO
   past what was written.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 89 e6", into bytes stored at BUF.  Return the number of bytes
   written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
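
/* For example (illustrative only):

     unsigned char buf[16];
     int n = push_opcode (buf, "48 89 e6");

   leaves n == 3 and buf holding the bytes 0x48 0x89 0xe6, the
   encoding of `mov %rsp,%rsi'.  */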

  1344. #ifdef __x86_64__

  1345. /* Build a jump pad that saves registers and calls a collection
  1346.    function.  Writes a jump instruction to the jump pad to
  1347.    JJUMPAD_INSN.  The caller is responsible to write it in at the
  1348.    tracepoint address.  */

  1349. static int
  1350. amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
  1351.                                         CORE_ADDR collector,
  1352.                                         CORE_ADDR lockaddr,
  1353.                                         ULONGEST orig_size,
  1354.                                         CORE_ADDR *jump_entry,
  1355.                                         CORE_ADDR *trampoline,
  1356.                                         ULONGEST *trampoline_size,
  1357.                                         unsigned char *jjump_pad_insn,
  1358.                                         ULONGEST *jjump_pad_insn_size,
  1359.                                         CORE_ADDR *adjusted_insn_addr,
  1360.                                         CORE_ADDR *adjusted_insn_addr_end,
  1361.                                         char *err)
  1362. {
  1363.   unsigned char buf[40];
  1364.   int i, offset;
  1365.   int64_t loffset;

  1366.   CORE_ADDR buildaddr = *jump_entry;

  1367.   /* Build the jump pad.  */

  1368.   /* First, do tracepoint data collection.  Save registers.  */
  1369.   i = 0;
  1370.   /* Need to ensure stack pointer saved first.  */
  1371.   buf[i++] = 0x54; /* push %rsp */
  1372.   buf[i++] = 0x55; /* push %rbp */
  1373.   buf[i++] = 0x57; /* push %rdi */
  1374.   buf[i++] = 0x56; /* push %rsi */
  1375.   buf[i++] = 0x52; /* push %rdx */
  1376.   buf[i++] = 0x51; /* push %rcx */
  1377.   buf[i++] = 0x53; /* push %rbx */
  1378.   buf[i++] = 0x50; /* push %rax */
  1379.   buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  1380.   buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  1381.   buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  1382.   buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  1383.   buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  1384.   buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  1385.   buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  1386.   buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  1387.   buf[i++] = 0x9c; /* pushfq */
  1388.   buf[i++] = 0x48; /* movl <addr>,%rdi */
  1389.   buf[i++] = 0xbf;
  1390.   *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  1391.   i += sizeof (unsigned long);
  1392.   buf[i++] = 0x57; /* push %rdi */
  1393.   append_insns (&buildaddr, i, buf);

  1394.   /* Stack space for the collecting_t object.  */
  1395.   i = 0;
  1396.   i += push_opcode (&buf[i], "48 83 ec 18");        /* sub $0x18,%rsp */
  1397.   i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  1398.   memcpy (buf + i, &tpoint, 8);
  1399.   i += 8;
  1400.   i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  1401.   i += push_opcode (&buf[i],
  1402.                     "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  1403.   i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  1404.   append_insns (&buildaddr, i, buf);

  1405.   /* spin-lock.  */
  1406.   i = 0;
  1407.   i += push_opcode (&buf[i], "48 be");                /* movl <lockaddr>,%rsi */
  1408.   memcpy (&buf[i], (void *) &lockaddr, 8);
  1409.   i += 8;
  1410.   i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  1411.   i += push_opcode (&buf[i], "31 c0");                /* xor %eax,%eax */
  1412.   i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  1413.   i += push_opcode (&buf[i], "48 85 c0");        /* test %rax,%rax */
  1414.   i += push_opcode (&buf[i], "75 f4");                /* jne <again> */
  1415.   append_insns (&buildaddr, i, buf);

  1416.   /* Set up the gdb_collect call.  */
  1417.   /* At this point, (stack pointer + 0x18) is the base of our saved
  1418.      register block.  */

  1419.   i = 0;
  1420.   i += push_opcode (&buf[i], "48 89 e6");        /* mov %rsp,%rsi */
  1421.   i += push_opcode (&buf[i], "48 83 c6 18");        /* add $0x18,%rsi */

  1422.   /* tpoint address may be 64-bit wide.  */
  1423.   i += push_opcode (&buf[i], "48 bf");                /* movl <addr>,%rdi */
  1424.   memcpy (buf + i, &tpoint, 8);
  1425.   i += 8;
  1426.   append_insns (&buildaddr, i, buf);

  1427.   /* The collector function being in the shared library, may be
  1428.      >31-bits away off the jump pad.  */
  1429.   i = 0;
  1430.   i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  1431.   memcpy (buf + i, &collector, 8);
  1432.   i += 8;
  1433.   i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  1434.   append_insns (&buildaddr, i, buf);

  1435.   /* Clear the spin-lock.  */
  1436.   i = 0;
  1437.   i += push_opcode (&buf[i], "31 c0");                /* xor %eax,%eax */
  1438.   i += push_opcode (&buf[i], "48 a3");                /* mov %rax, lockaddr */
  1439.   memcpy (buf + i, &lockaddr, 8);
  1440.   i += 8;
  1441.   append_insns (&buildaddr, i, buf);

  1442.   /* Remove stack that had been used for the collect_t object.  */
  1443.   i = 0;
  1444.   i += push_opcode (&buf[i], "48 83 c4 18");        /* add $0x18,%rsp */
  1445.   append_insns (&buildaddr, i, buf);

  1446.   /* Restore register state.  */
  1447.   i = 0;
  1448.   buf[i++] = 0x48; /* add $0x8,%rsp */
  1449.   buf[i++] = 0x83;
  1450.   buf[i++] = 0xc4;
  1451.   buf[i++] = 0x08;
  1452.   buf[i++] = 0x9d; /* popfq */
  1453.   buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  1454.   buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  1455.   buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  1456.   buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  1457.   buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  1458.   buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  1459.   buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  1460.   buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  1461.   buf[i++] = 0x58; /* pop %rax */
  1462.   buf[i++] = 0x5b; /* pop %rbx */
  1463.   buf[i++] = 0x59; /* pop %rcx */
  1464.   buf[i++] = 0x5a; /* pop %rdx */
  1465.   buf[i++] = 0x5e; /* pop %rsi */
  1466.   buf[i++] = 0x5f; /* pop %rdi */
  1467.   buf[i++] = 0x5d; /* pop %rbp */
  1468.   buf[i++] = 0x5c; /* pop %rsp */
  1469.   append_insns (&buildaddr, i, buf);

  1470.   /* Now, adjust the original instruction to execute in the jump
  1471.      pad.  */
  1472.   *adjusted_insn_addr = buildaddr;
  1473.   relocate_instruction (&buildaddr, tpaddr);
  1474.   *adjusted_insn_addr_end = buildaddr;

  1475.   /* Finally, write a jump back to the program.  */

  1476.   loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  1477.   if (loffset > INT_MAX || loffset < INT_MIN)
  1478.     {
  1479.       sprintf (err,
  1480.                "E.Jump back from jump pad too far from tracepoint "
  1481.                "(offset 0x%" PRIx64 " > int32).", loffset);
  1482.       return 1;
  1483.     }

  1484.   offset = (int) loffset;
  1485.   memcpy (buf, jump_insn, sizeof (jump_insn));
  1486.   memcpy (buf + 1, &offset, 4);
  1487.   append_insns (&buildaddr, sizeof (jump_insn), buf);

  1488.   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
  1489.      is always done last (by our caller actually), so that we can
  1490.      install fast tracepoints with threads running.  This relies on
  1491.      the agent's atomic write support.  */
  1492.   loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  1493.   if (loffset > INT_MAX || loffset < INT_MIN)
  1494.     {
  1495.       sprintf (err,
  1496.                "E.Jump pad too far from tracepoint "
  1497.                "(offset 0x%" PRIx64 " > int32).", loffset);
  1498.       return 1;
  1499.     }

  1500.   offset = (int) loffset;

  1501.   memcpy (buf, jump_insn, sizeof (jump_insn));
  1502.   memcpy (buf + 1, &offset, 4);
  1503.   memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  1504.   *jjump_pad_insn_size = sizeof (jump_insn);

  1505.   /* Return the end address of our pad.  */
  1506.   *jump_entry = buildaddr;

  1507.   return 0;
  1508. }

  1509. #endif /* __x86_64__ */
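
A minimal C sketch of the protocol the emitted spin-lock and collector
sequences above implement (the collecting_t layout and the gdb_collect
signature here are illustrative stand-ins, not the in-process agent's
exact definitions; only the locking discipline is the point):

    struct collecting_t_sketch { unsigned long tpoint; unsigned long thread_area; };

    static void
    jump_pad_body_sketch (volatile unsigned long *lock,
                          struct collecting_t_sketch *obj,
                          void (*gdb_collect) (unsigned long, unsigned char *),
                          unsigned long tpoint, unsigned char *saved_regs)
    {
      /* Spin until we own the lock, like the "lock cmpxchg; test; jne"
         loop above: atomically replace 0 with a pointer to our
         collecting_t, retrying while another thread holds the lock.  */
      while (__sync_val_compare_and_swap (lock, 0UL, (unsigned long) obj) != 0)
        ;

      gdb_collect (tpoint, saved_regs);  /* the collector call */

      *lock = 0;                         /* a plain store clears the lock */
    }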

  1510. /* Build a jump pad that saves registers and calls a collection
  1511.    function.  Writes the jump instruction that jumps to the jump pad
  1512.    into JJUMPAD_INSN.  The caller is responsible for writing it in at
  1513.    the tracepoint address.  */

  1514. static int
  1515. i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
  1516.                                        CORE_ADDR collector,
  1517.                                        CORE_ADDR lockaddr,
  1518.                                        ULONGEST orig_size,
  1519.                                        CORE_ADDR *jump_entry,
  1520.                                        CORE_ADDR *trampoline,
  1521.                                        ULONGEST *trampoline_size,
  1522.                                        unsigned char *jjump_pad_insn,
  1523.                                        ULONGEST *jjump_pad_insn_size,
  1524.                                        CORE_ADDR *adjusted_insn_addr,
  1525.                                        CORE_ADDR *adjusted_insn_addr_end,
  1526.                                        char *err)
  1527. {
  1528.   unsigned char buf[0x100];
  1529.   int i, offset;
  1530.   CORE_ADDR buildaddr = *jump_entry;

  1531.   /* Build the jump pad.  */

  1532.   /* First, do tracepoint data collection.  Save registers.  */
  1533.   i = 0;
  1534.   buf[i++] = 0x60; /* pushad */
  1535.   buf[i++] = 0x68; /* push tpaddr aka $pc */
  1536.   *((int *)(buf + i)) = (int) tpaddr;
  1537.   i += 4;
  1538.   buf[i++] = 0x9c; /* pushf */
  1539.   buf[i++] = 0x1e; /* push %ds */
  1540.   buf[i++] = 0x06; /* push %es */
  1541.   buf[i++] = 0x0f; /* push %fs */
  1542.   buf[i++] = 0xa0;
  1543.   buf[i++] = 0x0f; /* push %gs */
  1544.   buf[i++] = 0xa8;
  1545.   buf[i++] = 0x16; /* push %ss */
  1546.   buf[i++] = 0x0e; /* push %cs */
  1547.   append_insns (&buildaddr, i, buf);

  1548.   /* Stack space for the collecting_t object.  */
  1549.   i = 0;
  1550.   i += push_opcode (&buf[i], "83 ec 08");        /* sub    $0x8,%esp */

  1551.   /* Build the object.  */
  1552.   i += push_opcode (&buf[i], "b8");                /* mov    <tpoint>,%eax */
  1553.   memcpy (buf + i, &tpoint, 4);
  1554.   i += 4;
  1555.   i += push_opcode (&buf[i], "89 04 24");           /* mov %eax,(%esp) */

  1556.   i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  1557.   i += push_opcode (&buf[i], "89 44 24 04");           /* mov %eax,0x4(%esp) */
  1558.   append_insns (&buildaddr, i, buf);

  1559.   /* spin-lock.  Note this uses cmpxchg, which is not available on the
  1560.      original i386.  If we cared about that, xchg could be used instead.  */

  1561.   i = 0;
  1562.   i += push_opcode (&buf[i], "31 c0");                /* xor %eax,%eax */
  1563.   i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
  1564.                                                    %esp,<lockaddr> */
  1565.   memcpy (&buf[i], (void *) &lockaddr, 4);
  1566.   i += 4;
  1567.   i += push_opcode (&buf[i], "85 c0");                /* test %eax,%eax */
  1568.   i += push_opcode (&buf[i], "75 f2");                /* jne <again> */
  1569.   append_insns (&buildaddr, i, buf);


  1570.   /* Set up arguments to the gdb_collect call.  */
  1571.   i = 0;
  1572.   i += push_opcode (&buf[i], "89 e0");                /* mov %esp,%eax */
  1573.   i += push_opcode (&buf[i], "83 c0 08");        /* add $0x08,%eax */
  1574.   i += push_opcode (&buf[i], "89 44 24 fc");        /* mov %eax,-0x4(%esp) */
  1575.   append_insns (&buildaddr, i, buf);

  1576.   i = 0;
  1577.   i += push_opcode (&buf[i], "83 ec 08");        /* sub $0x8,%esp */
  1578.   append_insns (&buildaddr, i, buf);

  1579.   i = 0;
  1580.   i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  1581.   memcpy (&buf[i], (void *) &tpoint, 4);
  1582.   i += 4;
  1583.   append_insns (&buildaddr, i, buf);

  1584.   buf[0] = 0xe8; /* call <reladdr> */
  1585.   offset = collector - (buildaddr + sizeof (jump_insn));
  1586.   memcpy (buf + 1, &offset, 4);
  1587.   append_insns (&buildaddr, 5, buf);
  1588.   /* Clean up after the call.  */
  1589.   buf[0] = 0x83; /* add $0x8,%esp */
  1590.   buf[1] = 0xc4;
  1591.   buf[2] = 0x08;
  1592.   append_insns (&buildaddr, 3, buf);


  1593.   /* Clear the spin-lock.  This would need the LOCK prefix on older
  1594.      broken archs.  */
  1595.   i = 0;
  1596.   i += push_opcode (&buf[i], "31 c0");                /* xor %eax,%eax */
  1597.   i += push_opcode (&buf[i], "a3");                /* mov %eax, lockaddr */
  1598.   memcpy (buf + i, &lockaddr, 4);
  1599.   i += 4;
  1600.   append_insns (&buildaddr, i, buf);


  1601.   /* Remove stack that had been used for the collect_t object.  */
  1602.   i = 0;
  1603.   i += push_opcode (&buf[i], "83 c4 08");        /* add $0x08,%esp */
  1604.   append_insns (&buildaddr, i, buf);

  1605.   i = 0;
  1606.   buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  1607.   buf[i++] = 0xc4;
  1608.   buf[i++] = 0x04;
  1609.   buf[i++] = 0x17; /* pop %ss */
  1610.   buf[i++] = 0x0f; /* pop %gs */
  1611.   buf[i++] = 0xa9;
  1612.   buf[i++] = 0x0f; /* pop %fs */
  1613.   buf[i++] = 0xa1;
  1614.   buf[i++] = 0x07; /* pop %es */
  1615.   buf[i++] = 0x1f; /* pop %ds */
  1616.   buf[i++] = 0x9d; /* popf */
  1617.   buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  1618.   buf[i++] = 0xc4;
  1619.   buf[i++] = 0x04;
  1620.   buf[i++] = 0x61; /* popad */
  1621.   append_insns (&buildaddr, i, buf);

  1622.   /* Now, adjust the original instruction to execute in the jump
  1623.      pad.  */
  1624.   *adjusted_insn_addr = buildaddr;
  1625.   relocate_instruction (&buildaddr, tpaddr);
  1626.   *adjusted_insn_addr_end = buildaddr;

  1627.   /* Write the jump back to the program.  */
  1628.   offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  1629.   memcpy (buf, jump_insn, sizeof (jump_insn));
  1630.   memcpy (buf + 1, &offset, 4);
  1631.   append_insns (&buildaddr, sizeof (jump_insn), buf);

  1632.   /* The jump pad is now built.  Wire in a jump to our jump pad.  This
  1633.      is always done last (by our caller actually), so that we can
  1634.      install fast tracepoints with threads running.  This relies on
  1635.      the agent's atomic write support.  */
  1636.   if (orig_size == 4)
  1637.     {
  1638.       /* Create a trampoline.  */
  1639.       *trampoline_size = sizeof (jump_insn);
  1640.       if (!claim_trampoline_space (*trampoline_size, trampoline))
  1641.         {
  1642.           /* No trampoline space available.  */
  1643.           strcpy (err,
  1644.                   "E.Cannot allocate trampoline space needed for fast "
  1645.                   "tracepoints on 4-byte instructions.");
  1646.           return 1;
  1647.         }

  1648.       offset = *jump_entry - (*trampoline + sizeof (jump_insn));
  1649.       memcpy (buf, jump_insn, sizeof (jump_insn));
  1650.       memcpy (buf + 1, &offset, 4);
  1651.       write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

  1652.       /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
  1653.       offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
  1654.       memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
  1655.       memcpy (buf + 2, &offset, 2);
  1656.       memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
  1657.       *jjump_pad_insn_size = sizeof (small_jump_insn);
  1658.     }
  1659.   else
  1660.     {
  1661.       /* Else use a 32-bit relative jump instruction.  */
  1662.       offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  1663.       memcpy (buf, jump_insn, sizeof (jump_insn));
  1664.       memcpy (buf + 1, &offset, 4);
  1665.       memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  1666.       *jjump_pad_insn_size = sizeof (jump_insn);
  1667.     }

  1668.   /* Return the end address of our pad.  */
  1669.   *jump_entry = buildaddr;

  1670.   return 0;
  1671. }
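
Both pad builders rely on the same displacement rule: a relative jump's
operand is counted from the end of the jump instruction itself.  A
small sketch, assuming the 5-byte e9/rel32 form of jump_insn and the
4-byte 66 e9/rel16 form of small_jump_insn used above:

    #include <stdint.h>

    /* Illustrative helpers only; the builders above inline this math.  */
    static int32_t
    rel32_disp (uint64_t target, uint64_t insn_addr)
    {
      return (int32_t) (target - (insn_addr + 5));  /* e9 + imm32 */
    }

    static int16_t
    rel16_disp (uint64_t target, uint64_t insn_addr)
    {
      return (int16_t) (target - (insn_addr + 4));  /* 66 e9 + imm16 */
    }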

  1672. static int
  1673. x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
  1674.                                       CORE_ADDR collector,
  1675.                                       CORE_ADDR lockaddr,
  1676.                                       ULONGEST orig_size,
  1677.                                       CORE_ADDR *jump_entry,
  1678.                                       CORE_ADDR *trampoline,
  1679.                                       ULONGEST *trampoline_size,
  1680.                                       unsigned char *jjump_pad_insn,
  1681.                                       ULONGEST *jjump_pad_insn_size,
  1682.                                       CORE_ADDR *adjusted_insn_addr,
  1683.                                       CORE_ADDR *adjusted_insn_addr_end,
  1684.                                       char *err)
  1685. {
  1686. #ifdef __x86_64__
  1687.   if (is_64bit_tdesc ())
  1688.     return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
  1689.                                                    collector, lockaddr,
  1690.                                                    orig_size, jump_entry,
  1691.                                                    trampoline, trampoline_size,
  1692.                                                    jjump_pad_insn,
  1693.                                                    jjump_pad_insn_size,
  1694.                                                    adjusted_insn_addr,
  1695.                                                    adjusted_insn_addr_end,
  1696.                                                    err);
  1697. #endif

  1698.   return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
  1699.                                                 collector, lockaddr,
  1700.                                                 orig_size, jump_entry,
  1701.                                                 trampoline, trampoline_size,
  1702.                                                 jjump_pad_insn,
  1703.                                                 jjump_pad_insn_size,
  1704.                                                 adjusted_insn_addr,
  1705.                                                 adjusted_insn_addr_end,
  1706.                                                 err);
  1707. }

  1708. /* Return the minimum instruction length for fast tracepoints on x86/x86-64
  1709.    architectures.  */

  1710. static int
  1711. x86_get_min_fast_tracepoint_insn_len (void)
  1712. {
  1713.   static int warned_about_fast_tracepoints = 0;

  1714. #ifdef __x86_64__
  1715.   /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
  1716.       used for fast tracepoints.  */
  1717.   if (is_64bit_tdesc ())
  1718.     return 5;
  1719. #endif

  1720.   if (agent_loaded_p ())
  1721.     {
  1722.       char errbuf[IPA_BUFSIZ];

  1723.       errbuf[0] = '\0';

  1724.       /* On x86, if trampolines are available, then 4-byte jump instructions
  1725.          with a 2-byte offset may be used, otherwise 5-byte jump instructions
  1726.          with a 4-byte offset are used instead.  */
  1727.       if (have_fast_tracepoint_trampoline_buffer (errbuf))
  1728.         return 4;
  1729.       else
  1730.         {
  1731.           /* GDB has no channel to explain to user why a shorter fast
  1732.              tracepoint is not possible, but at least make GDBserver
  1733.              mention that something has gone awry.  */
  1734.           if (!warned_about_fast_tracepoints)
  1735.             {
  1736.               warning ("4-byte fast tracepoints not available; %s\n", errbuf);
  1737.               warned_about_fast_tracepoints = 1;
  1738.             }
  1739.           return 5;
  1740.         }
  1741.     }
  1742.   else
  1743.     {
  1744.       /* Indicate that the minimum length is currently unknown since the IPA
  1745.          has not loaded yet.  */
  1746.       return 0;
  1747.     }
  1748. }
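
A hypothetical caller-side check (names here are illustrative, not
gdbserver's) shows how the three possible results would be consumed:

    static int
    site_ok_for_fast_tracepoint (int insn_len)
    {
      int min_len = x86_get_min_fast_tracepoint_insn_len ();

      /* 0 means "unknown until the IPA loads"; otherwise the
         instruction being overwritten must be at least as long as the
         jump (4 or 5 bytes) that replaces it.  */
      return min_len != 0 && insn_len >= min_len;
    }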

  1749. static void
  1750. add_insns (unsigned char *start, int len)
  1751. {
  1752.   CORE_ADDR buildaddr = current_insn_ptr;

  1753.   if (debug_threads)
  1754.     debug_printf ("Adding %d bytes of insn at %s\n",
  1755.                   len, paddress (buildaddr));

  1756.   append_insns (&buildaddr, len, start);
  1757.   current_insn_ptr = buildaddr;
  1758. }

  1759. /* Our general strategy for emitting code is to avoid specifying raw
  1760.    bytes whenever possible, and instead copy a block of inline asm
  1761.    that is embedded in the function.  This is a little messy, because
  1762.    we need to keep the compiler from discarding what looks like dead
  1763.    code, plus suppress various warnings.  */

  1764. #define EMIT_ASM(NAME, INSNS)                                                \
  1765.   do                                                                        \
  1766.     {                                                                        \
  1767.       extern unsigned char start_ ## NAME, end_ ## NAME;                \
  1768.       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);        \
  1769.       __asm__ ("jmp end_" #NAME "\n"                                        \
  1770.                "\t" "start_" #NAME ":"                                        \
  1771.                "\t" INSNS "\n"                                                \
  1772.                "\t" "end_" #NAME ":");                                        \
  1773.     } while (0)
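
For illustration, EMIT_ASM (sample, "pop %rax") expands (after string
pasting) to roughly:

    extern unsigned char start_sample, end_sample;
    add_insns (&start_sample, &end_sample - &start_sample);
    __asm__ ("jmp end_sample\n"
             "\tstart_sample:\tpop %rax\n"
             "\tend_sample:");

The jmp skips the embedded bytes if the containing function is ever
executed, while the start_/end_ labels let add_insns copy exactly the
bytes between them into the inferior.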

  1774. #ifdef __x86_64__

  1775. #define EMIT_ASM32(NAME,INSNS)                                                \
  1776.   do                                                                        \
  1777.     {                                                                        \
  1778.       extern unsigned char start_ ## NAME, end_ ## NAME;                \
  1779.       add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);        \
  1780.       __asm__ (".code32\n"                                                \
  1781.                "\t" "jmp end_" #NAME "\n"                                \
  1782.                "\t" "start_" #NAME ":\n"                                \
  1783.                "\t" INSNS "\n"                                                \
  1784.                "\t" "end_" #NAME ":\n"                                        \
  1785.                ".code64\n");                                                \
  1786.     } while (0)

  1787. #else

  1788. #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

  1789. #endif

  1790. #ifdef __x86_64__

  1791. static void
  1792. amd64_emit_prologue (void)
  1793. {
  1794.   EMIT_ASM (amd64_prologue,
  1795.             "pushq %rbp\n\t"
  1796.             "movq %rsp,%rbp\n\t"
  1797.             "sub $0x20,%rsp\n\t"
  1798.             "movq %rdi,-8(%rbp)\n\t"
  1799.             "movq %rsi,-16(%rbp)");
  1800. }


  1801. static void
  1802. amd64_emit_epilogue (void)
  1803. {
  1804.   EMIT_ASM (amd64_epilogue,
  1805.             "movq -16(%rbp),%rdi\n\t"
  1806.             "movq %rax,(%rdi)\n\t"
  1807.             "xor %rax,%rax\n\t"
  1808.             "leave\n\t"
  1809.             "ret");
  1810. }

  1811. static void
  1812. amd64_emit_add (void)
  1813. {
  1814.   EMIT_ASM (amd64_add,
  1815.             "add (%rsp),%rax\n\t"
  1816.             "lea 0x8(%rsp),%rsp");
  1817. }

  1818. static void
  1819. amd64_emit_sub (void)
  1820. {
  1821.   EMIT_ASM (amd64_sub,
  1822.             "sub %rax,(%rsp)\n\t"
  1823.             "pop %rax");
  1824. }

  1825. static void
  1826. amd64_emit_mul (void)
  1827. {
  1828.   emit_error = 1;
  1829. }

  1830. static void
  1831. amd64_emit_lsh (void)
  1832. {
  1833.   emit_error = 1;
  1834. }

  1835. static void
  1836. amd64_emit_rsh_signed (void)
  1837. {
  1838.   emit_error = 1;
  1839. }

  1840. static void
  1841. amd64_emit_rsh_unsigned (void)
  1842. {
  1843.   emit_error = 1;
  1844. }

  1845. static void
  1846. amd64_emit_ext (int arg)
  1847. {
  1848.   switch (arg)
  1849.     {
  1850.     case 8:
  1851.       EMIT_ASM (amd64_ext_8,
  1852.                 "cbtw\n\t"
  1853.                 "cwtl\n\t"
  1854.                 "cltq");
  1855.       break;
  1856.     case 16:
  1857.       EMIT_ASM (amd64_ext_16,
  1858.                 "cwtl\n\t"
  1859.                 "cltq");
  1860.       break;
  1861.     case 32:
  1862.       EMIT_ASM (amd64_ext_32,
  1863.                 "cltq");
  1864.       break;
  1865.     default:
  1866.       emit_error = 1;
  1867.     }
  1868. }

  1869. static void
  1870. amd64_emit_log_not (void)
  1871. {
  1872.   EMIT_ASM (amd64_log_not,
  1873.             "test %rax,%rax\n\t"
  1874.             "sete %cl\n\t"
  1875.             "movzbq %cl,%rax");
  1876. }

  1877. static void
  1878. amd64_emit_bit_and (void)
  1879. {
  1880.   EMIT_ASM (amd64_and,
  1881.             "and (%rsp),%rax\n\t"
  1882.             "lea 0x8(%rsp),%rsp");
  1883. }

  1884. static void
  1885. amd64_emit_bit_or (void)
  1886. {
  1887.   EMIT_ASM (amd64_or,
  1888.             "or (%rsp),%rax\n\t"
  1889.             "lea 0x8(%rsp),%rsp");
  1890. }

  1891. static void
  1892. amd64_emit_bit_xor (void)
  1893. {
  1894.   EMIT_ASM (amd64_xor,
  1895.             "xor (%rsp),%rax\n\t"
  1896.             "lea 0x8(%rsp),%rsp");
  1897. }

  1898. static void
  1899. amd64_emit_bit_not (void)
  1900. {
  1901.   EMIT_ASM (amd64_bit_not,
  1902.             "xorq $0xffffffffffffffff,%rax");
  1903. }

  1904. static void
  1905. amd64_emit_equal (void)
  1906. {
  1907.   EMIT_ASM (amd64_equal,
  1908.             "cmp %rax,(%rsp)\n\t"
  1909.             "je .Lamd64_equal_true\n\t"
  1910.             "xor %rax,%rax\n\t"
  1911.             "jmp .Lamd64_equal_end\n\t"
  1912.             ".Lamd64_equal_true:\n\t"
  1913.             "mov $0x1,%rax\n\t"
  1914.             ".Lamd64_equal_end:\n\t"
  1915.             "lea 0x8(%rsp),%rsp");
  1916. }

  1917. static void
  1918. amd64_emit_less_signed (void)
  1919. {
  1920.   EMIT_ASM (amd64_less_signed,
  1921.             "cmp %rax,(%rsp)\n\t"
  1922.             "jl .Lamd64_less_signed_true\n\t"
  1923.             "xor %rax,%rax\n\t"
  1924.             "jmp .Lamd64_less_signed_end\n\t"
  1925.             ".Lamd64_less_signed_true:\n\t"
  1926.             "mov $1,%rax\n\t"
  1927.             ".Lamd64_less_signed_end:\n\t"
  1928.             "lea 0x8(%rsp),%rsp");
  1929. }

  1930. static void
  1931. amd64_emit_less_unsigned (void)
  1932. {
  1933.   EMIT_ASM (amd64_less_unsigned,
  1934.             "cmp %rax,(%rsp)\n\t"
  1935.             "jb .Lamd64_less_unsigned_true\n\t"
  1936.             "xor %rax,%rax\n\t"
  1937.             "jmp .Lamd64_less_unsigned_end\n\t"
  1938.             ".Lamd64_less_unsigned_true:\n\t"
  1939.             "mov $1,%rax\n\t"
  1940.             ".Lamd64_less_unsigned_end:\n\t"
  1941.             "lea 0x8(%rsp),%rsp");
  1942. }

  1943. static void
  1944. amd64_emit_ref (int size)
  1945. {
  1946.   switch (size)
  1947.     {
  1948.     case 1:
  1949.       EMIT_ASM (amd64_ref1,
  1950.                 "movb (%rax),%al");
  1951.       break;
  1952.     case 2:
  1953.       EMIT_ASM (amd64_ref2,
  1954.                 "movw (%rax),%ax");
  1955.       break;
  1956.     case 4:
  1957.       EMIT_ASM (amd64_ref4,
  1958.                 "movl (%rax),%eax");
  1959.       break;
  1960.     case 8:
  1961.       EMIT_ASM (amd64_ref8,
  1962.                 "movq (%rax),%rax");
  1963.       break;
  1964.     }
  1965. }

  1966. static void
  1967. amd64_emit_if_goto (int *offset_p, int *size_p)
  1968. {
  1969.   EMIT_ASM (amd64_if_goto,
  1970.             "mov %rax,%rcx\n\t"
  1971.             "pop %rax\n\t"
  1972.             "cmp $0,%rcx\n\t"
  1973.             ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  1974.   if (offset_p)
  1975.     *offset_p = 10;
  1976.   if (size_p)
  1977.     *size_p = 4;
  1978. }
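
As a consistency check on *offset_p: with the usual GAS encodings,
mov %rax,%rcx is 3 bytes (48 89 c1), pop %rax is 1 (58), cmp $0,%rcx is
4 (48 83 f9 00), and the 0f 85 opcode adds 2 more, so the 4-byte jne
displacement begins 10 bytes into the sequence, matching the value
stored above.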

  1979. static void
  1980. amd64_emit_goto (int *offset_p, int *size_p)
  1981. {
  1982.   EMIT_ASM (amd64_goto,
  1983.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  1984.   if (offset_p)
  1985.     *offset_p = 1;
  1986.   if (size_p)
  1987.     *size_p = 4;
  1988. }

  1989. static void
  1990. amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
  1991. {
  1992.   int diff = (to - (from + size));
  1993.   unsigned char buf[sizeof (int)];

  1994.   if (size != 4)
  1995.     {
  1996.       emit_error = 1;
  1997.       return;
  1998.     }

  1999.   memcpy (buf, &diff, sizeof (int));
  2000.   write_inferior_memory (from, buf, sizeof (int));
  2001. }
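
A worked example of the patching contract: if amd64_emit_goto placed
its e9 opcode at address G, the displacement field sits at
FROM = G + 1 (per *offset_p above) with SIZE = 4, so
diff = TO - (G + 1 + 4); the displacement is counted from the end of
the 5-byte jump, exactly as the processor expects.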

  2002. static void
  2003. amd64_emit_const (LONGEST num)
  2004. {
  2005.   unsigned char buf[16];
  2006.   int i;
  2007.   CORE_ADDR buildaddr = current_insn_ptr;

  2008.   i = 0;
  2009.   buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  2010.   memcpy (&buf[i], &num, sizeof (num));
  2011.   i += 8;
  2012.   append_insns (&buildaddr, i, buf);
  2013.   current_insn_ptr = buildaddr;
  2014. }

  2015. static void
  2016. amd64_emit_call (CORE_ADDR fn)
  2017. {
  2018.   unsigned char buf[16];
  2019.   int i;
  2020.   CORE_ADDR buildaddr;
  2021.   LONGEST offset64;

  2022.   /* The destination function may live in a shared library, and thus
  2023.      may be more than 31 bits away from the compiled code pad.  */

  2024.   buildaddr = current_insn_ptr;

  2025.   offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  2026.   i = 0;

  2027.   if (offset64 > INT_MAX || offset64 < INT_MIN)
  2028.     {
  2029.       /* Offset is too large for a 32-bit relative call, so call
  2030.          indirectly through a register.  Use %r10: since it is
  2031.          call-clobbered, we don't have to push/pop it.  */
  2032.       buf[i++] = 0x48; /* mov $fn,%r10 */
  2033.       buf[i++] = 0xba;
  2034.       memcpy (buf + i, &fn, 8);
  2035.       i += 8;
  2036.       buf[i++] = 0xff; /* callq *%r10 */
  2037.       buf[i++] = 0xd2;
  2038.     }
  2039.   else
  2040.     {
  2041.       int offset32 = offset64; /* we know we can't overflow here.  */
              buf[i++] = 0xe8; /* call <reladdr> */
  2042.       memcpy (buf + i, &offset32, 4);
  2043.       i += 4;
  2044.     }

  2045.   append_insns (&buildaddr, i, buf);
  2046.   current_insn_ptr = buildaddr;
  2047. }

  2048. static void
  2049. amd64_emit_reg (int reg)
  2050. {
  2051.   unsigned char buf[16];
  2052.   int i;
  2053.   CORE_ADDR buildaddr;

  2054.   /* Assume raw_regs is still in %rdi.  */
  2055.   buildaddr = current_insn_ptr;
  2056.   i = 0;
  2057.   buf[i++] = 0xbe; /* mov $<n>,%esi */
  2058.   memcpy (&buf[i], &reg, sizeof (reg));
  2059.   i += 4;
  2060.   append_insns (&buildaddr, i, buf);
  2061.   current_insn_ptr = buildaddr;
  2062.   amd64_emit_call (get_raw_reg_func_addr ());
  2063. }

  2064. static void
  2065. amd64_emit_pop (void)
  2066. {
  2067.   EMIT_ASM (amd64_pop,
  2068.             "pop %rax");
  2069. }

  2070. static void
  2071. amd64_emit_stack_flush (void)
  2072. {
  2073.   EMIT_ASM (amd64_stack_flush,
  2074.             "push %rax");
  2075. }

  2076. static void
  2077. amd64_emit_zero_ext (int arg)
  2078. {
  2079.   switch (arg)
  2080.     {
  2081.     case 8:
  2082.       EMIT_ASM (amd64_zero_ext_8,
  2083.                 "and $0xff,%rax");
  2084.       break;
  2085.     case 16:
  2086.       EMIT_ASM (amd64_zero_ext_16,
  2087.                 "and $0xffff,%rax");
  2088.       break;
  2089.     case 32:
  2090.       EMIT_ASM (amd64_zero_ext_32,
  2091.                 "mov $0xffffffff,%rcx\n\t"
  2092.                 "and %rcx,%rax");
  2093.       break;
  2094.     default:
  2095.       emit_error = 1;
  2096.     }
  2097. }

  2098. static void
  2099. amd64_emit_swap (void)
  2100. {
  2101.   EMIT_ASM (amd64_swap,
  2102.             "mov %rax,%rcx\n\t"
  2103.             "pop %rax\n\t"
  2104.             "push %rcx");
  2105. }

  2106. static void
  2107. amd64_emit_stack_adjust (int n)
  2108. {
  2109.   unsigned char buf[16];
  2110.   int i;
  2111.   CORE_ADDR buildaddr = current_insn_ptr;

  2112.   i = 0;
  2113.   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  2114.   buf[i++] = 0x8d;
  2115.   buf[i++] = 0x64;
  2116.   buf[i++] = 0x24;
  2117.   /* This only handles adjustments where N * 8 fits in a signed byte, but we don't expect any more.  */
  2118.   buf[i++] = n * 8;
  2119.   append_insns (&buildaddr, i, buf);
  2120.   current_insn_ptr = buildaddr;
  2121. }

  2122. /* FN's prototype is `LONGEST(*fn)(int)'.  */

  2123. static void
  2124. amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
  2125. {
  2126.   unsigned char buf[16];
  2127.   int i;
  2128.   CORE_ADDR buildaddr;

  2129.   buildaddr = current_insn_ptr;
  2130.   i = 0;
  2131.   buf[i++] = 0xbf; /* movl $<n>,%edi */
  2132.   memcpy (&buf[i], &arg1, sizeof (arg1));
  2133.   i += 4;
  2134.   append_insns (&buildaddr, i, buf);
  2135.   current_insn_ptr = buildaddr;
  2136.   amd64_emit_call (fn);
  2137. }

  2138. /* FN's prototype is `void(*fn)(int,LONGEST)'.  */

  2139. static void
  2140. amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
  2141. {
  2142.   unsigned char buf[16];
  2143.   int i;
  2144.   CORE_ADDR buildaddr;

  2145.   buildaddr = current_insn_ptr;
  2146.   i = 0;
  2147.   buf[i++] = 0xbf; /* movl $<n>,%edi */
  2148.   memcpy (&buf[i], &arg1, sizeof (arg1));
  2149.   i += 4;
  2150.   append_insns (&buildaddr, i, buf);
  2151.   current_insn_ptr = buildaddr;
  2152.   EMIT_ASM (amd64_void_call_2_a,
  2153.             /* Save away a copy of the stack top.  */
  2154.             "push %rax\n\t"
  2155.             /* Also pass top as the second argument.  */
  2156.             "mov %rax,%rsi");
  2157.   amd64_emit_call (fn);
  2158.   EMIT_ASM (amd64_void_call_2_b,
  2159.             /* Restore the stack top, %rax may have been trashed.  */
  2160.             "pop %rax");
  2161. }

  2162. void
  2163. amd64_emit_eq_goto (int *offset_p, int *size_p)
  2164. {
  2165.   EMIT_ASM (amd64_eq,
  2166.             "cmp %rax,(%rsp)\n\t"
  2167.             "jne .Lamd64_eq_fallthru\n\t"
  2168.             "lea 0x8(%rsp),%rsp\n\t"
  2169.             "pop %rax\n\t"
  2170.             /* jmp, but don't trust the assembler to choose the right jump */
  2171.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2172.             ".Lamd64_eq_fallthru:\n\t"
  2173.             "lea 0x8(%rsp),%rsp\n\t"
  2174.             "pop %rax");

  2175.   if (offset_p)
  2176.     *offset_p = 13;
  2177.   if (size_p)
  2178.     *size_p = 4;
  2179. }

  2180. void
  2181. amd64_emit_ne_goto (int *offset_p, int *size_p)
  2182. {
  2183.   EMIT_ASM (amd64_ne,
  2184.             "cmp %rax,(%rsp)\n\t"
  2185.             "je .Lamd64_ne_fallthru\n\t"
  2186.             "lea 0x8(%rsp),%rsp\n\t"
  2187.             "pop %rax\n\t"
  2188.             /* jmp, but don't trust the assembler to choose the right jump */
  2189.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2190.             ".Lamd64_ne_fallthru:\n\t"
  2191.             "lea 0x8(%rsp),%rsp\n\t"
  2192.             "pop %rax");

  2193.   if (offset_p)
  2194.     *offset_p = 13;
  2195.   if (size_p)
  2196.     *size_p = 4;
  2197. }

  2198. void
  2199. amd64_emit_lt_goto (int *offset_p, int *size_p)
  2200. {
  2201.   EMIT_ASM (amd64_lt,
  2202.             "cmp %rax,(%rsp)\n\t"
  2203.             "jnl .Lamd64_lt_fallthru\n\t"
  2204.             "lea 0x8(%rsp),%rsp\n\t"
  2205.             "pop %rax\n\t"
  2206.             /* jmp, but don't trust the assembler to choose the right jump */
  2207.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2208.             ".Lamd64_lt_fallthru:\n\t"
  2209.             "lea 0x8(%rsp),%rsp\n\t"
  2210.             "pop %rax");

  2211.   if (offset_p)
  2212.     *offset_p = 13;
  2213.   if (size_p)
  2214.     *size_p = 4;
  2215. }

  2216. void
  2217. amd64_emit_le_goto (int *offset_p, int *size_p)
  2218. {
  2219.   EMIT_ASM (amd64_le,
  2220.             "cmp %rax,(%rsp)\n\t"
  2221.             "jnle .Lamd64_le_fallthru\n\t"
  2222.             "lea 0x8(%rsp),%rsp\n\t"
  2223.             "pop %rax\n\t"
  2224.             /* jmp, but don't trust the assembler to choose the right jump */
  2225.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2226.             ".Lamd64_le_fallthru:\n\t"
  2227.             "lea 0x8(%rsp),%rsp\n\t"
  2228.             "pop %rax");

  2229.   if (offset_p)
  2230.     *offset_p = 13;
  2231.   if (size_p)
  2232.     *size_p = 4;
  2233. }

  2234. void
  2235. amd64_emit_gt_goto (int *offset_p, int *size_p)
  2236. {
  2237.   EMIT_ASM (amd64_gt,
  2238.             "cmp %rax,(%rsp)\n\t"
  2239.             "jng .Lamd64_gt_fallthru\n\t"
  2240.             "lea 0x8(%rsp),%rsp\n\t"
  2241.             "pop %rax\n\t"
  2242.             /* jmp, but don't trust the assembler to choose the right jump */
  2243.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2244.             ".Lamd64_gt_fallthru:\n\t"
  2245.             "lea 0x8(%rsp),%rsp\n\t"
  2246.             "pop %rax");

  2247.   if (offset_p)
  2248.     *offset_p = 13;
  2249.   if (size_p)
  2250.     *size_p = 4;
  2251. }

  2252. void
  2253. amd64_emit_ge_goto (int *offset_p, int *size_p)
  2254. {
  2255.   EMIT_ASM (amd64_ge,
  2256.             "cmp %rax,(%rsp)\n\t"
  2257.             "jnge .Lamd64_ge_fallthru\n\t"
  2258.             ".Lamd64_ge_jump:\n\t"
  2259.             "lea 0x8(%rsp),%rsp\n\t"
  2260.             "pop %rax\n\t"
  2261.             /* jmp, but don't trust the assembler to choose the right jump */
  2262.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2263.             ".Lamd64_ge_fallthru:\n\t"
  2264.             "lea 0x8(%rsp),%rsp\n\t"
  2265.             "pop %rax");

  2266.   if (offset_p)
  2267.     *offset_p = 13;
  2268.   if (size_p)
  2269.     *size_p = 4;
  2270. }

  2271. struct emit_ops amd64_emit_ops =
  2272.   {
  2273.     amd64_emit_prologue,
  2274.     amd64_emit_epilogue,
  2275.     amd64_emit_add,
  2276.     amd64_emit_sub,
  2277.     amd64_emit_mul,
  2278.     amd64_emit_lsh,
  2279.     amd64_emit_rsh_signed,
  2280.     amd64_emit_rsh_unsigned,
  2281.     amd64_emit_ext,
  2282.     amd64_emit_log_not,
  2283.     amd64_emit_bit_and,
  2284.     amd64_emit_bit_or,
  2285.     amd64_emit_bit_xor,
  2286.     amd64_emit_bit_not,
  2287.     amd64_emit_equal,
  2288.     amd64_emit_less_signed,
  2289.     amd64_emit_less_unsigned,
  2290.     amd64_emit_ref,
  2291.     amd64_emit_if_goto,
  2292.     amd64_emit_goto,
  2293.     amd64_write_goto_address,
  2294.     amd64_emit_const,
  2295.     amd64_emit_call,
  2296.     amd64_emit_reg,
  2297.     amd64_emit_pop,
  2298.     amd64_emit_stack_flush,
  2299.     amd64_emit_zero_ext,
  2300.     amd64_emit_swap,
  2301.     amd64_emit_stack_adjust,
  2302.     amd64_emit_int_call_1,
  2303.     amd64_emit_void_call_2,
  2304.     amd64_emit_eq_goto,
  2305.     amd64_emit_ne_goto,
  2306.     amd64_emit_lt_goto,
  2307.     amd64_emit_le_goto,
  2308.     amd64_emit_gt_goto,
  2309.     amd64_emit_ge_goto
  2310.   };

  2311. #endif /* __x86_64__ */

  2312. static void
  2313. i386_emit_prologue (void)
  2314. {
  2315.   EMIT_ASM32 (i386_prologue,
  2316.             "push %ebp\n\t"
  2317.             "mov %esp,%ebp\n\t"
  2318.             "push %ebx");
  2319.   /* At this point, the raw regs base address is at 8(%ebp), and the
  2320.      value pointer is at 12(%ebp).  */
  2321. }
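
Spelling out the cdecl frame makes the 8(%ebp)/12(%ebp) comment
concrete; a sketch of the stack immediately after this prologue:

    12(%ebp)  value pointer (second argument)
     8(%ebp)  raw regs base address (first argument)
     4(%ebp)  return address
     0(%ebp)  saved %ebp
    -4(%ebp)  saved %ebx   <-- %esp points here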

  2322. static void
  2323. i386_emit_epilogue (void)
  2324. {
  2325.   EMIT_ASM32 (i386_epilogue,
  2326.             "mov 12(%ebp),%ecx\n\t"
  2327.             "mov %eax,(%ecx)\n\t"
  2328.             "mov %ebx,0x4(%ecx)\n\t"
  2329.             "xor %eax,%eax\n\t"
  2330.             "pop %ebx\n\t"
  2331.             "pop %ebp\n\t"
  2332.             "ret");
  2333. }

  2334. static void
  2335. i386_emit_add (void)
  2336. {
  2337.   EMIT_ASM32 (i386_add,
  2338.             "add (%esp),%eax\n\t"
  2339.             "adc 0x4(%esp),%ebx\n\t"
  2340.             "lea 0x8(%esp),%esp");
  2341. }

  2342. static void
  2343. i386_emit_sub (void)
  2344. {
  2345.   EMIT_ASM32 (i386_sub,
  2346.             "subl %eax,(%esp)\n\t"
  2347.             "sbbl %ebx,4(%esp)\n\t"
  2348.             "pop %eax\n\t"
  2349.             "pop %ebx\n\t");
  2350. }

  2351. static void
  2352. i386_emit_mul (void)
  2353. {
  2354.   emit_error = 1;
  2355. }

  2356. static void
  2357. i386_emit_lsh (void)
  2358. {
  2359.   emit_error = 1;
  2360. }

  2361. static void
  2362. i386_emit_rsh_signed (void)
  2363. {
  2364.   emit_error = 1;
  2365. }

  2366. static void
  2367. i386_emit_rsh_unsigned (void)
  2368. {
  2369.   emit_error = 1;
  2370. }

  2371. static void
  2372. i386_emit_ext (int arg)
  2373. {
  2374.   switch (arg)
  2375.     {
  2376.     case 8:
  2377.       EMIT_ASM32 (i386_ext_8,
  2378.                 "cbtw\n\t"
  2379.                 "cwtl\n\t"
  2380.                 "movl %eax,%ebx\n\t"
  2381.                 "sarl $31,%ebx");
  2382.       break;
  2383.     case 16:
  2384.       EMIT_ASM32 (i386_ext_16,
  2385.                 "cwtl\n\t"
  2386.                 "movl %eax,%ebx\n\t"
  2387.                 "sarl $31,%ebx");
  2388.       break;
  2389.     case 32:
  2390.       EMIT_ASM32 (i386_ext_32,
  2391.                 "movl %eax,%ebx\n\t"
  2392.                 "sarl $31,%ebx");
  2393.       break;
  2394.     default:
  2395.       emit_error = 1;
  2396.     }
  2397. }

  2398. static void
  2399. i386_emit_log_not (void)
  2400. {
  2401.   EMIT_ASM32 (i386_log_not,
  2402.             "or %ebx,%eax\n\t"
  2403.             "test %eax,%eax\n\t"
  2404.             "sete %cl\n\t"
  2405.             "xor %ebx,%ebx\n\t"
  2406.             "movzbl %cl,%eax");
  2407. }

  2408. static void
  2409. i386_emit_bit_and (void)
  2410. {
  2411.   EMIT_ASM32 (i386_and,
  2412.             "and (%esp),%eax\n\t"
  2413.             "and 0x4(%esp),%ebx\n\t"
  2414.             "lea 0x8(%esp),%esp");
  2415. }

  2416. static void
  2417. i386_emit_bit_or (void)
  2418. {
  2419.   EMIT_ASM32 (i386_or,
  2420.             "or (%esp),%eax\n\t"
  2421.             "or 0x4(%esp),%ebx\n\t"
  2422.             "lea 0x8(%esp),%esp");
  2423. }

  2424. static void
  2425. i386_emit_bit_xor (void)
  2426. {
  2427.   EMIT_ASM32 (i386_xor,
  2428.             "xor (%esp),%eax\n\t"
  2429.             "xor 0x4(%esp),%ebx\n\t"
  2430.             "lea 0x8(%esp),%esp");
  2431. }

  2432. static void
  2433. i386_emit_bit_not (void)
  2434. {
  2435.   EMIT_ASM32 (i386_bit_not,
  2436.             "xor $0xffffffff,%eax\n\t"
  2437.             "xor $0xffffffff,%ebx\n\t");
  2438. }

  2439. static void
  2440. i386_emit_equal (void)
  2441. {
  2442.   EMIT_ASM32 (i386_equal,
  2443.             "cmpl %ebx,4(%esp)\n\t"
  2444.             "jne .Li386_equal_false\n\t"
  2445.             "cmpl %eax,(%esp)\n\t"
  2446.             "je .Li386_equal_true\n\t"
  2447.             ".Li386_equal_false:\n\t"
  2448.             "xor %eax,%eax\n\t"
  2449.             "jmp .Li386_equal_end\n\t"
  2450.             ".Li386_equal_true:\n\t"
  2451.             "mov $1,%eax\n\t"
  2452.             ".Li386_equal_end:\n\t"
  2453.             "xor %ebx,%ebx\n\t"
  2454.             "lea 0x8(%esp),%esp");
  2455. }

  2456. static void
  2457. i386_emit_less_signed (void)
  2458. {
  2459.   EMIT_ASM32 (i386_less_signed,
  2460.             "cmpl %ebx,4(%esp)\n\t"
  2461.             "jl .Li386_less_signed_true\n\t"
  2462.             "jne .Li386_less_signed_false\n\t"
  2463.             "cmpl %eax,(%esp)\n\t"
  2464.             "jl .Li386_less_signed_true\n\t"
  2465.             ".Li386_less_signed_false:\n\t"
  2466.             "xor %eax,%eax\n\t"
  2467.             "jmp .Li386_less_signed_end\n\t"
  2468.             ".Li386_less_signed_true:\n\t"
  2469.             "mov $1,%eax\n\t"
  2470.             ".Li386_less_signed_end:\n\t"
  2471.             "xor %ebx,%ebx\n\t"
  2472.             "lea 0x8(%esp),%esp");
  2473. }

  2474. static void
  2475. i386_emit_less_unsigned (void)
  2476. {
  2477.   EMIT_ASM32 (i386_less_unsigned,
  2478.             "cmpl %ebx,4(%esp)\n\t"
  2479.             "jb .Li386_less_unsigned_true\n\t"
  2480.             "jne .Li386_less_unsigned_false\n\t"
  2481.             "cmpl %eax,(%esp)\n\t"
  2482.             "jb .Li386_less_unsigned_true\n\t"
  2483.             ".Li386_less_unsigned_false:\n\t"
  2484.             "xor %eax,%eax\n\t"
  2485.             "jmp .Li386_less_unsigned_end\n\t"
  2486.             ".Li386_less_unsigned_true:\n\t"
  2487.             "mov $1,%eax\n\t"
  2488.             ".Li386_less_unsigned_end:\n\t"
  2489.             "xor %ebx,%ebx\n\t"
  2490.             "lea 0x8(%esp),%esp");
  2491. }

  2492. static void
  2493. i386_emit_ref (int size)
  2494. {
  2495.   switch (size)
  2496.     {
  2497.     case 1:
  2498.       EMIT_ASM32 (i386_ref1,
  2499.                 "movb (%eax),%al");
  2500.       break;
  2501.     case 2:
  2502.       EMIT_ASM32 (i386_ref2,
  2503.                 "movw (%eax),%ax");
  2504.       break;
  2505.     case 4:
  2506.       EMIT_ASM32 (i386_ref4,
  2507.                 "movl (%eax),%eax");
  2508.       break;
  2509.     case 8:
  2510.       EMIT_ASM32 (i386_ref8,
  2511.                 "movl 4(%eax),%ebx\n\t"
  2512.                 "movl (%eax),%eax");
  2513.       break;
  2514.     }
  2515. }

  2516. static void
  2517. i386_emit_if_goto (int *offset_p, int *size_p)
  2518. {
  2519.   EMIT_ASM32 (i386_if_goto,
  2520.             "mov %eax,%ecx\n\t"
  2521.             "or %ebx,%ecx\n\t"
  2522.             "pop %eax\n\t"
  2523.             "pop %ebx\n\t"
  2524.             "cmpl $0,%ecx\n\t"
  2525.             /* Don't trust the assembler to choose the right jump */
  2526.             ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  2527.   if (offset_p)
  2528.     *offset_p = 11; /* be sure that this matches the sequence above */
  2529.   if (size_p)
  2530.     *size_p = 4;
  2531. }

  2532. static void
  2533. i386_emit_goto (int *offset_p, int *size_p)
  2534. {
  2535.   EMIT_ASM32 (i386_goto,
  2536.             /* Don't trust the assembler to choose the right jump */
  2537.             ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  2538.   if (offset_p)
  2539.     *offset_p = 1;
  2540.   if (size_p)
  2541.     *size_p = 4;
  2542. }

  2543. static void
  2544. i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
  2545. {
  2546.   int diff = (to - (from + size));
  2547.   unsigned char buf[sizeof (int)];

  2548.   /* We're only doing 4-byte sizes at the moment.  */
  2549.   if (size != 4)
  2550.     {
  2551.       emit_error = 1;
  2552.       return;
  2553.     }

  2554.   memcpy (buf, &diff, sizeof (int));
  2555.   write_inferior_memory (from, buf, sizeof (int));
  2556. }

  2557. static void
  2558. i386_emit_const (LONGEST num)
  2559. {
  2560.   unsigned char buf[16];
  2561.   int i, hi, lo;
  2562.   CORE_ADDR buildaddr = current_insn_ptr;

  2563.   i = 0;
  2564.   buf[i++] = 0xb8; /* mov $<n>,%eax */
  2565.   lo = num & 0xffffffff;
  2566.   memcpy (&buf[i], &lo, sizeof (lo));
  2567.   i += 4;
  2568.   hi = ((num >> 32) & 0xffffffff);
  2569.   if (hi)
  2570.     {
  2571.       buf[i++] = 0xbb; /* mov $<n>,%ebx */
  2572.       memcpy (&buf[i], &hi, sizeof (hi));
  2573.       i += 4;
  2574.     }
  2575.   else
  2576.     {
  2577.       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
  2578.     }
  2579.   append_insns (&buildaddr, i, buf);
  2580.   current_insn_ptr = buildaddr;
  2581. }
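
For example, num = 0x100000002 emits b8 02 00 00 00 (mov $0x2,%eax)
followed by bb 01 00 00 00 (mov $0x1,%ebx), while a value that fits in
32 bits, say num = 5, emits b8 05 00 00 00 followed by 31 db to zero
the high half.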

  2582. static void
  2583. i386_emit_call (CORE_ADDR fn)
  2584. {
  2585.   unsigned char buf[16];
  2586.   int i, offset;
  2587.   CORE_ADDR buildaddr;

  2588.   buildaddr = current_insn_ptr;
  2589.   i = 0;
  2590.   buf[i++] = 0xe8; /* call <reladdr> */
  2591.   offset = ((int) fn) - (buildaddr + 5);
  2592.   memcpy (buf + 1, &offset, 4);
  2593.   append_insns (&buildaddr, 5, buf);
  2594.   current_insn_ptr = buildaddr;
  2595. }

  2596. static void
  2597. i386_emit_reg (int reg)
  2598. {
  2599.   unsigned char buf[16];
  2600.   int i;
  2601.   CORE_ADDR buildaddr;

  2602.   EMIT_ASM32 (i386_reg_a,
  2603.             "sub $0x8,%esp");
  2604.   buildaddr = current_insn_ptr;
  2605.   i = 0;
  2606.   buf[i++] = 0xb8; /* mov $<n>,%eax */
  2607.   memcpy (&buf[i], &reg, sizeof (reg));
  2608.   i += 4;
  2609.   append_insns (&buildaddr, i, buf);
  2610.   current_insn_ptr = buildaddr;
  2611.   EMIT_ASM32 (i386_reg_b,
  2612.             "mov %eax,4(%esp)\n\t"
  2613.             "mov 8(%ebp),%eax\n\t"
  2614.             "mov %eax,(%esp)");
  2615.   i386_emit_call (get_raw_reg_func_addr ());
  2616.   EMIT_ASM32 (i386_reg_c,
  2617.             "xor %ebx,%ebx\n\t"
  2618.             "lea 0x8(%esp),%esp");
  2619. }

  2620. static void
  2621. i386_emit_pop (void)
  2622. {
  2623.   EMIT_ASM32 (i386_pop,
  2624.             "pop %eax\n\t"
  2625.             "pop %ebx");
  2626. }

  2627. static void
  2628. i386_emit_stack_flush (void)
  2629. {
  2630.   EMIT_ASM32 (i386_stack_flush,
  2631.             "push %ebx\n\t"
  2632.             "push %eax");
  2633. }

  2634. static void
  2635. i386_emit_zero_ext (int arg)
  2636. {
  2637.   switch (arg)
  2638.     {
  2639.     case 8:
  2640.       EMIT_ASM32 (i386_zero_ext_8,
  2641.                 "and $0xff,%eax\n\t"
  2642.                 "xor %ebx,%ebx");
  2643.       break;
  2644.     case 16:
  2645.       EMIT_ASM32 (i386_zero_ext_16,
  2646.                 "and $0xffff,%eax\n\t"
  2647.                 "xor %ebx,%ebx");
  2648.       break;
  2649.     case 32:
  2650.       EMIT_ASM32 (i386_zero_ext_32,
  2651.                 "xor %ebx,%ebx");
  2652.       break;
  2653.     default:
  2654.       emit_error = 1;
  2655.     }
  2656. }

  2657. static void
  2658. i386_emit_swap (void)
  2659. {
  2660.   EMIT_ASM32 (i386_swap,
  2661.             "mov %eax,%ecx\n\t"
  2662.             "mov %ebx,%edx\n\t"
  2663.             "pop %eax\n\t"
  2664.             "pop %ebx\n\t"
  2665.             "push %edx\n\t"
  2666.             "push %ecx");
  2667. }

  2668. static void
  2669. i386_emit_stack_adjust (int n)
  2670. {
  2671.   unsigned char buf[16];
  2672.   int i;
  2673.   CORE_ADDR buildaddr = current_insn_ptr;

  2674.   i = 0;
  2675.   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  2676.   buf[i++] = 0x64;
  2677.   buf[i++] = 0x24;
  2678.   buf[i++] = n * 8;
  2679.   append_insns (&buildaddr, i, buf);
  2680.   current_insn_ptr = buildaddr;
  2681. }

  2682. /* FN's prototype is `LONGEST(*fn)(int)'.  */

  2683. static void
  2684. i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
  2685. {
  2686.   unsigned char buf[16];
  2687.   int i;
  2688.   CORE_ADDR buildaddr;

  2689.   EMIT_ASM32 (i386_int_call_1_a,
  2690.             /* Reserve a bit of stack space.  */
  2691.             "sub $0x8,%esp");
  2692.   /* Put the one argument on the stack.  */
  2693.   buildaddr = current_insn_ptr;
  2694.   i = 0;
  2695.   buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  2696.   buf[i++] = 0x04;
  2697.   buf[i++] = 0x24;
  2698.   memcpy (&buf[i], &arg1, sizeof (arg1));
  2699.   i += 4;
  2700.   append_insns (&buildaddr, i, buf);
  2701.   current_insn_ptr = buildaddr;
  2702.   i386_emit_call (fn);
  2703.   EMIT_ASM32 (i386_int_call_1_c,
  2704.             "mov %edx,%ebx\n\t"
  2705.             "lea 0x8(%esp),%esp");
  2706. }

  2707. /* FN's prototype is `void(*fn)(int,LONGEST)'.  */

  2708. static void
  2709. i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
  2710. {
  2711.   unsigned char buf[16];
  2712.   int i;
  2713.   CORE_ADDR buildaddr;

  2714.   EMIT_ASM32 (i386_void_call_2_a,
  2715.             /* Preserve %eax only; we don't have to worry about %ebx.  */
  2716.             "push %eax\n\t"
  2717.             /* Reserve a bit of stack space for arguments.  */
  2718.             "sub $0x10,%esp\n\t"
  2719.             /* Copy "top" to the second argument position.  (Note that
  2720.                we can't assume function won't scribble on its
  2721.                arguments, so don't try to restore from this.)  */
  2722.             "mov %eax,4(%esp)\n\t"
  2723.             "mov %ebx,8(%esp)");
  2724.   /* Put the first argument on the stack.  */
  2725.   buildaddr = current_insn_ptr;
  2726.   i = 0;
  2727.   buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  2728.   buf[i++] = 0x04;
  2729.   buf[i++] = 0x24;
  2730.   memcpy (&buf[i], &arg1, sizeof (arg1));
  2731.   i += 4;
  2732.   append_insns (&buildaddr, i, buf);
  2733.   current_insn_ptr = buildaddr;
  2734.   i386_emit_call (fn);
  2735.   EMIT_ASM32 (i386_void_call_2_b,
  2736.             "lea 0x10(%esp),%esp\n\t"
  2737.             /* Restore original stack top.  */
  2738.             "pop %eax");
  2739. }


  2740. void
  2741. i386_emit_eq_goto (int *offset_p, int *size_p)
  2742. {
  2743.   EMIT_ASM32 (eq,
  2744.               /* Check low half first, more likely to be decider  */
  2745.               "cmpl %eax,(%esp)\n\t"
  2746.               "jne .Leq_fallthru\n\t"
  2747.               "cmpl %ebx,4(%esp)\n\t"
  2748.               "jne .Leq_fallthru\n\t"
  2749.               "lea 0x8(%esp),%esp\n\t"
  2750.               "pop %eax\n\t"
  2751.               "pop %ebx\n\t"
  2752.               /* jmp, but don't trust the assembler to choose the right jump */
  2753.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2754.               ".Leq_fallthru:\n\t"
  2755.               "lea 0x8(%esp),%esp\n\t"
  2756.               "pop %eax\n\t"
  2757.               "pop %ebx");

  2758.   if (offset_p)
  2759.     *offset_p = 18;
  2760.   if (size_p)
  2761.     *size_p = 4;
  2762. }

  2763. void
  2764. i386_emit_ne_goto (int *offset_p, int *size_p)
  2765. {
  2766.   EMIT_ASM32 (ne,
  2767.               /* Check low half first, more likely to be decider  */
  2768.               "cmpl %eax,(%esp)\n\t"
  2769.               "jne .Lne_jump\n\t"
  2770.               "cmpl %ebx,4(%esp)\n\t"
  2771.               "je .Lne_fallthru\n\t"
  2772.               ".Lne_jump:\n\t"
  2773.               "lea 0x8(%esp),%esp\n\t"
  2774.               "pop %eax\n\t"
  2775.               "pop %ebx\n\t"
  2776.               /* jmp, but don't trust the assembler to choose the right jump */
  2777.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2778.               ".Lne_fallthru:\n\t"
  2779.               "lea 0x8(%esp),%esp\n\t"
  2780.               "pop %eax\n\t"
  2781.               "pop %ebx");

  2782.   if (offset_p)
  2783.     *offset_p = 18;
  2784.   if (size_p)
  2785.     *size_p = 4;
  2786. }

  2787. void
  2788. i386_emit_lt_goto (int *offset_p, int *size_p)
  2789. {
  2790.   EMIT_ASM32 (lt,
  2791.               "cmpl %ebx,4(%esp)\n\t"
  2792.               "jl .Llt_jump\n\t"
  2793.               "jne .Llt_fallthru\n\t"
  2794.               "cmpl %eax,(%esp)\n\t"
  2795.               "jnl .Llt_fallthru\n\t"
  2796.               ".Llt_jump:\n\t"
  2797.               "lea 0x8(%esp),%esp\n\t"
  2798.               "pop %eax\n\t"
  2799.               "pop %ebx\n\t"
  2800.               /* jmp, but don't trust the assembler to choose the right jump */
  2801.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2802.               ".Llt_fallthru:\n\t"
  2803.               "lea 0x8(%esp),%esp\n\t"
  2804.               "pop %eax\n\t"
  2805.               "pop %ebx");

  2806.   if (offset_p)
  2807.     *offset_p = 20;
  2808.   if (size_p)
  2809.     *size_p = 4;
  2810. }

  2811. void
  2812. i386_emit_le_goto (int *offset_p, int *size_p)
  2813. {
  2814.   EMIT_ASM32 (le,
  2815.               "cmpl %ebx,4(%esp)\n\t"
  2816.               "jle .Lle_jump\n\t"
  2817.               "jne .Lle_fallthru\n\t"
  2818.               "cmpl %eax,(%esp)\n\t"
  2819.               "jnle .Lle_fallthru\n\t"
  2820.               ".Lle_jump:\n\t"
  2821.               "lea 0x8(%esp),%esp\n\t"
  2822.               "pop %eax\n\t"
  2823.               "pop %ebx\n\t"
  2824.               /* jmp, but don't trust the assembler to choose the right jump */
  2825.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2826.               ".Lle_fallthru:\n\t"
  2827.               "lea 0x8(%esp),%esp\n\t"
  2828.               "pop %eax\n\t"
  2829.               "pop %ebx");

  2830.   if (offset_p)
  2831.     *offset_p = 20;
  2832.   if (size_p)
  2833.     *size_p = 4;
  2834. }

  2835. void
  2836. i386_emit_gt_goto (int *offset_p, int *size_p)
  2837. {
  2838.   EMIT_ASM32 (gt,
  2839.               "cmpl %ebx,4(%esp)\n\t"
  2840.               "jg .Lgt_jump\n\t"
  2841.               "jne .Lgt_fallthru\n\t"
  2842.               "cmpl %eax,(%esp)\n\t"
  2843.               "jng .Lgt_fallthru\n\t"
  2844.               ".Lgt_jump:\n\t"
  2845.               "lea 0x8(%esp),%esp\n\t"
  2846.               "pop %eax\n\t"
  2847.               "pop %ebx\n\t"
  2848.               /* jmp, but don't trust the assembler to choose the right jump */
  2849.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2850.               ".Lgt_fallthru:\n\t"
  2851.               "lea 0x8(%esp),%esp\n\t"
  2852.               "pop %eax\n\t"
  2853.               "pop %ebx");

  2854.   if (offset_p)
  2855.     *offset_p = 20;
  2856.   if (size_p)
  2857.     *size_p = 4;
  2858. }

  2859. void
  2860. i386_emit_ge_goto (int *offset_p, int *size_p)
  2861. {
  2862.   EMIT_ASM32 (ge,
  2863.               "cmpl %ebx,4(%esp)\n\t"
  2864.               "jge .Lge_jump\n\t"
  2865.               "jne .Lge_fallthru\n\t"
  2866.               "cmpl %eax,(%esp)\n\t"
  2867.               "jnge .Lge_fallthru\n\t"
  2868.               ".Lge_jump:\n\t"
  2869.               "lea 0x8(%esp),%esp\n\t"
  2870.               "pop %eax\n\t"
  2871.               "pop %ebx\n\t"
  2872.               /* jmp, but don't trust the assembler to choose the right jump */
  2873.               ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
  2874.               ".Lge_fallthru:\n\t"
  2875.               "lea 0x8(%esp),%esp\n\t"
  2876.               "pop %eax\n\t"
  2877.               "pop %ebx");

  2878.   if (offset_p)
  2879.     *offset_p = 20;
  2880.   if (size_p)
  2881.     *size_p = 4;
  2882. }

  2883. struct emit_ops i386_emit_ops =
  2884.   {
  2885.     i386_emit_prologue,
  2886.     i386_emit_epilogue,
  2887.     i386_emit_add,
  2888.     i386_emit_sub,
  2889.     i386_emit_mul,
  2890.     i386_emit_lsh,
  2891.     i386_emit_rsh_signed,
  2892.     i386_emit_rsh_unsigned,
  2893.     i386_emit_ext,
  2894.     i386_emit_log_not,
  2895.     i386_emit_bit_and,
  2896.     i386_emit_bit_or,
  2897.     i386_emit_bit_xor,
  2898.     i386_emit_bit_not,
  2899.     i386_emit_equal,
  2900.     i386_emit_less_signed,
  2901.     i386_emit_less_unsigned,
  2902.     i386_emit_ref,
  2903.     i386_emit_if_goto,
  2904.     i386_emit_goto,
  2905.     i386_write_goto_address,
  2906.     i386_emit_const,
  2907.     i386_emit_call,
  2908.     i386_emit_reg,
  2909.     i386_emit_pop,
  2910.     i386_emit_stack_flush,
  2911.     i386_emit_zero_ext,
  2912.     i386_emit_swap,
  2913.     i386_emit_stack_adjust,
  2914.     i386_emit_int_call_1,
  2915.     i386_emit_void_call_2,
  2916.     i386_emit_eq_goto,
  2917.     i386_emit_ne_goto,
  2918.     i386_emit_lt_goto,
  2919.     i386_emit_le_goto,
  2920.     i386_emit_gt_goto,
  2921.     i386_emit_ge_goto
  2922.   };


  2923. static struct emit_ops *
  2924. x86_emit_ops (void)
  2925. {
  2926. #ifdef __x86_64__
  2927.   if (is_64bit_tdesc ())
  2928.     return &amd64_emit_ops;
  2929.   else
  2930. #endif
  2931.     return &i386_emit_ops;
  2932. }
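
Whichever table x86_emit_ops returns is then driven hook by hook to
JIT-compile an agent expression.  A hypothetical driver, compiling
"reg0 + reg1" (the field names follow gdbserver's struct emit_ops in
the order listed above; treat them as assumptions):

    static void
    compile_reg_add_sketch (struct emit_ops *ops)
    {
      ops->emit_prologue ();     /* set up the frame; stash incoming args */
      ops->emit_reg (0);         /* cached top of stack = register 0 */
      ops->emit_stack_flush ();  /* push the cached top onto the memory stack */
      ops->emit_reg (1);         /* new cached top = register 1 */
      ops->emit_add ();          /* top += value popped from the memory stack */
      ops->emit_epilogue ();     /* store the result and return */
    }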

  2933. static int
  2934. x86_supports_range_stepping (void)
  2935. {
  2936.   return 1;
  2937. }

  2938. /* This is initialized assuming an amd64 target.
  2939.    x86_arch_setup will correct it for i386 or amd64 targets.  */

  2940. struct linux_target_ops the_low_target =
  2941. {
  2942.   x86_arch_setup,
  2943.   x86_linux_regs_info,
  2944.   x86_cannot_fetch_register,
  2945.   x86_cannot_store_register,
  2946.   NULL, /* fetch_register */
  2947.   x86_get_pc,
  2948.   x86_set_pc,
  2949.   x86_breakpoint,
  2950.   x86_breakpoint_len,
  2951.   NULL,
  2952.   1,
  2953.   x86_breakpoint_at,
  2954.   x86_supports_z_point_type,
  2955.   x86_insert_point,
  2956.   x86_remove_point,
  2957.   x86_stopped_by_watchpoint,
  2958.   x86_stopped_data_address,
  2959.   /* collect_ptrace_register/supply_ptrace_register are not needed in the
  2960.      native i386 case (no registers smaller than an xfer unit), and are not
  2961.      used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  2962.   NULL,
  2963.   NULL,
  2964.   /* need to fix up i386 siginfo if host is amd64 */
  2965.   x86_siginfo_fixup,
  2966.   x86_linux_new_process,
  2967.   x86_linux_new_thread,
  2968.   x86_linux_prepare_to_resume,
  2969.   x86_linux_process_qsupported,
  2970.   x86_supports_tracepoints,
  2971.   x86_get_thread_area,
  2972.   x86_install_fast_tracepoint_jump_pad,
  2973.   x86_emit_ops,
  2974.   x86_get_min_fast_tracepoint_insn_len,
  2975.   x86_supports_range_stepping,
  2976. };

  2977. void
  2978. initialize_low_arch (void)
  2979. {
  2980.   /* Initialize the Linux target descriptions.  */
  2981. #ifdef __x86_64__
  2982.   init_registers_amd64_linux ();
  2983.   init_registers_amd64_avx_linux ();
  2984.   init_registers_amd64_avx512_linux ();
  2985.   init_registers_amd64_mpx_linux ();

  2986.   init_registers_x32_linux ();
  2987.   init_registers_x32_avx_linux ();
  2988.   init_registers_x32_avx512_linux ();

  2989.   tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  2990.   copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  2991.   tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
  2992. #endif
  2993.   init_registers_i386_linux ();
  2994.   init_registers_i386_mmx_linux ();
  2995.   init_registers_i386_avx_linux ();
  2996.   init_registers_i386_avx512_linux ();
  2997.   init_registers_i386_mpx_linux ();

  2998.   tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  2999.   copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  3000.   tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  3001.   initialize_regsets_info (&x86_regsets_info);
  3002. }