gdb/aarch64-tdep.c - gdb

Global variables defined

Data types defined

Functions defined

Macros defined

Source code

  1. /* Common target dependent code for GDB on AArch64 systems.

  2.    Copyright (C) 2009-2015 Free Software Foundation, Inc.
  3.    Contributed by ARM Ltd.

  4.    This file is part of GDB.

  5.    This program is free software; you can redistribute it and/or modify
  6.    it under the terms of the GNU General Public License as published by
  7.    the Free Software Foundation; either version 3 of the License, or
  8.    (at your option) any later version.

  9.    This program is distributed in the hope that it will be useful,
  10.    but WITHOUT ANY WARRANTY; without even the implied warranty of
  11.    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12.    GNU General Public License for more details.

  13.    You should have received a copy of the GNU General Public License
  14.    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

  15. #include "defs.h"

  16. #include "frame.h"
  17. #include "inferior.h"
  18. #include "gdbcmd.h"
  19. #include "gdbcore.h"
  20. #include "dis-asm.h"
  21. #include "regcache.h"
  22. #include "reggroups.h"
  23. #include "doublest.h"
  24. #include "value.h"
  25. #include "arch-utils.h"
  26. #include "osabi.h"
  27. #include "frame-unwind.h"
  28. #include "frame-base.h"
  29. #include "trad-frame.h"
  30. #include "objfiles.h"
  31. #include "dwarf2-frame.h"
  32. #include "gdbtypes.h"
  33. #include "prologue-value.h"
  34. #include "target-descriptions.h"
  35. #include "user-regs.h"
  36. #include "language.h"
  37. #include "infcall.h"

  38. #include "aarch64-tdep.h"

  39. #include "elf-bfd.h"
  40. #include "elf/aarch64.h"

  41. #include "vec.h"

  42. #include "features/aarch64.c"

/* Pseudo register base numbers.  The pseudo registers present the V
   registers at narrower widths; each bank below is 32 registers wide
   and the banks are allocated consecutively (Q = 128-bit, D = 64-bit,
   S = 32-bit, H = 16-bit, B = 8-bit views).  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.
   Each entry maps a user-visible alias onto the raw register number
   it refers to; presumably consumed when the architecture's register
   names are registered (registration code is elsewhere in this
   file).  */
static const struct
{
  const char *const name;	/* Alias as typed by the user.  */
  int regnum;			/* Raw register number it names.  */
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names: wN is the low 32-bit view of xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* Specials: the AAPCS64 intra-procedure-call scratch registers,
     which are aliases for x16 and x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  Indexed relative to
   AARCH64_X0_REGNUM; the trailing sp, pc and cpsr entries follow the
   thirty-one general registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  The fpsr and fpcr status/control
   registers follow the thirty-two vector registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  Filled in by prologue analysis
   and consulted by the frame unwinder.  */
struct aarch64_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame;
     -1 when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Toggle this file's internal debugging dump.  Non-zero enables the
   "decode:" traces emitted by the instruction decoders below;
   presumably wired to a "set/show debug" command registered elsewhere
   in this file.  */
static int aarch64_debug;

/* Callback printing the current value of the aarch64 debug flag for
   the corresponding "show" command.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

  151. /* Extract a signed value from a bit field within an instruction
  152.    encoding.

  153.    INSN is the instruction opcode.

  154.    WIDTH specifies the width of the bit field to extract (in bits).

  155.    OFFSET specifies the least significant bit of the field where bits
  156.    are numbered zero counting from least to most significant.  */

  157. static int32_t
  158. extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
  159. {
  160.   unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  161.   unsigned shift_r = sizeof (int32_t) * 8 - width;

  162.   return ((int32_t) insn << shift_l) >> shift_r;
  163. }

  164. /* Determine if specified bits within an instruction opcode matches a
  165.    specific pattern.

  166.    INSN is the instruction opcode.

  167.    MASK specifies the bits within the opcode that are to be tested
  168.    agsinst for a match with PATTERN.  */

  169. static int
  170. decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
  171. {
  172.   return (insn & mask) == pattern;
  173. }

  174. /* Decode an opcode if it represents an immediate ADD or SUB instruction.

  175.    ADDR specifies the address of the opcode.
  176.    INSN specifies the opcode to test.
  177.    RD receives the 'rd' field from the decoded instruction.
  178.    RN receives the 'rn' field from the decoded instruction.

  179.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */
  180. static int
  181. decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
  182.                     int32_t *imm)
  183. {
  184.   if ((insn & 0x9f000000) == 0x91000000)
  185.     {
  186.       unsigned shift;
  187.       unsigned op_is_sub;

  188.       *rd = (insn >> 0) & 0x1f;
  189.       *rn = (insn >> 5) & 0x1f;
  190.       *imm = (insn >> 10) & 0xfff;
  191.       shift = (insn >> 22) & 0x3;
  192.       op_is_sub = (insn >> 30) & 0x1;

  193.       switch (shift)
  194.         {
  195.         case 0:
  196.           break;
  197.         case 1:
  198.           *imm <<= 12;
  199.           break;
  200.         default:
  201.           /* UNDEFINED */
  202.           return 0;
  203.         }

  204.       if (op_is_sub)
  205.         *imm = -*imm;

  206.       if (aarch64_debug)
  207.         fprintf_unfiltered (gdb_stdlog,
  208.                             "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
  209.                             core_addr_to_string_nz (addr), insn, *rd, *rn,
  210.                             *imm);
  211.       return 1;
  212.     }
  213.   return 0;
  214. }

  215. /* Decode an opcode if it represents an ADRP instruction.

  216.    ADDR specifies the address of the opcode.
  217.    INSN specifies the opcode to test.
  218.    RD receives the 'rd' field from the decoded instruction.

  219.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  220. static int
  221. decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
  222. {
  223.   if (decode_masked_match (insn, 0x9f000000, 0x90000000))
  224.     {
  225.       *rd = (insn >> 0) & 0x1f;

  226.       if (aarch64_debug)
  227.         fprintf_unfiltered (gdb_stdlog,
  228.                             "decode: 0x%s 0x%x adrp x%u, #?\n",
  229.                             core_addr_to_string_nz (addr), insn, *rd);
  230.       return 1;
  231.     }
  232.   return 0;
  233. }

  234. /* Decode an opcode if it represents an branch immediate or branch
  235.    and link immediate instruction.

  236.    ADDR specifies the address of the opcode.
  237.    INSN specifies the opcode to test.
  238.    LINK receives the 'link' bit from the decoded instruction.
  239.    OFFSET receives the immediate offset from the decoded instruction.

  240.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  241. static int
  242. decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
  243. {
  244.   /* b  0001 01ii iiii iiii iiii iiii iiii iiii */
  245.   /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
  246.   if (decode_masked_match (insn, 0x7c000000, 0x14000000))
  247.     {
  248.       *link = insn >> 31;
  249.       *offset = extract_signed_bitfield (insn, 26, 0) << 2;

  250.       if (aarch64_debug)
  251.         fprintf_unfiltered (gdb_stdlog,
  252.                             "decode: 0x%s 0x%x %s 0x%s\n",
  253.                             core_addr_to_string_nz (addr), insn,
  254.                             *link ? "bl" : "b",
  255.                             core_addr_to_string_nz (addr + *offset));

  256.       return 1;
  257.     }
  258.   return 0;
  259. }

  260. /* Decode an opcode if it represents a conditional branch instruction.

  261.    ADDR specifies the address of the opcode.
  262.    INSN specifies the opcode to test.
  263.    COND receives the branch condition field from the decoded
  264.    instruction.
  265.    OFFSET receives the immediate offset from the decoded instruction.

  266.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  267. static int
  268. decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
  269. {
  270.   if (decode_masked_match (insn, 0xfe000000, 0x54000000))
  271.     {
  272.       *cond = (insn >> 0) & 0xf;
  273.       *offset = extract_signed_bitfield (insn, 19, 5) << 2;

  274.       if (aarch64_debug)
  275.         fprintf_unfiltered (gdb_stdlog,
  276.                             "decode: 0x%s 0x%x b<%u> 0x%s\n",
  277.                             core_addr_to_string_nz (addr), insn, *cond,
  278.                             core_addr_to_string_nz (addr + *offset));
  279.       return 1;
  280.     }
  281.   return 0;
  282. }

  283. /* Decode an opcode if it represents a branch via register instruction.

  284.    ADDR specifies the address of the opcode.
  285.    INSN specifies the opcode to test.
  286.    LINK receives the 'link' bit from the decoded instruction.
  287.    RN receives the 'rn' field from the decoded instruction.

  288.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  289. static int
  290. decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
  291. {
  292.   /*         8   4   0   6   2   8   4   0 */
  293.   /* blr  110101100011111100000000000rrrrr */
  294.   /* br   110101100001111100000000000rrrrr */
  295.   if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
  296.     {
  297.       *link = (insn >> 21) & 1;
  298.       *rn = (insn >> 5) & 0x1f;

  299.       if (aarch64_debug)
  300.         fprintf_unfiltered (gdb_stdlog,
  301.                             "decode: 0x%s 0x%x %s 0x%x\n",
  302.                             core_addr_to_string_nz (addr), insn,
  303.                             *link ? "blr" : "br", *rn);

  304.       return 1;
  305.     }
  306.   return 0;
  307. }

  308. /* Decode an opcode if it represents a CBZ or CBNZ instruction.

  309.    ADDR specifies the address of the opcode.
  310.    INSN specifies the opcode to test.
  311.    IS64 receives the 'sf' field from the decoded instruction.
  312.    OP receives the 'op' field from the decoded instruction.
  313.    RN receives the 'rn' field from the decoded instruction.
  314.    OFFSET receives the 'imm19' field from the decoded instruction.

  315.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  316. static int
  317. decode_cb (CORE_ADDR addr,
  318.            uint32_t insn, int *is64, unsigned *op, unsigned *rn,
  319.            int32_t *offset)
  320. {
  321.   if (decode_masked_match (insn, 0x7e000000, 0x34000000))
  322.     {
  323.       /* cbz  T011 010o iiii iiii iiii iiii iiir rrrr */
  324.       /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */

  325.       *rn = (insn >> 0) & 0x1f;
  326.       *is64 = (insn >> 31) & 0x1;
  327.       *op = (insn >> 24) & 0x1;
  328.       *offset = extract_signed_bitfield (insn, 19, 5) << 2;

  329.       if (aarch64_debug)
  330.         fprintf_unfiltered (gdb_stdlog,
  331.                             "decode: 0x%s 0x%x %s 0x%s\n",
  332.                             core_addr_to_string_nz (addr), insn,
  333.                             *op ? "cbnz" : "cbz",
  334.                             core_addr_to_string_nz (addr + *offset));
  335.       return 1;
  336.     }
  337.   return 0;
  338. }

  339. /* Decode an opcode if it represents a ERET instruction.

  340.    ADDR specifies the address of the opcode.
  341.    INSN specifies the opcode to test.

  342.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  343. static int
  344. decode_eret (CORE_ADDR addr, uint32_t insn)
  345. {
  346.   /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
  347.   if (insn == 0xd69f03e0)
  348.     {
  349.       if (aarch64_debug)
  350.         fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
  351.                             core_addr_to_string_nz (addr), insn);
  352.       return 1;
  353.     }
  354.   return 0;
  355. }

  356. /* Decode an opcode if it represents a MOVZ instruction.

  357.    ADDR specifies the address of the opcode.
  358.    INSN specifies the opcode to test.
  359.    RD receives the 'rd' field from the decoded instruction.

  360.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  361. static int
  362. decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
  363. {
  364.   if (decode_masked_match (insn, 0xff800000, 0x52800000))
  365.     {
  366.       *rd = (insn >> 0) & 0x1f;

  367.       if (aarch64_debug)
  368.         fprintf_unfiltered (gdb_stdlog,
  369.                             "decode: 0x%s 0x%x movz x%u, #?\n",
  370.                             core_addr_to_string_nz (addr), insn, *rd);
  371.       return 1;
  372.     }
  373.   return 0;
  374. }

  375. /* Decode an opcode if it represents a ORR (shifted register)
  376.    instruction.

  377.    ADDR specifies the address of the opcode.
  378.    INSN specifies the opcode to test.
  379.    RD receives the 'rd' field from the decoded instruction.
  380.    RN receives the 'rn' field from the decoded instruction.
  381.    RM receives the 'rm' field from the decoded instruction.
  382.    IMM receives the 'imm6' field from the decoded instruction.

  383.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  384. static int
  385. decode_orr_shifted_register_x (CORE_ADDR addr,
  386.                                uint32_t insn, unsigned *rd, unsigned *rn,
  387.                                unsigned *rm, int32_t *imm)
  388. {
  389.   if (decode_masked_match (insn, 0xff200000, 0xaa000000))
  390.     {
  391.       *rd = (insn >> 0) & 0x1f;
  392.       *rn = (insn >> 5) & 0x1f;
  393.       *rm = (insn >> 16) & 0x1f;
  394.       *imm = (insn >> 10) & 0x3f;

  395.       if (aarch64_debug)
  396.         fprintf_unfiltered (gdb_stdlog,
  397.                             "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
  398.                             core_addr_to_string_nz (addr), insn, *rd,
  399.                             *rn, *rm, *imm);
  400.       return 1;
  401.     }
  402.   return 0;
  403. }

  404. /* Decode an opcode if it represents a RET instruction.

  405.    ADDR specifies the address of the opcode.
  406.    INSN specifies the opcode to test.
  407.    RN receives the 'rn' field from the decoded instruction.

  408.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  409. static int
  410. decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
  411. {
  412.   if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
  413.     {
  414.       *rn = (insn >> 5) & 0x1f;
  415.       if (aarch64_debug)
  416.         fprintf_unfiltered (gdb_stdlog,
  417.                             "decode: 0x%s 0x%x ret x%u\n",
  418.                             core_addr_to_string_nz (addr), insn, *rn);
  419.       return 1;
  420.     }
  421.   return 0;
  422. }

  423. /* Decode an opcode if it represents the following instruction:
  424.    STP rt, rt2, [rn, #imm]

  425.    ADDR specifies the address of the opcode.
  426.    INSN specifies the opcode to test.
  427.    RT1 receives the 'rt' field from the decoded instruction.
  428.    RT2 receives the 'rt2' field from the decoded instruction.
  429.    RN receives the 'rn' field from the decoded instruction.
  430.    IMM receives the 'imm' field from the decoded instruction.

  431.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  432. static int
  433. decode_stp_offset (CORE_ADDR addr,
  434.                    uint32_t insn,
  435.                    unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
  436. {
  437.   if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
  438.     {
  439.       *rt1 = (insn >> 0) & 0x1f;
  440.       *rn = (insn >> 5) & 0x1f;
  441.       *rt2 = (insn >> 10) & 0x1f;
  442.       *imm = extract_signed_bitfield (insn, 7, 15);
  443.       *imm <<= 3;

  444.       if (aarch64_debug)
  445.         fprintf_unfiltered (gdb_stdlog,
  446.                             "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
  447.                             core_addr_to_string_nz (addr), insn,
  448.                             *rt1, *rt2, *rn, *imm);
  449.       return 1;
  450.     }
  451.   return 0;
  452. }

  453. /* Decode an opcode if it represents the following instruction:
  454.    STP rt, rt2, [rn, #imm]!

  455.    ADDR specifies the address of the opcode.
  456.    INSN specifies the opcode to test.
  457.    RT1 receives the 'rt' field from the decoded instruction.
  458.    RT2 receives the 'rt2' field from the decoded instruction.
  459.    RN receives the 'rn' field from the decoded instruction.
  460.    IMM receives the 'imm' field from the decoded instruction.

  461.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  462. static int
  463. decode_stp_offset_wb (CORE_ADDR addr,
  464.                       uint32_t insn,
  465.                       unsigned *rt1, unsigned *rt2, unsigned *rn,
  466.                       int32_t *imm)
  467. {
  468.   if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
  469.     {
  470.       *rt1 = (insn >> 0) & 0x1f;
  471.       *rn = (insn >> 5) & 0x1f;
  472.       *rt2 = (insn >> 10) & 0x1f;
  473.       *imm = extract_signed_bitfield (insn, 7, 15);
  474.       *imm <<= 3;

  475.       if (aarch64_debug)
  476.         fprintf_unfiltered (gdb_stdlog,
  477.                             "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
  478.                             core_addr_to_string_nz (addr), insn,
  479.                             *rt1, *rt2, *rn, *imm);
  480.       return 1;
  481.     }
  482.   return 0;
  483. }

  484. /* Decode an opcode if it represents the following instruction:
  485.    STUR rt, [rn, #imm]

  486.    ADDR specifies the address of the opcode.
  487.    INSN specifies the opcode to test.
  488.    IS64 receives size field from the decoded instruction.
  489.    RT receives the 'rt' field from the decoded instruction.
  490.    RN receives the 'rn' field from the decoded instruction.
  491.    IMM receives the 'imm' field from the decoded instruction.

  492.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  493. static int
  494. decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
  495.              unsigned *rn, int32_t *imm)
  496. {
  497.   if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
  498.     {
  499.       *is64 = (insn >> 30) & 1;
  500.       *rt = (insn >> 0) & 0x1f;
  501.       *rn = (insn >> 5) & 0x1f;
  502.       *imm = extract_signed_bitfield (insn, 9, 12);

  503.       if (aarch64_debug)
  504.         fprintf_unfiltered (gdb_stdlog,
  505.                             "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
  506.                             core_addr_to_string_nz (addr), insn,
  507.                             *is64 ? 'x' : 'w', *rt, *rn, *imm);
  508.       return 1;
  509.     }
  510.   return 0;
  511. }

  512. /* Decode an opcode if it represents a TB or TBNZ instruction.

  513.    ADDR specifies the address of the opcode.
  514.    INSN specifies the opcode to test.
  515.    OP receives the 'op' field from the decoded instruction.
  516.    BIT receives the bit position field from the decoded instruction.
  517.    RT receives 'rt' field from the decoded instruction.
  518.    IMM receives 'imm' field from the decoded instruction.

  519.    Return 1 if the opcodes matches and is decoded, otherwise 0.  */

  520. static int
  521. decode_tb (CORE_ADDR addr,
  522.            uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
  523.            int32_t *imm)
  524. {
  525.   if (decode_masked_match (insn, 0x7e000000, 0x36000000))
  526.     {
  527.       /* tbz  b011 0110 bbbb biii iiii iiii iiir rrrr */
  528.       /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */

  529.       *rt = (insn >> 0) & 0x1f;
  530.       *op = insn & (1 << 24);
  531.       *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
  532.       *imm = extract_signed_bitfield (insn, 14, 5) << 2;

  533.       if (aarch64_debug)
  534.         fprintf_unfiltered (gdb_stdlog,
  535.                             "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
  536.                             core_addr_to_string_nz (addr), insn,
  537.                             *op ? "tbnz" : "tbz", *rt, *bit,
  538.                             core_addr_to_string_nz (addr + *imm));
  539.       return 1;
  540.     }
  541.   return 0;
  542. }

  543. /* Analyze a prologue, looking for a recognizable stack frame
  544.    and frame pointer.  Scan until we encounter a store that could
  545.    clobber the stack frame unexpectedly, or an unknown instruction.  */

  546. static CORE_ADDR
  547. aarch64_analyze_prologue (struct gdbarch *gdbarch,
  548.                           CORE_ADDR start, CORE_ADDR limit,
  549.                           struct aarch64_prologue_cache *cache)
  550. {
  551.   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  552.   int i;
  553.   pv_t regs[AARCH64_X_REGISTER_COUNT];
  554.   struct pv_area *stack;
  555.   struct cleanup *back_to;

  556.   for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
  557.     regs[i] = pv_register (i, 0);
  558.   stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  559.   back_to = make_cleanup_free_pv_area (stack);

  560.   for (; start < limit; start += 4)
  561.     {
  562.       uint32_t insn;
  563.       unsigned rd;
  564.       unsigned rn;
  565.       unsigned rm;
  566.       unsigned rt;
  567.       unsigned rt1;
  568.       unsigned rt2;
  569.       int op_is_sub;
  570.       int32_t imm;
  571.       unsigned cond;
  572.       int is64;
  573.       unsigned is_link;
  574.       unsigned op;
  575.       unsigned bit;
  576.       int32_t offset;

  577.       insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

  578.       if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
  579.         regs[rd] = pv_add_constant (regs[rn], imm);
  580.       else if (decode_adrp (start, insn, &rd))
  581.         regs[rd] = pv_unknown ();
  582.       else if (decode_b (start, insn, &is_link, &offset))
  583.         {
  584.           /* Stop analysis on branch.  */
  585.           break;
  586.         }
  587.       else if (decode_bcond (start, insn, &cond, &offset))
  588.         {
  589.           /* Stop analysis on branch.  */
  590.           break;
  591.         }
  592.       else if (decode_br (start, insn, &is_link, &rn))
  593.         {
  594.           /* Stop analysis on branch.  */
  595.           break;
  596.         }
  597.       else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
  598.         {
  599.           /* Stop analysis on branch.  */
  600.           break;
  601.         }
  602.       else if (decode_eret (start, insn))
  603.         {
  604.           /* Stop analysis on branch.  */
  605.           break;
  606.         }
  607.       else if (decode_movz (start, insn, &rd))
  608.         regs[rd] = pv_unknown ();
  609.       else
  610.         if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
  611.         {
  612.           if (imm == 0 && rn == 31)
  613.             regs[rd] = regs[rm];
  614.           else
  615.             {
  616.               if (aarch64_debug)
  617.                 fprintf_unfiltered
  618.                   (gdb_stdlog,
  619.                    "aarch64: prologue analysis gave up addr=0x%s "
  620.                    "opcode=0x%x (orr x register)\n",
  621.                    core_addr_to_string_nz (start),
  622.                    insn);
  623.               break;
  624.             }
  625.         }
  626.       else if (decode_ret (start, insn, &rn))
  627.         {
  628.           /* Stop analysis on branch.  */
  629.           break;
  630.         }
  631.       else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
  632.         {
  633.           pv_area_store (stack, pv_add_constant (regs[rn], offset),
  634.                          is64 ? 8 : 4, regs[rt]);
  635.         }
  636.       else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
  637.         {
  638.           /* If recording this store would invalidate the store area
  639.              (perhaps because rn is not known) then we should abandon
  640.              further prologue analysis.  */
  641.           if (pv_area_store_would_trash (stack,
  642.                                          pv_add_constant (regs[rn], imm)))
  643.             break;

  644.           if (pv_area_store_would_trash (stack,
  645.                                          pv_add_constant (regs[rn], imm + 8)))
  646.             break;

  647.           pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
  648.                          regs[rt1]);
  649.           pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
  650.                          regs[rt2]);
  651.         }
  652.       else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
  653.         {
  654.           /* If recording this store would invalidate the store area
  655.              (perhaps because rn is not known) then we should abandon
  656.              further prologue analysis.  */
  657.           if (pv_area_store_would_trash (stack,
  658.                                          pv_add_constant (regs[rn], imm)))
  659.             break;

  660.           if (pv_area_store_would_trash (stack,
  661.                                          pv_add_constant (regs[rn], imm + 8)))
  662.             break;

  663.           pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
  664.                          regs[rt1]);
  665.           pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
  666.                          regs[rt2]);
  667.           regs[rn] = pv_add_constant (regs[rn], imm);
  668.         }
  669.       else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
  670.         {
  671.           /* Stop analysis on branch.  */
  672.           break;
  673.         }
  674.       else
  675.         {
  676.           if (aarch64_debug)
  677.             fprintf_unfiltered (gdb_stdlog,
  678.                                 "aarch64: prologue analysis gave up addr=0x%s"
  679.                                 " opcode=0x%x\n",
  680.                                 core_addr_to_string_nz (start), insn);
  681.           break;
  682.         }
  683.     }

  684.   if (cache == NULL)
  685.     {
  686.       do_cleanups (back_to);
  687.       return start;
  688.     }

  689.   if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
  690.     {
  691.       /* Frame pointer is fpFrame size is constant.  */
  692.       cache->framereg = AARCH64_FP_REGNUM;
  693.       cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  694.     }
  695.   else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
  696.     {
  697.       /* Try the stack pointer.  */
  698.       cache->framesize = -regs[AARCH64_SP_REGNUM].k;
  699.       cache->framereg = AARCH64_SP_REGNUM;
  700.     }
  701.   else
  702.     {
  703.       /* We're just out of luck.  We don't know where the frame is.  */
  704.       cache->framereg = -1;
  705.       cache->framesize = 0;
  706.     }

  707.   for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
  708.     {
  709.       CORE_ADDR offset;

  710.       if (pv_area_find_reg (stack, gdbarch, i, &offset))
  711.         cache->saved_regs[i].addr = offset;
  712.     }

  713.   do_cleanups (back_to);
  714.   return start;
  715. }

  716. /* Implement the "skip_prologue" gdbarch method.  */

  717. static CORE_ADDR
  718. aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  719. {
  720.   unsigned long inst;
  721.   CORE_ADDR skip_pc;
  722.   CORE_ADDR func_addr, limit_pc;
  723.   struct symtab_and_line sal;

  724.   /* See if we can determine the end of the prologue via the symbol
  725.      table.  If so, then return either PC, or the PC after the
  726.      prologue, whichever is greater.  */
  727.   if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
  728.     {
  729.       CORE_ADDR post_prologue_pc
  730.         = skip_prologue_using_sal (gdbarch, func_addr);

  731.       if (post_prologue_pc != 0)
  732.         return max (pc, post_prologue_pc);
  733.     }

  734.   /* Can't determine prologue from the symbol table, need to examine
  735.      instructions.  */

  736.   /* Find an upper limit on the function prologue using the debug
  737.      information.  If the debug information could not be used to
  738.      provide that bound, then use an arbitrary large number as the
  739.      upper bound.  */
  740.   limit_pc = skip_prologue_using_sal (gdbarch, pc);
  741.   if (limit_pc == 0)
  742.     limit_pc = pc + 128;        /* Magic.  */

  743.   /* Try disassembling prologue.  */
  744.   return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
  745. }

  746. /* Scan the function prologue for THIS_FRAME and populate the prologue
  747.    cache CACHE.  */

  748. static void
  749. aarch64_scan_prologue (struct frame_info *this_frame,
  750.                        struct aarch64_prologue_cache *cache)
  751. {
  752.   CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  753.   CORE_ADDR prologue_start;
  754.   CORE_ADDR prologue_end;
  755.   CORE_ADDR prev_pc = get_frame_pc (this_frame);
  756.   struct gdbarch *gdbarch = get_frame_arch (this_frame);

  757.   /* Assume we do not find a frame.  */
  758.   cache->framereg = -1;
  759.   cache->framesize = 0;

  760.   if (find_pc_partial_function (block_addr, NULL, &prologue_start,
  761.                                 &prologue_end))
  762.     {
  763.       struct symtab_and_line sal = find_pc_line (prologue_start, 0);

  764.       if (sal.line == 0)
  765.         {
  766.           /* No line info so use the current PC.  */
  767.           prologue_end = prev_pc;
  768.         }
  769.       else if (sal.end < prologue_end)
  770.         {
  771.           /* The next line begins after the function end.  */
  772.           prologue_end = sal.end;
  773.         }

  774.       prologue_end = min (prologue_end, prev_pc);
  775.       aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
  776.     }
  777.   else
  778.     {
  779.       CORE_ADDR frame_loc;
  780.       LONGEST saved_fp;
  781.       LONGEST saved_lr;
  782.       enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  783.       frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
  784.       if (frame_loc == 0)
  785.         return;

  786.       cache->framereg = AARCH64_FP_REGNUM;
  787.       cache->framesize = 16;
  788.       cache->saved_regs[29].addr = 0;
  789.       cache->saved_regs[30].addr = 8;
  790.     }
  791. }

/* Allocate an aarch64_prologue_cache and fill it with information
   about the prologue of *THIS_FRAME.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame)
{
  struct aarch64_prologue_cache *cache;
  CORE_ADDR unwound_fp;
  int reg;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  aarch64_scan_prologue (this_frame, cache);

  /* The scan may fail to identify a frame register; return the cache
     with prev_sp still zero so callers treat this as a dead end.  */
  if (cache->framereg == -1)
    return cache;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return cache;

  /* The caller's SP is the frame register's value plus the frame
     size determined by the prologue scan.  */
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  return cache;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache;
  struct frame_id id;
  CORE_ADDR pc, func;

  /* Build (and memoize) the prologue cache on first use.  */
  if (*this_cache == NULL)
    *this_cache = aarch64_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* This is meant to halt the backtrace at "_start".  Leaving
     *THIS_ID unset ends the backtrace here.  */
  pc = get_frame_pc (this_frame);
  if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return;

  /* If we've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return;

  func = get_frame_func (this_frame);
  id = frame_id_build (cache->prev_sp, func);
  *this_id = id;
}

  839. /* Implement the "prev_register" frame_unwind method.  */

  840. static struct value *
  841. aarch64_prologue_prev_register (struct frame_info *this_frame,
  842.                                 void **this_cache, int prev_regnum)
  843. {
  844.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  845.   struct aarch64_prologue_cache *cache;

  846.   if (*this_cache == NULL)
  847.     *this_cache = aarch64_make_prologue_cache (this_frame);
  848.   cache = *this_cache;

  849.   /* If we are asked to unwind the PC, then we need to return the LR
  850.      instead.  The prologue may save PC, but it will point into this
  851.      frame's prologue, not the next frame's resume location.  */
  852.   if (prev_regnum == AARCH64_PC_REGNUM)
  853.     {
  854.       CORE_ADDR lr;

  855.       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
  856.       return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  857.     }

  858.   /* SP is generally not saved to the stack, but this frame is
  859.      identified by the next frame's stack pointer at the time of the
  860.      call.  The value was already reconstructed into PREV_SP.  */
  861.   /*
  862.          +----------+  ^
  863.          | saved lr |  |
  864.       +->| saved fp |--+
  865.       |  |          |
  866.       |  |          |     <- Previous SP
  867.       |  +----------+
  868.       |  | saved lr |
  869.       +--| saved fp |<- FP
  870.          |          |
  871.          |          |<- SP
  872.          +----------+  */
  873.   if (prev_regnum == AARCH64_SP_REGNUM)
  874.     return frame_unwind_got_constant (this_frame, prev_regnum,
  875.                                       cache->prev_sp);

  876.   return trad_frame_get_prev_register (this_frame, cache->saved_regs,
  877.                                        prev_regnum);
  878. }

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,                         /* type */
  default_frame_unwind_stop_reason,     /* stop_reason */
  aarch64_prologue_this_id,             /* this_id */
  aarch64_prologue_prev_register,       /* prev_register */
  NULL,                                 /* unwind_data */
  default_frame_sniffer                 /* sniffer */
};

  889. /* Allocate an aarch64_prologue_cache and fill it with information
  890.    about the prologue of *THIS_FRAME.  */

  891. static struct aarch64_prologue_cache *
  892. aarch64_make_stub_cache (struct frame_info *this_frame)
  893. {
  894.   int reg;
  895.   struct aarch64_prologue_cache *cache;
  896.   CORE_ADDR unwound_fp;

  897.   cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  898.   cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  899.   cache->prev_sp
  900.     = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);

  901.   return cache;
  902. }

  903. /* Our frame ID for a stub frame is the current SP and LR.  */

  904. static void
  905. aarch64_stub_this_id (struct frame_info *this_frame,
  906.                       void **this_cache, struct frame_id *this_id)
  907. {
  908.   struct aarch64_prologue_cache *cache;

  909.   if (*this_cache == NULL)
  910.     *this_cache = aarch64_make_stub_cache (this_frame);
  911.   cache = *this_cache;

  912.   *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
  913. }

/* Implement the "sniffer" frame_unwind method.  Return non-zero if
   the stub unwinder should handle THIS_FRAME.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];  /* Scratch buffer for the readability probe.  */

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,                         /* type */
  default_frame_unwind_stop_reason,     /* stop_reason */
  aarch64_stub_this_id,                 /* this_id */
  aarch64_prologue_prev_register,       /* prev_register */
  NULL,                                 /* unwind_data */
  aarch64_stub_unwind_sniffer           /* sniffer */
};

  940. /* Return the frame base address of *THIS_FRAME.  */

  941. static CORE_ADDR
  942. aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  943. {
  944.   struct aarch64_prologue_cache *cache;

  945.   if (*this_cache == NULL)
  946.     *this_cache = aarch64_make_prologue_cache (this_frame);
  947.   cache = *this_cache;

  948.   return cache->prev_sp - cache->framesize;
  949. }

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,     /* unwind */
  aarch64_normal_frame_base,    /* this_base */
  aarch64_normal_frame_base,    /* this_locus */
  aarch64_normal_frame_base     /* this_args */
};

  958. /* Assuming THIS_FRAME is a dummy, return the frame ID of that
  959.    dummy frame.  The frame ID's base needs to match the TOS value
  960.    saved by save_dummy_frame_tos () and returned from
  961.    aarch64_push_dummy_call, and the PC needs to match the dummy
  962.    frame's breakpoint.  */

  963. static struct frame_id
  964. aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  965. {
  966.   return frame_id_build (get_frame_register_unsigned (this_frame,
  967.                                                       AARCH64_SP_REGNUM),
  968.                          get_frame_pc (this_frame));
  969. }

  970. /* Implement the "unwind_pc" gdbarch method.  */

  971. static CORE_ADDR
  972. aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
  973. {
  974.   CORE_ADDR pc
  975.     = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  976.   return pc;
  977. }

  978. /* Implement the "unwind_sp" gdbarch method.  */

  979. static CORE_ADDR
  980. aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
  981. {
  982.   return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
  983. }

  984. /* Return the value of the REGNUM register in the previous frame of
  985.    *THIS_FRAME.  */

  986. static struct value *
  987. aarch64_dwarf2_prev_register (struct frame_info *this_frame,
  988.                               void **this_cache, int regnum)
  989. {
  990.   struct gdbarch *gdbarch = get_frame_arch (this_frame);
  991.   CORE_ADDR lr;

  992.   switch (regnum)
  993.     {
  994.     case AARCH64_PC_REGNUM:
  995.       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
  996.       return frame_unwind_got_constant (this_frame, regnum, lr);

  997.     default:
  998.       internal_error (__FILE__, __LINE__,
  999.                       _("Unexpected register %d"), regnum);
  1000.     }
  1001. }

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      /* The previous PC is computed from LR by
         aarch64_dwarf2_prev_register rather than restored from a
         saved slot.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      /* The previous SP is the frame's CFA.  */
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
      /* All other registers keep the generic DWARF unwinder's
         default rule.  */
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

/* Generate the VEC(stack_item_t) type and its vector operations.  */
DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;  /* Placate the compiler; not reached if internal_error
                    does not return.  */

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalar types are naturally aligned to their size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      /* Arrays and complex values align like their element type.  */
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregates align to their most strictly aligned member.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        /* NOTE(review): this compares the array's total byte length
           against 4, while the AAPCS64 HFA rule limits the *element
           count* to at most 4 — confirm whether an element-count
           check was intended here.  */
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* An HFA has between 1 and 4 members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                /* Every member must have the same type code and
                   length as the first member.  */
                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  Tracks the register
   and stack allocation state while marshalling arguments for an
   inferior call.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.
   NOTE: INFO->ngrn is updated by the caller, not here.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  /* First X register to use; large values span several.  */
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      /* Copy at most one register's worth per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
                            info->argnum,
                            gdbarch_register_name (gdbarch, regnum),
                            phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

  1162. /* Attempt to marshall a value in a V register.  Return 1 if
  1163.    successful, or 0 if insufficient registers are available.  This
  1164.    function, unlike the equivalent pass_in_x() function does not
  1165.    handle arguments spread across multiple registers.  */

  1166. static int
  1167. pass_in_v (struct gdbarch *gdbarch,
  1168.            struct regcache *regcache,
  1169.            struct aarch64_call_info *info,
  1170.            const bfd_byte *buf)
  1171. {
  1172.   if (info->nsrn < 8)
  1173.     {
  1174.       enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  1175.       int regnum = AARCH64_V0_REGNUM + info->nsrn;

  1176.       info->argnum++;
  1177.       info->nsrn++;

  1178.       regcache_cooked_write (regcache, regnum, buf);
  1179.       if (aarch64_debug)
  1180.         fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
  1181.                             info->argnum,
  1182.                             gdbarch_register_name (gdbarch, regnum));
  1183.       return 1;
  1184.     }
  1185.   info->nsrn = 8;
  1186.   return 0;
  1187. }

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
                        info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      /* NOTE(review): the padding item reuses BUF as its data
         pointer, so PAD bytes are later copied from BUF onto the
         stack; if PAD exceeds the argument's length this reads past
         the value's contents — confirm the backing buffer is always
         large enough.  */
      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

  1221. /* Marshall an argument into a sequence of one or more consecutive X
  1222.    registers or, if insufficient X registers are available then onto
  1223.    the stack.  */

  1224. static void
  1225. pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
  1226.                     struct aarch64_call_info *info, struct type *type,
  1227.                     const bfd_byte *buf)
  1228. {
  1229.   int len = TYPE_LENGTH (type);
  1230.   int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  1231.   /* PCS C.13 - Pass in registers if we have enough spare */
  1232.   if (info->ngrn + nregs <= 8)
  1233.     {
  1234.       pass_in_x (gdbarch, regcache, info, type, buf);
  1235.       info->ngrn += nregs;
  1236.     }
  1237.   else
  1238.     {
  1239.       info->ngrn = 8;
  1240.       pass_on_stack (info, type, buf);
  1241.     }
  1242. }

  1243. /* Pass a value in a V register, or on the stack if insufficient are
  1244.    available.  */

  1245. static void
  1246. pass_in_v_or_stack (struct gdbarch *gdbarch,
  1247.                     struct regcache *regcache,
  1248.                     struct aarch64_call_info *info,
  1249.                     struct type *type,
  1250.                     const bfd_byte *buf)
  1251. {
  1252.   if (!pass_in_v (gdbarch, regcache, info, buf))
  1253.     pass_on_stack (info, type, buf);
  1254. }

  1255. /* Implement the "push_dummy_call" gdbarch method.  */

  1256. static CORE_ADDR
  1257. aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
  1258.                          struct regcache *regcache, CORE_ADDR bp_addr,
  1259.                          int nargs,
  1260.                          struct value **args, CORE_ADDR sp, int struct_return,
  1261.                          CORE_ADDR struct_addr)
  1262. {
  1263.   int nstack = 0;
  1264.   int argnum;
  1265.   int x_argreg;
  1266.   int v_argreg;
  1267.   struct aarch64_call_info info;
  1268.   struct type *func_type;
  1269.   struct type *return_type;
  1270.   int lang_struct_return;

  1271.   memset (&info, 0, sizeof (info));

  1272.   /* We need to know what the type of the called function is in order
  1273.      to determine the number of named/anonymous arguments for the
  1274.      actual argument placement, and the return type in order to handle
  1275.      return value correctly.

  1276.      The generic code above us views the decision of return in memory
  1277.      or return in registers as a two stage processes.  The language
  1278.      handler is consulted first and may decide to return in memory (eg
  1279.      class with copy constructor returned by value), this will cause
  1280.      the generic code to allocate space AND insert an initial leading
  1281.      argument.

  1282.      If the language code does not decide to pass in memory then the
  1283.      target code is consulted.

  1284.      If the language code decides to pass in memory we want to move
  1285.      the pointer inserted as the initial argument from the argument
  1286.      list and into X8, the conventional AArch64 struct return pointer
  1287.      register.

  1288.      This is slightly awkward, ideally the flag "lang_struct_return"
  1289.      would be passed to the targets implementation of push_dummy_call.
  1290.      Rather that change the target interface we call the language code
  1291.      directly ourselves.  */

  1292.   func_type = check_typedef (value_type (function));

  1293.   /* Dereference function pointer types.  */
  1294.   if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
  1295.     func_type = TYPE_TARGET_TYPE (func_type);

  1296.   gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
  1297.               || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  1298.   /* If language_pass_by_reference () returned true we will have been
  1299.      given an additional initial argument, a hidden pointer to the
  1300.      return slot in memory.  */
  1301.   return_type = TYPE_TARGET_TYPE (func_type);
  1302.   lang_struct_return = language_pass_by_reference (return_type);

  1303.   /* Set the return address.  For the AArch64, the return breakpoint
  1304.      is always at BP_ADDR.  */
  1305.   regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  1306.   /* If we were given an initial argument for the return slot because
  1307.      lang_struct_return was true, lose it.  */
  1308.   if (lang_struct_return)
  1309.     {
  1310.       args++;
  1311.       nargs--;
  1312.     }

  1313.   /* The struct_return pointer occupies X8.  */
  1314.   if (struct_return || lang_struct_return)
  1315.     {
  1316.       if (aarch64_debug)
  1317.         fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
  1318.                             gdbarch_register_name
  1319.                             (gdbarch,
  1320.                              AARCH64_STRUCT_RETURN_REGNUM),
  1321.                             paddress (gdbarch, struct_addr));
  1322.       regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
  1323.                                       struct_addr);
  1324.     }

  1325.   for (argnum = 0; argnum < nargs; argnum++)
  1326.     {
  1327.       struct value *arg = args[argnum];
  1328.       struct type *arg_type;
  1329.       int len;

  1330.       arg_type = check_typedef (value_type (arg));
  1331.       len = TYPE_LENGTH (arg_type);

  1332.       switch (TYPE_CODE (arg_type))
  1333.         {
  1334.         case TYPE_CODE_INT:
  1335.         case TYPE_CODE_BOOL:
  1336.         case TYPE_CODE_CHAR:
  1337.         case TYPE_CODE_RANGE:
  1338.         case TYPE_CODE_ENUM:
  1339.           if (len < 4)
  1340.             {
  1341.               /* Promote to 32 bit integer.  */
  1342.               if (TYPE_UNSIGNED (arg_type))
  1343.                 arg_type = builtin_type (gdbarch)->builtin_uint32;
  1344.               else
  1345.                 arg_type = builtin_type (gdbarch)->builtin_int32;
  1346.               arg = value_cast (arg_type, arg);
  1347.             }
  1348.           pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
  1349.                               value_contents (arg));
  1350.           break;

  1351.         case TYPE_CODE_COMPLEX:
  1352.           if (info.nsrn <= 6)
  1353.             {
  1354.               const bfd_byte *buf = value_contents (arg);
  1355.               struct type *target_type =
  1356.                 check_typedef (TYPE_TARGET_TYPE (arg_type));

  1357.               pass_in_v (gdbarch, regcache, &info, buf);
  1358.               pass_in_v (gdbarch, regcache, &info,
  1359.                          buf + TYPE_LENGTH (target_type));
  1360.             }
  1361.           else
  1362.             {
  1363.               info.nsrn = 8;
  1364.               pass_on_stack (&info, arg_type, value_contents (arg));
  1365.             }
  1366.           break;
  1367.         case TYPE_CODE_FLT:
  1368.           pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
  1369.                               value_contents (arg));
  1370.           break;

  1371.         case TYPE_CODE_STRUCT:
  1372.         case TYPE_CODE_ARRAY:
  1373.         case TYPE_CODE_UNION:
  1374.           if (is_hfa (arg_type))
  1375.             {
  1376.               int elements = TYPE_NFIELDS (arg_type);

  1377.               /* Homogeneous Aggregates */
  1378.               if (info.nsrn + elements < 8)
  1379.                 {
  1380.                   int i;

  1381.                   for (i = 0; i < elements; i++)
  1382.                     {
  1383.                       /* We know that we have sufficient registers
  1384.                          available therefore this will never fallback
  1385.                          to the stack.  */
  1386.                       struct value *field =
  1387.                         value_primitive_field (arg, 0, i, arg_type);
  1388.                       struct type *field_type =
  1389.                         check_typedef (value_type (field));

  1390.                       pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
  1391.                                           value_contents_writeable (field));
  1392.                     }
  1393.                 }
  1394.               else
  1395.                 {
  1396.                   info.nsrn = 8;
  1397.                   pass_on_stack (&info, arg_type, value_contents (arg));
  1398.                 }
  1399.             }
  1400.           else if (len > 16)
  1401.             {
  1402.               /* PCS B.7 Aggregates larger than 16 bytes are passed by
  1403.                  invisible reference.  */

  1404.               /* Allocate aligned storage.  */
  1405.               sp = align_down (sp - len, 16);

  1406.               /* Write the real data into the stack.  */
  1407.               write_memory (sp, value_contents (arg), len);

  1408.               /* Construct the indirection.  */
  1409.               arg_type = lookup_pointer_type (arg_type);
  1410.               arg = value_from_pointer (arg_type, sp);
  1411.               pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
  1412.                                   value_contents (arg));
  1413.             }
  1414.           else
  1415.             /* PCS C.15 / C.18 multiple values pass.  */
  1416.             pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
  1417.                                 value_contents (arg));
  1418.           break;

  1419.         default:
  1420.           pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
  1421.                               value_contents (arg));
  1422.           break;
  1423.         }
  1424.     }

  1425.   /* Make sure stack retains 16 byte alignment.  */
  1426.   if (info.nsaa & 15)
  1427.     sp -= 16 - (info.nsaa & 15);

  1428.   while (!VEC_empty (stack_item_t, info.si))
  1429.     {
  1430.       stack_item_t *si = VEC_last (stack_item_t, info.si);

  1431.       sp -= si->len;
  1432.       write_memory (sp, si->data, si->len);
  1433.       VEC_pop (stack_item_t, info.si);
  1434.     }

  1435.   VEC_free (stack_item_t, info.si);

  1436.   /* Finally, update the SP register.  */
  1437.   regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  1438.   return sp;
  1439. }

  1440. /* Implement the "frame_align" gdbarch method.  */

  1441. static CORE_ADDR
  1442. aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
  1443. {
  1444.   /* Align the stack to sixteen bytes.  */
  1445.   return sp & ~(CORE_ADDR) 15;
  1446. }

  1447. /* Return the type for an AdvSISD Q register.  */

  1448. static struct type *
  1449. aarch64_vnq_type (struct gdbarch *gdbarch)
  1450. {
  1451.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1452.   if (tdep->vnq_type == NULL)
  1453.     {
  1454.       struct type *t;
  1455.       struct type *elem;

  1456.       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
  1457.                                TYPE_CODE_UNION);

  1458.       elem = builtin_type (gdbarch)->builtin_uint128;
  1459.       append_composite_type_field (t, "u", elem);

  1460.       elem = builtin_type (gdbarch)->builtin_int128;
  1461.       append_composite_type_field (t, "s", elem);

  1462.       tdep->vnq_type = t;
  1463.     }

  1464.   return tdep->vnq_type;
  1465. }

  1466. /* Return the type for an AdvSISD D register.  */

  1467. static struct type *
  1468. aarch64_vnd_type (struct gdbarch *gdbarch)
  1469. {
  1470.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1471.   if (tdep->vnd_type == NULL)
  1472.     {
  1473.       struct type *t;
  1474.       struct type *elem;

  1475.       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
  1476.                                TYPE_CODE_UNION);

  1477.       elem = builtin_type (gdbarch)->builtin_double;
  1478.       append_composite_type_field (t, "f", elem);

  1479.       elem = builtin_type (gdbarch)->builtin_uint64;
  1480.       append_composite_type_field (t, "u", elem);

  1481.       elem = builtin_type (gdbarch)->builtin_int64;
  1482.       append_composite_type_field (t, "s", elem);

  1483.       tdep->vnd_type = t;
  1484.     }

  1485.   return tdep->vnd_type;
  1486. }

  1487. /* Return the type for an AdvSISD S register.  */

  1488. static struct type *
  1489. aarch64_vns_type (struct gdbarch *gdbarch)
  1490. {
  1491.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1492.   if (tdep->vns_type == NULL)
  1493.     {
  1494.       struct type *t;
  1495.       struct type *elem;

  1496.       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
  1497.                                TYPE_CODE_UNION);

  1498.       elem = builtin_type (gdbarch)->builtin_float;
  1499.       append_composite_type_field (t, "f", elem);

  1500.       elem = builtin_type (gdbarch)->builtin_uint32;
  1501.       append_composite_type_field (t, "u", elem);

  1502.       elem = builtin_type (gdbarch)->builtin_int32;
  1503.       append_composite_type_field (t, "s", elem);

  1504.       tdep->vns_type = t;
  1505.     }

  1506.   return tdep->vns_type;
  1507. }

  1508. /* Return the type for an AdvSISD H register.  */

  1509. static struct type *
  1510. aarch64_vnh_type (struct gdbarch *gdbarch)
  1511. {
  1512.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1513.   if (tdep->vnh_type == NULL)
  1514.     {
  1515.       struct type *t;
  1516.       struct type *elem;

  1517.       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
  1518.                                TYPE_CODE_UNION);

  1519.       elem = builtin_type (gdbarch)->builtin_uint16;
  1520.       append_composite_type_field (t, "u", elem);

  1521.       elem = builtin_type (gdbarch)->builtin_int16;
  1522.       append_composite_type_field (t, "s", elem);

  1523.       tdep->vnh_type = t;
  1524.     }

  1525.   return tdep->vnh_type;
  1526. }

  1527. /* Return the type for an AdvSISD B register.  */

  1528. static struct type *
  1529. aarch64_vnb_type (struct gdbarch *gdbarch)
  1530. {
  1531.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1532.   if (tdep->vnb_type == NULL)
  1533.     {
  1534.       struct type *t;
  1535.       struct type *elem;

  1536.       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
  1537.                                TYPE_CODE_UNION);

  1538.       elem = builtin_type (gdbarch)->builtin_uint8;
  1539.       append_composite_type_field (t, "u", elem);

  1540.       elem = builtin_type (gdbarch)->builtin_int8;
  1541.       append_composite_type_field (t, "s", elem);

  1542.       tdep->vnb_type = t;
  1543.     }

  1544.   return tdep->vnb_type;
  1545. }

  1546. /* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

  1547. static int
  1548. aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
  1549. {
  1550.   if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
  1551.     return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  1552.   if (reg == AARCH64_DWARF_SP)
  1553.     return AARCH64_SP_REGNUM;

  1554.   if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
  1555.     return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  1556.   return -1;
  1557. }


  1558. /* Implement the "print_insn" gdbarch method.  */

  1559. static int
  1560. aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
  1561. {
  1562.   info->symbols = NULL;
  1563.   return print_insn_aarch64 (memaddr, info);
  1564. }

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian, so the bytes below
   are the little-endian encoding regardless of target byte order.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

  1569. /* Implement the "breakpoint_from_pc" gdbarch method.  */

  1570. static const gdb_byte *
  1571. aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
  1572.                             int *lenptr)
  1573. {
  1574.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1575.   *lenptr = sizeof (aarch64_default_breakpoint);
  1576.   return aarch64_default_breakpoint;
  1577. }

  1578. /* Extract from an array REGS containing the (raw) register state a
  1579.    function return value of type TYPE, and copy that, in virtual
  1580.    format, into VALBUF.  */

  1581. static void
  1582. aarch64_extract_return_value (struct type *type, struct regcache *regs,
  1583.                               gdb_byte *valbuf)
  1584. {
  1585.   struct gdbarch *gdbarch = get_regcache_arch (regs);
  1586.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  1587.   if (TYPE_CODE (type) == TYPE_CODE_FLT)
  1588.     {
  1589.       bfd_byte buf[V_REGISTER_SIZE];
  1590.       int len = TYPE_LENGTH (type);

  1591.       regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
  1592.       memcpy (valbuf, buf, len);
  1593.     }
  1594.   else if (TYPE_CODE (type) == TYPE_CODE_INT
  1595.            || TYPE_CODE (type) == TYPE_CODE_CHAR
  1596.            || TYPE_CODE (type) == TYPE_CODE_BOOL
  1597.            || TYPE_CODE (type) == TYPE_CODE_PTR
  1598.            || TYPE_CODE (type) == TYPE_CODE_REF
  1599.            || TYPE_CODE (type) == TYPE_CODE_ENUM)
  1600.     {
  1601.       /* If the the type is a plain integer, then the access is
  1602.          straight-forward.  Otherwise we have to play around a bit
  1603.          more.  */
  1604.       int len = TYPE_LENGTH (type);
  1605.       int regno = AARCH64_X0_REGNUM;
  1606.       ULONGEST tmp;

  1607.       while (len > 0)
  1608.         {
  1609.           /* By using store_unsigned_integer we avoid having to do
  1610.              anything special for small big-endian values.  */
  1611.           regcache_cooked_read_unsigned (regs, regno++, &tmp);
  1612.           store_unsigned_integer (valbuf,
  1613.                                   (len > X_REGISTER_SIZE
  1614.                                    ? X_REGISTER_SIZE : len), byte_order, tmp);
  1615.           len -= X_REGISTER_SIZE;
  1616.           valbuf += X_REGISTER_SIZE;
  1617.         }
  1618.     }
  1619.   else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
  1620.     {
  1621.       int regno = AARCH64_V0_REGNUM;
  1622.       bfd_byte buf[V_REGISTER_SIZE];
  1623.       struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
  1624.       int len = TYPE_LENGTH (target_type);

  1625.       regcache_cooked_read (regs, regno, buf);
  1626.       memcpy (valbuf, buf, len);
  1627.       valbuf += len;
  1628.       regcache_cooked_read (regs, regno + 1, buf);
  1629.       memcpy (valbuf, buf, len);
  1630.       valbuf += len;
  1631.     }
  1632.   else if (is_hfa (type))
  1633.     {
  1634.       int elements = TYPE_NFIELDS (type);
  1635.       struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
  1636.       int len = TYPE_LENGTH (member_type);
  1637.       int i;

  1638.       for (i = 0; i < elements; i++)
  1639.         {
  1640.           int regno = AARCH64_V0_REGNUM + i;
  1641.           bfd_byte buf[X_REGISTER_SIZE];

  1642.           if (aarch64_debug)
  1643.             fprintf_unfiltered (gdb_stdlog,
  1644.                                 "read HFA return value element %d from %s\n",
  1645.                                 i + 1,
  1646.                                 gdbarch_register_name (gdbarch, regno));
  1647.           regcache_cooked_read (regs, regno, buf);

  1648.           memcpy (valbuf, buf, len);
  1649.           valbuf += len;
  1650.         }
  1651.     }
  1652.   else
  1653.     {
  1654.       /* For a structure or union the behaviour is as if the value had
  1655.          been stored to word-aligned memory and then loaded into
  1656.          registers with 64-bit load instruction(s).  */
  1657.       int len = TYPE_LENGTH (type);
  1658.       int regno = AARCH64_X0_REGNUM;
  1659.       bfd_byte buf[X_REGISTER_SIZE];

  1660.       while (len > 0)
  1661.         {
  1662.           regcache_cooked_read (regs, regno++, buf);
  1663.           memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
  1664.           len -= X_REGISTER_SIZE;
  1665.           valbuf += X_REGISTER_SIZE;
  1666.         }
  1667.     }
  1668. }


  1669. /* Will a function return an aggregate type in memory or in a
  1670.    register?  Return 0 if an aggregate type can be returned in a
  1671.    register, 1 if it must be returned in memory.  */

  1672. static int
  1673. aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
  1674. {
  1675.   int nRc;
  1676.   enum type_code code;

  1677.   CHECK_TYPEDEF (type);

  1678.   /* In the AArch64 ABI, "integer" like aggregate types are returned
  1679.      in registers.  For an aggregate type to be integer like, its size
  1680.      must be less than or equal to 4 * X_REGISTER_SIZE.  */

  1681.   if (is_hfa (type))
  1682.     {
  1683.       /* PCS B.5 If the argument is a Named HFA, then the argument is
  1684.          used unmodified.  */
  1685.       return 0;
  1686.     }

  1687.   if (TYPE_LENGTH (type) > 16)
  1688.     {
  1689.       /* PCS B.6 Aggregates larger than 16 bytes are passed by
  1690.          invisible reference.  */

  1691.       return 1;
  1692.     }

  1693.   return 0;
  1694. }

  1695. /* Write into appropriate registers a function return value of type
  1696.    TYPE, given in virtual format.  */

  1697. static void
  1698. aarch64_store_return_value (struct type *type, struct regcache *regs,
  1699.                             const gdb_byte *valbuf)
  1700. {
  1701.   struct gdbarch *gdbarch = get_regcache_arch (regs);
  1702.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  1703.   if (TYPE_CODE (type) == TYPE_CODE_FLT)
  1704.     {
  1705.       bfd_byte buf[V_REGISTER_SIZE];
  1706.       int len = TYPE_LENGTH (type);

  1707.       memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
  1708.       regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
  1709.     }
  1710.   else if (TYPE_CODE (type) == TYPE_CODE_INT
  1711.            || TYPE_CODE (type) == TYPE_CODE_CHAR
  1712.            || TYPE_CODE (type) == TYPE_CODE_BOOL
  1713.            || TYPE_CODE (type) == TYPE_CODE_PTR
  1714.            || TYPE_CODE (type) == TYPE_CODE_REF
  1715.            || TYPE_CODE (type) == TYPE_CODE_ENUM)
  1716.     {
  1717.       if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
  1718.         {
  1719.           /* Values of one word or less are zero/sign-extended and
  1720.              returned in r0.  */
  1721.           bfd_byte tmpbuf[X_REGISTER_SIZE];
  1722.           LONGEST val = unpack_long (type, valbuf);

  1723.           store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
  1724.           regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
  1725.         }
  1726.       else
  1727.         {
  1728.           /* Integral values greater than one word are stored in
  1729.              consecutive registers starting with r0.  This will always
  1730.              be a multiple of the regiser size.  */
  1731.           int len = TYPE_LENGTH (type);
  1732.           int regno = AARCH64_X0_REGNUM;

  1733.           while (len > 0)
  1734.             {
  1735.               regcache_cooked_write (regs, regno++, valbuf);
  1736.               len -= X_REGISTER_SIZE;
  1737.               valbuf += X_REGISTER_SIZE;
  1738.             }
  1739.         }
  1740.     }
  1741.   else if (is_hfa (type))
  1742.     {
  1743.       int elements = TYPE_NFIELDS (type);
  1744.       struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
  1745.       int len = TYPE_LENGTH (member_type);
  1746.       int i;

  1747.       for (i = 0; i < elements; i++)
  1748.         {
  1749.           int regno = AARCH64_V0_REGNUM + i;
  1750.           bfd_byte tmpbuf[MAX_REGISTER_SIZE];

  1751.           if (aarch64_debug)
  1752.             fprintf_unfiltered (gdb_stdlog,
  1753.                                 "write HFA return value element %d to %s\n",
  1754.                                 i + 1,
  1755.                                 gdbarch_register_name (gdbarch, regno));

  1756.           memcpy (tmpbuf, valbuf, len);
  1757.           regcache_cooked_write (regs, regno, tmpbuf);
  1758.           valbuf += len;
  1759.         }
  1760.     }
  1761.   else
  1762.     {
  1763.       /* For a structure or union the behaviour is as if the value had
  1764.          been stored to word-aligned memory and then loaded into
  1765.          registers with 64-bit load instruction(s).  */
  1766.       int len = TYPE_LENGTH (type);
  1767.       int regno = AARCH64_X0_REGNUM;
  1768.       bfd_byte tmpbuf[X_REGISTER_SIZE];

  1769.       while (len > 0)
  1770.         {
  1771.           memcpy (tmpbuf, valbuf,
  1772.                   len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
  1773.           regcache_cooked_write (regs, regno++, tmpbuf);
  1774.           len -= X_REGISTER_SIZE;
  1775.           valbuf += X_REGISTER_SIZE;
  1776.         }
  1777.     }
  1778. }

  1779. /* Implement the "return_value" gdbarch method.  */

  1780. static enum return_value_convention
  1781. aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
  1782.                       struct type *valtype, struct regcache *regcache,
  1783.                       gdb_byte *readbuf, const gdb_byte *writebuf)
  1784. {
  1785.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  1786.   if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
  1787.       || TYPE_CODE (valtype) == TYPE_CODE_UNION
  1788.       || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
  1789.     {
  1790.       if (aarch64_return_in_memory (gdbarch, valtype))
  1791.         {
  1792.           if (aarch64_debug)
  1793.             fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
  1794.           return RETURN_VALUE_STRUCT_CONVENTION;
  1795.         }
  1796.     }

  1797.   if (writebuf)
  1798.     aarch64_store_return_value (valtype, regcache, writebuf);

  1799.   if (readbuf)
  1800.     aarch64_extract_return_value (valtype, regcache, readbuf);

  1801.   if (aarch64_debug)
  1802.     fprintf_unfiltered (gdb_stdlog, "return value in registers\n");

  1803.   return RETURN_VALUE_REGISTER_CONVENTION;
  1804. }

  1805. /* Implement the "get_longjmp_target" gdbarch method.  */

  1806. static int
  1807. aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
  1808. {
  1809.   CORE_ADDR jb_addr;
  1810.   gdb_byte buf[X_REGISTER_SIZE];
  1811.   struct gdbarch *gdbarch = get_frame_arch (frame);
  1812.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  1813.   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  1814.   jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  1815.   if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
  1816.                           X_REGISTER_SIZE))
  1817.     return 0;

  1818.   *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  1819.   return 1;
  1820. }


/* Return the pseudo register name corresponding to register regnum.
   REGNUM is an absolute GDB register number; it is converted below to
   an index relative to the first pseudo register.  Aborts via
   internal_error if REGNUM is not a known pseudo register.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  /* Names for the Qn (128-bit scalar) pseudo registers.  */
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  /* Names for the Dn (64-bit scalar) pseudo registers.  */
  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  /* Names for the Sn (32-bit scalar) pseudo registers.  */
  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  /* Names for the Hn (16-bit scalar) pseudo registers.  */
  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  /* Names for the Bn (8-bit scalar) pseudo registers.  */
  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Convert to an index relative to the first pseudo register.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  regnum);
}

  1895. /* Implement the "pseudo_register_type" tdesc_arch_data method.  */

  1896. static struct type *
  1897. aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
  1898. {
  1899.   regnum -= gdbarch_num_regs (gdbarch);

  1900.   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  1901.     return aarch64_vnq_type (gdbarch);

  1902.   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  1903.     return aarch64_vnd_type (gdbarch);

  1904.   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  1905.     return aarch64_vns_type (gdbarch);

  1906.   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  1907.     return aarch64_vnh_type (gdbarch);

  1908.   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  1909.     return aarch64_vnb_type (gdbarch);

  1910.   internal_error (__FILE__, __LINE__,
  1911.                   _("aarch64_pseudo_register_type: bad register number %d"),
  1912.                   regnum);
  1913. }

  1914. /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */

  1915. static int
  1916. aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
  1917.                                     struct reggroup *group)
  1918. {
  1919.   regnum -= gdbarch_num_regs (gdbarch);

  1920.   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  1921.     return group == all_reggroup || group == vector_reggroup;
  1922.   else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  1923.     return (group == all_reggroup || group == vector_reggroup
  1924.             || group == float_reggroup);
  1925.   else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  1926.     return (group == all_reggroup || group == vector_reggroup
  1927.             || group == float_reggroup);
  1928.   else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  1929.     return group == all_reggroup || group == vector_reggroup;
  1930.   else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  1931.     return group == all_reggroup || group == vector_reggroup;

  1932.   return group == all_reggroup;
  1933. }

  1934. /* Implement the "pseudo_register_read_value" gdbarch method.  */

  1935. static struct value *
  1936. aarch64_pseudo_read_value (struct gdbarch *gdbarch,
  1937.                            struct regcache *regcache,
  1938.                            int regnum)
  1939. {
  1940.   gdb_byte reg_buf[MAX_REGISTER_SIZE];
  1941.   struct value *result_value;
  1942.   gdb_byte *buf;

  1943.   result_value = allocate_value (register_type (gdbarch, regnum));
  1944.   VALUE_LVAL (result_value) = lval_register;
  1945.   VALUE_REGNUM (result_value) = regnum;
  1946.   buf = value_contents_raw (result_value);

  1947.   regnum -= gdbarch_num_regs (gdbarch);

  1948.   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  1949.     {
  1950.       enum register_status status;
  1951.       unsigned v_regnum;

  1952.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
  1953.       status = regcache_raw_read (regcache, v_regnum, reg_buf);
  1954.       if (status != REG_VALID)
  1955.         mark_value_bytes_unavailable (result_value, 0,
  1956.                                       TYPE_LENGTH (value_type (result_value)));
  1957.       else
  1958.         memcpy (buf, reg_buf, Q_REGISTER_SIZE);
  1959.       return result_value;
  1960.     }

  1961.   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  1962.     {
  1963.       enum register_status status;
  1964.       unsigned v_regnum;

  1965.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
  1966.       status = regcache_raw_read (regcache, v_regnum, reg_buf);
  1967.       if (status != REG_VALID)
  1968.         mark_value_bytes_unavailable (result_value, 0,
  1969.                                       TYPE_LENGTH (value_type (result_value)));
  1970.       else
  1971.         memcpy (buf, reg_buf, D_REGISTER_SIZE);
  1972.       return result_value;
  1973.     }

  1974.   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  1975.     {
  1976.       enum register_status status;
  1977.       unsigned v_regnum;

  1978.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
  1979.       status = regcache_raw_read (regcache, v_regnum, reg_buf);
  1980.       memcpy (buf, reg_buf, S_REGISTER_SIZE);
  1981.       return result_value;
  1982.     }

  1983.   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  1984.     {
  1985.       enum register_status status;
  1986.       unsigned v_regnum;

  1987.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
  1988.       status = regcache_raw_read (regcache, v_regnum, reg_buf);
  1989.       if (status != REG_VALID)
  1990.         mark_value_bytes_unavailable (result_value, 0,
  1991.                                       TYPE_LENGTH (value_type (result_value)));
  1992.       else
  1993.         memcpy (buf, reg_buf, H_REGISTER_SIZE);
  1994.       return result_value;
  1995.     }

  1996.   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  1997.     {
  1998.       enum register_status status;
  1999.       unsigned v_regnum;

  2000.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
  2001.       status = regcache_raw_read (regcache, v_regnum, reg_buf);
  2002.       if (status != REG_VALID)
  2003.         mark_value_bytes_unavailable (result_value, 0,
  2004.                                       TYPE_LENGTH (value_type (result_value)));
  2005.       else
  2006.         memcpy (buf, reg_buf, B_REGISTER_SIZE);
  2007.       return result_value;
  2008.     }

  2009.   gdb_assert_not_reached ("regnum out of bound");
  2010. }

  2011. /* Implement the "pseudo_register_write" gdbarch method.  */

  2012. static void
  2013. aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
  2014.                       int regnum, const gdb_byte *buf)
  2015. {
  2016.   gdb_byte reg_buf[MAX_REGISTER_SIZE];

  2017.   /* Ensure the register buffer is zero, we want gdb writes of the
  2018.      various 'scalar' pseudo registers to behavior like architectural
  2019.      writes, register width bytes are written the remainder are set to
  2020.      zero.  */
  2021.   memset (reg_buf, 0, sizeof (reg_buf));

  2022.   regnum -= gdbarch_num_regs (gdbarch);

  2023.   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  2024.     {
  2025.       /* pseudo Q registers */
  2026.       unsigned v_regnum;

  2027.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
  2028.       memcpy (reg_buf, buf, Q_REGISTER_SIZE);
  2029.       regcache_raw_write (regcache, v_regnum, reg_buf);
  2030.       return;
  2031.     }

  2032.   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  2033.     {
  2034.       /* pseudo D registers */
  2035.       unsigned v_regnum;

  2036.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
  2037.       memcpy (reg_buf, buf, D_REGISTER_SIZE);
  2038.       regcache_raw_write (regcache, v_regnum, reg_buf);
  2039.       return;
  2040.     }

  2041.   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  2042.     {
  2043.       unsigned v_regnum;

  2044.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
  2045.       memcpy (reg_buf, buf, S_REGISTER_SIZE);
  2046.       regcache_raw_write (regcache, v_regnum, reg_buf);
  2047.       return;
  2048.     }

  2049.   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  2050.     {
  2051.       /* pseudo H registers */
  2052.       unsigned v_regnum;

  2053.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
  2054.       memcpy (reg_buf, buf, H_REGISTER_SIZE);
  2055.       regcache_raw_write (regcache, v_regnum, reg_buf);
  2056.       return;
  2057.     }

  2058.   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  2059.     {
  2060.       /* pseudo B registers */
  2061.       unsigned v_regnum;

  2062.       v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
  2063.       memcpy (reg_buf, buf, B_REGISTER_SIZE);
  2064.       regcache_raw_write (regcache, v_regnum, reg_buf);
  2065.       return;
  2066.     }

  2067.   gdb_assert_not_reached ("regnum out of bound");
  2068. }

  2069. /* Callback function for user_reg_add.  */

  2070. static struct value *
  2071. value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
  2072. {
  2073.   const int *reg_p = baton;

  2074.   return value_of_register (*reg_p, frame);
  2075. }


/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive/store-exclusive sequence must not be interrupted
   by a breakpoint, or the exclusive monitor is cleared and the
   sequence can never complete.  Instead, place breakpoints after the
   closing store-exclusive (and at the target of at most one
   conditional branch inside the sequence).  Return 1 if such a
   sequence was found and breakpoints were placed, 0 to fall back to
   normal single-stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  /* breaks[0] is the instruction after the sequence; breaks[1] the
     destination of an optional conditional branch within it.  */
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan at most ATOMIC_SEQUENCE_LENGTH instructions for the
     matching Store Exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the sequence is not
	     supported; fall back to normal single-stepping.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}

  2140. /* Initialize the current architecture based on INFO.  If possible,
  2141.    re-use an architecture from ARCHES, which is a list of
  2142.    architectures already created during this debugging session.

  2143.    Called e.g. at program startup, when reading a core file, and when
  2144.    reading a binary file.  */

  2145. static struct gdbarch *
  2146. aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
  2147. {
  2148.   struct gdbarch_tdep *tdep;
  2149.   struct gdbarch *gdbarch;
  2150.   struct gdbarch_list *best_arch;
  2151.   struct tdesc_arch_data *tdesc_data = NULL;
  2152.   const struct target_desc *tdesc = info.target_desc;
  2153.   int i;
  2154.   int have_fpa_registers = 1;
  2155.   int valid_p = 1;
  2156.   const struct tdesc_feature *feature;
  2157.   int num_regs = 0;
  2158.   int num_pseudo_regs = 0;

  2159.   /* Ensure we always have a target descriptor.  */
  2160.   if (!tdesc_has_registers (tdesc))
  2161.     tdesc = tdesc_aarch64;

  2162.   gdb_assert (tdesc);

  2163.   feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  2164.   if (feature == NULL)
  2165.     return NULL;

  2166.   tdesc_data = tdesc_data_alloc ();

  2167.   /* Validate the descriptor provides the mandatory core R registers
  2168.      and allocate their numbers.  */
  2169.   for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
  2170.     valid_p &=
  2171.       tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
  2172.                                aarch64_r_register_names[i]);

  2173.   num_regs = AARCH64_X0_REGNUM + i;

  2174.   /* Look for the V registers.  */
  2175.   feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  2176.   if (feature)
  2177.     {
  2178.       /* Validate the descriptor provides the mandatory V registers
  2179.          and allocate their numbers.  */
  2180.       for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
  2181.         valid_p &=
  2182.           tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
  2183.                                    aarch64_v_register_names[i]);

  2184.       num_regs = AARCH64_V0_REGNUM + i;

  2185.       num_pseudo_regs += 32;        /* add the Qn scalar register pseudos */
  2186.       num_pseudo_regs += 32;        /* add the Dn scalar register pseudos */
  2187.       num_pseudo_regs += 32;        /* add the Sn scalar register pseudos */
  2188.       num_pseudo_regs += 32;        /* add the Hn scalar register pseudos */
  2189.       num_pseudo_regs += 32;        /* add the Bn scalar register pseudos */
  2190.     }

  2191.   if (!valid_p)
  2192.     {
  2193.       tdesc_data_cleanup (tdesc_data);
  2194.       return NULL;
  2195.     }

  2196.   /* AArch64 code is always little-endian.  */
  2197.   info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  2198.   /* If there is already a candidate, use it.  */
  2199.   for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
  2200.        best_arch != NULL;
  2201.        best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
  2202.     {
  2203.       /* Found a match.  */
  2204.       break;
  2205.     }

  2206.   if (best_arch != NULL)
  2207.     {
  2208.       if (tdesc_data != NULL)
  2209.         tdesc_data_cleanup (tdesc_data);
  2210.       return best_arch->gdbarch;
  2211.     }

  2212.   tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  2213.   gdbarch = gdbarch_alloc (&info, tdep);

  2214.   /* This should be low enough for everything.  */
  2215.   tdep->lowest_pc = 0x20;
  2216.   tdep->jb_pc = -1;                /* Longjump support not enabled by default.  */
  2217.   tdep->jb_elt_size = 8;

  2218.   set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  2219.   set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  2220.   /* Frame handling.  */
  2221.   set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  2222.   set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  2223.   set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  2224.   /* Advance PC across function entry code.  */
  2225.   set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  2226.   /* The stack grows downward.  */
  2227.   set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  2228.   /* Breakpoint manipulation.  */
  2229.   set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
  2230.   set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  2231.   set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  2232.   set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  2233.   /* Information about registers, etc.  */
  2234.   set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  2235.   set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  2236.   set_gdbarch_num_regs (gdbarch, num_regs);

  2237.   set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  2238.   set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  2239.   set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  2240.   set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  2241.   set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  2242.   set_tdesc_pseudo_register_reggroup_p (gdbarch,
  2243.                                         aarch64_pseudo_register_reggroup_p);

  2244.   /* ABI */
  2245.   set_gdbarch_short_bit (gdbarch, 16);
  2246.   set_gdbarch_int_bit (gdbarch, 32);
  2247.   set_gdbarch_float_bit (gdbarch, 32);
  2248.   set_gdbarch_double_bit (gdbarch, 64);
  2249.   set_gdbarch_long_double_bit (gdbarch, 128);
  2250.   set_gdbarch_long_bit (gdbarch, 64);
  2251.   set_gdbarch_long_long_bit (gdbarch, 64);
  2252.   set_gdbarch_ptr_bit (gdbarch, 64);
  2253.   set_gdbarch_char_signed (gdbarch, 0);
  2254.   set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  2255.   set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  2256.   set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  2257.   /* Internal <-> external register number maps.  */
  2258.   set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  2259.   /* Returning results.  */
  2260.   set_gdbarch_return_value (gdbarch, aarch64_return_value);

  2261.   /* Disassembly.  */
  2262.   set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  2263.   /* Virtual tables.  */
  2264.   set_gdbarch_vbit_in_delta (gdbarch, 1);

  2265.   /* Hook in the ABI-specific overrides, if they have been registered.  */
  2266.   info.target_desc = tdesc;
  2267.   info.tdep_info = (void *) tdesc_data;
  2268.   gdbarch_init_osabi (info, gdbarch);

  2269.   dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  2270.   /* Add some default predicates.  */
  2271.   frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  2272.   dwarf2_append_unwinders (gdbarch);
  2273.   frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  2274.   frame_base_set_default (gdbarch, &aarch64_normal_base);

  2275.   /* Now we have tuned the configuration, set a few final things,
  2276.      based on what the OS ABI has told us.  */

  2277.   if (tdep->jb_pc >= 0)
  2278.     set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  2279.   tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  2280.   /* Add standard register aliases.  */
  2281.   for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
  2282.     user_reg_add (gdbarch, aarch64_register_aliases[i].name,
  2283.                   value_of_aarch64_user_reg,
  2284.                   &aarch64_register_aliases[i].regnum);

  2285.   return gdbarch;
  2286. }

  2287. static void
  2288. aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
  2289. {
  2290.   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  2291.   if (tdep == NULL)
  2292.     return;

  2293.   fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
  2294.                       paddress (gdbarch, tdep->lowest_pc));
  2295. }

/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer, run once at GDB startup: register the AArch64
   gdbarch init/dump hooks with the architecture framework, register
   the builtin target description, and install the "set/show debug
   aarch64" commands that toggle AARCH64_DEBUG.  */
void
_initialize_aarch64_tdep (void)
{
  /* Hook aarch64_gdbarch_init so it runs whenever an AArch64 BFD
     architecture is selected; aarch64_dump_tdep serves "maintenance
     print architecture".  */
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
                    aarch64_dump_tdep);

  /* Make the builtin "aarch64" target description (from
     features/aarch64.c) available as a fallback.  */
  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
                            NULL,
                            show_aarch64_debug,
                            &setdebuglist, &showdebuglist);
}