gdb/ia64-hpux-nat.c

/* Copyright (C) 2010-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "ia64-tdep.h"
#include "inferior.h"
#include "inf-ttrace.h"
#include "regcache.h"
#include "solib-ia64-hpux.h"

#include <ia64/sys/uregs.h>
#include <sys/ttrace.h>

/* The offsets used with ttrace to read the value of the raw registers.  */

static int u_offsets[] =
{ /* Static General Registers.  */
  -1,     __r1,   __r2,   __r3,   __r4,   __r5,   __r6,   __r7,
  __r8,   __r9,   __r10,  __r11,  __r12,  __r13,  __r14,  __r15,
  __r16,  __r17,  __r18,  __r19,  __r20,  __r21,  __r22,  __r23,
  __r24,  __r25,  __r26,  __r27,  __r28,  __r29,  __r30,  __r31,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,

  /* Static Floating-Point Registers.  */
  -1,     -1,     __f2,   __f3,   __f4,   __f5,   __f6,   __f7,
  __f8,   __f9,   __f10,  __f11,  __f12,  __f13,  __f14,  __f15,
  __f16,  __f17,  __f18,  __f19,  __f20,  __f21,  __f22,  __f23,
  __f24,  __f25,  __f26,  __f27,  __f28,  __f29,  __f30,  __f31,
  __f32,  __f33,  __f34,  __f35,  __f36,  __f37,  __f38,  __f39,
  __f40,  __f41,  __f42,  __f43,  __f44,  __f45,  __f46,  __f47,
  __f48,  __f49,  __f50,  __f51,  __f52,  __f53,  __f54,  __f55,
  __f56,  __f57,  __f58,  __f59,  __f60,  __f61,  __f62,  __f63,
  __f64,  __f65,  __f66,  __f67,  __f68,  __f69,  __f70,  __f71,
  __f72,  __f73,  __f74,  __f75,  __f76,  __f77,  __f78,  __f79,
  __f80,  __f81,  __f82,  __f83,  __f84,  __f85,  __f86,  __f87,
  __f88,  __f89,  __f90,  __f91,  __f92,  __f93,  __f94,  __f95,
  __f96,  __f97,  __f98,  __f99,  __f100, __f101, __f102, __f103,
  __f104, __f105, __f106, __f107, __f108, __f109, __f110, __f111,
  __f112, __f113, __f114, __f115, __f116, __f117, __f118, __f119,
  __f120, __f121, __f122, __f123, __f124, __f125, __f126, __f127,

  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,
  -1,     -1,     -1,     -1,     -1,     -1,     -1,     -1,

  /* Branch Registers.  */
  __b0,   __b1,   __b2,   __b3,   __b4,   __b5,   __b6,   __b7,

  /* Virtual frame pointer and virtual return address pointer.  */
  -1, -1,

  /* Other registers.  */
  __pr, __ip, __cr_ipsr, __cfm,

  /* Kernel registers.  */
  -1,   -1,   -1,   -1,
  -1,   -1,   -1,   -1,

  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Some application registers.  */
  __ar_rsc, __ar_bsp, __ar_bspstore, __ar_rnat,

  -1,
  -1,         /* Not available: FCR, IA32 floating control register.  */
  -1, -1,

  -1,         /* Not available: EFLAG.  */
  -1,         /* Not available: CSD.  */
  -1,         /* Not available: SSD.  */
  -1,         /* Not available: CFLG.  */
  -1,         /* Not available: FSR.  */
  -1,         /* Not available: FIR.  */
  -1,         /* Not available: FDR.  */
  -1,
  __ar_ccv, -1, -1, -1, __ar_unat, -1, -1, -1,
  __ar_fpsr, -1, -1, -1,
  -1,         /* Not available: ITC.  */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  __ar_pfs, __ar_lc, __ar_ec,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1
  /* All following registers, starting with nat0, are handled as
     pseudo registers, and hence are handled separately.  */
};
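
/* For illustration (a sketch, not used by the code): assuming
   IA64_GR0_REGNUM indexes the first entry above, reading the raw
   value of r1 for the current lwp boils down to

     gdb_byte buf[8];
     ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid),
             ptid_get_lwp (inferior_ptid), u_offsets[IA64_GR0_REGNUM + 1],
             sizeof (buf), (uintptr_t) buf);

   which is what ia64_hpux_read_register_from_save_state_t below
   actually does.  */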

/* Some registers have a fixed value and cannot be modified.
   Store their value in static constant buffers that can be used
   later to fill the register cache.  */
static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x00};
static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0xff, 0xff,
                                  0x80, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
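
/* F1_VALUE above presumably encodes 1.0 in the 16-byte big-endian
   layout of an ia64 floating-point register: a biased exponent of
   0xffff (the format's bias is 65535, giving a true exponent of 0)
   followed by a 64-bit significand whose explicit integer bit (the
   0x80 byte) is set.  */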

/* The "to_wait" routine from the "inf-ttrace" layer.  */

static ptid_t (*super_to_wait) (struct target_ops *, ptid_t,
                                struct target_waitstatus *, int);
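
/* This target layers on top of the inf-ttrace target:
   _initialize_ia64_hpux_nat (at the bottom of this file) saves the
   inherited method in SUPER_TO_WAIT before installing ia64_hpux_wait,
   which delegates back to it for the actual wait.  */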

/* The "to_wait" target_ops routine for ia64-hpux.  */

static ptid_t
ia64_hpux_wait (struct target_ops *ops, ptid_t ptid,
                struct target_waitstatus *ourstatus, int options)
{
  ptid_t new_ptid;

  new_ptid = super_to_wait (ops, ptid, ourstatus, options);

  /* If this is a DLD event (hard-coded breakpoint instruction
     that was activated by the solib-ia64-hpux module), we need to
     process it, and then resume the execution as if the event did
     not happen.  */
  if (ourstatus->kind == TARGET_WAITKIND_STOPPED
      && ourstatus->value.sig == GDB_SIGNAL_TRAP
      && ia64_hpux_at_dld_breakpoint_p (new_ptid))
    {
      ia64_hpux_handle_dld_breakpoint (new_ptid);

      target_resume (new_ptid, 0, GDB_SIGNAL_0);
      ourstatus->kind = TARGET_WAITKIND_IGNORE;
    }

  return new_ptid;
}

/* Fetch the RNAT register and supply it to the REGCACHE.  */

static void
ia64_hpux_fetch_rnat_register (struct regcache *regcache)
{
  CORE_ADDR addr;
  gdb_byte buf[8];
  int status;

  /* The value of RNAT is stored at bsp|0x1f8, and must be read using
     TT_LWP_RDRSEBS.  */
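  /* (The 0x1f8 mask comes from the RSE convention that an RNAT
     collection occupies the 8-byte backing-store slot whose address
     has bits 3-8 all set, i.e. every 64th slot.  For example, with
     BSP = 0x9fffffffbf80, the read below would target
     0x9fffffffbf80 | 0x1f8 = 0x9fffffffbff8.)  */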

  regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &addr);
  addr |= 0x1f8;

  status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), addr, sizeof (buf),
                   (uintptr_t) buf);
  if (status < 0)
    error (_("failed to read RNAT register at %s"),
           paddress (get_regcache_arch (regcache), addr));

  regcache_raw_supply (regcache, IA64_RNAT_REGNUM, buf);
}

/* Read the value of the register saved at OFFSET in the save_state_t
   structure, and store its value in BUF.  LEN is the size of the register
   to be read.  */

static int
ia64_hpux_read_register_from_save_state_t (int offset, gdb_byte *buf, int len)
{
  int status;

  status = ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);

  return status;
}

/* Fetch register REGNUM from the inferior.  */

static void
ia64_hpux_fetch_register (struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset, len, status;
  gdb_byte *buf;

  if (regnum == IA64_GR0_REGNUM)
    {
      /* r0 is always 0.  */
      regcache_raw_supply (regcache, regnum, r0_value);
      return;
    }

  if (regnum == IA64_FR0_REGNUM)
    {
      /* f0 is always 0.0.  */
      regcache_raw_supply (regcache, regnum, f0_value);
      return;
    }

  if (regnum == IA64_FR1_REGNUM)
    {
      /* f1 is always 1.0.  */
      regcache_raw_supply (regcache, regnum, f1_value);
      return;
    }

  if (regnum == IA64_RNAT_REGNUM)
    {
      ia64_hpux_fetch_rnat_register (regcache);
      return;
    }

  /* Get the register location.  If the register cannot be fetched,
     then return now.  */
  offset = u_offsets[regnum];
  if (offset == -1)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  status = ia64_hpux_read_register_from_save_state_t (offset, buf, len);
  if (status < 0)
    warning (_("Failed to read register value for %s."),
             gdbarch_register_name (gdbarch, regnum));

  regcache_raw_supply (regcache, regnum, buf);
}

/* The "to_fetch_registers" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_fetch_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_fetch_register (regcache, regnum);
  else
    ia64_hpux_fetch_register (regcache, regnum);
}

/* Save register REGNUM (stored in BUF) in the save_state_t structure.
   LEN is the size of the register in bytes.

   Return the value from the corresponding ttrace call (a negative value
   means that the operation failed).  */

static int
ia64_hpux_write_register_to_saved_state_t (int offset, gdb_byte *buf, int len)
{
  return ttrace (TT_LWP_WUREGS, ptid_get_pid (inferior_ptid),
                 ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);
}

/* Store register REGNUM into the inferior.  */

static void
ia64_hpux_store_register (const struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset = u_offsets[regnum];
  gdb_byte *buf;
  int len, status;

  /* If the register cannot be stored, then return now.  */
  if (offset == -1)
    return;

  /* I don't know how to store that register for now.  So just ignore any
     request to store it, to avoid an internal error.  */
  if (regnum == IA64_PSR_REGNUM)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  regcache_raw_collect (regcache, regnum, buf);

  status = ia64_hpux_write_register_to_saved_state_t (offset, buf, len);

  if (status < 0)
    error (_("failed to write register value for %s."),
           gdbarch_register_name (gdbarch, regnum));
}

/* The "to_store_registers" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_store_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_store_register (regcache, regnum);
  else
    ia64_hpux_store_register (regcache, regnum);
}

/* The "xfer_partial" routine from the "inf-ttrace" target layer.
   Ideally, we would like to use this routine for all transfer
   requests, but this platform has a lot of special cases that
   need to be handled manually.  So we override this routine and
   delegate back if we detect that we are not in a special case.  */

static target_xfer_partial_ftype *super_xfer_partial;

/* The "xfer_partial" routine for a memory region that is completely
   outside of the backing-store region.  */

static enum target_xfer_status
ia64_hpux_xfer_memory_no_bs (struct target_ops *ops, const char *annex,
                             gdb_byte *readbuf, const gdb_byte *writebuf,
                             CORE_ADDR addr, LONGEST len,
                             ULONGEST *xfered_len)
{
  /* Memory writes need to be aligned on 16-byte boundaries, at least
     when writing in the text section.  On the other hand, the size
     of the buffer does not need to be a multiple of 16 bytes.

     No such restriction when performing memory reads.  */
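  /* For example (illustrative numbers): a 4-byte write at ADDR == 0x4016
     becomes a 10-byte write at ALIGNED_ADDR == 0x4010, whose first
     6 bytes are first read back from the inferior so that they can be
     rewritten unchanged.  */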

  if (writebuf && addr & 0x0f)
    {
      const CORE_ADDR aligned_addr = addr & ~0x0f;
      const int aligned_len = len + (addr - aligned_addr);
      gdb_byte *aligned_buf = alloca (aligned_len * sizeof (gdb_byte));
      LONGEST status;

      /* Read the portion of memory between ALIGNED_ADDR and ADDR, so
         that we can write it back during our aligned memory write.  */
      status = super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                   aligned_buf /* read */,
                                   NULL /* write */,
                                   aligned_addr, addr - aligned_addr,
                                   xfered_len);
      if (status <= 0)
        return TARGET_XFER_EOF;
      memcpy (aligned_buf + (addr - aligned_addr), writebuf, len);

      return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                 NULL /* read */, aligned_buf /* write */,
                                 aligned_addr, aligned_len, xfered_len);
    }
  else
    /* Memory read or properly aligned memory write.  */
    return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, readbuf,
                               writebuf, addr, len, xfered_len);
}

/* Read LEN bytes at ADDR from memory, and store it in BUF.  This memory
   region is assumed to be inside the backing store.

   Return zero if the operation failed.  */
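
/* Reads are performed in aligned 8-byte chunks using TT_LWP_RDRSEBS;
   SKIP_LO and SKIP_HI trim the bytes of a chunk that fall outside
   [ADDR, ADDR + LEN).  For example (illustrative numbers), reading
   5 bytes at ADDR == 0x1003 fetches the single chunk at 0x1000 and
   copies bytes 3 through 7 of it.  */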

static int
ia64_hpux_read_memory_bs (gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int skip_lo = 0;
      int skip_hi = 0;

      status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      if (tmp_addr < addr)
        skip_lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        skip_hi = (tmp_addr + sizeof (tmp_buf)) - (addr + len);

      memcpy (buf + (tmp_addr + skip_lo - addr),
              tmp_buf + skip_lo,
              sizeof (tmp_buf) - skip_lo - skip_hi);

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* Write LEN bytes from BUF in memory at ADDR.  This memory region is assumed
   to be inside the backing store.

   Return zero if the operation failed.  */
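
/* Writes that only partially cover an aligned 8-byte chunk are done
   as a read-modify-write, so the surrounding bytes are preserved.
   For example (illustrative numbers), writing 3 bytes at ADDR == 0x1006
   touches bytes 6-7 of the chunk at 0x1000 and byte 0 of the chunk at
   0x1008; both chunks are read back before being rewritten.  */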

static int
ia64_hpux_write_memory_bs (const gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int lo = 0;
      int hi = 7;

      if (tmp_addr < addr || tmp_addr + sizeof (tmp_buf) > addr + len)
        {
          /* Part of the 8-byte region pointed to by TMP_ADDR needs to be
             preserved, so read it in before we copy the data that needs
             to be changed.  */
          if (!ia64_hpux_read_memory_bs (tmp_buf, tmp_addr, sizeof (tmp_buf)))
            return 0;
        }

      if (tmp_addr < addr)
        lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        hi = addr - tmp_addr + len - 1;

      memcpy (tmp_buf + lo, buf + tmp_addr - addr + lo, hi - lo + 1);

      status = ttrace (TT_LWP_WRRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* The "xfer_partial" routine for a memory region that is completely
   inside of the backing-store region.  */

static LONGEST
ia64_hpux_xfer_memory_bs (struct target_ops *ops, const char *annex,
                          gdb_byte *readbuf, const gdb_byte *writebuf,
                          CORE_ADDR addr, LONGEST len)
{
  int success;

  if (readbuf)
    success = ia64_hpux_read_memory_bs (readbuf, addr, len);
  else
    success = ia64_hpux_write_memory_bs (writebuf, addr, len);

  if (success)
    return len;
  else
    return 0;
}

/* Get a register value as an unsigned value directly from the system,
   instead of going through the regcache.

   This function is meant to be used when inferior_ptid is not
   a thread/process known to GDB.  */

static ULONGEST
ia64_hpux_get_register_from_save_state_t (int regnum, int reg_size)
{
  gdb_byte *buf = alloca (reg_size);
  int offset = u_offsets[regnum];
  int status;

  /* The register is assumed to be available for fetching.  */
  gdb_assert (offset != -1);

  status = ia64_hpux_read_register_from_save_state_t (offset, buf, reg_size);
  if (status < 0)
    {
      /* This really should not happen.  If it does, emit a warning
         and pretend the register value is zero.  Not exactly the best
         error recovery mechanism, but better than nothing.  We will
         try to do better if we can demonstrate that this can happen
         under normal circumstances.  */
      warning (_("Failed to read value of register number %d."), regnum);
      return 0;
    }

  return extract_unsigned_integer (buf, reg_size, BFD_ENDIAN_BIG);
}

/* The "xfer_partial" target_ops routine for ia64-hpux, in the case
   where the requested object is TARGET_OBJECT_MEMORY.  */

static enum target_xfer_status
ia64_hpux_xfer_memory (struct target_ops *ops, const char *annex,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       CORE_ADDR addr, ULONGEST len, ULONGEST *xfered_len)
{
  CORE_ADDR bsp, bspstore;
  CORE_ADDR start_addr, short_len;
  int status = 0;

  /* The backing-store region cannot be read/written by the standard memory
     read/write operations.  So we handle the memory region piecemeal:
       (1) and (2) The regions before and after the backing-store region,
           which can be treated as normal memory;
       (3) The region inside the backing-store, which needs to be
           read/written specially.  */
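  /* For example (illustrative values): with BSPSTORE == 0x6fb000 and
     BSP == 0x6fb200, a request covering [0x6faf00, 0x6fb300) is split
     into two ordinary transfers, for [0x6faf00, 0x6fb000) and
     [0x6fb200, 0x6fb300), plus one backing-store transfer for
     [0x6fb000, 0x6fb200).  */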

  if (in_inferior_list (ptid_get_pid (inferior_ptid)))
    {
      struct regcache *regcache = get_current_regcache ();

      regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
      regcache_raw_read_unsigned (regcache, IA64_BSPSTORE_REGNUM, &bspstore);
    }
  else
    {
      /* This is probably a child of our inferior created by a fork.
         Because this process has not been added to our inferior list
         (we are probably in the process of handling that child
         process), we do not have a regcache to read the registers
         from.  So get those values directly from the kernel.  */
      bsp = ia64_hpux_get_register_from_save_state_t (IA64_BSP_REGNUM, 8);
      bspstore =
        ia64_hpux_get_register_from_save_state_t (IA64_BSPSTORE_REGNUM, 8);
    }

  /* 1. Memory region before BSPSTORE.  */

  if (addr < bspstore)
    {
      short_len = len;
      if (addr + len > bspstore)
        short_len = bspstore - addr;

      status = ia64_hpux_xfer_memory_no_bs (ops, annex, readbuf, writebuf,
                                            addr, short_len, xfered_len);
      if (status <= 0)
        return TARGET_XFER_EOF;
    }

  /* 2. Memory region after BSP.  */

  if (addr + len > bsp)
    {
      start_addr = addr;
      if (start_addr < bsp)
        start_addr = bsp;
      short_len = len + addr - start_addr;

      status = ia64_hpux_xfer_memory_no_bs
                (ops, annex,
                 readbuf ? readbuf + (start_addr - addr) : NULL,
                 writebuf ? writebuf + (start_addr - addr) : NULL,
                 start_addr, short_len, xfered_len);
      if (status <= 0)
        return TARGET_XFER_EOF;
    }

  /* 3. Memory region between BSPSTORE and BSP.  */

  if (bspstore != bsp
      && addr < bsp && addr + len > bspstore)
    {
      start_addr = addr;
      if (addr < bspstore)
        start_addr = bspstore;
      short_len = len + addr - start_addr;

      if (start_addr + short_len > bsp)
        short_len = bsp - start_addr;

      gdb_assert (short_len > 0);

      status = ia64_hpux_xfer_memory_bs
                 (ops, annex,
                  readbuf ? readbuf + (start_addr - addr) : NULL,
                  writebuf ? writebuf + (start_addr - addr) : NULL,
                  start_addr, short_len);
      if (status <= 0)
        return TARGET_XFER_EOF;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}

/* Handle the transfer of TARGET_OBJECT_HPUX_UREGS objects on ia64-hpux.
   ANNEX is currently ignored.

   The current implementation does not support write transfers (because
   we do not currently need these transfers), and will raise
   a failed assertion if WRITEBUF is not NULL.  */

static enum target_xfer_status
ia64_hpux_xfer_uregs (struct target_ops *ops, const char *annex,
                      gdb_byte *readbuf, const gdb_byte *writebuf,
                      ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  int status;

  gdb_assert (writebuf == NULL);

  status = ia64_hpux_read_register_from_save_state_t (offset, readbuf, len);
  if (status < 0)
    return TARGET_XFER_E_IO;

  *xfered_len = (ULONGEST) len;
  return TARGET_XFER_OK;
}

/* Handle the transfer of TARGET_OBJECT_HPUX_SOLIB_GOT objects on ia64-hpux.

   The current implementation does not support write transfers (because
   we do not currently need these transfers), and will raise
   a failed assertion if WRITEBUF is not NULL.  */

static enum target_xfer_status
ia64_hpux_xfer_solib_got (struct target_ops *ops, const char *annex,
                          gdb_byte *readbuf, const gdb_byte *writebuf,
                          ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  CORE_ADDR fun_addr;
  /* The linkage pointer.  We use a uint64_t to make sure that the size
     of the object we are returning is always 64 bits long, as explained
     in the description of the TARGET_OBJECT_HPUX_SOLIB_GOT object.
     This is probably paranoia, but we do not use a CORE_ADDR because
     it could conceivably be larger than uint64_t.  */
  uint64_t got;

  gdb_assert (writebuf == NULL);

  if (offset >= sizeof (got))
    return TARGET_XFER_EOF;

  fun_addr = string_to_core_addr (annex);
  got = ia64_hpux_get_solib_linkage_addr (fun_addr);

  if (len > sizeof (got) - offset)
    len = sizeof (got) - offset;
  memcpy (readbuf, (gdb_byte *) &got + offset, len);

  *xfered_len = (ULONGEST) len;
  return TARGET_XFER_OK;
}

/* The "to_xfer_partial" target_ops routine for ia64-hpux.  */

static enum target_xfer_status
ia64_hpux_xfer_partial (struct target_ops *ops, enum target_object object,
                        const char *annex, gdb_byte *readbuf,
                        const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
                        ULONGEST *xfered_len)
{
  enum target_xfer_status val;

  if (object == TARGET_OBJECT_MEMORY)
    val = ia64_hpux_xfer_memory (ops, annex, readbuf, writebuf, offset, len,
                                 xfered_len);
  else if (object == TARGET_OBJECT_HPUX_UREGS)
    val = ia64_hpux_xfer_uregs (ops, annex, readbuf, writebuf, offset, len,
                                xfered_len);
  else if (object == TARGET_OBJECT_HPUX_SOLIB_GOT)
    val = ia64_hpux_xfer_solib_got (ops, annex, readbuf, writebuf, offset,
                                    len, xfered_len);
  else
    val = super_xfer_partial (ops, object, annex, readbuf, writebuf, offset,
                              len, xfered_len);

  return val;
}

/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux.  */

static int
ia64_hpux_can_use_hw_breakpoint (struct target_ops *self,
                                 int type, int cnt, int othertype)
{
  /* No hardware watchpoint/breakpoint support yet.  */
  return 0;
}

/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops layer.  */

static void (*super_mourn_inferior) (struct target_ops *);

/* The "to_mourn_inferior" target_ops routine for ia64-hpux.  */

static void
ia64_hpux_mourn_inferior (struct target_ops *ops)
{
  const int pid = ptid_get_pid (inferior_ptid);
  int status;

  super_mourn_inferior (ops);

  /* On this platform, the process still exists even after we received
     an exit event.  Detaching from the process isn't sufficient either,
     as it only turns the process into a zombie.  So the only solution
     we found is to kill it.  */
  ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0);
  wait (&status);
}

/* Prevent warning from -Wmissing-prototypes.  */
void _initialize_ia64_hpux_nat (void);

void
_initialize_ia64_hpux_nat (void)
{
  struct target_ops *t;

  t = inf_ttrace_target ();
  super_to_wait = t->to_wait;
  super_xfer_partial = t->to_xfer_partial;
  super_mourn_inferior = t->to_mourn_inferior;

  t->to_wait = ia64_hpux_wait;
  t->to_fetch_registers = ia64_hpux_fetch_registers;
  t->to_store_registers = ia64_hpux_store_registers;
  t->to_xfer_partial = ia64_hpux_xfer_partial;
  t->to_can_use_hw_breakpoint = ia64_hpux_can_use_hw_breakpoint;
  t->to_mourn_inferior = ia64_hpux_mourn_inferior;
  t->to_attach_no_wait = 1;

  add_target (t);
}