gdb/gdbserver/linux-low.h

/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif
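
/* Illustrative sketch, not part of this header: a backend that supports
   regsets typically defines a table like the one below in its *-low.c
   file and lets initialize_regsets_info count the entries.  The
   fill/store callbacks, the buffer type, and the exact end-of-table
   marker shown here are hypothetical placeholders, not taken from any
   particular backend.  */
#if 0
static void example_fill_gregset (struct regcache *regcache, void *buf);
static void example_store_gregset (struct regcache *regcache, const void *buf);

static struct regset_info example_regsets[] =
  {
    /* General registers, fetched/stored with PTRACE_GETREGS /
       PTRACE_SETREGS; nt_type is 0, so no iovec is involved.  */
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (struct example_gregs),
      GENERAL_REGS, example_fill_gregset, example_store_gregset },
    /* End-of-table marker (negative size).  */
    { 0, 0, 0, -1, -1, NULL, NULL }
  };

static struct regsets_info example_regsets_info =
  {
    example_regsets,   /* regsets */
    0,                 /* num_regsets, computed by initialize_regsets_info */
    NULL               /* disabled_regsets, lazily allocated */
  };
#endif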

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
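
/* Illustrative sketch, not part of this header: a backend's regs_info
   callback usually returns a pointer to a statically allocated struct
   that ties the pieces above together.  Every "example_" name is
   hypothetical.  */
#if 0
static int example_regmap[] = { 0, 4, 8 };  /* `struct user' offsets, one per GDB regno */

static struct usrregs_info example_usrregs_info =
  {
    sizeof (example_regmap) / sizeof (example_regmap[0]),  /* num_regs */
    example_regmap                                          /* regmap */
  };

static struct regs_info example_regs_info =
  {
    NULL,                    /* regset_bitmap */
    &example_usrregs_info,   /* usrregs: PEEKUSER/POKEUSER fallback */
#ifdef HAVE_LINUX_REGSETS
    &example_regsets_info    /* regsets_info from the sketch above */
#endif
  };

static const struct regs_info *
example_regs_info_callback (void)
{
  return &example_regs_info;
}
#endif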

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;

  /* This flag is true iff we've just created or attached to the first
     LWP of this process but it has not stopped yet.  As soon as it
     does, we need to call the low target's arch_setup callback.  */
  int new_inferior;
};

struct lwp_info;

struct linux_target_ops
{
  /* Architecture-specific setup.  */
  void (*arch_setup) (void);

  const struct regs_info *(*regs_info) (void);
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we cannot
     store the register, and 2 if failure to store the register
     is acceptable.  */
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, in which case we should fall
     back to the standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);
  const unsigned char *breakpoint;
  int breakpoint_len;
  CORE_ADDR (*breakpoint_reinsert_addr) (void);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, void *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  struct arch_lwp_info * (*new_thread) (void);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (const char *);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);
};

extern struct linux_target_ops the_low_target;
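
/* Illustrative sketch, not part of this header: each *-low.c backend
   defines the single the_low_target object, filling in the hooks it
   implements; trailing members left out of the initializer are zero
   (NULL), i.e. unsupported.  All "example_" names are hypothetical.  */
#if 0
struct linux_target_ops the_low_target =
  {
    example_arch_setup,
    example_regs_info_callback,
    example_cannot_fetch_register,
    example_cannot_store_register,
    NULL,                       /* fetch_register */
    example_get_pc,
    example_set_pc,
    example_breakpoint,         /* breakpoint instruction bytes */
    example_breakpoint_len,
    NULL,                       /* breakpoint_reinsert_addr */
    0,                          /* decr_pc_after_break */
    example_breakpoint_at,
    /* Remaining hooks (Z packets, watchpoints, tracepoints, ...) are
       left NULL here and thus reported as unsupported.  */
  };
#endif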

#define get_thread_lwp(thr) ((struct lwp_info *) (inferior_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
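
/* Illustrative use of the accessors above: given a `struct thread_info *'
   THR obtained from the generic thread list (hypothetical context), the
   two macros convert between the thread and its LWP record.  */
#if 0
struct lwp_info *lwp = get_thread_lwp (thr);
struct thread_info *thr_again = get_lwp_thread (lwp);
#endif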

/* Reasons an LWP last stopped.  */

enum lwp_stop_reason
{
  /* Either not stopped, or stopped for a reason that doesn't require
     special tracking.  */
  LWP_STOPPED_BY_NO_REASON,

  /* Stopped by a software breakpoint.  */
  LWP_STOPPED_BY_SW_BREAKPOINT,

  /* Stopped by a hardware breakpoint.  */
  LWP_STOPPED_BY_HW_BREAKPOINT,

  /* Stopped by a watchpoint.  */
  LWP_STOPPED_BY_WATCHPOINT
};

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux, ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', keyed by the "overall process ID",
   which GNU/Linux calls the tgid ("thread group ID").  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* If this flag is set, the lwp is known to be dead already (exit
     event already received in a wait(), and is cached in
     status_pending).  */
  int dead;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.).  */
  enum lwp_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;   /* Inclusive */
  CORE_ADDR step_range_end;     /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* True if it is known that this lwp is presently collecting a fast
     tracepoint (it is in the jump pad or in some code that will
     return to the jump pad).  Normally, we won't care about this, but
     we will if a signal arrives to this lwp while it is
     collecting.  */
  int collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

  /* True if the LWP was seen to stop at an internal breakpoint and needs
     stepping over later when it is resumed.  */
  int need_step_over;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);
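
/* Illustrative only: since the return value is an errno value, a caller
   would typically report a failure along these lines.  Where PTID comes
   from and how the error is surfaced are hypothetical here.  */
#if 0
int err = linux_attach_lwp (ptid);
if (err != 0)
  warning ("could not attach to LWP %ld: %s",
           (long) ptid_get_lwp (ptid), strerror (err));
#endif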

struct lwp_info *find_lwp_pid (ptid_t ptid);
void linux_stop_lwp (struct lwp_info *lwp);

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

/* From thread-db.c  */
int thread_db_init (int use_events);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);