2013-03-11 08:24:07 +00:00
|
|
|
/* Linux-dependent part of branch trace support for GDB, and GDBserver.
|
|
|
|
|
2015-01-01 09:32:14 +00:00
|
|
|
Copyright (C) 2013-2015 Free Software Foundation, Inc.
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
Contributed by Intel Corp. <markus.t.metzger@intel.com>
|
|
|
|
|
|
|
|
This file is part of GDB.
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation; either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>. */
|
|
|
|
|
2014-09-12 09:11:42 +00:00
|
|
|
#include "common-defs.h"
|
2013-03-11 08:24:07 +00:00
|
|
|
#include "linux-btrace.h"
|
2014-09-12 09:11:42 +00:00
|
|
|
#include "common-regcache.h"
|
2013-05-07 13:04:58 +00:00
|
|
|
#include "gdb_wait.h"
|
Rename 32- and 64-bit Intel files from "i386" to "x86"
This commit renames nine files that contain code used by both 32- and
64-bit Intel ports such that their names are prefixed with "x86"
rather than "i386". All types, functions and variables within these
files are likewise renamed such that their names are prefixed with
"x86" rather than "i386". This makes GDB follow the convention used
by gdbserver such that 32-bit Intel code lives in files called
"i386-*", 64-bit Intel code lives in files called "amd64-*", and code
for both 32- and 64-bit Intel lives in files called "x86-*".
This commit only renames OS-independent files. The Linux ports of
both GDB and gdbserver now follow the i386/amd64/x86 convention fully.
Some ports still use the old convention where "i386" in file/function/
type/variable names can mean "32-bit only" or "32- and 64-bit" but I
don't want to touch ports I can't fully test except where absolutely
necessary.
gdb/ChangeLog:
* i386-nat.h: Renamed as...
* x86-nat.h: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
* i386-nat.c: Renamed as...
* x86-nat.c: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
* common/i386-xstate.h: Renamed as...
* common/x86-xstate.h: New file. All type, function and variable
name prefixes changed from "i386_" to "x86_". All references
updated.
* nat/i386-cpuid.h: Renamed as...
* nat/x86-cpuid.h: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
* nat/i386-gcc-cpuid.h: Renamed as...
* nat/x86-gcc-cpuid.h: New file. All type, function and variable
name prefixes changed from "i386_" to "x86_". All references
updated.
* nat/i386-dregs.h: Renamed as...
* nat/x86-dregs.h: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
* nat/i386-dregs.c: Renamed as...
* nat/x86-dregs.c: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
gdb/gdbserver/ChangeLog:
* i386-low.h: Renamed as...
* x86-low.h: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
* i386-low.c: Renamed as...
* x86-low.c: New file. All type, function and variable name
prefixes changed from "i386_" to "x86_". All references updated.
2014-08-19 14:16:11 +00:00
|
|
|
#include "x86-cpuid.h"
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2013-09-29 03:57:34 +00:00
|
|
|
#ifdef HAVE_SYS_SYSCALL_H
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <sys/user.h>
|
2013-03-11 08:38:27 +00:00
|
|
|
#include <sys/ptrace.h>
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <signal.h>
|
2015-01-29 09:43:05 +00:00
|
|
|
#include <sys/utsname.h>
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
/* A branch trace record in perf_event.

   Matches the sample layout produced by a perf event configured with
   PERF_SAMPLE_IP | PERF_SAMPLE_ADDR (see linux_enable_bts below).  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
|
|
|
|
|
|
|
|
/* A perf_event branch trace sample.

   This is the full record as it appears in the perf event ring buffer:
   a generic header followed by the BTS payload.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
|
|
|
|
|
2014-02-14 08:25:40 +00:00
|
|
|
/* Identify the cpu we're running on. */
|
|
|
|
static struct btrace_cpu
|
|
|
|
btrace_this_cpu (void)
|
|
|
|
{
|
|
|
|
struct btrace_cpu cpu;
|
|
|
|
unsigned int eax, ebx, ecx, edx;
|
|
|
|
int ok;
|
|
|
|
|
|
|
|
memset (&cpu, 0, sizeof (cpu));
|
|
|
|
|
|
|
|
ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
|
|
|
|
if (ok != 0)
|
|
|
|
{
|
|
|
|
if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
|
|
|
|
&& edx == signature_INTEL_edx)
|
|
|
|
{
|
|
|
|
unsigned int cpuid, ignore;
|
|
|
|
|
|
|
|
ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
|
|
|
|
if (ok != 0)
|
|
|
|
{
|
|
|
|
cpu.vendor = CV_INTEL;
|
|
|
|
|
|
|
|
cpu.family = (cpuid >> 8) & 0xf;
|
|
|
|
cpu.model = (cpuid >> 4) & 0xf;
|
|
|
|
|
|
|
|
if (cpu.family == 0x6)
|
|
|
|
cpu.model += (cpuid >> 12) & 0xf0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return cpu;
|
|
|
|
}
|
|
|
|
|
2014-01-17 13:40:02 +00:00
|
|
|
/* Return non-zero if there is new data in PEVENT; zero otherwise. */
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2014-01-17 13:40:02 +00:00
|
|
|
static int
|
|
|
|
perf_event_new_data (const struct perf_event_buffer *pev)
|
2013-03-11 08:24:07 +00:00
|
|
|
{
|
2014-01-17 13:40:02 +00:00
|
|
|
return *pev->data_head != pev->last_head;
|
2013-03-11 08:24:07 +00:00
|
|
|
}
|
|
|
|
|
2015-01-29 09:43:05 +00:00
|
|
|
/* Try to determine the size of a pointer in bits for the OS.

   This is the same as the size of a pointer for the inferior process
   except when a 32-bit inferior is running on a 64-bit OS.

   Returns 64 on a 64-bit x86 kernel and 0 when the size cannot be
   determined here (the caller fills it in later from the inferior).  */

static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;

  memset (&utsn, 0, sizeof (utsn));

  if (uname (&utsn) < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}
|
|
|
|
|
2013-03-11 08:24:07 +00:00
|
|
|
/* Check whether an address is in the kernel. */
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
|
|
|
|
uint64_t addr)
|
|
|
|
{
|
|
|
|
uint64_t mask;
|
|
|
|
|
|
|
|
/* If we don't know the size of a pointer, we can't check. Let's assume it's
|
|
|
|
not a kernel address in this case. */
|
|
|
|
if (tinfo->ptr_bits == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* A bit mask for the most significant bit in an address. */
|
|
|
|
mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
|
|
|
|
|
|
|
|
/* Check whether the most significant bit in the address is set. */
|
|
|
|
return (addr & mask) != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check whether a perf event record should be skipped.

   Returns non-zero if the record's branch source lies in kernel space.  */

static inline int
perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
			    const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (tinfo, bts->from);
}
|
|
|
|
|
|
|
|
/* Perform a few consistency checks on a perf event sample record. This is
|
|
|
|
meant to catch cases when we get out of sync with the perf event stream. */
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
perf_event_sample_ok (const struct perf_event_sample *sample)
|
|
|
|
{
|
|
|
|
if (sample->header.type != PERF_RECORD_SAMPLE)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (sample->header.size != sizeof (*sample))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.

   Returns a vector of blocks in reverse execution order: the most recent
   block first.  The caller owns the returned vector.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start,
		     unsigned long long size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  unsigned long long read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */

	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      /* A malformed record means we lost synchronization with the stream;
	 stop here rather than report bogus branches.  */
      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}

      if (perf_event_skip_bts_record (tinfo, &psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
|
|
|
|
|
2014-01-17 12:29:19 +00:00
|
|
|
/* Check whether the kernel supports BTS.

   We fork a child, attach via PTRACE_TRACEME, stop it with SIGTRAP, and
   then try to open a BTS-style perf event on it.  The return value is
   non-zero iff the perf_event_open call succeeded.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), strerror (errno));
      return 0;

    case 0:
      /* Child: arrange to be traced, then stop ourselves so the parent
	 can open a perf event on us.  We always _exit; the parent kills
	 us anyway.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test bts: cannot PTRACE_TRACEME: %s."),
		   strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test bts: cannot raise SIGTRAP: %s."),
		   strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default:
      /* Parent: wait for the child to stop on its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test bts: expected stop. status: %d."),
		   status);
	  return 0;
	}

      /* The same attribute configuration linux_enable_bts uses.  */
      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
	close (file);

      /* Tear the child down and reap it.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test bts: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
|
|
|
|
|
2014-01-17 12:29:19 +00:00
|
|
|
/* Check whether an Intel cpu supports BTS. */
|
2013-03-11 08:38:27 +00:00
|
|
|
|
|
|
|
static int
|
2014-02-14 08:25:40 +00:00
|
|
|
intel_supports_bts (const struct btrace_cpu *cpu)
|
2013-03-11 08:38:27 +00:00
|
|
|
{
|
2014-02-14 08:25:40 +00:00
|
|
|
switch (cpu->family)
|
2013-05-06 18:03:33 +00:00
|
|
|
{
|
|
|
|
case 0x6:
|
2014-02-14 08:25:40 +00:00
|
|
|
switch (cpu->model)
|
2013-05-06 18:03:33 +00:00
|
|
|
{
|
|
|
|
case 0x1a: /* Nehalem */
|
|
|
|
case 0x1f:
|
|
|
|
case 0x1e:
|
|
|
|
case 0x2e:
|
|
|
|
case 0x25: /* Westmere */
|
|
|
|
case 0x2c:
|
|
|
|
case 0x2f:
|
|
|
|
case 0x2a: /* Sandy Bridge */
|
|
|
|
case 0x2d:
|
|
|
|
case 0x3a: /* Ivy Bridge */
|
|
|
|
|
|
|
|
/* AAJ122: LBR, BTM, or BTS records may have incorrect branch
|
|
|
|
"from" information afer an EIST transition, T-states, C1E, or
|
|
|
|
Adaptive Thermal Throttling. */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2013-03-11 08:38:27 +00:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-01-17 12:29:19 +00:00
|
|
|
/* Check whether the cpu supports BTS. */
|
2013-03-11 08:38:27 +00:00
|
|
|
|
|
|
|
static int
|
2014-01-17 12:29:19 +00:00
|
|
|
cpu_supports_bts (void)
|
2013-03-11 08:38:27 +00:00
|
|
|
{
|
2014-02-14 08:25:40 +00:00
|
|
|
struct btrace_cpu cpu;
|
2013-03-11 08:38:27 +00:00
|
|
|
|
2014-02-14 08:25:40 +00:00
|
|
|
cpu = btrace_this_cpu ();
|
|
|
|
switch (cpu.vendor)
|
|
|
|
{
|
|
|
|
default:
|
|
|
|
/* Don't know about others. Let's assume they do. */
|
|
|
|
return 1;
|
2013-03-11 08:38:27 +00:00
|
|
|
|
2014-02-14 08:25:40 +00:00
|
|
|
case CV_INTEL:
|
|
|
|
return intel_supports_bts (&cpu);
|
|
|
|
}
|
2013-03-11 08:38:27 +00:00
|
|
|
}
|
|
|
|
|
2014-01-17 12:29:19 +00:00
|
|
|
/* Check whether the linux target supports BTS.

   The result is cached across calls: 1 means supported, -1 means not
   supported, 0 means not yet probed.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    cached = (kernel_supports_bts () && cpu_supports_bts ()) ? 1 : -1;

  return cached > 0;
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h. */
|
|
|
|
|
2014-01-17 12:29:19 +00:00
|
|
|
int
|
|
|
|
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
|
|
|
|
{
|
|
|
|
switch (format)
|
|
|
|
{
|
|
|
|
case BTRACE_FORMAT_NONE:
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
case BTRACE_FORMAT_BTS:
|
|
|
|
return linux_supports_bts ();
|
|
|
|
}
|
|
|
|
|
|
|
|
internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
|
|
|
|
}
|
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
/* Enable branch tracing in BTS format. */
|
2014-01-17 12:29:19 +00:00
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
static struct btrace_target_info *
|
2013-11-28 15:39:12 +00:00
|
|
|
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
|
2013-03-11 08:24:07 +00:00
|
|
|
{
|
2014-01-17 13:40:02 +00:00
|
|
|
struct perf_event_mmap_page *header;
|
2013-03-11 08:24:07 +00:00
|
|
|
struct btrace_target_info *tinfo;
|
2013-11-28 14:44:13 +00:00
|
|
|
struct btrace_tinfo_bts *bts;
|
2013-11-28 15:39:12 +00:00
|
|
|
unsigned long long size, pages;
|
2013-04-10 11:43:41 +00:00
|
|
|
int pid, pg;
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
tinfo = xzalloc (sizeof (*tinfo));
|
|
|
|
tinfo->ptid = ptid;
|
2015-01-29 09:43:05 +00:00
|
|
|
tinfo->ptr_bits = linux_determine_kernel_ptr_bits ();
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
tinfo->conf.format = BTRACE_FORMAT_BTS;
|
|
|
|
bts = &tinfo->variant.bts;
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
bts->attr.size = sizeof (bts->attr);
|
|
|
|
bts->attr.type = PERF_TYPE_HARDWARE;
|
|
|
|
bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
|
|
|
|
bts->attr.sample_period = 1;
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
/* We sample from and to address. */
|
|
|
|
bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
bts->attr.exclude_kernel = 1;
|
|
|
|
bts->attr.exclude_hv = 1;
|
|
|
|
bts->attr.exclude_idle = 1;
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
pid = ptid_get_lwp (ptid);
|
|
|
|
if (pid == 0)
|
|
|
|
pid = ptid_get_pid (ptid);
|
|
|
|
|
|
|
|
errno = 0;
|
2013-11-28 14:44:13 +00:00
|
|
|
bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
|
|
|
|
if (bts->file < 0)
|
2013-03-11 08:24:07 +00:00
|
|
|
goto err;
|
|
|
|
|
2013-11-28 15:39:12 +00:00
|
|
|
/* Convert the requested size in bytes to pages (rounding up). */
|
|
|
|
pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
|
|
|
|
/* We need at least one page. */
|
|
|
|
if (pages == 0)
|
|
|
|
pages = 1;
|
|
|
|
|
|
|
|
/* The buffer size can be requested in powers of two pages. Adjust PAGES
|
|
|
|
to the next power of two. */
|
|
|
|
for (pg = 0; pages != (1u << pg); ++pg)
|
|
|
|
if ((pages & (1u << pg)) != 0)
|
|
|
|
pages += (1u << pg);
|
|
|
|
|
|
|
|
/* We try to allocate the requested size.
|
|
|
|
If that fails, try to get as much as we can. */
|
|
|
|
for (; pages > 0; pages >>= 1)
|
2013-04-10 11:43:41 +00:00
|
|
|
{
|
2013-11-28 15:39:12 +00:00
|
|
|
size_t length;
|
|
|
|
|
|
|
|
size = pages * PAGE_SIZE;
|
|
|
|
length = size + PAGE_SIZE;
|
|
|
|
|
|
|
|
/* Check for overflows. */
|
|
|
|
if ((unsigned long long) length < size)
|
|
|
|
continue;
|
|
|
|
|
2013-04-10 11:43:41 +00:00
|
|
|
/* The number of pages we request needs to be a power of two. */
|
2013-11-28 15:39:12 +00:00
|
|
|
header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
|
2014-01-17 13:40:02 +00:00
|
|
|
if (header != MAP_FAILED)
|
|
|
|
break;
|
2013-04-10 11:43:41 +00:00
|
|
|
}
|
2013-03-11 08:24:07 +00:00
|
|
|
|
2014-01-17 13:40:02 +00:00
|
|
|
if (header == MAP_FAILED)
|
|
|
|
goto err_file;
|
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
bts->header = header;
|
|
|
|
bts->bts.mem = ((const uint8_t *) header) + PAGE_SIZE;
|
2013-11-28 15:39:12 +00:00
|
|
|
bts->bts.size = size;
|
2013-11-28 14:44:13 +00:00
|
|
|
bts->bts.data_head = &header->data_head;
|
|
|
|
bts->bts.last_head = 0;
|
2014-01-17 13:40:02 +00:00
|
|
|
|
2013-11-28 15:39:12 +00:00
|
|
|
tinfo->conf.bts.size = size;
|
2014-01-17 13:40:02 +00:00
|
|
|
return tinfo;
|
|
|
|
|
|
|
|
err_file:
|
2013-04-10 11:43:41 +00:00
|
|
|
/* We were not able to allocate any buffer. */
|
2013-11-28 14:44:13 +00:00
|
|
|
close (bts->file);
|
2013-03-11 08:24:07 +00:00
|
|
|
|
|
|
|
err:
|
|
|
|
xfree (tinfo);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h. */
|
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
struct btrace_target_info *
|
|
|
|
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
|
|
|
|
{
|
|
|
|
struct btrace_target_info *tinfo;
|
|
|
|
|
|
|
|
tinfo = NULL;
|
|
|
|
switch (conf->format)
|
|
|
|
{
|
|
|
|
case BTRACE_FORMAT_NONE:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BTRACE_FORMAT_BTS:
|
2013-11-28 15:39:12 +00:00
|
|
|
tinfo = linux_enable_bts (ptid, &conf->bts);
|
2013-11-28 14:44:13 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return tinfo;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable BTS tracing. */
|
|
|
|
|
|
|
|
static enum btrace_error
|
|
|
|
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
|
2013-03-11 08:24:07 +00:00
|
|
|
{
|
2014-01-17 13:40:02 +00:00
|
|
|
munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
|
2013-03-11 08:24:07 +00:00
|
|
|
close (tinfo->file);
|
|
|
|
|
2013-06-03 13:39:35 +00:00
|
|
|
return BTRACE_ERR_NONE;
|
2013-03-11 08:24:07 +00:00
|
|
|
}
|
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
/* See linux-btrace.h. */
|
|
|
|
|
|
|
|
enum btrace_error
|
|
|
|
linux_disable_btrace (struct btrace_target_info *tinfo)
|
|
|
|
{
|
|
|
|
enum btrace_error errcode;
|
|
|
|
|
|
|
|
errcode = BTRACE_ERR_NOT_SUPPORTED;
|
|
|
|
switch (tinfo->conf.format)
|
|
|
|
{
|
|
|
|
case BTRACE_FORMAT_NONE:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BTRACE_FORMAT_BTS:
|
|
|
|
errcode = linux_disable_bts (&tinfo->variant.bts);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (errcode == BTRACE_ERR_NONE)
|
|
|
|
xfree (tinfo);
|
|
|
|
|
|
|
|
return errcode;
|
|
|
|
}
|
|
|
|
|
2013-11-13 14:31:07 +00:00
|
|
|
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.

   Returns BTRACE_ERR_NONE on success, BTRACE_ERR_OVERFLOW if a delta read
   cannot be satisfied from the ring buffer.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
		struct btrace_target_info *tinfo,
		enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  unsigned long long data_head, data_tail, buffer_size, size;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
	{
	  /* Determine the number of bytes to read and check for buffer
	     overflows.  */

	  /* Check for data head overflows.  We might be able to recover from
	     those but they are very unlikely and it's not really worth the
	     effort, I think.  */
	  if (data_head < data_tail)
	    return BTRACE_ERR_OVERFLOW;

	  /* If the buffer is smaller than the trace delta, we overflowed.  */
	  size = data_head - data_tail;
	  if (buffer_size < size)
	    return BTRACE_ERR_OVERFLOW;
	}
      else
	{
	  /* Read the entire buffer.  */
	  size = buffer_size;

	  /* Adjust the size if the buffer has not overflowed, yet.  */
	  if (data_head < size)
	    size = data_head;
	}

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
	end = start;
      else
	end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
	 On multi-core systems, the debugger might therefore run while the
	 kernel might be writing the last branch trace records.

	 Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
	break;
    }

  /* Remember how far we consumed the buffer for the next delta read.  */
  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
|
|
|
|
|
2013-11-13 14:31:07 +00:00
|
|
|
/* See linux-btrace.h. */
|
|
|
|
|
|
|
|
enum btrace_error
|
|
|
|
linux_read_btrace (struct btrace_data *btrace,
|
|
|
|
struct btrace_target_info *tinfo,
|
|
|
|
enum btrace_read_type type)
|
|
|
|
{
|
2013-11-28 14:44:13 +00:00
|
|
|
switch (tinfo->conf.format)
|
|
|
|
{
|
|
|
|
case BTRACE_FORMAT_NONE:
|
|
|
|
return BTRACE_ERR_NOT_SUPPORTED;
|
|
|
|
|
|
|
|
case BTRACE_FORMAT_BTS:
|
|
|
|
/* We read btrace in BTS format. */
|
|
|
|
btrace->format = BTRACE_FORMAT_BTS;
|
|
|
|
btrace->variant.bts.blocks = NULL;
|
|
|
|
|
|
|
|
return linux_read_bts (&btrace->variant.bts, tinfo, type);
|
|
|
|
}
|
|
|
|
|
|
|
|
internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h.

   Returns the branch trace configuration this target was enabled with;
   the returned pointer aliases TINFO and must not be freed.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
|
|
|
|
|
2013-03-11 08:24:07 +00:00
|
|
|
#else /* !HAVE_LINUX_PERF_EVENT_H */
|
|
|
|
|
|
|
|
/* See linux-btrace.h.

   Stub: without <linux/perf_event.h> branch tracing is not supported.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h.

   Stub: branch tracing cannot be enabled without perf event support.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h.

   Stub: nothing to disable without perf event support.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
|
|
|
|
|
|
|
|
/* See linux-btrace.h.

   Stub: nothing to read without perf event support.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
|
|
|
|
|
2013-11-28 14:44:13 +00:00
|
|
|
/* See linux-btrace.h.

   Stub: no configuration is available without perf event support.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}
|
|
|
|
|
2013-03-11 08:24:07 +00:00
|
|
|
#endif /* !HAVE_LINUX_PERF_EVENT_H */
|