PR remote/1966
* dcache.c (dcache_write_line): Use target_write. (dcache_read_line): Use target_read. * mi/mi-main.c (mi_cmd_data_read_memory): Use target_read. * symfile.c (struct load_section_data): Add new per-section members. (load_progress): New function. (load_section_callback): Pass load_progress to the new target_write_with_progress. * target.c (current_xfer_partial, memory_xfer_partial): New. (target_xfer_partial): New prototype. (target_xfer_memory, target_xfer_partial_p, xfer_using_stratum) (do_xfer_memory, target_xfer_memory_partial) (target_read_memory_partial, target_write_memory_partial): Delete. (trust_readonly): Move higher in the file. (update_current_target): Use current_xfer_partial. (target_xfer_partial): Use memory_xfer_partial. Handle TARGET_OBJECT_RAW_MEMORY specially. (target_read_memory): Use target_read. (target_write_memory): Use target_write. (default_xfer_partial): Call to_xfer_partial directly. (target_write_with_progress): New function, based on target_write. (target_write): Call it. * target.h (enum target_object): Add TARGET_OBJECT_RAW_MEMORY. (target_write_with_progress): New prototype. (do_xfer_memory, target_read_memory_partial) (target_write_memory_partial): Delete prototypes.
This commit is contained in:
parent
8992f0d7c2
commit
cf7a04e8fb
6 changed files with 325 additions and 489 deletions
|
@ -1,3 +1,33 @@
|
|||
2006-08-15 Daniel Jacobowitz <dan@codesourcery.com>
|
||||
|
||||
PR remote/1966
|
||||
* dcache.c (dcache_write_line): Use target_write.
|
||||
(dcache_read_line): Use target_read.
|
||||
* mi/mi-main.c (mi_cmd_data_read_memory): Use target_read.
|
||||
* symfile.c (struct load_section_data): Add new per-section
|
||||
members.
|
||||
(load_progress): New function.
|
||||
(load_section_callback): Pass load_progress to the new
|
||||
target_write_with_progress.
|
||||
* target.c (current_xfer_partial, memory_xfer_partial): New.
|
||||
(target_xfer_partial): New prototype.
|
||||
(target_xfer_memory, target_xfer_partial_p, xfer_using_stratum)
|
||||
(do_xfer_memory, target_xfer_memory_partial)
|
||||
(target_read_memory_partial, target_write_memory_partial): Delete.
|
||||
(trust_readonly): Move higher in the file.
|
||||
(update_current_target): Use current_xfer_partial.
|
||||
(target_xfer_partial): Use memory_xfer_partial. Handle
|
||||
TARGET_OBJECT_RAW_MEMORY specially.
|
||||
(target_read_memory): Use target_read.
|
||||
(target_write_memory): Use target_write.
|
||||
(default_xfer_partial): Call to_xfer_partial directly.
|
||||
(target_write_with_progress): New function, based on target_write.
|
||||
(target_write): Call it.
|
||||
* target.h (enum target_object): Add TARGET_OBJECT_RAW_MEMORY.
|
||||
(target_write_with_progress): New prototype.
|
||||
(do_xfer_memory, target_read_memory_partial)
|
||||
(target_write_memory_partial): Delete prototypes.
|
||||
|
||||
2006-08-15 Daniel Jacobowitz <dan@codesourcery.com>
|
||||
|
||||
* remote.c (remote_write_bytes): Take a const buffer argument.
|
||||
|
|
38
gdb/dcache.c
38
gdb/dcache.c
|
@ -302,19 +302,15 @@ dcache_write_line (DCACHE *dcache, struct dcache_block *db)
|
|||
}
|
||||
|
||||
dirty_len = e - s;
|
||||
while (dirty_len > 0)
|
||||
{
|
||||
res = do_xfer_memory(memaddr, myaddr, dirty_len, 1,
|
||||
&region->attrib);
|
||||
if (res <= 0)
|
||||
return 0;
|
||||
res = target_write (&current_target, TARGET_OBJECT_RAW_MEMORY,
|
||||
NULL, myaddr, memaddr, dirty_len);
|
||||
if (res < dirty_len)
|
||||
return 0;
|
||||
|
||||
memset (&db->state[XFORM(memaddr)], ENTRY_OK, res);
|
||||
memaddr += res;
|
||||
myaddr += res;
|
||||
len -= res;
|
||||
dirty_len -= res;
|
||||
}
|
||||
memset (&db->state[XFORM(memaddr)], ENTRY_OK, res);
|
||||
memaddr += res;
|
||||
myaddr += res;
|
||||
len -= res;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -361,18 +357,14 @@ dcache_read_line (DCACHE *dcache, struct dcache_block *db)
|
|||
continue;
|
||||
}
|
||||
|
||||
while (reg_len > 0)
|
||||
{
|
||||
res = do_xfer_memory (memaddr, myaddr, reg_len, 0,
|
||||
&region->attrib);
|
||||
if (res <= 0)
|
||||
return 0;
|
||||
res = target_read (&current_target, TARGET_OBJECT_RAW_MEMORY,
|
||||
NULL, myaddr, memaddr, reg_len);
|
||||
if (res < reg_len)
|
||||
return 0;
|
||||
|
||||
memaddr += res;
|
||||
myaddr += res;
|
||||
len -= res;
|
||||
reg_len -= res;
|
||||
}
|
||||
memaddr += res;
|
||||
myaddr += res;
|
||||
len -= res;
|
||||
}
|
||||
|
||||
memset (db->state, ENTRY_OK, sizeof (db->data));
|
||||
|
|
|
@ -853,16 +853,14 @@ mi_cmd_data_read_memory (char *command, char **argv, int argc)
|
|||
total_bytes = word_size * nr_rows * nr_cols;
|
||||
mbuf = xcalloc (total_bytes, 1);
|
||||
make_cleanup (xfree, mbuf);
|
||||
nr_bytes = 0;
|
||||
while (nr_bytes < total_bytes)
|
||||
|
||||
nr_bytes = target_read (&current_target, TARGET_OBJECT_MEMORY, NULL,
|
||||
mbuf, addr, total_bytes);
|
||||
if (nr_bytes <= 0)
|
||||
{
|
||||
int error;
|
||||
long num = target_read_memory_partial (addr + nr_bytes, mbuf + nr_bytes,
|
||||
total_bytes - nr_bytes,
|
||||
&error);
|
||||
if (num <= 0)
|
||||
break;
|
||||
nr_bytes += num;
|
||||
do_cleanups (cleanups);
|
||||
mi_error_message = xstrdup ("Unable to read memory.");
|
||||
return MI_CMD_ERROR;
|
||||
}
|
||||
|
||||
/* output the header information. */
|
||||
|
|
153
gdb/symfile.c
153
gdb/symfile.c
|
@ -1540,93 +1540,104 @@ struct load_section_data {
|
|||
unsigned long write_count;
|
||||
unsigned long data_count;
|
||||
bfd_size_type total_size;
|
||||
|
||||
/* Per-section data for load_progress. */
|
||||
const char *section_name;
|
||||
ULONGEST section_sent;
|
||||
ULONGEST section_size;
|
||||
CORE_ADDR lma;
|
||||
gdb_byte *buffer;
|
||||
};
|
||||
|
||||
/* Target write callback routine for load_section_callback. */
|
||||
|
||||
static void
|
||||
load_progress (ULONGEST bytes, void *untyped_arg)
|
||||
{
|
||||
struct load_section_data *args = untyped_arg;
|
||||
|
||||
if (validate_download)
|
||||
{
|
||||
/* Broken memories and broken monitors manifest themselves here
|
||||
when bring new computers to life. This doubles already slow
|
||||
downloads. */
|
||||
/* NOTE: cagney/1999-10-18: A more efficient implementation
|
||||
might add a verify_memory() method to the target vector and
|
||||
then use that. remote.c could implement that method using
|
||||
the ``qCRC'' packet. */
|
||||
gdb_byte *check = xmalloc (bytes);
|
||||
struct cleanup *verify_cleanups = make_cleanup (xfree, check);
|
||||
|
||||
if (target_read_memory (args->lma, check, bytes) != 0)
|
||||
error (_("Download verify read failed at 0x%s"),
|
||||
paddr (args->lma));
|
||||
if (memcmp (args->buffer, check, bytes) != 0)
|
||||
error (_("Download verify compare failed at 0x%s"),
|
||||
paddr (args->lma));
|
||||
do_cleanups (verify_cleanups);
|
||||
}
|
||||
args->data_count += bytes;
|
||||
args->lma += bytes;
|
||||
args->buffer += bytes;
|
||||
args->write_count += 1;
|
||||
args->section_sent += bytes;
|
||||
if (quit_flag
|
||||
|| (deprecated_ui_load_progress_hook != NULL
|
||||
&& deprecated_ui_load_progress_hook (args->section_name,
|
||||
args->section_sent)))
|
||||
error (_("Canceled the download"));
|
||||
|
||||
if (deprecated_show_load_progress != NULL)
|
||||
deprecated_show_load_progress (args->section_name,
|
||||
args->section_sent,
|
||||
args->section_size,
|
||||
args->data_count,
|
||||
args->total_size);
|
||||
}
|
||||
|
||||
/* Callback service function for generic_load (bfd_map_over_sections). */
|
||||
|
||||
static void
|
||||
load_section_callback (bfd *abfd, asection *asec, void *data)
|
||||
{
|
||||
struct load_section_data *args = data;
|
||||
bfd_size_type size = bfd_get_section_size (asec);
|
||||
gdb_byte *buffer;
|
||||
struct cleanup *old_chain;
|
||||
const char *sect_name = bfd_get_section_name (abfd, asec);
|
||||
LONGEST transferred;
|
||||
|
||||
if (bfd_get_section_flags (abfd, asec) & SEC_LOAD)
|
||||
{
|
||||
bfd_size_type size = bfd_get_section_size (asec);
|
||||
if (size > 0)
|
||||
{
|
||||
gdb_byte *buffer;
|
||||
struct cleanup *old_chain;
|
||||
CORE_ADDR lma = bfd_section_lma (abfd, asec) + args->load_offset;
|
||||
bfd_size_type block_size;
|
||||
int err;
|
||||
const char *sect_name = bfd_get_section_name (abfd, asec);
|
||||
bfd_size_type sent;
|
||||
if ((bfd_get_section_flags (abfd, asec) & SEC_LOAD) == 0)
|
||||
return;
|
||||
|
||||
buffer = xmalloc (size);
|
||||
old_chain = make_cleanup (xfree, buffer);
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
/* Is this really necessary? I guess it gives the user something
|
||||
to look at during a long download. */
|
||||
ui_out_message (uiout, 0, "Loading section %s, size 0x%s lma 0x%s\n",
|
||||
sect_name, paddr_nz (size), paddr_nz (lma));
|
||||
buffer = xmalloc (size);
|
||||
old_chain = make_cleanup (xfree, buffer);
|
||||
|
||||
bfd_get_section_contents (abfd, asec, buffer, 0, size);
|
||||
args->section_name = sect_name;
|
||||
args->section_sent = 0;
|
||||
args->section_size = size;
|
||||
args->lma = bfd_section_lma (abfd, asec) + args->load_offset;
|
||||
args->buffer = buffer;
|
||||
|
||||
sent = 0;
|
||||
do
|
||||
{
|
||||
int len;
|
||||
bfd_size_type this_transfer = size - sent;
|
||||
/* Is this really necessary? I guess it gives the user something
|
||||
to look at during a long download. */
|
||||
ui_out_message (uiout, 0, "Loading section %s, size 0x%s lma 0x%s\n",
|
||||
sect_name, paddr_nz (size), paddr_nz (args->lma));
|
||||
|
||||
len = target_write_memory_partial (lma, buffer,
|
||||
this_transfer, &err);
|
||||
if (err)
|
||||
break;
|
||||
if (validate_download)
|
||||
{
|
||||
/* Broken memories and broken monitors manifest
|
||||
themselves here when bring new computers to
|
||||
life. This doubles already slow downloads. */
|
||||
/* NOTE: cagney/1999-10-18: A more efficient
|
||||
implementation might add a verify_memory()
|
||||
method to the target vector and then use
|
||||
that. remote.c could implement that method
|
||||
using the ``qCRC'' packet. */
|
||||
gdb_byte *check = xmalloc (len);
|
||||
struct cleanup *verify_cleanups =
|
||||
make_cleanup (xfree, check);
|
||||
bfd_get_section_contents (abfd, asec, buffer, 0, size);
|
||||
|
||||
if (target_read_memory (lma, check, len) != 0)
|
||||
error (_("Download verify read failed at 0x%s"),
|
||||
paddr (lma));
|
||||
if (memcmp (buffer, check, len) != 0)
|
||||
error (_("Download verify compare failed at 0x%s"),
|
||||
paddr (lma));
|
||||
do_cleanups (verify_cleanups);
|
||||
}
|
||||
args->data_count += len;
|
||||
lma += len;
|
||||
buffer += len;
|
||||
args->write_count += 1;
|
||||
sent += len;
|
||||
if (quit_flag
|
||||
|| (deprecated_ui_load_progress_hook != NULL
|
||||
&& deprecated_ui_load_progress_hook (sect_name, sent)))
|
||||
error (_("Canceled the download"));
|
||||
transferred = target_write_with_progress (&current_target,
|
||||
TARGET_OBJECT_MEMORY,
|
||||
NULL, buffer, args->lma,
|
||||
size, load_progress, args);
|
||||
if (transferred < size)
|
||||
error (_("Memory access error while loading section %s."),
|
||||
sect_name);
|
||||
|
||||
if (deprecated_show_load_progress != NULL)
|
||||
deprecated_show_load_progress (sect_name, sent, size,
|
||||
args->data_count,
|
||||
args->total_size);
|
||||
}
|
||||
while (sent < size);
|
||||
|
||||
if (err != 0)
|
||||
error (_("Memory access error while loading section %s."), sect_name);
|
||||
|
||||
do_cleanups (old_chain);
|
||||
}
|
||||
}
|
||||
do_cleanups (old_chain);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
546
gdb/target.c
546
gdb/target.c
|
@ -76,14 +76,17 @@ static LONGEST default_xfer_partial (struct target_ops *ops,
|
|||
const gdb_byte *writebuf,
|
||||
ULONGEST offset, LONGEST len);
|
||||
|
||||
/* Transfer LEN bytes between target address MEMADDR and GDB address
|
||||
MYADDR. Returns 0 for success, errno code for failure (which
|
||||
includes partial transfers -- if you want a more useful response to
|
||||
partial transfers, try either target_read_memory_partial or
|
||||
target_write_memory_partial). */
|
||||
static LONGEST current_xfer_partial (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex, gdb_byte *readbuf,
|
||||
const gdb_byte *writebuf,
|
||||
ULONGEST offset, LONGEST len);
|
||||
|
||||
static int target_xfer_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len,
|
||||
int write);
|
||||
static LONGEST target_xfer_partial (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex,
|
||||
void *readbuf, const void *writebuf,
|
||||
ULONGEST offset, LONGEST len);
|
||||
|
||||
static void init_dummy_target (void);
|
||||
|
||||
|
@ -195,6 +198,11 @@ static struct cmd_list_element *targetlist = NULL;
|
|||
|
||||
int attach_flag;
|
||||
|
||||
/* Nonzero if we should trust readonly sections from the
|
||||
executable when reading memory. */
|
||||
|
||||
static int trust_readonly = 0;
|
||||
|
||||
/* Non-zero if we want to see trace of target level stuff. */
|
||||
|
||||
static int targetdebug = 0;
|
||||
|
@ -607,7 +615,7 @@ update_current_target (void)
|
|||
de_fault (to_stop,
|
||||
(void (*) (void))
|
||||
target_ignore);
|
||||
current_target.to_xfer_partial = default_xfer_partial;
|
||||
current_target.to_xfer_partial = current_xfer_partial;
|
||||
de_fault (to_rcmd,
|
||||
(void (*) (char *, struct ui_file *))
|
||||
tcomplain);
|
||||
|
@ -838,13 +846,97 @@ target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* Return non-zero when the target vector has supplied an xfer_partial
|
||||
method and it, rather than xfer_memory, should be used. */
|
||||
static int
|
||||
target_xfer_partial_p (void)
|
||||
/* Perform a partial memory transfer. The arguments and return
|
||||
value are just as for target_xfer_partial. */
|
||||
|
||||
static LONGEST
|
||||
memory_xfer_partial (struct target_ops *ops, void *readbuf, const void *writebuf,
|
||||
ULONGEST memaddr, LONGEST len)
|
||||
{
|
||||
return (target_stack != NULL
|
||||
&& target_stack->to_xfer_partial != default_xfer_partial);
|
||||
LONGEST res;
|
||||
int reg_len;
|
||||
struct mem_region *region;
|
||||
|
||||
/* Zero length requests are ok and require no work. */
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
/* Try the executable file, if "trust-readonly-sections" is set. */
|
||||
if (readbuf != NULL && trust_readonly)
|
||||
{
|
||||
struct section_table *secp;
|
||||
|
||||
secp = target_section_by_addr (ops, memaddr);
|
||||
if (secp != NULL
|
||||
&& (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
|
||||
& SEC_READONLY))
|
||||
return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
|
||||
}
|
||||
|
||||
/* Try GDB's internal data cache. */
|
||||
region = lookup_mem_region (memaddr);
|
||||
if (memaddr + len < region->hi)
|
||||
reg_len = len;
|
||||
else
|
||||
reg_len = region->hi - memaddr;
|
||||
|
||||
switch (region->attrib.mode)
|
||||
{
|
||||
case MEM_RO:
|
||||
if (writebuf != NULL)
|
||||
return -1;
|
||||
break;
|
||||
|
||||
case MEM_WO:
|
||||
if (readbuf != NULL)
|
||||
return -1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (region->attrib.cache)
|
||||
{
|
||||
/* FIXME drow/2006-08-09: This call discards OPS, so the raw
|
||||
memory request will start back at current_target. */
|
||||
if (readbuf != NULL)
|
||||
res = dcache_xfer_memory (target_dcache, memaddr, readbuf,
|
||||
reg_len, 0);
|
||||
else
|
||||
/* FIXME drow/2006-08-09: If we're going to preserve const
|
||||
correctness dcache_xfer_memory should take readbuf and
|
||||
writebuf. */
|
||||
res = dcache_xfer_memory (target_dcache, memaddr,
|
||||
(void *) writebuf,
|
||||
reg_len, 1);
|
||||
if (res <= 0)
|
||||
return -1;
|
||||
else
|
||||
return res;
|
||||
}
|
||||
|
||||
/* If none of those methods found the memory we wanted, fall back
|
||||
to a target partial transfer. Normally a single call to
|
||||
to_xfer_partial is enough; if it doesn't recognize an object
|
||||
it will call the to_xfer_partial of the next target down.
|
||||
But for memory this won't do. Memory is the only target
|
||||
object which can be read from more than one valid target.
|
||||
A core file, for instance, could have some of memory but
|
||||
delegate other bits to the target below it. So, we must
|
||||
manually try all targets. */
|
||||
|
||||
do
|
||||
{
|
||||
res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
|
||||
readbuf, writebuf, memaddr, len);
|
||||
if (res > 0)
|
||||
return res;
|
||||
|
||||
ops = ops->beneath;
|
||||
}
|
||||
while (ops != NULL);
|
||||
|
||||
/* If we still haven't got anything, return the last error. We
|
||||
give up. */
|
||||
return res;
|
||||
}
|
||||
|
||||
static LONGEST
|
||||
|
@ -856,8 +948,25 @@ target_xfer_partial (struct target_ops *ops,
|
|||
LONGEST retval;
|
||||
|
||||
gdb_assert (ops->to_xfer_partial != NULL);
|
||||
retval = ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
|
||||
offset, len);
|
||||
|
||||
/* If this is a memory transfer, let the memory-specific code
|
||||
have a look at it instead. Memory transfers are more
|
||||
complicated. */
|
||||
if (object == TARGET_OBJECT_MEMORY)
|
||||
retval = memory_xfer_partial (ops, readbuf, writebuf, offset, len);
|
||||
else
|
||||
{
|
||||
enum target_object raw_object = object;
|
||||
|
||||
/* If this is a raw memory transfer, request the normal
|
||||
memory object from other layers. */
|
||||
if (raw_object == TARGET_OBJECT_RAW_MEMORY)
|
||||
raw_object = TARGET_OBJECT_MEMORY;
|
||||
|
||||
retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
|
||||
writebuf, offset, len);
|
||||
}
|
||||
|
||||
if (targetdebug)
|
||||
{
|
||||
const unsigned char *myaddr = NULL;
|
||||
|
@ -900,85 +1009,6 @@ target_xfer_partial (struct target_ops *ops,
|
|||
return retval;
|
||||
}
|
||||
|
||||
/* Attempt a transfer all LEN bytes starting at OFFSET between the
|
||||
inferior's KIND:ANNEX space and GDB's READBUF/WRITEBUF buffer. If
|
||||
the transfer succeeds, return zero, otherwize the host ERRNO is
|
||||
returned.
|
||||
|
||||
The inferior is formed from several layers. In the case of
|
||||
corefiles, inf-corefile is layered above inf-exec and a request for
|
||||
text (corefiles do not include text pages) will be first sent to
|
||||
the core-stratum, fail, and then sent to the object-file where it
|
||||
will succeed.
|
||||
|
||||
NOTE: cagney/2004-09-30:
|
||||
|
||||
The old code tried to use four separate mechanisms for mapping an
|
||||
object:offset:len tuple onto an inferior and its address space: the
|
||||
target stack; the inferior's TO_SECTIONS; solib's SO_LIST;
|
||||
overlays.
|
||||
|
||||
This is stupid.
|
||||
|
||||
The code below is instead using a single mechanism (currently
|
||||
strata). If that mechanism proves insufficient then re-factor it
|
||||
implementing another singluar mechanism (for instance, a generic
|
||||
object:annex onto inferior:object:annex say). */
|
||||
|
||||
static LONGEST
|
||||
xfer_using_stratum (enum target_object object, const char *annex,
|
||||
ULONGEST offset, LONGEST len, void *readbuf,
|
||||
const void *writebuf)
|
||||
{
|
||||
LONGEST xfered;
|
||||
struct target_ops *target;
|
||||
|
||||
/* Always successful. */
|
||||
if (len == 0)
|
||||
return 0;
|
||||
/* Never successful. */
|
||||
if (target_stack == NULL)
|
||||
return EIO;
|
||||
|
||||
target = target_stack;
|
||||
while (1)
|
||||
{
|
||||
xfered = target_xfer_partial (target, object, annex,
|
||||
readbuf, writebuf, offset, len);
|
||||
if (xfered > 0)
|
||||
{
|
||||
/* The partial xfer succeeded, update the counts, check that
|
||||
the xfer hasn't finished and if it hasn't set things up
|
||||
for the next round. */
|
||||
len -= xfered;
|
||||
if (len <= 0)
|
||||
return 0;
|
||||
offset += xfered;
|
||||
if (readbuf != NULL)
|
||||
readbuf = (gdb_byte *) readbuf + xfered;
|
||||
if (writebuf != NULL)
|
||||
writebuf = (gdb_byte *) writebuf + xfered;
|
||||
target = target_stack;
|
||||
}
|
||||
else if (xfered < 0)
|
||||
{
|
||||
/* Something totally screwed up, abandon the attempt to
|
||||
xfer. */
|
||||
if (errno)
|
||||
return errno;
|
||||
else
|
||||
return EIO;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* This "stratum" didn't work, try the next one down. */
|
||||
target = target->beneath;
|
||||
if (target == NULL)
|
||||
return EIO;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Read LEN bytes of target memory at address MEMADDR, placing the results in
|
||||
GDB's memory at MYADDR. Returns either 0 for success or an errno value
|
||||
if any error occurs.
|
||||
|
@ -987,28 +1017,27 @@ xfer_using_stratum (enum target_object object, const char *annex,
|
|||
MYADDR. In particular, the caller should not depend upon partial reads
|
||||
filling the buffer with good data. There is no way for the caller to know
|
||||
how much good data might have been transfered anyway. Callers that can
|
||||
deal with partial reads should call target_read_memory_partial. */
|
||||
deal with partial reads should call target_read (which will retry until
|
||||
it makes no progress, and then return how much was transferred). */
|
||||
|
||||
int
|
||||
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
|
||||
{
|
||||
if (target_xfer_partial_p ())
|
||||
return xfer_using_stratum (TARGET_OBJECT_MEMORY, NULL,
|
||||
memaddr, len, myaddr, NULL);
|
||||
if (target_read (¤t_target, TARGET_OBJECT_MEMORY, NULL,
|
||||
myaddr, memaddr, len) == len)
|
||||
return 0;
|
||||
else
|
||||
return target_xfer_memory (memaddr, myaddr, len, 0);
|
||||
return EIO;
|
||||
}
|
||||
|
||||
int
|
||||
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
|
||||
{
|
||||
gdb_byte *bytes = alloca (len);
|
||||
memcpy (bytes, myaddr, len);
|
||||
if (target_xfer_partial_p ())
|
||||
return xfer_using_stratum (TARGET_OBJECT_MEMORY, NULL,
|
||||
memaddr, len, NULL, bytes);
|
||||
if (target_write (&current_target, TARGET_OBJECT_MEMORY, NULL,
|
||||
myaddr, memaddr, len) == len)
|
||||
return 0;
|
||||
else
|
||||
return target_xfer_memory (memaddr, bytes, len, 1);
|
||||
return EIO;
|
||||
}
|
||||
|
||||
#ifndef target_stopped_data_address_p
|
||||
|
@ -1026,7 +1055,6 @@ target_stopped_data_address_p (struct target_ops *target)
|
|||
}
|
||||
#endif
|
||||
|
||||
static int trust_readonly = 0;
|
||||
static void
|
||||
show_trust_readonly (struct ui_file *file, int from_tty,
|
||||
struct cmd_list_element *c, const char *value)
|
||||
|
@ -1036,263 +1064,6 @@ Mode for reading from readonly sections is %s.\n"),
|
|||
value);
|
||||
}
|
||||
|
||||
/* Move memory to or from the targets. The top target gets priority;
|
||||
if it cannot handle it, it is offered to the next one down, etc.
|
||||
|
||||
Result is -1 on error, or the number of bytes transfered. */
|
||||
|
||||
int
|
||||
do_xfer_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len, int write,
|
||||
struct mem_attrib *attrib)
|
||||
{
|
||||
int res;
|
||||
int done = 0;
|
||||
struct target_ops *t;
|
||||
|
||||
/* Zero length requests are ok and require no work. */
|
||||
if (len == 0)
|
||||
return 0;
|
||||
|
||||
/* deprecated_xfer_memory is not guaranteed to set errno, even when
|
||||
it returns 0. */
|
||||
errno = 0;
|
||||
|
||||
if (!write && trust_readonly)
|
||||
{
|
||||
struct section_table *secp;
|
||||
/* User-settable option, "trust-readonly-sections". If true,
|
||||
then memory from any SEC_READONLY bfd section may be read
|
||||
directly from the bfd file. */
|
||||
secp = target_section_by_addr (&current_target, memaddr);
|
||||
if (secp != NULL
|
||||
&& (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
|
||||
& SEC_READONLY))
|
||||
return xfer_memory (memaddr, myaddr, len, 0, attrib, &current_target);
|
||||
}
|
||||
|
||||
/* The quick case is that the top target can handle the transfer. */
|
||||
res = current_target.deprecated_xfer_memory
|
||||
(memaddr, myaddr, len, write, attrib, &current_target);
|
||||
|
||||
/* If res <= 0 then we call it again in the loop. Ah well. */
|
||||
if (res <= 0)
|
||||
{
|
||||
for (t = target_stack; t != NULL; t = t->beneath)
|
||||
{
|
||||
if (!t->to_has_memory)
|
||||
continue;
|
||||
|
||||
res = t->deprecated_xfer_memory (memaddr, myaddr, len, write, attrib, t);
|
||||
if (res > 0)
|
||||
break; /* Handled all or part of xfer */
|
||||
if (t->to_has_all_memory)
|
||||
break;
|
||||
}
|
||||
|
||||
if (res <= 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/* Perform a memory transfer. Iterate until the entire region has
|
||||
been transfered.
|
||||
|
||||
Result is 0 or errno value. */
|
||||
|
||||
static int
|
||||
target_xfer_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len, int write)
|
||||
{
|
||||
int res;
|
||||
int reg_len;
|
||||
struct mem_region *region;
|
||||
|
||||
/* Zero length requests are ok and require no work. */
|
||||
if (len == 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (len > 0)
|
||||
{
|
||||
region = lookup_mem_region(memaddr);
|
||||
if (memaddr + len < region->hi)
|
||||
reg_len = len;
|
||||
else
|
||||
reg_len = region->hi - memaddr;
|
||||
|
||||
switch (region->attrib.mode)
|
||||
{
|
||||
case MEM_RO:
|
||||
if (write)
|
||||
return EIO;
|
||||
break;
|
||||
|
||||
case MEM_WO:
|
||||
if (!write)
|
||||
return EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
while (reg_len > 0)
|
||||
{
|
||||
if (region->attrib.cache)
|
||||
res = dcache_xfer_memory (target_dcache, memaddr, myaddr,
|
||||
reg_len, write);
|
||||
else
|
||||
res = do_xfer_memory (memaddr, myaddr, reg_len, write,
|
||||
&region->attrib);
|
||||
|
||||
if (res <= 0)
|
||||
{
|
||||
/* If this address is for nonexistent memory, read zeros
|
||||
if reading, or do nothing if writing. Return
|
||||
error. */
|
||||
if (!write)
|
||||
memset (myaddr, 0, len);
|
||||
if (errno == 0)
|
||||
return EIO;
|
||||
else
|
||||
return errno;
|
||||
}
|
||||
|
||||
memaddr += res;
|
||||
myaddr += res;
|
||||
len -= res;
|
||||
reg_len -= res;
|
||||
}
|
||||
}
|
||||
|
||||
return 0; /* We managed to cover it all somehow. */
|
||||
}
|
||||
|
||||
|
||||
/* Perform a partial memory transfer.
|
||||
|
||||
If we succeed, set *ERR to zero and return the number of bytes transferred.
|
||||
If we fail, set *ERR to a non-zero errno value, and return -1. */
|
||||
|
||||
static int
|
||||
target_xfer_memory_partial (CORE_ADDR memaddr, gdb_byte *myaddr, int len,
|
||||
int write_p, int *err)
|
||||
{
|
||||
int res;
|
||||
int reg_len;
|
||||
struct mem_region *region;
|
||||
|
||||
/* Zero length requests are ok and require no work. */
|
||||
if (len == 0)
|
||||
{
|
||||
*err = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
region = lookup_mem_region(memaddr);
|
||||
if (memaddr + len < region->hi)
|
||||
reg_len = len;
|
||||
else
|
||||
reg_len = region->hi - memaddr;
|
||||
|
||||
switch (region->attrib.mode)
|
||||
{
|
||||
case MEM_RO:
|
||||
if (write_p)
|
||||
{
|
||||
*err = EIO;
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
|
||||
case MEM_WO:
|
||||
if (write_p)
|
||||
{
|
||||
*err = EIO;
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (region->attrib.cache)
|
||||
res = dcache_xfer_memory (target_dcache, memaddr, myaddr,
|
||||
reg_len, write_p);
|
||||
else
|
||||
res = do_xfer_memory (memaddr, myaddr, reg_len, write_p,
|
||||
&region->attrib);
|
||||
|
||||
if (res <= 0)
|
||||
{
|
||||
if (errno != 0)
|
||||
*err = errno;
|
||||
else
|
||||
*err = EIO;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
*err = 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
int
|
||||
target_read_memory_partial (CORE_ADDR memaddr, gdb_byte *buf,
|
||||
int len, int *err)
|
||||
{
|
||||
if (target_xfer_partial_p ())
|
||||
{
|
||||
int retval;
|
||||
|
||||
retval = target_xfer_partial (target_stack, TARGET_OBJECT_MEMORY,
|
||||
NULL, buf, NULL, memaddr, len);
|
||||
|
||||
if (retval <= 0)
|
||||
{
|
||||
if (errno)
|
||||
*err = errno;
|
||||
else
|
||||
*err = EIO;
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
*err = 0;
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
else
|
||||
return target_xfer_memory_partial (memaddr, buf, len, 0, err);
|
||||
}
|
||||
|
||||
int
|
||||
target_write_memory_partial (CORE_ADDR memaddr, gdb_byte *buf,
|
||||
int len, int *err)
|
||||
{
|
||||
if (target_xfer_partial_p ())
|
||||
{
|
||||
int retval;
|
||||
|
||||
retval = target_xfer_partial (target_stack, TARGET_OBJECT_MEMORY,
|
||||
NULL, NULL, buf, memaddr, len);
|
||||
|
||||
if (retval <= 0)
|
||||
{
|
||||
if (errno)
|
||||
*err = errno;
|
||||
else
|
||||
*err = EIO;
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
*err = 0;
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
else
|
||||
return target_xfer_memory_partial (memaddr, buf, len, 1, err);
|
||||
}
|
||||
|
||||
/* More generic transfers. */
|
||||
|
||||
static LONGEST
|
||||
|
@ -1329,8 +1100,24 @@ default_xfer_partial (struct target_ops *ops, enum target_object object,
|
|||
return -1;
|
||||
}
|
||||
else if (ops->beneath != NULL)
|
||||
return target_xfer_partial (ops->beneath, object, annex,
|
||||
readbuf, writebuf, offset, len);
|
||||
return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
|
||||
readbuf, writebuf, offset, len);
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* The xfer_partial handler for the topmost target. Unlike the default,
|
||||
it does not need to handle memory specially; it just passes all
|
||||
requests down the stack. */
|
||||
|
||||
static LONGEST
|
||||
current_xfer_partial (struct target_ops *ops, enum target_object object,
|
||||
const char *annex, gdb_byte *readbuf,
|
||||
const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
|
||||
{
|
||||
if (ops->beneath != NULL)
|
||||
return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
|
||||
readbuf, writebuf, offset, len);
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
@ -1383,11 +1170,14 @@ target_read (struct target_ops *ops,
|
|||
return len;
|
||||
}
|
||||
|
||||
/* An alternative to target_write with progress callbacks. */
|
||||
|
||||
LONGEST
|
||||
target_write (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex, const gdb_byte *buf,
|
||||
ULONGEST offset, LONGEST len)
|
||||
target_write_with_progress (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex, const gdb_byte *buf,
|
||||
ULONGEST offset, LONGEST len,
|
||||
void (*progress) (ULONGEST, void *), void *baton)
|
||||
{
|
||||
LONGEST xfered = 0;
|
||||
while (xfered < len)
|
||||
|
@ -1395,17 +1185,31 @@ target_write (struct target_ops *ops,
|
|||
LONGEST xfer = target_write_partial (ops, object, annex,
|
||||
(gdb_byte *) buf + xfered,
|
||||
offset + xfered, len - xfered);
|
||||
/* Call an observer, notifying them of the xfer progress? */
|
||||
|
||||
if (xfer == 0)
|
||||
return xfered;
|
||||
if (xfer < 0)
|
||||
return -1;
|
||||
|
||||
if (progress)
|
||||
(*progress) (xfer, baton);
|
||||
|
||||
xfered += xfer;
|
||||
QUIT;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
LONGEST
|
||||
target_write (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex, const gdb_byte *buf,
|
||||
ULONGEST offset, LONGEST len)
|
||||
{
|
||||
return target_write_with_progress (ops, object, annex, buf, offset, len,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
/* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
|
||||
the size of the transferred data. PADDING additional bytes are
|
||||
available in *BUF_P. This is a helper function for
|
||||
|
|
31
gdb/target.h
31
gdb/target.h
|
@ -189,6 +189,10 @@ enum target_object
|
|||
TARGET_OBJECT_AVR,
|
||||
/* Transfer up-to LEN bytes of memory starting at OFFSET. */
|
||||
TARGET_OBJECT_MEMORY,
|
||||
/* Memory, avoiding GDB's data cache and trusting the executable.
|
||||
Target implementations of to_xfer_partial never need to handle
|
||||
this object, and most callers should not use it. */
|
||||
TARGET_OBJECT_RAW_MEMORY,
|
||||
/* Kernel Unwind Table. See "ia64-tdep.c". */
|
||||
TARGET_OBJECT_UNWIND_TABLE,
|
||||
/* Transfer auxilliary vector. */
|
||||
|
@ -220,6 +224,18 @@ extern LONGEST target_write (struct target_ops *ops,
|
|||
const char *annex, const gdb_byte *buf,
|
||||
ULONGEST offset, LONGEST len);
|
||||
|
||||
/* Similar to target_write, except that it also calls PROGRESS
|
||||
with the number of bytes written and the opaque BATON after
|
||||
every partial write. This is useful for progress reporting
|
||||
and user interaction while writing data. To abort the transfer,
|
||||
the progress callback can throw an exception. */
|
||||
LONGEST target_write_with_progress (struct target_ops *ops,
|
||||
enum target_object object,
|
||||
const char *annex, const gdb_byte *buf,
|
||||
ULONGEST offset, LONGEST len,
|
||||
void (*progress) (ULONGEST, void *),
|
||||
void *baton);
|
||||
|
||||
/* Wrapper to perform a full read of unknown size. OBJECT/ANNEX will
|
||||
be read using OPS. The return value will be -1 if the transfer
|
||||
fails or is not supported; 0 if the object is empty; or the length
|
||||
|
@ -547,9 +563,6 @@ extern void target_disconnect (char *, int);
|
|||
|
||||
extern DCACHE *target_dcache;
|
||||
|
||||
extern int do_xfer_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len,
|
||||
int write, struct mem_attrib *attrib);
|
||||
|
||||
extern int target_read_string (CORE_ADDR, char **, int, int *);
|
||||
|
||||
extern int target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len);
|
||||
|
@ -563,18 +576,6 @@ extern int xfer_memory (CORE_ADDR, gdb_byte *, int, int,
|
|||
extern int child_xfer_memory (CORE_ADDR, gdb_byte *, int, int,
|
||||
struct mem_attrib *, struct target_ops *);
|
||||
|
||||
/* Make a single attempt at transfering LEN bytes. On a successful
|
||||
transfer, the number of bytes actually transfered is returned and
|
||||
ERR is set to 0. When a transfer fails, -1 is returned (the number
|
||||
of bytes actually transfered is not defined) and ERR is set to a
|
||||
non-zero error indication. */
|
||||
|
||||
extern int target_read_memory_partial (CORE_ADDR addr, gdb_byte *buf,
|
||||
int len, int *err);
|
||||
|
||||
extern int target_write_memory_partial (CORE_ADDR addr, gdb_byte *buf,
|
||||
int len, int *err);
|
||||
|
||||
extern char *child_pid_to_exec_file (int);
|
||||
|
||||
extern char *child_core_file_to_sym_file (char *);
|
||||
|
|
Loading…
Reference in a new issue