# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004, 2005
# Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

# Please email any bugs, comments, and/or additions to this file to:
# bug-gdb@prep.ai.mit.edu

# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)

if $tracelevel then {
    strace $tracelevel
}

set prms_id 0
set bug_id 0

# Are we on a target board? As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.

if ![isnative] then {
    untested "Remote system"
    return
}

# Can the system run this test (in particular support sparse
# corefiles)? On systems that lack sparse corefile support this test
# consumes too many resources - gigabytes worth of disk space and
# I/O bandwidth.

if { [istarget "*-*-*bsd*"]
     || [istarget "*-*-hpux*"]
     || [istarget "*-*-solaris*"]
     || [istarget "*-*-cygwin*"] } {
    untested "Kernel lacks sparse corefile support (PR gdb/1551)"
    return
}

# This testcase causes too much stress (in terms of memory usage)
# on certain systems...
if { [istarget "*-*-*irix*"] } {
    untested "Testcase too stressful for this system"
    return
}

set testfile "bigcore"
set srcfile ${testfile}.c
set binfile ${objdir}/${subdir}/${testfile}
set corefile ${objdir}/${subdir}/${testfile}.corefile

if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
    gdb_suppress_entire_file "Testcase compile failed, so all tests in this file will automatically fail."
}

# Run GDB on the bigcore program up to where it will dump core.

gdb_exit
gdb_start
gdb_reinitialize_dir $srcdir/$subdir
gdb_load ${binfile}
gdb_test "set print sevenbit-strings" "" \
    "set print sevenbit-strings; ${testfile}"
gdb_test "set width 0" "" \
    "set width 0; ${testfile}"
if { ![runto_main] } then {
    gdb_suppress_tests;
}
set print_core_line [gdb_get_line_number "Dump core"]
gdb_test "tbreak $print_core_line"
gdb_test continue ".*print_string.*"
gdb_test next ".*0 = 0.*"

# Traverse part of bigcore's linked list of memory chunks (forward or
# backward), saving each chunk's address.

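# At this point the inferior is stopped just short of dumping core, so
# its list of allocated chunks should be complete.  The proc below
# records the chunk addresses by walking that list with GDB's value
# history: after "print heap.next", "$" names the value just printed,
# so "print $.next" advances one chunk without re-evaluating the whole
# chain from the head.  A manual session would look roughly like this
# (the addresses are purely illustrative):
#
#   (gdb) print heap.next
#   $1 = (struct list *) 0x804a008
#   (gdb) print $.next
#   $2 = (struct list *) 0x804c010
#   ...
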
proc extract_heap { dir } {
    global gdb_prompt
    global expect_out
    set heap ""
    set test "extract ${dir} heap"
    set lim 0
    gdb_test_multiple "print heap.${dir}" "$test" {
        -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
            pass "$test"
        }
        -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
            set heap [concat $heap $expect_out(1,string)]
            if { $lim >= 50 } {
                pass "$test (stop at $lim)"
            } else {
                incr lim
                send_gdb "print \$.${dir}\n"
                exp_continue
            }
        }
        -re ".*$gdb_prompt $" {
            fail "$test (entry $lim)"
        }
        timeout {
            fail "$test (timeout)"
        }
    }
    return $heap;
}
set next_heap [extract_heap next]
set prev_heap [extract_heap prev]

# Save the total allocated size within GDB so that we can check
# the core size later.
gdb_test "set \$bytes_allocated = bytes_allocated" "" "save heap size"

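# Note that $bytes_allocated is a GDB convenience variable: it lives in
# GDB itself rather than in the inferior, so the saved value is still
# available for the size comparison below after the program has
# terminated.
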
# Now create a core dump

# Rename the core file to "TESTFILE.corefile" rather than just "core",
# to avoid problems with sys admin types that like to regularly prune
# all files named "core" from the system.

# Some systems append "core" to the name of the program; others append
# the name of the program to "core"; still others (like Linux, as of
# May 2003) create cores named "core.PID".

# Save the process ID. Some systems dump the core into core.PID.
set test "grab pid"
gdb_test_multiple "info program" $test {
    -re "child process (\[0-9\]+).*$gdb_prompt $" {
        set inferior_pid $expect_out(1,string)
        pass $test
    }
    -re "$gdb_prompt $" {
        set inferior_pid unknown
        pass $test
    }
}

# Dump core using SIGABRT
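# Dumping a core image this large can take a while even when the kernel
# writes it out sparsely, so give the dump (and the checks that follow)
# a generous timeout.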
set oldtimeout $timeout
set timeout 600
gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"

# Find the corefile
set file ""
foreach pat [list core.${inferior_pid} ${testfile}.core core] {
    set names [glob -nocomplain $pat]
    if {[llength $names] == 1} {
        set file [lindex $names 0]
        remote_exec build "mv $file $corefile"
        break
    }
}

if { $file == "" } {
    untested "Can't generate a core file"
    return 0
}

# Check that the corefile is plausibly large enough. We're trying to
# detect the case where the operating system has truncated the file
# just before signed wraparound. TCL, unfortunately, has a similar
# problem - so use catch. It can handle the "bad" size but not
# necessarily the "good" one. And we must use GDB for the comparison,
# similarly.

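# For a sense of scale (illustrative figures only): with a 32-bit
# signed off_t the largest representable size is 2^31 - 1, i.e.
# 2147483647 bytes, just under 2 GiB.  A correctly written core a
# little over that limit either makes [file size] error out or wraps
# to a bogus value in a 32-bit Tcl build, while a core truncated at
# the limit still reports a plausible number - exactly the failure we
# want to detect - so the actual comparison is done inside GDB.
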
if {[catch {file size $corefile} core_size] == 0} {
    set core_ok 0
    gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
        -re " = 1\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 1
        }
        -re " = 0\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 0
        }
    }
} else {
    # Probably failed due to the TCL build having problems with very
    # large values. Since GDB uses a 64-bit off_t (when possible) it
    # shouldn't have this problem. Assume that things are going to
    # work. Without this assumption the test is skipped on systems
    # (such as i386 GNU/Linux with patched kernel) which do pass.
    pass "check core size"
    set core_ok 1
}
if {! $core_ok} {
    untested "check core size (system does not support large corefiles)"
    return 0
}

# Now load up that core file

set test "load corefile"
gdb_test_multiple "core $corefile" "$test" {
    -re "A program is being debugged already. Kill it. .y or n. " {
        send_gdb "y\n"
        exp_continue
    }
    -re "Core was generated by.*$gdb_prompt $" {
        pass "$test"
    }
}

# Finally, re-traverse bigcore's linked list, checking each chunk's
# address against the list saved while the program was running. Don't
# use gdb_test_multiple as we want only one pass/fail. Don't use
# exp_continue as the regular expression involving $heap needs to be
# re-evaluated for each new response.

proc check_heap { dir heap } {
    global gdb_prompt
    set test "check ${dir} heap"
    set ok 1
    set lim 0
    send_gdb "print heap.${dir}\n"
    while { $ok } {
        gdb_expect {
            -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
                if { $lim >= [llength $heap] } {
                    pass "$test"
                    set ok 0
                } else {
                    incr lim
                    send_gdb "print \$.${dir}\n"
                }
            }
            -re ".*$gdb_prompt $" {
                fail "$test (address [lindex $heap $lim])"
                set ok 0
            }
            timeout {
                fail "$test (timeout)"
                set ok 0
            }
        }
    }
}

check_heap next $next_heap
check_heap prev $prev_heap
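# Restore the timeout saved before the core was dumped.
set timeout $oldtimeout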