2e0488d33f
When producing a non-relocatable output with --emit-relocs, the aarch64 linker emits the wrong addend in RELA entries that relocate against local symbols. For example, with this simple testcase:

foo.c
===
const char * const a = "foo";
const char * foo () { return a; }

bar.c
===
const char * const b = "bar";
const char * bar () { return b; }

aarch64-none-linux-gnu-ld --emit-relocs -o x.o foo.o bar.o
aarch64-none-linux-gnu-readelf -r x.o

... R_AARCH64_ADR_PRE 0000000000400018 .rodata + 0
... R_AARCH64_ADD_ABS 0000000000400018 .rodata + 0
... R_AARCH64_ADR_PRE 0000000000400018 .rodata + 0
... R_AARCH64_ADD_ABS 0000000000400018 .rodata + 0

while it should be:

... R_AARCH64_ADR_PRE 0000000000400018 .rodata + 0
... R_AARCH64_ADD_ABS 0000000000400018 .rodata + 0
... R_AARCH64_ADR_PRE 0000000000400018 .rodata + 10
... R_AARCH64_ADD_ABS 0000000000400018 .rodata + 10

The generic bfd code can already handle this properly, but only when elf_backend_rela_normal is set to '1'. This patch enables that and removes the target-specific hack.

bfd/
	* elfnn-aarch64.c (elf_backend_rela_normal): Set to 1.
	(elfNN_aarch64_relocate_section): Remove duplicated addend
	adjustment when info->relocatable is true.

ld/testsuite/
	* ld-aarch64/emit-relocs-local-addend-bar.s: New source file.
	* ld-aarch64/emit-relocs-local-addend-foo.s: Likewise.
	* ld-aarch64/emit-relocs-local-addend.d: New testcase.
	* ld-aarch64/local-addend-r.d: Likewise.
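For context, below is a minimal C sketch of the addend fix-up that rela-normal handling implies for relocations against local section symbols: when an input section lands at a non-zero offset inside the merged output section, the emitted addend must carry that offset. The struct and function names are illustrative only, not the actual bfd code.

/* Simplified illustration (not the real BFD implementation) of the
   addend bias applied when emitting RELA entries against a local
   section symbol on a rela-normal target.  */
#include <stdint.h>

struct rela_entry
{
  uint64_t r_offset;   /* where the relocation applies in the output */
  int64_t  r_addend;   /* addend relative to the section symbol */
};

/* Hypothetical helper: bias the addend by the input section's
   placement inside its output section, e.g. 0x10 for bar.o's
   .rodata appended after foo.o's 16-byte .rodata.  */
void
adjust_local_section_addend (struct rela_entry *rel,
                             uint64_t input_section_output_offset)
{
  rel->r_addend += input_section_output_offset;
}

Without this bias, every emitted relocation keeps pointing at the start of the merged .rodata, which is exactly the wrong output shown above.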
ld-aarch64/emit-relocs-local-addend-foo.s (20 lines, 257 B, ArmAsm)
	.cpu generic+fp+simd
	.global a
	.section .rodata
	.align 3
.LC0:
	.string "foo"
	.align 3
	.type a, %object
	.size a, 8
a:
	.xword .LC0
	.text
	.align 2
	.global foo
	.type foo, %function
foo:
	adrp x0, .LC0
	add x0, x0, :lo12:.LC0
	ret
	.size foo, .-foo
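As a back-of-the-envelope check of why the second object's relocations need addend 0x10 (and not 0): foo.o's .rodata above holds "foo" plus a NUL (4 bytes), padded to 8 by .align 3, followed by the 8-byte pointer a, so bar.o's .rodata is appended at offset 0x10 of the merged output .rodata. The small C program below is not part of the testsuite; it just reproduces that arithmetic.

/* Hypothetical layout check: compute where bar.o's .rodata lands
   inside the merged output .rodata.  */
#include <stdio.h>

int
main (void)
{
  unsigned long foo_string = 4;                        /* "foo" + NUL */
  unsigned long aligned = (foo_string + 7) & ~7UL;     /* .align 3 */
  unsigned long foo_rodata_size = aligned + 8;         /* + .xword a */
  printf ("bar.o .rodata output offset: %#lx\n", foo_rodata_size); /* 0x10 */
  return 0;
}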