Linux release 4.14, arch/alpha/kernel/module.c
/* Kernel module help for Alpha.
Copyright (C) 2002 Richard Henderson.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
/* Allocate the GOT at the end of the core sections. */
struct got_entry {
struct got_entry *next;
Elf64_Sxword r_addend;
int got_offset;
};
static inline void
process_reloc_for_got(Elf64_Rela *rela,
struct got_entry *chains, Elf64_Xword *poffset)
{
unsigned long r_sym = ELF64_R_SYM (rela->r_info);
unsigned long r_type = ELF64_R_TYPE (rela->r_info);
Elf64_Sxword r_addend = rela->r_addend;
struct got_entry *g;
if (r_type != R_ALPHA_LITERAL)
return;
for (g = chains + r_sym; g ; g = g->next)
if (g->r_addend == r_addend) {
if (g->got_offset == 0) {
g->got_offset = *poffset;
*poffset += 8;
}
goto found_entry;
}
g = kmalloc (sizeof (*g), GFP_KERNEL);
g->next = chains[r_sym].next;
g->r_addend = r_addend;
g->got_offset = *poffset;
*poffset += 8;
chains[r_sym].next = g;
found_entry:
/* Trick: most of the ELF64_R_TYPE field is unused. There are
42 valid relocation types, and a 32-bit field. Co-opt the
bits above 256 to store the got offset for this reloc. */
rela->r_info |= g->got_offset << 8;
}
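As a side note, the packing described in the comment above can be exercised in isolation. The sketch below is plain user-space C, not kernel code: it stores a GOT offset in the bits of ELF64_R_TYPE above the low 8, then recovers it with the same shift-and-mask that apply_relocate_add() uses. The symbol index, relocation type constant, and offset values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define ELF64_R_SYM(i)   ((i) >> 32)           /* standard ELF64 r_info split */
#define ELF64_R_TYPE(i)  ((i) & 0xffffffff)
#define R_ALPHA_LITERAL  4                     /* value from <elf.h> */

int main(void)
{
        /* Hypothetical LITERAL reloc against symbol 7, GOT offset 0x30. */
        uint64_t r_info = ((uint64_t)7 << 32) | R_ALPHA_LITERAL;
        uint64_t got_offset = 0x30;

        r_info |= got_offset << 8;              /* as in process_reloc_for_got() */

        uint64_t r_type = ELF64_R_TYPE(r_info); /* as in apply_relocate_add() */
        uint64_t r_got_offset = r_type >> 8;
        r_type &= 0xff;

        printf("sym=%llu type=%llu got_offset=%#llx\n",
               (unsigned long long)ELF64_R_SYM(r_info),
               (unsigned long long)r_type,
               (unsigned long long)r_got_offset);
        /* prints: sym=7 type=4 got_offset=0x30 */
        return 0;
}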
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Richard Henderson | 169 | 96.02% | 1 | 50.00% |
Chaskiel Grundman | 7 | 3.98% | 1 | 50.00% |
Total | 176 | 100.00% | 2 | 100.00% |
int
module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
char *secstrings, struct module *me)
{
struct got_entry *chains;
Elf64_Rela *rela;
Elf64_Shdr *esechdrs, *symtab, *s, *got;
unsigned long nsyms, nrela, i;
esechdrs = sechdrs + hdr->e_shnum;
symtab = got = NULL;
/* Find out how large the symbol table is. Allocate one got_entry
head per symbol. Normally this will be enough, but not always.
We'll chain different offsets for the symbol down each head. */
for (s = sechdrs; s < esechdrs; ++s)
if (s->sh_type == SHT_SYMTAB)
symtab = s;
else if (!strcmp(".got", secstrings + s->sh_name)) {
got = s;
me->arch.gotsecindex = s - sechdrs;
}
if (!symtab) {
printk(KERN_ERR "module %s: no symbol table\n", me->name);
return -ENOEXEC;
}
if (!got) {
printk(KERN_ERR "module %s: no got section\n", me->name);
return -ENOEXEC;
}
nsyms = symtab->sh_size / sizeof(Elf64_Sym);
chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
if (!chains) {
printk(KERN_ERR
"module %s: no memory for symbol chain buffer\n",
me->name);
return -ENOMEM;
}
got->sh_size = 0;
got->sh_addralign = 8;
got->sh_type = SHT_NOBITS;
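/* SHT_NOBITS means the section takes no space in the .ko file itself;
   the module loader only reserves sh_size bytes for it in the module
   image, which apply_relocate_add() then fills via LITERAL relocations. */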
/* Examine all LITERAL relocations to find out what GOT entries
are required. This sizes the GOT section as well. */
for (s = sechdrs; s < esechdrs; ++s)
if (s->sh_type == SHT_RELA) {
nrela = s->sh_size / sizeof(Elf64_Rela);
rela = (void *)hdr + s->sh_offset;
for (i = 0; i < nrela; ++i)
process_reloc_for_got(rela+i, chains,
&got->sh_size);
}
/* Free the memory we allocated. */
for (i = 0; i < nsyms; ++i) {
struct got_entry *g, *n;
for (g = chains[i].next; g ; g = n) {
n = g->next;
kfree(g);
}
}
kfree(chains);
return 0;
}
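To make the sizing pass concrete: each distinct (symbol, addend) pair that appears in an R_ALPHA_LITERAL relocation costs one 8-byte GOT slot, so got->sh_size ends up as 8 times the number of distinct pairs. The user-space sketch below illustrates just that counting; the lit_reloc struct and size_got() helper are invented for the example and are not part of the kernel code.

#include <stdio.h>
#include <stdint.h>

struct lit_reloc { unsigned long sym; long addend; };   /* illustrative only */

static uint64_t size_got(const struct lit_reloc *r, int n)
{
        uint64_t size = 0;

        for (int i = 0; i < n; i++) {
                int seen = 0;

                /* Has this (symbol, addend) pair been counted already? */
                for (int j = 0; j < i; j++)
                        if (r[j].sym == r[i].sym && r[j].addend == r[i].addend)
                                seen = 1;
                if (!seen)
                        size += 8;              /* one 8-byte GOT entry */
        }
        return size;
}

int main(void)
{
        /* Three LITERAL relocs, but only two distinct (sym, addend) pairs. */
        struct lit_reloc r[] = { { 5, 0 }, { 5, 0 }, { 7, 16 } };

        printf("GOT size: %llu bytes\n",
               (unsigned long long)size_got(r, 3));     /* prints 16 */
        return 0;
}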
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Richard Henderson | 343 | 93.72% | 2 | 50.00% |
Jim Meyering | 21 | 5.74% | 1 | 25.00% |
Yoann Padioleau | 2 | 0.55% | 1 | 25.00% |
Total | 366 | 100.00% | 4 | 100.00% |
int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
Elf64_Sym *symtab, *sym;
void *base, *location;
unsigned long got, gp;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
/* The small sections were sorted to the end of the segment.
The following should definitely cover them. */
gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
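/* A 16-bit gp-relative displacement is signed, so placing gp 32KB below
   the end of the image lets [gp - 0x8000, gp + 0x7fff] reach the final
   64KB, where the small sections (including the GOT) live. */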
got = sechdrs[me->arch.gotsecindex].sh_addr;
for (i = 0; i < n; i++) {
unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
unsigned long r_got_offset = r_type >> 8;
unsigned long value, hi, lo;
r_type &= 0xff;
/* This is where to make the change. */
location = base + rela[i].r_offset;
/* This is the symbol it is referring to. Note that all
unresolved symbols have been resolved. */
sym = symtab + r_sym;
value = sym->st_value + rela[i].r_addend;
switch (r_type) {
case R_ALPHA_NONE:
break;
case R_ALPHA_REFLONG:
*(u32 *)location = value;
break;
case R_ALPHA_REFQUAD:
/* BUG() can produce misaligned relocations. */
((u32 *)location)[0] = value;
((u32 *)location)[1] = value >> 32;
break;
case R_ALPHA_GPREL32:
value -= gp;
if ((int)value != value)
goto reloc_overflow;
*(u32 *)location = value;
break;
case R_ALPHA_LITERAL:
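/* Rewrite the 16-bit displacement of the literal load (typically an ldq)
   to be gp-relative to this reloc's GOT slot, and store the resolved
   symbol value into that slot. */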
hi = got + r_got_offset;
lo = hi - gp;
if ((short)lo != lo)
goto reloc_overflow;
*(u16 *)location = lo;
*(u64 *)hi = value;
break;
case R_ALPHA_LITUSE:
break;
case R_ALPHA_GPDISP:
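/* Split the displacement from this instruction to gp into a sign-extended
   low 16 bits for the lda and a high part for the ldah at this location;
   r_addend gives the offset of the lda from the ldah. */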
value = gp - (u64)location;
lo = (short)value;
hi = (int)(value - lo);
if (hi + lo != value)
goto reloc_overflow;
*(u16 *)location = hi >> 16;
*(u16 *)(location + rela[i].r_addend) = lo;
break;
case R_ALPHA_BRSGP:
/* BRSGP is only allowed to bind to local symbols.
If the section is undef, this means that the
value was resolved from somewhere else. */
if (sym->st_shndx == SHN_UNDEF)
goto reloc_overflow;
if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
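/* (The standard gp-load prologue is two instructions, i.e. 8 bytes,
   which a caller sharing the same gp can skip.) */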
/* FALLTHRU */
case R_ALPHA_BRADDR:
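/* Branch displacements count 4-byte instruction words from the
   instruction after the branch and must fit in a signed 21-bit field. */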
value -= (u64)location + 4;
if (value & 3)
goto reloc_overflow;
value = (long)value >> 2;
if (value + (1<<21) >= 1<<22)
goto reloc_overflow;
value &= 0x1fffff;
value |= *(u32 *)location & ~0x1fffff;
*(u32 *)location = value;
break;
case R_ALPHA_HINT:
break;
case R_ALPHA_SREL32:
value -= (u64)location;
if ((int)value != value)
goto reloc_overflow;
*(u32 *)location = value;
break;
case R_ALPHA_SREL64:
value -= (u64)location;
*(u64 *)location = value;
break;
case R_ALPHA_GPRELHIGH:
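/* Adding 0x8000 before the shift rounds to the nearest 64KB, so that this
   high half plus the sign-extended GPRELLOW half reconstructs the full
   gp-relative offset. */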
value = (long)(value - gp + 0x8000) >> 16;
if ((short) value != value)
goto reloc_overflow;
*(u16 *)location = value;
break;
case R_ALPHA_GPRELLOW:
value -= gp;
*(u16 *)location = value;
break;
case R_ALPHA_GPREL16:
value -= gp;
if ((short) value != value)
goto reloc_overflow;
*(u16 *)location = value;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
me->name, r_type);
return -ENOEXEC;
reloc_overflow:
if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
printk(KERN_ERR
"module %s: Relocation (type %lu) overflow vs section %d\n",
me->name, r_type, sym->st_shndx);
else
printk(KERN_ERR
"module %s: Relocation (type %lu) overflow vs %s\n",
me->name, r_type, strtab + sym->st_name);
return -ENOEXEC;
}
}
return 0;
}
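For reference, the GPDISP arithmetic above can be checked in isolation. The snippet below is user-space C with made-up addresses: it splits the gp displacement the same way the R_ALPHA_GPDISP case does and confirms that an ldah/lda pair patched with hi and lo (ldah adds its displacement shifted left 16, lda adds a sign-extended 16-bit displacement) recomputes gp exactly.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pc = 0xfffffc0000100000ULL;   /* hypothetical ldah address */
        uint64_t gp = 0xfffffc0000118000ULL;   /* hypothetical gp           */
        uint64_t value = gp - pc;              /* 0x18000 */

        int64_t lo = (int16_t)value;           /* sign-extended low 16 bits */
        int64_t hi = (int32_t)(value - lo);    /* remainder; low 16 bits 0  */

        /* What the patched pair computes at run time: the ldah holds
           hi >> 16 and adds it shifted left 16 (i.e. adds hi), the lda
           adds the sign-extended lo. */
        uint64_t recomputed = pc + (uint64_t)(hi + lo);

        printf("lo=%ld hi=%#lx match=%d\n",
               (long)lo, (unsigned long)hi, recomputed == gp);
        /* prints: lo=-32768 hi=0x20000 match=1 */
        return 0;
}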
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Richard Henderson | 729 | 93.46% | 3 | 37.50% |
Ivan Kokshaysky | 27 | 3.46% | 2 | 25.00% |
Michael Cree | 13 | 1.67% | 1 | 12.50% |
Rusty Russell | 6 | 0.77% | 1 | 12.50% |
Andrew Morton | 5 | 0.64% | 1 | 12.50% |
Total | 780 | 100.00% | 8 | 100.00% |
Overall Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Richard Henderson | 1296 | 93.98% | 4 | 33.33% |
Ivan Kokshaysky | 27 | 1.96% | 2 | 16.67% |
Jim Meyering | 21 | 1.52% | 1 | 8.33% |
Michael Cree | 13 | 0.94% | 1 | 8.33% |
Chaskiel Grundman | 9 | 0.65% | 1 | 8.33% |
Rusty Russell | 6 | 0.44% | 1 | 8.33% |
Andrew Morton | 5 | 0.36% | 1 | 8.33% |
Yoann Padioleau | 2 | 0.15% | 1 | 8.33% |
Total | 1379 | 100.00% | 12 | 100.00% |