Release 4.7 drivers/lguest/segments.c
/*P:600
* The x86 architecture has segments, which involve a table of descriptors
* which can be used to do funky things with virtual address interpretation.
* We originally used to use segments so the Guest couldn't alter the
* Guest<->Host Switcher, and then we had to trim Guest segments, and restore
* for userspace per-thread segments, but trim again on userspace->kernel
* transitions... This nightmarish creation was contained within this file,
* where we knew not to tread without heavy armament and a change of underwear.
*
* In these modern times, the segment handling code consists of simple sanity
* checks, and the worst you'll experience reading this code is butterfly-rash
* from frolicking through its parklike serenity.
:*/
#include "lg.h"
/*H:600
* Segments & The Global Descriptor Table
*
* (That title sounds like a bad Nerdcore group. Not to suggest that there are
* any good Nerdcore groups, but in high school a friend of mine had a band
* called Joe Fish and the Chips, so there are definitely worse band names).
*
* To refresh: the GDT is a table of 8-byte values describing segments. Once
* set up, these segments can be loaded into one of the 6 "segment registers".
*
* GDT entries are passed around as "struct desc_struct"s, which like IDT
* entries are split into two 32-bit members, "a" and "b". One day, someone
* will clean that up, and be declared a Hero. (No pressure, I'm just saying).
*
* Anyway, the GDT entry contains a base (the start address of the segment), a
* limit (the size of the segment - 1), and some flags. Sounds simple, and it
* would be, except those zany Intel engineers decided that it was too boring
* to put the base at one end, the limit at the other, and the flags in
* between. They decided to shotgun the bits at random throughout the 8 bytes,
* like so:
*
* 0               16                     40       48  52  56   63
* [ limit part 1 ][     base part 1     ][ flags ][li][fl][base ]
*                                                  mit ags part 2
*                                                  part 2
*
* As a result, this file contains a certain amount of magic numeracy. Let's
* begin.
*/
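To make the bit diagram concrete, here is a small, self-contained sketch (invented for this commentary, not part of the kernel file) showing how a descriptor's base and limit are reassembled from the two 32-bit halves this file calls "a" (low word) and "b" (high word). The helper names are made up for illustration.

#include <stdint.h>

/*
 * Illustrative helpers: reassemble the scattered base and limit fields
 * from the low ("a") and high ("b") 32-bit halves of a GDT descriptor.
 */
static inline uint32_t gdt_desc_base(uint32_t a, uint32_t b)
{
	return (a >> 16)		/* base[15:0]  lives in a[31:16] */
	     | ((b & 0xFFu) << 16)	/* base[23:16] lives in b[7:0]   */
	     | (b & 0xFF000000u);	/* base[31:24] lives in b[31:24] */
}

static inline uint32_t gdt_desc_limit(uint32_t a, uint32_t b)
{
	return (a & 0xFFFFu)		/* limit[15:0]  lives in a[15:0]  */
	     | (b & 0x000F0000u);	/* limit[19:16] lives in b[19:16] */
}

The remaining bits of "b" hold the flags: type/s/dpl/p sit in bits 8-15 and avl/l/d/g in bits 20-23, which is exactly where the DPL and "accessed" tweaks later in this file land.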
/*
* There are several entries we don't let the Guest set. The TSS entry is the
* "Task State Segment" which controls all kinds of delicate things. The
* LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
* Guest can't be trusted to deal with double faults, so the double-fault
* TSS entry stays ours as well.
*/
static bool ignored_gdt(unsigned int num)
{
return (num == GDT_ENTRY_TSS
|| num == GDT_ENTRY_LGUEST_CS
|| num == GDT_ENTRY_LGUEST_DS
|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell        | 28 | 96.55% | 1 | 50.00% |
| matias zabaljauregui | 1  | 3.45%  | 1 | 50.00% |
| Total | 29 | 100.00% | 2 | 100.00% |
/*H:630
* Once the Guest gave us new GDT entries, we fix them up a little. We
* don't care if they're invalid: the worst that can happen is a General
* Protection Fault in the Switcher when it restores a Guest segment register
* which tries to use that entry. Then we kill the Guest for causing such a
* mess: the message will be "unhandled trap 256".
*/
static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
{
unsigned int i;
for (i = start; i < end; i++) {
/*
* We never copy these ones to the real GDT, so we don't care what
* they say.
*/
if (ignored_gdt(i))
continue;
/*
* Segment descriptors contain a privilege level: the Guest is
* sometimes careless and leaves this as 0, even though it's
* running at privilege level 1. If so, we fix it here.
*/
if (cpu->arch.gdt[i].dpl == 0)
cpu->arch.gdt[i].dpl |= GUEST_PL;
/*
* Each descriptor has an "accessed" bit. If we don't set it
* now, the CPU will try to set it when the Guest first loads
* that entry into a segment register. But the GDT isn't
* writable by the Guest, so bad things can happen.
*/
cpu->arch.gdt[i].type |= 0x1;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 73 | 82.95% | 2 | 40.00% |
| jes sorensen              | 6  | 6.82%  | 1 | 20.00% |
| glauber de oliveira costa | 5  | 5.68%  | 1 | 20.00% |
| jacek galowicz            | 4  | 4.55%  | 1 | 20.00% |
| Total | 88 | 100.00% | 5 | 100.00% |
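To tie the fixup back to the bit diagram above: the DPL lives at descriptor bits 45-46 (bits 13-14 of the high word "b") and the "accessed" bit is descriptor bit 40 (bit 8 of "b"). The same two tweaks, written against the raw high word instead of the kernel's bitfields, would look roughly like this hypothetical sketch:

/*
 * Hypothetical raw-word equivalent of fixup_gdt_table()'s two tweaks,
 * shown only to connect them to the bit layout above.
 */
static void fixup_raw_descriptor_hi(unsigned int *b)
{
	/* DPL is bits 13-14 of "b".  If the Guest left it at 0, bump it
	 * to the Guest's privilege level (GUEST_PL is 1 under lguest). */
	if (((*b >> 13) & 0x3) == 0)
		*b |= 1u << 13;

	/* The "accessed" bit is bit 8 of "b".  Pre-set it so the CPU
	 * never tries to write it back into the read-only GDT. */
	*b |= 1u << 8;
}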
/*H:610
* Like the IDT, we never simply use the GDT the Guest gives us. We keep
* a GDT for each CPU, and copy across the Guest's entries each time we want to
* run the Guest on that CPU.
*
* This routine is called at boot or modprobe time for each CPU to set up the
* constant GDT entries: the ones which are the same no matter what Guest we're
* running.
*/
void setup_default_gdt_entries(struct lguest_ro_state *state)
{
struct desc_struct *gdt = state->guest_gdt;
unsigned long tss = (unsigned long)&state->guest_tss;
/* The Switcher segments are full 0-4G segments, privilege level 0 */
gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
/*
* The TSS segment refers to the TSS entry for this particular CPU.
*/
gdt[GDT_ENTRY_TSS].a = 0;
gdt[GDT_ENTRY_TSS].b = 0;
gdt[GDT_ENTRY_TSS].limit0 = 0x67; /* 32-bit TSS is 0x68 bytes, so limit = size - 1 */
gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jacek galowicz | 81 | 53.29% | 1 | 33.33% |
| rusty russell  | 71 | 46.71% | 2 | 66.67% |
| Total | 152 | 100.00% | 3 | 100.00% |
/*
* This routine sets up the initial Guest GDT for booting. All entries start
* as 0 (unusable).
*/
void setup_guest_gdt(struct lg_cpu *cpu)
{
/*
* Start with full 0-4G segments...except the Guest is allowed to use
* them, so set the privilege level appropriately in the flags.
*/
cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 43 | 72.88% | 2 | 40.00% |
| jes sorensen              | 8  | 13.56% | 1 | 20.00% |
| glauber de oliveira costa | 6  | 10.17% | 1 | 20.00% |
| jacek galowicz            | 2  | 3.39%  | 1 | 20.00% |
| Total | 59 | 100.00% | 5 | 100.00% |
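For reference, the FULL_SEGMENT/FULL_EXEC_SEGMENT values used above describe flat 0-4G segments. Here is an illustrative raw encoding of such a descriptor, spelled out by hand rather than quoting the kernel's macro expansion, so treat it as a sketch of the idea rather than the exact constants:

/*
 * Flat 0-4G data segment, in the raw two-word form from the diagram:
 *   low word:  limit[15:0] = 0xFFFF, base[15:0] = 0
 *   high word: base[23:16] = 0, type = 0x3 (data, r/w, accessed), s = 1,
 *              dpl = 0, p = 1, limit[19:16] = 0xF, d = 1, g = 1 (4K pages),
 *              base[31:24] = 0
 */
#define FLAT_DATA_DESC_LO	0x0000FFFFu
#define FLAT_DATA_DESC_HI	0x00CF9300u	/* exec variant: type 0xB -> 0x00CF9B00 */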
/*H:650
* An optimization of copy_gdt(), for just the three "thread-local storage"
* entries.
*/
void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
{
unsigned int i;
for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
gdt[i] = cpu->arch.gdt[i];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 42 | 89.36% | 1 | 33.33% |
| glauber de oliveira costa | 3  | 6.38%  | 1 | 33.33% |
| jes sorensen              | 2  | 4.26%  | 1 | 33.33% |
| Total | 47 | 100.00% | 3 | 100.00% |
/*H:640
* When the Guest is run on a different CPU, or the GDT entries have changed,
* copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
* GDT.
*/
void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
{
unsigned int i;
/*
* The default entries from setup_default_gdt_entries() are not
* replaced. See ignored_gdt() above.
*/
for (i = 0; i < GDT_ENTRIES; i++)
if (!ignored_gdt(i))
gdt[i] = cpu->arch.gdt[i];
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 51 | 91.07% | 2 | 50.00% |
| glauber de oliveira costa | 3  | 5.36%  | 1 | 25.00% |
| jes sorensen              | 2  | 3.57%  | 1 | 25.00% |
| Total | 56 | 100.00% | 4 | 100.00% |
/*H:620
* This is where the Guest asks us to load a new GDT entry
* (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
*/
void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
{
/*
* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT.
*/
if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
kill_guest(cpu, "too many gdt entries %i", num);
return;
}
/* Set it up, then fix it. */
cpu->arch.gdt[num].a = lo;
cpu->arch.gdt[num].b = hi;
fixup_gdt_table(cpu, num, num+1);
/*
* Mark that the GDT changed so the core knows it has to copy it again,
* even if the Guest is run on the same CPU.
*/
cpu->changed |= CHANGED_GDT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 75 | 83.33% | 4 | 44.44% |
| glauber de oliveira costa | 8  | 8.89%  | 3 | 33.33% |
| jes sorensen              | 6  | 6.67%  | 1 | 11.11% |
| roel kluin                | 1  | 1.11%  | 1 | 11.11% |
| Total | 90 | 100.00% | 9 | 100.00% |
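For context, the "lo" and "hi" halves arrive from the Guest side of the LHCALL_LOAD_GDT_ENTRY hypercall. The Guest's paravirt GDT hook lives in arch/x86/lguest/boot.c; the sketch below is paraphrased from memory rather than quoted, so treat the exact names and signatures as assumptions.

/* Guest-side sketch (paraphrased, not verbatim). */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	/* Update the Guest's own copy of the GDT entry first... */
	native_write_gdt_entry(dt, entrynum, desc, type);
	/* ...then tell the Host about the two 32-bit halves; they arrive
	 * in load_guest_gdt_entry() above as "lo" and "hi". */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum, dt[entrynum].a, dt[entrynum].b, 0);
}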
/*
* This is the fast-track version for just changing the three TLS entries.
* Remember that this happens on every context switch, so it's worth
* optimizing. But wouldn't it be neater to have a single hypercall to cover
* both cases?
*/
void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
{
struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
__lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
/* Note that just the TLS entries have changed. */
cpu->changed |= CHANGED_GDT_TLS;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 56 | 87.50% | 3 | 42.86% |
| glauber de oliveira costa | 6  | 9.38%  | 3 | 42.86% |
| jes sorensen              | 2  | 3.12%  | 1 | 14.29% |
| Total | 64 | 100.00% | 7 | 100.00% |
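The two "changed" flags set above pay off when the core copies state into the per-CPU Switcher pages before running the Guest: a full GDT change triggers copy_gdt(), while a TLS-only change takes the cheaper copy_gdt_tls() path. A rough sketch of that consumer (the helper name is hypothetical; the real logic lives in lguest's x86 core code):

/* Sketch of the consumer side: copy only what actually changed. */
static void sync_guest_gdt(struct lg_cpu *cpu, struct lguest_ro_state *state)
{
	if (cpu->changed & CHANGED_GDT)
		copy_gdt(cpu, state->guest_gdt);	/* everything */
	else if (cpu->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(cpu, state->guest_gdt);	/* just the TLS slots */

	/* The core clears cpu->changed once all per-CPU state is in place. */
}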
/*H:660
* With this, we have finished the Host.
*
* Five of the seven parts of our task are complete. You have made it through
* the Bit of Despair (I think that's somewhere in the page table code,
* myself).
*
* Next, we examine "make Switcher". It's short, but intense.
*/
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| rusty russell             | 453 | 75.63% | 6 | 46.15% |
| jacek galowicz            | 87  | 14.52% | 1 | 7.69%  |
| glauber de oliveira costa | 31  | 5.18%  | 3 | 23.08% |
| jes sorensen              | 26  | 4.34%  | 1 | 7.69%  |
| matias zabaljauregui      | 1   | 0.17%  | 1 | 7.69%  |
| roel kluin                | 1   | 0.17%  | 1 | 7.69%  |
| Total | 599 | 100.00% | 13 | 100.00% |