Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Leonid Yegoshin | 6655 | 93.29% | 6 | 13.95% |
Linus Torvalds (pre-git) | 147 | 2.06% | 4 | 9.30% |
Maneesh Soni | 67 | 0.94% | 1 | 2.33% |
Al Viro | 60 | 0.84% | 1 | 2.33% |
Maciej W. Rozycki | 36 | 0.50% | 6 | 13.95% |
Markos Chandras | 33 | 0.46% | 2 | 4.65% |
Linus Torvalds | 30 | 0.42% | 2 | 4.65% |
Ralf Baechle | 29 | 0.41% | 8 | 18.60% |
Masahiro Yamada | 17 | 0.24% | 1 | 2.33% |
Paul Burton | 16 | 0.22% | 3 | 6.98% |
Thomas Bogendoerfer | 12 | 0.17% | 1 | 2.33% |
Atsushi Nemoto | 10 | 0.14% | 2 | 4.65% |
Yangtao Li | 9 | 0.13% | 1 | 2.33% |
Matt Redfearn | 4 | 0.06% | 1 | 2.33% |
Nathan T. Lynch | 3 | 0.04% | 1 | 2.33% |
Aleksandar Markovic | 2 | 0.03% | 1 | 2.33% |
Liangliang Huang | 2 | 0.03% | 1 | 2.33% |
Jason Wessel | 2 | 0.03% | 1 | 2.33% |
Total | 7134 | 100.00% | 43 | 100.00% |
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * MIPS R2 user space instruction emulator for MIPS R6
 *
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>

/* 64-bit kernels emulate with the doubleword forms of the helper insns. */
#ifdef CONFIG_64BIT
#define ADDIU	"daddiu "
#define INS	"dins "
#define EXT	"dext "
#else
#define ADDIU	"addiu "
#define INS	"ins "
#define EXT	"ext "
#endif /* CONFIG_64BIT */

#define SB	"sb "
#define LB	"lb "
#define LL	"ll "
#define SC	"sc "

#ifdef CONFIG_DEBUG_FS
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
#endif

extern const unsigned int fpucondbit[8];

/* Upper bound on repeated emulation passes for one trap. */
#define MIPS_R2_EMUL_TOTAL_PASS	10

int mipsr2_emulation = 0;

/*
 * Kernel command-line switch "mipsr2emu": turn on R2-to-R6 emulation.
 * Returns 1 so the option is consumed.
 */
static int __init mipsr2emu_enable(char *s)
{
	mipsr2_emulation = 1;

	pr_info("MIPS R2-to-R6 Emulator Enabled!");

	return 1;
}
__setup("mipsr2emu", mipsr2emu_enable);

/**
 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot
 * for performance instead of the traditional way of using a stack trampoline
 * which is rather slow.
* @regs: Process register set * @ir: Instruction */ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) { switch (MIPSInst_OPCODE(ir)) { case addiu_op: if (MIPSInst_RT(ir)) regs->regs[MIPSInst_RT(ir)] = (s32)regs->regs[MIPSInst_RS(ir)] + (s32)MIPSInst_SIMM(ir); return 0; case daddiu_op: if (IS_ENABLED(CONFIG_32BIT)) break; if (MIPSInst_RT(ir)) regs->regs[MIPSInst_RT(ir)] = (s64)regs->regs[MIPSInst_RS(ir)] + (s64)MIPSInst_SIMM(ir); return 0; case lwc1_op: case swc1_op: case cop1_op: case cop1x_op: /* FPU instructions in delay slot */ return -SIGFPE; case spec_op: switch (MIPSInst_FUNC(ir)) { case or_op: if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)] | regs->regs[MIPSInst_RT(ir)]; return 0; case sll_op: if (MIPSInst_RS(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << MIPSInst_FD(ir)); return 0; case srl_op: if (MIPSInst_RS(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> MIPSInst_FD(ir)); return 0; case addu_op: if (MIPSInst_FD(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s32)((u32)regs->regs[MIPSInst_RS(ir)] + (u32)regs->regs[MIPSInst_RT(ir)]); return 0; case subu_op: if (MIPSInst_FD(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s32)((u32)regs->regs[MIPSInst_RS(ir)] - (u32)regs->regs[MIPSInst_RT(ir)]); return 0; case dsll_op: if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << MIPSInst_FD(ir)); return 0; case dsrl_op: if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> MIPSInst_FD(ir)); return 0; case daddu_op: if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (u64)regs->regs[MIPSInst_RS(ir)] + 
(u64)regs->regs[MIPSInst_RT(ir)]; return 0; case dsubu_op: if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) break; if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = (s64)((u64)regs->regs[MIPSInst_RS(ir)] - (u64)regs->regs[MIPSInst_RT(ir)]); return 0; } break; default: pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", ir, MIPSInst_OPCODE(ir)); } return SIGILL; } /** * movf_func - Emulate a MOVF instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int movf_func(struct pt_regs *regs, u32 ir) { u32 csr; u32 cond; csr = current->thread.fpu.fcr31; cond = fpucondbit[MIPSInst_RT(ir) >> 2]; if (((csr & cond) == 0) && MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(movs); return 0; } /** * movt_func - Emulate a MOVT instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int movt_func(struct pt_regs *regs, u32 ir) { u32 csr; u32 cond; csr = current->thread.fpu.fcr31; cond = fpucondbit[MIPSInst_RT(ir) >> 2]; if (((csr & cond) != 0) && MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(movs); return 0; } /** * jr_func - Emulate a JR instruction. * @pt_regs: Process register set * @ir: Instruction * * Returns SIGILL if JR was in delay slot, SIGEMT if we * can't compute the EPC, SIGSEGV if we can't access the * userland instruction or 0 on success. 
*/ static int jr_func(struct pt_regs *regs, u32 ir) { int err; unsigned long cepc, epc, nepc; u32 nir; if (delay_slot(regs)) return SIGILL; /* EPC after the RI/JR instruction */ nepc = regs->cp0_epc; /* Roll back to the reserved R2 JR instruction */ regs->cp0_epc -= 4; epc = regs->cp0_epc; err = __compute_return_epc(regs); if (err < 0) return SIGEMT; /* Computed EPC */ cepc = regs->cp0_epc; /* Get DS instruction */ err = __get_user(nir, (u32 __user *)nepc); if (err) return SIGSEGV; MIPS_R2BR_STATS(jrs); /* If nir == 0(NOP), then nothing else to do */ if (nir) { /* * Negative err means FPU instruction in BD-slot, * Zero err means 'BD-slot emulation done' * For anything else we go back to trampoline emulation. */ err = mipsr6_emul(regs, nir); if (err > 0) { regs->cp0_epc = nepc; err = mips_dsemul(regs, nir, epc, cepc); if (err == SIGILL) err = SIGEMT; MIPS_R2_STATS(dsemul); } } return err; } /** * movz_func - Emulate a MOVZ instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int movz_func(struct pt_regs *regs, u32 ir) { if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(movs); return 0; } /** * movn_func - Emulate a MOVZ instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int movn_func(struct pt_regs *regs, u32 ir) { if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(movs); return 0; } /** * mfhi_func - Emulate a MFHI instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. 
*/ static int mfhi_func(struct pt_regs *regs, u32 ir) { if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->hi; MIPS_R2_STATS(hilo); return 0; } /** * mthi_func - Emulate a MTHI instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int mthi_func(struct pt_regs *regs, u32 ir) { regs->hi = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(hilo); return 0; } /** * mflo_func - Emulate a MFLO instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int mflo_func(struct pt_regs *regs, u32 ir) { if (MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->lo; MIPS_R2_STATS(hilo); return 0; } /** * mtlo_func - Emulate a MTLO instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int mtlo_func(struct pt_regs *regs, u32 ir) { regs->lo = regs->regs[MIPSInst_RS(ir)]; MIPS_R2_STATS(hilo); return 0; } /** * mult_func - Emulate a MULT instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int mult_func(struct pt_regs *regs, u32 ir) { s64 res; s32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (s64)rt * (s64)rs; rs = res; regs->lo = (s64)rs; rt = res >> 32; res = (s64)rt; regs->hi = res; MIPS_R2_STATS(muls); return 0; } /** * multu_func - Emulate a MULTU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int multu_func(struct pt_regs *regs, u32 ir) { u64 res; u32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (u64)rt * (u64)rs; rt = res; regs->lo = (s64)(s32)rt; regs->hi = (s64)(s32)(res >> 32); MIPS_R2_STATS(muls); return 0; } /** * div_func - Emulate a DIV instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. 
*/ static int div_func(struct pt_regs *regs, u32 ir) { s32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; regs->lo = (s64)(rs / rt); regs->hi = (s64)(rs % rt); MIPS_R2_STATS(divs); return 0; } /** * divu_func - Emulate a DIVU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int divu_func(struct pt_regs *regs, u32 ir) { u32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; regs->lo = (s64)(rs / rt); regs->hi = (s64)(rs % rt); MIPS_R2_STATS(divs); return 0; } /** * dmult_func - Emulate a DMULT instruction * @regs: Process register set * @ir: Instruction * * Returns 0 on success or SIGILL for 32-bit kernels. */ static int dmult_func(struct pt_regs *regs, u32 ir) { s64 res; s64 rt, rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = rt * rs; regs->lo = res; __asm__ __volatile__( "dmuh %0, %1, %2\t\n" : "=r"(res) : "r"(rt), "r"(rs)); regs->hi = res; MIPS_R2_STATS(muls); return 0; } /** * dmultu_func - Emulate a DMULTU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 on success or SIGILL for 32-bit kernels. */ static int dmultu_func(struct pt_regs *regs, u32 ir) { u64 res; u64 rt, rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = rt * rs; regs->lo = res; __asm__ __volatile__( "dmuhu %0, %1, %2\t\n" : "=r"(res) : "r"(rt), "r"(rs)); regs->hi = res; MIPS_R2_STATS(muls); return 0; } /** * ddiv_func - Emulate a DDIV instruction * @regs: Process register set * @ir: Instruction * * Returns 0 on success or SIGILL for 32-bit kernels. 
*/ static int ddiv_func(struct pt_regs *regs, u32 ir) { s64 rt, rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; regs->lo = rs / rt; regs->hi = rs % rt; MIPS_R2_STATS(divs); return 0; } /** * ddivu_func - Emulate a DDIVU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 on success or SIGILL for 32-bit kernels. */ static int ddivu_func(struct pt_regs *regs, u32 ir) { u64 rt, rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; regs->lo = rs / rt; regs->hi = rs % rt; MIPS_R2_STATS(divs); return 0; } /* R6 removed instructions for the SPECIAL opcode */ static const struct r2_decoder_table spec_op_table[] = { { 0xfc1ff83f, 0x00000008, jr_func }, { 0xfc00ffff, 0x00000018, mult_func }, { 0xfc00ffff, 0x00000019, multu_func }, { 0xfc00ffff, 0x0000001c, dmult_func }, { 0xfc00ffff, 0x0000001d, dmultu_func }, { 0xffff07ff, 0x00000010, mfhi_func }, { 0xfc1fffff, 0x00000011, mthi_func }, { 0xffff07ff, 0x00000012, mflo_func }, { 0xfc1fffff, 0x00000013, mtlo_func }, { 0xfc0307ff, 0x00000001, movf_func }, { 0xfc0307ff, 0x00010001, movt_func }, { 0xfc0007ff, 0x0000000a, movz_func }, { 0xfc0007ff, 0x0000000b, movn_func }, { 0xfc00ffff, 0x0000001a, div_func }, { 0xfc00ffff, 0x0000001b, divu_func }, { 0xfc00ffff, 0x0000001e, ddiv_func }, { 0xfc00ffff, 0x0000001f, ddivu_func }, {} }; /** * madd_func - Emulate a MADD instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. 
*/ static int madd_func(struct pt_regs *regs, u32 ir) { s64 res; s32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (s64)rt * (s64)rs; rt = regs->hi; rs = regs->lo; res += ((((s64)rt) << 32) | (u32)rs); rt = res; regs->lo = (s64)rt; rs = res >> 32; regs->hi = (s64)rs; MIPS_R2_STATS(dsps); return 0; } /** * maddu_func - Emulate a MADDU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int maddu_func(struct pt_regs *regs, u32 ir) { u64 res; u32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (u64)rt * (u64)rs; rt = regs->hi; rs = regs->lo; res += ((((s64)rt) << 32) | (u32)rs); rt = res; regs->lo = (s64)(s32)rt; rs = res >> 32; regs->hi = (s64)(s32)rs; MIPS_R2_STATS(dsps); return 0; } /** * msub_func - Emulate a MSUB instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int msub_func(struct pt_regs *regs, u32 ir) { s64 res; s32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (s64)rt * (s64)rs; rt = regs->hi; rs = regs->lo; res = ((((s64)rt) << 32) | (u32)rs) - res; rt = res; regs->lo = (s64)rt; rs = res >> 32; regs->hi = (s64)rs; MIPS_R2_STATS(dsps); return 0; } /** * msubu_func - Emulate a MSUBU instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int msubu_func(struct pt_regs *regs, u32 ir) { u64 res; u32 rt, rs; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (u64)rt * (u64)rs; rt = regs->hi; rs = regs->lo; res = ((((s64)rt) << 32) | (u32)rs) - res; rt = res; regs->lo = (s64)(s32)rt; rs = res >> 32; regs->hi = (s64)(s32)rs; MIPS_R2_STATS(dsps); return 0; } /** * mul_func - Emulate a MUL instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. 
*/ static int mul_func(struct pt_regs *regs, u32 ir) { s64 res; s32 rt, rs; if (!MIPSInst_RD(ir)) return 0; rt = regs->regs[MIPSInst_RT(ir)]; rs = regs->regs[MIPSInst_RS(ir)]; res = (s64)rt * (s64)rs; rs = res; regs->regs[MIPSInst_RD(ir)] = (s64)rs; MIPS_R2_STATS(muls); return 0; } /** * clz_func - Emulate a CLZ instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int clz_func(struct pt_regs *regs, u32 ir) { u32 res; u32 rs; if (!MIPSInst_RD(ir)) return 0; rs = regs->regs[MIPSInst_RS(ir)]; __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); regs->regs[MIPSInst_RD(ir)] = res; MIPS_R2_STATS(bops); return 0; } /** * clo_func - Emulate a CLO instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int clo_func(struct pt_regs *regs, u32 ir) { u32 res; u32 rs; if (!MIPSInst_RD(ir)) return 0; rs = regs->regs[MIPSInst_RS(ir)]; __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); regs->regs[MIPSInst_RD(ir)] = res; MIPS_R2_STATS(bops); return 0; } /** * dclz_func - Emulate a DCLZ instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. */ static int dclz_func(struct pt_regs *regs, u32 ir) { u64 res; u64 rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; if (!MIPSInst_RD(ir)) return 0; rs = regs->regs[MIPSInst_RS(ir)]; __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); regs->regs[MIPSInst_RD(ir)] = res; MIPS_R2_STATS(bops); return 0; } /** * dclo_func - Emulate a DCLO instruction * @regs: Process register set * @ir: Instruction * * Returns 0 since it always succeeds. 
*/ static int dclo_func(struct pt_regs *regs, u32 ir) { u64 res; u64 rs; if (IS_ENABLED(CONFIG_32BIT)) return SIGILL; if (!MIPSInst_RD(ir)) return 0; rs = regs->regs[MIPSInst_RS(ir)]; __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); regs->regs[MIPSInst_RD(ir)] = res; MIPS_R2_STATS(bops); return 0; } /* R6 removed instructions for the SPECIAL2 opcode */ static const struct r2_decoder_table spec2_op_table[] = { { 0xfc00ffff, 0x70000000, madd_func }, { 0xfc00ffff, 0x70000001, maddu_func }, { 0xfc0007ff, 0x70000002, mul_func }, { 0xfc00ffff, 0x70000004, msub_func }, { 0xfc00ffff, 0x70000005, msubu_func }, { 0xfc0007ff, 0x70000020, clz_func }, { 0xfc0007ff, 0x70000021, clo_func }, { 0xfc0007ff, 0x70000024, dclz_func }, { 0xfc0007ff, 0x70000025, dclo_func }, { } }; static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, const struct r2_decoder_table *table) { const struct r2_decoder_table *p; int err; for (p = table; p->func; p++) { if ((inst & p->mask) == p->code) { err = (p->func)(regs, inst); return err; } } return SIGILL; } /** * mipsr2_decoder: Decode and emulate a MIPS R2 instruction * @regs: Process register set * @inst: Instruction to decode and emulate * @fcr31: Floating Point Control and Status Register Cause bits returned */ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) { int err = 0; unsigned long vaddr; u32 nir; unsigned long cpc, epc, nepc, r31, res, rs, rt; void __user *fault_addr = NULL; int pass = 0; repeat: r31 = regs->regs[31]; epc = regs->cp0_epc; err = compute_return_epc(regs); if (err < 0) { BUG(); return SIGEMT; } pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", inst, epc, pass); switch (MIPSInst_OPCODE(inst)) { case spec_op: err = mipsr2_find_op_func(regs, inst, spec_op_table); if (err < 0) { /* FPU instruction under JR */ regs->cp0_cause |= CAUSEF_BD; goto fpu_emul; } break; case spec2_op: err = mipsr2_find_op_func(regs, inst, spec2_op_table); break; case bcond_op: rt = 
MIPSInst_RT(inst); rs = MIPSInst_RS(inst); switch (rt) { case tgei_op: if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) do_trap_or_bp(regs, 0, 0, "TGEI"); MIPS_R2_STATS(traps); break; case tgeiu_op: if (regs->regs[rs] >= MIPSInst_UIMM(inst)) do_trap_or_bp(regs, 0, 0, "TGEIU"); MIPS_R2_STATS(traps); break; case tlti_op: if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) do_trap_or_bp(regs, 0, 0, "TLTI"); MIPS_R2_STATS(traps); break; case tltiu_op: if (regs->regs[rs] < MIPSInst_UIMM(inst)) do_trap_or_bp(regs, 0, 0, "TLTIU"); MIPS_R2_STATS(traps); break; case teqi_op: if (regs->regs[rs] == MIPSInst_SIMM(inst)) do_trap_or_bp(regs, 0, 0, "TEQI"); MIPS_R2_STATS(traps); break; case tnei_op: if (regs->regs[rs] != MIPSInst_SIMM(inst)) do_trap_or_bp(regs, 0, 0, "TNEI"); MIPS_R2_STATS(traps); break; case bltzl_op: case bgezl_op: case bltzall_op: case bgezall_op: if (delay_slot(regs)) { err = SIGILL; break; } regs->regs[31] = r31; regs->cp0_epc = epc; err = __compute_return_epc(regs); if (err < 0) return SIGEMT; if (err != BRANCH_LIKELY_TAKEN) break; cpc = regs->cp0_epc; nepc = epc + 4; err = __get_user(nir, (u32 __user *)nepc); if (err) { err = SIGSEGV; break; } /* * This will probably be optimized away when * CONFIG_DEBUG_FS is not enabled */ switch (rt) { case bltzl_op: MIPS_R2BR_STATS(bltzl); break; case bgezl_op: MIPS_R2BR_STATS(bgezl); break; case bltzall_op: MIPS_R2BR_STATS(bltzall); break; case bgezall_op: MIPS_R2BR_STATS(bgezall); break; } switch (MIPSInst_OPCODE(nir)) { case cop1_op: case cop1x_op: case lwc1_op: case swc1_op: regs->cp0_cause |= CAUSEF_BD; goto fpu_emul; } if (nir) { err = mipsr6_emul(regs, nir); if (err > 0) { err = mips_dsemul(regs, nir, epc, cpc); if (err == SIGILL) err = SIGEMT; MIPS_R2_STATS(dsemul); } } break; case bltzal_op: case bgezal_op: if (delay_slot(regs)) { err = SIGILL; break; } regs->regs[31] = r31; regs->cp0_epc = epc; err = __compute_return_epc(regs); if (err < 0) return SIGEMT; cpc = regs->cp0_epc; nepc = epc + 4; err = __get_user(nir, 
(u32 __user *)nepc); if (err) { err = SIGSEGV; break; } /* * This will probably be optimized away when * CONFIG_DEBUG_FS is not enabled */ switch (rt) { case bltzal_op: MIPS_R2BR_STATS(bltzal); break; case bgezal_op: MIPS_R2BR_STATS(bgezal); break; } switch (MIPSInst_OPCODE(nir)) { case cop1_op: case cop1x_op: case lwc1_op: case swc1_op: regs->cp0_cause |= CAUSEF_BD; goto fpu_emul; } if (nir) { err = mipsr6_emul(regs, nir); if (err > 0) { err = mips_dsemul(regs, nir, epc, cpc); if (err == SIGILL) err = SIGEMT; MIPS_R2_STATS(dsemul); } } break; default: regs->regs[31] = r31; regs->cp0_epc = epc; err = SIGILL; break; } break; case blezl_op: case bgtzl_op: /* * For BLEZL and BGTZL, rt field must be set to 0. If this * is not the case, this may be an encoding of a MIPS R6 * instruction, so return to CPU execution if this occurs */ if (MIPSInst_RT(inst)) { err = SIGILL; break; } fallthrough; case beql_op: case bnel_op: if (delay_slot(regs)) { err = SIGILL; break; } regs->regs[31] = r31; regs->cp0_epc = epc; err = __compute_return_epc(regs); if (err < 0) return SIGEMT; if (err != BRANCH_LIKELY_TAKEN) break; cpc = regs->cp0_epc; nepc = epc + 4; err = __get_user(nir, (u32 __user *)nepc); if (err) { err = SIGSEGV; break; } /* * This will probably be optimized away when * CONFIG_DEBUG_FS is not enabled */ switch (MIPSInst_OPCODE(inst)) { case beql_op: MIPS_R2BR_STATS(beql); break; case bnel_op: MIPS_R2BR_STATS(bnel); break; case blezl_op: MIPS_R2BR_STATS(blezl); break; case bgtzl_op: MIPS_R2BR_STATS(bgtzl); break; } switch (MIPSInst_OPCODE(nir)) { case cop1_op: case cop1x_op: case lwc1_op: case swc1_op: regs->cp0_cause |= CAUSEF_BD; goto fpu_emul; } if (nir) { err = mipsr6_emul(regs, nir); if (err > 0) { err = mips_dsemul(regs, nir, epc, cpc); if (err == SIGILL) err = SIGEMT; MIPS_R2_STATS(dsemul); } } break; case lwc1_op: case swc1_op: case cop1_op: case cop1x_op: fpu_emul: regs->regs[31] = r31; regs->cp0_epc = epc; err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, 
&fault_addr); /* * We can't allow the emulated instruction to leave any * enabled Cause bits set in $fcr31. */ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31); current->thread.fpu.fcr31 &= ~res; /* * this is a tricky issue - lose_fpu() uses LL/SC atomics * if FPU is owned and effectively cancels user level LL/SC. * So, it could be logical to don't restore FPU ownership here. * But the sequence of multiple FPU instructions is much much * more often than LL-FPU-SC and I prefer loop here until * next scheduler cycle cancels FPU ownership */ own_fpu(1); /* Restore FPU state. */ if (err) current->thread.cp0_baduaddr = (unsigned long)fault_addr; MIPS_R2_STATS(fpus); break; case lwl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN "1:" LB "%1, 0(%2)\n" INS "%0, %1, 24, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "2:" LB "%1, 0(%2)\n" INS "%0, %1, 16, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "3:" LB "%1, 0(%2)\n" INS "%0, %1, 8, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "4:" LB "%1, 0(%2)\n" INS "%0, %1, 0, 8\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ "1:" LB "%1, 0(%2)\n" INS "%0, %1, 24, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "2:" LB "%1, 0(%2)\n" INS "%0, %1, 16, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "3:" LB "%1, 0(%2)\n" INS "%0, %1, 8, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "4:" LB "%1, 0(%2)\n" INS "%0, %1, 0, 8\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9: sll %0, %0, 0\n" "10:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" 
STR(PTR_WD) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = rt; MIPS_R2_STATS(loads); break; case lwr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN "1:" LB "%1, 0(%2)\n" INS "%0, %1, 0, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "2:" LB "%1, 0(%2)\n" INS "%0, %1, 8, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "3:" LB "%1, 0(%2)\n" INS "%0, %1, 16, 8\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" "4:" LB "%1, 0(%2)\n" INS "%0, %1, 24, 8\n" " sll %0, %0, 0\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ "1:" LB "%1, 0(%2)\n" INS "%0, %1, 0, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "2:" LB "%1, 0(%2)\n" INS "%0, %1, 8, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "3:" LB "%1, 0(%2)\n" INS "%0, %1, 16, 8\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" "4:" LB "%1, 0(%2)\n" INS "%0, %1, 24, 8\n" " sll %0, %0, 0\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" "10:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = rt; MIPS_R2_STATS(loads); break; case swl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( 
" .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN EXT "%1, %0, 24, 8\n" "1:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" EXT "%1, %0, 16, 8\n" "2:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" EXT "%1, %0, 8, 8\n" "3:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" EXT "%1, %0, 0, 8\n" "4:" SB "%1, 0(%2)\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ EXT "%1, %0, 24, 8\n" "1:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 16, 8\n" "2:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 8, 8\n" "3:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 0, 8\n" "4:" SB "%1, 0(%2)\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV) : "memory"); MIPS_R2_STATS(stores); break; case swr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN EXT "%1, %0, 0, 8\n" "1:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 8, 8\n" "2:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 16, 8\n" "3:" SB "%1, 0(%2)\n" ADDIU "%2, %2, 1\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" EXT "%1, %0, 24, 8\n" "4:" SB "%1, 0(%2)\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ EXT "%1, %0, 0, 8\n" "1:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" 
ADDIU "%2, %2, -1\n" EXT "%1, %0, 8, 8\n" "2:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" EXT "%1, %0, 16, 8\n" "3:" SB "%1, 0(%2)\n" " andi %1, %2, 0x3\n" " beq $0, %1, 9f\n" ADDIU "%2, %2, -1\n" EXT "%1, %0, 24, 8\n" "4:" SB "%1, 0(%2)\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV) : "memory"); MIPS_R2_STATS(stores); break; case ldl_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN "1: lb %1, 0(%2)\n" " dinsu %0, %1, 56, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "2: lb %1, 0(%2)\n" " dinsu %0, %1, 48, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "3: lb %1, 0(%2)\n" " dinsu %0, %1, 40, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "4: lb %1, 0(%2)\n" " dinsu %0, %1, 32, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "5: lb %1, 0(%2)\n" " dins %0, %1, 24, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "6: lb %1, 0(%2)\n" " dins %0, %1, 16, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "7: lb %1, 0(%2)\n" " dins %0, %1, 8, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "0: lb %1, 0(%2)\n" " dins %0, %1, 0, 8\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ "1: lb %1, 0(%2)\n" " dinsu %0, %1, 56, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "2: lb %1, 0(%2)\n" " 
dinsu %0, %1, 48, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "3: lb %1, 0(%2)\n" " dinsu %0, %1, 40, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "4: lb %1, 0(%2)\n" " dinsu %0, %1, 32, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "5: lb %1, 0(%2)\n" " dins %0, %1, 24, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "6: lb %1, 0(%2)\n" " dins %0, %1, 16, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "7: lb %1, 0(%2)\n" " dins %0, %1, 8, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "0: lb %1, 0(%2)\n" " dins %0, %1, 0, 8\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" STR(PTR_WD) " 5b,8b\n" STR(PTR_WD) " 6b,8b\n" STR(PTR_WD) " 7b,8b\n" STR(PTR_WD) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = rt; MIPS_R2_STATS(loads); break; case ldr_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN "1: lb %1, 0(%2)\n" " dins %0, %1, 0, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "2: lb %1, 0(%2)\n" " dins %0, %1, 8, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "3: lb %1, 0(%2)\n" " dins %0, %1, 16, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "4: lb %1, 0(%2)\n" " dins %0, %1, 24, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "5: lb %1, 
0(%2)\n" " dinsu %0, %1, 32, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "6: lb %1, 0(%2)\n" " dinsu %0, %1, 40, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "7: lb %1, 0(%2)\n" " dinsu %0, %1, 48, 8\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" "0: lb %1, 0(%2)\n" " dinsu %0, %1, 56, 8\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ "1: lb %1, 0(%2)\n" " dins %0, %1, 0, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "2: lb %1, 0(%2)\n" " dins %0, %1, 8, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "3: lb %1, 0(%2)\n" " dins %0, %1, 16, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "4: lb %1, 0(%2)\n" " dins %0, %1, 24, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "5: lb %1, 0(%2)\n" " dinsu %0, %1, 32, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "6: lb %1, 0(%2)\n" " dinsu %0, %1, 40, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "7: lb %1, 0(%2)\n" " dinsu %0, %1, 48, 8\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" "0: lb %1, 0(%2)\n" " dinsu %0, %1, 56, 8\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" STR(PTR_WD) " 5b,8b\n" STR(PTR_WD) " 6b,8b\n" STR(PTR_WD) " 7b,8b\n" STR(PTR_WD) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = rt; MIPS_R2_STATS(loads); break; case sdl_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; 
break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN " dextu %1, %0, 56, 8\n" "1: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 48, 8\n" "2: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 40, 8\n" "3: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 32, 8\n" "4: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 24, 8\n" "5: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 16, 8\n" "6: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 8, 8\n" "7: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 0, 8\n" "0: sb %1, 0(%2)\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ " dextu %1, %0, 56, 8\n" "1: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 48, 8\n" "2: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 40, 8\n" "3: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 32, 8\n" "4: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 24, 8\n" "5: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 16, 8\n" "6: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 8, 8\n" "7: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 0, 8\n" "0: sb %1, 0(%2)\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" 
STR(PTR_WD) " 5b,8b\n" STR(PTR_WD) " 6b,8b\n" STR(PTR_WD) " 7b,8b\n" STR(PTR_WD) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV) : "memory"); MIPS_R2_STATS(stores); break; case sdr_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; } __asm__ __volatile__( " .set push\n" " .set reorder\n" #ifdef CONFIG_CPU_LITTLE_ENDIAN " dext %1, %0, 0, 8\n" "1: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 8, 8\n" "2: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 16, 8\n" "3: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dext %1, %0, 24, 8\n" "4: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 32, 8\n" "5: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 40, 8\n" "6: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 48, 8\n" "7: sb %1, 0(%2)\n" " daddiu %2, %2, 1\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " dextu %1, %0, 56, 8\n" "0: sb %1, 0(%2)\n" #else /* !CONFIG_CPU_LITTLE_ENDIAN */ " dext %1, %0, 0, 8\n" "1: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 8, 8\n" "2: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 16, 8\n" "3: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dext %1, %0, 24, 8\n" "4: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 32, 8\n" "5: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 40, 8\n" "6: 
sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 48, 8\n" "7: sb %1, 0(%2)\n" " andi %1, %2, 0x7\n" " beq $0, %1, 9f\n" " daddiu %2, %2, -1\n" " dextu %1, %0, 56, 8\n" "0: sb %1, 0(%2)\n" #endif /* CONFIG_CPU_LITTLE_ENDIAN */ "9:\n" " .insn\n" " .section .fixup,\"ax\"\n" "8: li %3,%4\n" " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,8b\n" STR(PTR_WD) " 2b,8b\n" STR(PTR_WD) " 3b,8b\n" STR(PTR_WD) " 4b,8b\n" STR(PTR_WD) " 5b,8b\n" STR(PTR_WD) " 6b,8b\n" STR(PTR_WD) " 7b,8b\n" STR(PTR_WD) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), "+&r"(vaddr), "+&r"(err) : "i"(SIGSEGV) : "memory"); MIPS_R2_STATS(stores); break; case ll_op: vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (vaddr & 0x3) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!cpu_has_rw_llb) { /* * An LL/SC block can't be safely emulated without * a Config5/LLB availability. So it's probably time to * kill our process before things get any worse. This is * because Config5/LLB allows us to use ERETNC so that * the LLAddr/LLB bit is not cleared when we return from * an exception. MIPS R2 LL/SC instructions trap with an * RI exception so once we emulate them here, we return * back to userland with ERETNC. That preserves the * LLAddr/LLB so the subsequent SC instruction will * succeed preserving the atomic semantics of the LL/SC * block. Without that, there is no safe way to emulate * an LL/SC block in MIPSR2 userland. 
*/ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); err = SIGKILL; break; } __asm__ __volatile__( "1:\n" "ll %0, 0(%2)\n" "2:\n" ".insn\n" ".section .fixup,\"ax\"\n" "3:\n" "li %1, %3\n" "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) : "memory"); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = res; MIPS_R2_STATS(llsc); break; case sc_op: vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (vaddr & 0x3) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!access_ok((void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!cpu_has_rw_llb) { /* * An LL/SC block can't be safely emulated without * a Config5/LLB availability. So it's probably time to * kill our process before things get any worse. This is * because Config5/LLB allows us to use ERETNC so that * the LLAddr/LLB bit is not cleared when we return from * an exception. MIPS R2 LL/SC instructions trap with an * RI exception so once we emulate them here, we return * back to userland with ERETNC. That preserves the * LLAddr/LLB so the subsequent SC instruction will * succeed preserving the atomic semantics of the LL/SC * block. Without that, there is no safe way to emulate * an LL/SC block in MIPSR2 userland. 
*/ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); err = SIGKILL; break; } res = regs->regs[MIPSInst_RT(inst)]; __asm__ __volatile__( "1:\n" "sc %0, 0(%2)\n" "2:\n" ".insn\n" ".section .fixup,\"ax\"\n" "3:\n" "li %1, %3\n" "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = res; MIPS_R2_STATS(llsc); break; case lld_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (vaddr & 0x7) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!cpu_has_rw_llb) { /* * An LL/SC block can't be safely emulated without * a Config5/LLB availability. So it's probably time to * kill our process before things get any worse. This is * because Config5/LLB allows us to use ERETNC so that * the LLAddr/LLB bit is not cleared when we return from * an exception. MIPS R2 LL/SC instructions trap with an * RI exception so once we emulate them here, we return * back to userland with ERETNC. That preserves the * LLAddr/LLB so the subsequent SC instruction will * succeed preserving the atomic semantics of the LL/SC * block. Without that, there is no safe way to emulate * an LL/SC block in MIPSR2 userland. 
*/ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); err = SIGKILL; break; } __asm__ __volatile__( "1:\n" "lld %0, 0(%2)\n" "2:\n" ".insn\n" ".section .fixup,\"ax\"\n" "3:\n" "li %1, %3\n" "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) : "memory"); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = res; MIPS_R2_STATS(llsc); break; case scd_op: if (IS_ENABLED(CONFIG_32BIT)) { err = SIGILL; break; } vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); if (vaddr & 0x7) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!access_ok((void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; } if (!cpu_has_rw_llb) { /* * An LL/SC block can't be safely emulated without * a Config5/LLB availability. So it's probably time to * kill our process before things get any worse. This is * because Config5/LLB allows us to use ERETNC so that * the LLAddr/LLB bit is not cleared when we return from * an exception. MIPS R2 LL/SC instructions trap with an * RI exception so once we emulate them here, we return * back to userland with ERETNC. That preserves the * LLAddr/LLB so the subsequent SC instruction will * succeed preserving the atomic semantics of the LL/SC * block. Without that, there is no safe way to emulate * an LL/SC block in MIPSR2 userland. */ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); err = SIGKILL; break; } res = regs->regs[MIPSInst_RT(inst)]; __asm__ __volatile__( "1:\n" "scd %0, 0(%2)\n" "2:\n" ".insn\n" ".section .fixup,\"ax\"\n" "3:\n" "li %1, %3\n" "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" STR(PTR_WD) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); if (MIPSInst_RT(inst) && !err) regs->regs[MIPSInst_RT(inst)] = res; MIPS_R2_STATS(llsc); break; case pref_op: /* skip it */ break; default: err = SIGILL; } /* * Let's not return to userland just yet. 
It's costly and
 * it's likely we have more R2 instructions to emulate
 */
	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
		regs->cp0_cause &= ~CAUSEF_BD;
		/* Fetch the next instruction at the updated EPC. */
		err = get_user(inst, (u32 __user *)regs->cp0_epc);
		if (!err)
			goto repeat;

		/* get_user() fault: report it as a segmentation fault. */
		if (err < 0)
			err = SIGSEGV;
	}

	/*
	 * On any real failure (SIGEMT excluded) roll $ra and EPC back to
	 * their values at entry so the faulting state is consistent.
	 */
	if (err && (err != SIGEMT)) {
		regs->regs[31] = r31;
		regs->cp0_epc = epc;
	}

	/* Likely a MIPS R6 compatible instruction */
	if (pass && (err == SIGILL))
		err = 0;

	return err;
}

#ifdef CONFIG_DEBUG_FS

/*
 * Dump the MIPS R2 emulation statistics to a debugfs seq_file.
 *
 * Two-column counters ("Total" and "BDslot") come from mipsr2emustats and
 * mipsr2bdemustats respectively; the branch counters from mipsr2bremustats
 * are printed in a single column.  Counters are read with __this_cpu_read(),
 * so only the counters of the CPU servicing this read are reported.
 *
 * Always returns 0 (seq_file success).
 */
static int mipsr2_emul_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
	seq_printf(s, "movs\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.movs),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
	seq_printf(s, "hilo\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
	seq_printf(s, "muls\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.muls),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
	seq_printf(s, "divs\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.divs),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
	seq_printf(s, "dsps\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
	seq_printf(s, "bops\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.bops),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
	seq_printf(s, "traps\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.traps),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
	seq_printf(s, "fpus\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
	seq_printf(s, "loads\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.loads),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
	seq_printf(s, "stores\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.stores),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
	seq_printf(s, "llsc\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
	seq_printf(s, "dsemul\t\t%ld\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
	/*
	 * Branch-emulation counters: single column only.
	 * NOTE(review): bltzall/bgezall counters are cleared by
	 * mipsr2_clear_show() below but are never printed here — looks like
	 * an intentional or historical omission; confirm before relying on
	 * the debugfs output covering all branch kinds.
	 */
	seq_printf(s, "jr\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
	seq_printf(s, "bltzl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
	seq_printf(s, "bgezl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
	seq_printf(s, "bltzll\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
	seq_printf(s, "bgezll\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
	seq_printf(s, "bltzal\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
	seq_printf(s, "bgezal\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
	seq_printf(s, "beql\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
	seq_printf(s, "bnel\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
	seq_printf(s, "blezl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
	seq_printf(s, "bgtzl\t\t%ld\n",
		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));

	return 0;
}

/*
 * Dump the statistics (via mipsr2_emul_show()) and then reset every
 * counter to zero with __this_cpu_write().  Like the show path, only the
 * counters of the CPU servicing this read are cleared.
 *
 * Always returns 0.
 */
static int mipsr2_clear_show(struct seq_file *s, void *unused)
{
	/* Emit the current values before wiping them. */
	mipsr2_emul_show(s, unused);

	__this_cpu_write((mipsr2emustats).movs, 0);
	__this_cpu_write((mipsr2bdemustats).movs, 0);
	__this_cpu_write((mipsr2emustats).hilo, 0);
	__this_cpu_write((mipsr2bdemustats).hilo, 0);
	__this_cpu_write((mipsr2emustats).muls, 0);
	__this_cpu_write((mipsr2bdemustats).muls, 0);
	__this_cpu_write((mipsr2emustats).divs, 0);
	__this_cpu_write((mipsr2bdemustats).divs, 0);
	__this_cpu_write((mipsr2emustats).dsps, 0);
	__this_cpu_write((mipsr2bdemustats).dsps, 0);
	__this_cpu_write((mipsr2emustats).bops, 0);
	__this_cpu_write((mipsr2bdemustats).bops, 0);
	__this_cpu_write((mipsr2emustats).traps, 0);
	__this_cpu_write((mipsr2bdemustats).traps, 0);
	__this_cpu_write((mipsr2emustats).fpus, 0);
	__this_cpu_write((mipsr2bdemustats).fpus, 0);
	__this_cpu_write((mipsr2emustats).loads, 0);
	__this_cpu_write((mipsr2bdemustats).loads, 0);
	__this_cpu_write((mipsr2emustats).stores, 0);
	__this_cpu_write((mipsr2bdemustats).stores, 0);
	__this_cpu_write((mipsr2emustats).llsc, 0);
	__this_cpu_write((mipsr2bdemustats).llsc, 0);
	__this_cpu_write((mipsr2emustats).dsemul, 0);
	__this_cpu_write((mipsr2bdemustats).dsemul, 0);
	__this_cpu_write((mipsr2bremustats).jrs, 0);
	__this_cpu_write((mipsr2bremustats).bltzl, 0);
	__this_cpu_write((mipsr2bremustats).bgezl, 0);
	__this_cpu_write((mipsr2bremustats).bltzll, 0);
	__this_cpu_write((mipsr2bremustats).bgezll, 0);
	/* bltzall/bgezall are cleared here although the show path above
	 * does not print them. */
	__this_cpu_write((mipsr2bremustats).bltzall, 0);
	__this_cpu_write((mipsr2bremustats).bgezall, 0);
	__this_cpu_write((mipsr2bremustats).bltzal, 0);
	__this_cpu_write((mipsr2bremustats).bgezal, 0);
	__this_cpu_write((mipsr2bremustats).beql, 0);
	__this_cpu_write((mipsr2bremustats).bnel, 0);
	__this_cpu_write((mipsr2bremustats).blezl, 0);
	__this_cpu_write((mipsr2bremustats).bgtzl, 0);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);

/*
 * Create the two read-only debugfs entries under mips_debugfs_dir:
 *  - r2_emul_stats:        dumps the counters (mipsr2_emul_fops);
 *  - r2_emul_stats_clear:  dumps AND zeroes them — note that merely
 *                          reading this file resets the statistics.
 */
static int __init mipsr2_init_debugfs(void)
{
	debugfs_create_file("r2_emul_stats", S_IRUGO,
			    mips_debugfs_dir, NULL,
			    &mipsr2_emul_fops);
	debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
			    mips_debugfs_dir, NULL,
			    &mipsr2_clear_fops);
	return 0;
}

device_initcall(mipsr2_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit (https://github.com/cregit/cregit)
Version 2.0-RC1